From c412af3de0a3bd423e8a688373684554f9dca765 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20B=C3=BChler?=
Date: Fri, 28 Apr 2023 19:11:12 +0200
Subject: [PATCH] run sort internally, refactor table output into separate
 method

---
 src/ldaptool/_main.py | 55 ++++++++++++++++++++++++++-----------------
 1 file changed, 34 insertions(+), 21 deletions(-)

diff --git a/src/ldaptool/_main.py b/src/ldaptool/_main.py
index 94b070c..a380231 100644
--- a/src/ldaptool/_main.py
+++ b/src/ldaptool/_main.py
@@ -98,22 +98,49 @@ class _Context:
 
         try:
             if self.arguments.table:
                 add_filter(["csvlook"])
-            if self.arguments.sort:
-                add_filter(["csvsort", "--blanks"])
-            self._run_search(search_iterator, stream=output)
+            if self.arguments.csv:
+                self._table_output(search_iterator, stream=output)
+            else:
+                self._ldif_or_json_output(search_iterator, stream=output)
         finally:
             if procs:
                 output.close()
                 for proc in reversed(procs):
                     proc.wait()
 
-    def _run_search(self, search_iterator: typing.Iterable[Result], *, stream: typing.IO[str]) -> None:
+    def _to_table_lines(self, search_iterator: typing.Iterable[Result]) -> typing.Iterable[tuple[str, ...]]:
+        decoder = decode.Decoder(arguments=self.arguments)
+        # "human" (json) dicts contain data by lower case key:
+        column_keys = [col.lower() for col in self.arguments.columns]
+        try:
+            for dn, entry in search_iterator:
+                if dn is None:
+                    continue
+                # normal entry
+                assert not isinstance(entry, list)
+                obj = decoder.human(dn=dn, entry=decoder.read(dn=dn, entry=entry))
+                yield tuple(obj.get(key, "") for key in column_keys)
+        except SizeLimitExceeded as e:
+            raise SystemExit(f"Error: {e}")
+
+    def _table_output(self, search_iterator: typing.Iterable[Result], *, stream: typing.IO[str]) -> None:
+        line_iterator = self._to_table_lines(search_iterator)
+        if self.arguments.sort:
+            line_iterator = sorted(line_iterator)
+
+        csv_out = csv.writer(stream, lineterminator="\n")
+        csv_out.writerow(self.arguments.columns)
+
+        for line in line_iterator:
+            csv_out.writerow(line)
+
+    def _ldif_or_json_output(self, search_iterator: typing.Iterable[Result], *, stream: typing.IO[str]) -> None:
         decoder = decode.Decoder(arguments=self.arguments)
         num_responses = 0
         num_entries = 0
 
-        ldif_output = not (self.arguments.csv or self.arguments.json or self.arguments.human)
+        ldif_output = not (self.arguments.json or self.arguments.human)
 
         if ldif_output:
             print("# extended LDIF")
@@ -128,22 +155,11 @@ class _Context:
             print("#")
             print()
 
-        if self.arguments.csv:
-            csv_out = csv.DictWriter(
-                stream,
-                fieldnames=self.arguments.columns,
-                lineterminator="\n",
-                extrasaction="ignore",
-            )
-            csv_out.writeheader()
-            # dicts contain data by lower case key
-            csv_out.fieldnames = [col.lower() for col in self.arguments.columns]
-
         try:
             for dn, entry in search_iterator:
                 num_responses += 1
                 if dn is None:
-                    if not self.arguments.csv:
+                    if ldif_output:
                         print("# search reference")
                         for ref in entry:
                             assert isinstance(ref, str)
@@ -154,10 +170,7 @@ class _Context:
                 assert not isinstance(entry, list)
                 num_entries += 1
                 obj = decoder.read(dn=dn, entry=entry)
-                if self.arguments.csv:
-                    csv_out.writerow(decoder.human(dn=dn, entry=obj))
-                else:
-                    decoder.emit(dn=dn, entry=obj)
+                decoder.emit(dn=dn, entry=obj)
         except SizeLimitExceeded as e:
             raise SystemExit(f"Error: {e}")
 