Coverage for lib/datou/datou_exec.py: 62%
256 statements
« prev ^ index » next coverage.py v7.9.1, created at 2026-02-10 01:10 +0100
1import os.path
# TODO VR: where should the param_json live => at instantiation time? We want distinct names per instance.
# TODO VR: the input lists also need to live somewhere => in the DB, but why there?
8from lib.brick_layers.lib_abstract_generic_layer import LayerPrompt
10from lib.datou.lib_datou_step_template import datou_safia_step_image_to_text, \
11 datou_safia_step_speech_to_text, \
12 datou_safia_step_load_existing_graph, \
13 datou_safia_step_request_gpt, \
14 datou_safia_step_send_mail, \
15 datou_safia_step_git_action, \
16 datou_safia_step_doc_to_json, \
17 datou_safia_step_import_json, \
18 datou_safia_step_get_embedding, \
19 datou_safia_step_search_doc_NN, \
20 datou_safia_step_result_to_json, \
21 datou_safia_step_append_to_doc_content, \
22 datou_safia_step_load_url_content_text, \
23 datou_safia_step_map_reduce, \
24 datou_safia_step_load_tab, \
25 datou_safia_step_anon, \
26 datou_safia_step_format, \
27 datou_safia_step_classify_doc, \
28 datou_safia_step_client, \
29 datou_safia_step_TEMPLATE
# VR TODO: should we also load map_datou_function from the DB? Or maybe we do not
# care -- to think over; in any case it would duplicate this table!
# Dispatch table: step name (as stored in the DB) -> step implementation.
map_datou_function = {
    "image_to_text": datou_safia_step_image_to_text,
    "speech_to_text": datou_safia_step_speech_to_text,
    "load_existing_graph": datou_safia_step_load_existing_graph,
    "request_gpt": datou_safia_step_request_gpt,
    "send_mail": datou_safia_step_send_mail,
    "git_action": datou_safia_step_git_action,
    "get_embedding": datou_safia_step_get_embedding,  # TODO VR 16-6-23: how to feed different inputs => needs a datou with specific input/output
    "search_doc_NN": datou_safia_step_search_doc_NN,
    "doc_to_json": datou_safia_step_doc_to_json,
    "result_to_json": datou_safia_step_result_to_json,
    "import_json": datou_safia_step_import_json,
    "append_to_doc": datou_safia_step_append_to_doc_content,
    "load_url": datou_safia_step_load_url_content_text,
    "map_reduce": datou_safia_step_map_reduce,
    "load_tab": datou_safia_step_load_tab,
    "anon": datou_safia_step_anon,
    "format": datou_safia_step_format,
    "classify_doc": datou_safia_step_classify_doc,
    "step_client": datou_safia_step_client,  # the DB stores "step_client"
}
# Steps that call an external API, mapped to the layer *type* they require.
# The concrete layer instance is resolved at execution time via map_type_layer_inst.
map_datou_external_layer = {
    "request_gpt": "nlp_chat",    # LayerPrompt
    "image_to_text": "nlp_chat",  # LayerPrompt
}
# TODO VR 16-6-23: to be moved!
# TODO rename list_default_datou_on_input => per the jpg flow this actually lives
# elsewhere (image_to_text hard-overwrites the preprompt).
# Default step pipeline per input kind (file extension or query type).
list_datous = {
    "jpg": ["image_to_text", "request_gpt", "send_mail"],  # "result_to_json", "import_json"
    "amr": ["speech_to_text", "request_gpt", "send_mail"],  # "result_to_json", "import_json"
    "pdf": ["doc_to_json", "import_json"],
    "query": ["get_embedding", "search_doc_NN", "request_gpt"],
    "query_voice": ["voice", "get_embedding", "search_doc_NN", "request_gpt"],  # TODO VR 3-12-23: probably does not work
    "json": ["import_json"],
}
# TODO: hard-code this default prompt somewhere (or in a dedicated default datou), but not at step level: "Merci d'estimer l'impact carbone des produits se trouvant ici, ainsi que leur nombre de calories et le prix si possible, meme de manière approximative, ou incomplet ou que tu fasses un raisonnement ouvert pour estimer tu mettras n/c quand tu ne peux pas estimer et de la renvoyer sous forme de tableau avec pour colonnes : PRODUIT, CO2, CALORIES, PRIX :\n"
# TODO 4-12-23: is this really useful? What context is this tied to? In any case
# it should be renamed to "context_something".
# Per-step whitelist of keys to copy from complete_param_json into the step's param_json.
map_datou_param_json = {
    "image_to_text": ["google_token"],
    "speech_to_text": ["openai_token"],
    "load_existing_graph": [],
    "request_gpt": ["openai_token", "gpt_model"],
    "send_mail": ["info_auth", "hash_id_treatment", "privacy", "from_mail_to_send"],
    "git_action": ["defaut_github_issue", "github_token", "privacy"],
    "get_embedding": ["openai_token"],  # TODO VR 16-6-23: how to feed different inputs => needs a datou with specific input/output
    "search_doc_NN": ["match_page_sections"],
    "doc_to_json": [],
    "result_to_json": ["user"],
    "import_json": ["table_documents", "openai_token"],
    "append_to_doc": ["openai_token", "project_id", "user_id"],
    "load_url": [],
    "map_reduce": ["map_type_layer_inst", "openai_token", "user_id", "config_project", "gpt_model", "openai_organization"],  # todo 18-12-23: add to SQL; in fact we should probably list everything, no?
    "load_tab": ["col_to_input"],  # 14-1-23: we can probably drop this
    "anon": ["json_anon_info_temp"],
    "format": ["config_project"],
    "classify_doc": [],
    "step_client": [],
}
104# manage_graph
105# TODO VR 15-6-23 REFACTO datou_safia_step_load_existing_graph
108from lib.lib_util import count_and_display_elapsed_time
def datou_exec(datou_linear_list_steps : list = [], input : dict = {},
               complete_param_json : dict = {},
               verbose : bool = False,
               with_audit : bool = False,
               privacy : bool = False,
               map_type_layer_inst : dict = {},
               list_param_json_steps : list = [],
               id_step_incomplete_args = None,
               hash_id_treatment_rerun = None) -> tuple :
    """Execute a linear list of datou steps sequentially over a shared input dict.

    Each step name is resolved through the module-level ``map_datou_function``
    dispatch table; the step's output is deep-merged back into ``input`` before
    the next step runs. Audit data is recorded per step when ``with_audit`` is
    True and persisted via ``upsert_audit_info``.

    Parameters:
        datou_linear_list_steps: ordered step names (keys of map_datou_function).
        input: the shared I/O dict; mutated in place throughout.
        complete_param_json: app-level configuration; per-step keys are picked
            from it according to map_datou_param_json.
        verbose: extra logging.
        with_audit: record per-step input/output snapshots and persist them.
        privacy: propagated into complete_param_json["privacy"].
        map_type_layer_inst: layer-type -> layer-instance map for steps that
            call external APIs (see map_datou_external_layer).
        list_param_json_steps: per-step execution-context param dicts; padded
            with empty dicts when shorter than the step list.
        id_step_incomplete_args: when set, resume execution at this step index.
        hash_id_treatment_rerun: reuse this treatment id instead of minting one.

    Returns:
        (input, datou_audit_data) -- the mutated input dict and the audit dict.

    NOTE(review): the mutable default arguments ([], {}) are shared across
    calls and this function mutates input/complete_param_json in place --
    callers appear to rely on that; confirm before changing.
    """
    from auth.lib_auth import create_id
    # Reuse the caller-supplied treatment id on a re-run, otherwise mint a new one.
    if hash_id_treatment_rerun != None and hash_id_treatment_rerun != "":
        hash_id_treatment = hash_id_treatment_rerun
    else:
        hash_id_treatment = create_id()

    import time
    begin_time = time.time()

    import logging
    logger = logging.getLogger()

    from auth.lib_cost import CostEstimation as CE
    ce = CE()  # accumulates step costs; exposed at the end via input["cost"]

    if verbose:
        print("About to treat " + str(datou_linear_list_steps) + " with " + hash_id_treatment + " treatment id")

    # The treatment id is propagated both through the step input and the shared config.
    input["hash_id_treatment"] = hash_id_treatment

    complete_param_json["hash_id_treatment"] = hash_id_treatment
    complete_param_json["privacy"] = privacy

    # In order to inject the configuration here :
    if map_type_layer_inst == {} and "map_type_layer_inst" in complete_param_json:
        map_type_layer_inst = complete_param_json["map_type_layer_inst"]

    datou_audit_data = {"config": {"complete_param_json": complete_param_json,
                                   "datou_linear_list_steps": datou_linear_list_steps},
                        "io_exec": {}}

    output = input

    from server.safia import lpgss_singleton, lib_right_singleton

    # TODO VR: is this really what we want? Shouldn't we rather fetch the lss
    # from the execution context (we would then have the user_id)?
    from lib.lib_safia_system import LibSafiaSystem
    lss = LibSafiaSystem(lib_user_data_internal=lpgss_singleton, lib_right=lib_right_singleton)
    user_id = complete_param_json["user_id"] if "user_id" in complete_param_json else 0
    lss.user_id = user_id  # NOTE: this is a hack

    # VR42: to move before the loop, quite clearly
    try:
        # Record the initial audit row; ids are pulled from
        # input["datou_exec_info"] when present, else left as None.
        lss.lib_user_data_internal.upsert_audit_info(hash_id_treatment,
                                                     input_values={"datou_audit_data": datou_audit_data,
                                                                   # Commented on 19/5/25 due to the fact that it seems useless and harmful and at the end the datou is correct
                                                                   "mtr_datou_id": input["datou_exec_info"][
                                                                       "mtr_datou_id"] if "datou_exec_info" in input and "mtr_datou_id" in
                                                                                          input["datou_exec_info"] else None,
                                                                   # "mtr_datou_id": param_json["datou_int_id"] if "datou_int_id" in param_json else input["datou_exec_info"]["mtr_datou_id"] if "datou_exec_info" in input and "mtr_datou_id" in input["datou_exec_info"] else None,
                                                                   "id_file": input["datou_exec_info"][
                                                                       "id_file"] if "datou_exec_info" in input and "id_file" in
                                                                                     input["datou_exec_info"] else None,
                                                                   "user_id": input["datou_exec_info"][
                                                                       "user_id"] if "datou_exec_info" in input and "user_id" in
                                                                                     input["datou_exec_info"] else None,
                                                                   "project_id": input["datou_exec_info"][
                                                                       "project_id"] if "datou_exec_info" in input and "project_id" in
                                                                                        input[
                                                                                            "datou_exec_info"] else None},
                                                     verbose=verbose)
    except Exception as e:
        # Best-effort: a failed audit insert must not abort the treatment itself.
        print(" BUG PB VOILA AUDIT LINK TO SOMETHING ELSE NOT COOL")
        print(str(e))
        import traceback
        print(traceback.format_exc())

    # VR TODO: tests have been broken for a week because of this GRRRR 1-2-24
    if list_param_json_steps == []:
        list_param_json_steps += [{}] * len(datou_linear_list_steps) # TODO VR PB 1-2-24: this creates n references to the SAME dict; the per-key merge below avoids mutating it (a copy would too)
    if len(list_param_json_steps) < len(datou_linear_list_steps):
        print("ERROR TREATED AS WARNING : list_param_json_steps is too short, we add empty dict")
        logger.info("ERROR TREATED AS WARNING : list_param_json_steps is too short, we add empty dict")
        list_param_json_steps += [{}] * (len(datou_linear_list_steps) - len(list_param_json_steps))
    list_data = list(zip(datou_linear_list_steps, list_param_json_steps))
    id_step = 0
    if id_step_incomplete_args != None :
        print("We only complete the execution")
        id_step = id_step_incomplete_args
    id_try = 0
    param_json = {} # for no step datou (just allocating the chits)
    # A while loop (not a for) because retry steps may rewind id_step.
    while id_step < len(datou_linear_list_steps) :
        datou_step, param_json_context_exec = list_data[id_step]
        # for datou_step, param_json_context_exec, id_step in zip(datou_linear_list_steps, list_param_json_steps, range(len(datou_linear_list_steps))):
        if datou_step in map_datou_function and datou_step in map_datou_param_json:
            # App-level params for this step, filtered to the whitelisted keys.
            list_pj_from_app_conf = map_datou_param_json[datou_step] if datou_step in map_datou_param_json else []
            param_json_context_app = {key : complete_param_json[key] for key in list_pj_from_app_conf if key in complete_param_json}
            logger.info("Before " + str(datou_step) + " with " + hash_id_treatment)

            # Execution-context params override app-level ones, but only when
            # non-empty (None / "" values do not clobber app config).
            param_json = param_json_context_app
#            param_json.update(param_json_context_exec)
            for k in param_json_context_exec:
                if param_json_context_exec[k] != None and param_json_context_exec[k] != "":
                    param_json[k] = param_json_context_exec[k]

            # TODO VR 13-12-23: in fact this can only live in the execution context.
            # "assoc" renames input keys (outio -> inio) before the step runs.
            if "assoc" in param_json:
                for outio in param_json["assoc"]:
                    inio = param_json["assoc"][outio]
                    if outio in input:
                        input[inio] = input[outio]
                        del input[outio]

            print(" datou_step : " + str(id_step) + " : " + str(datou_step) + " hit " + str(hash_id_treatment[:10]) + " param_json : " + str(param_json)[:100] + " input.keys() : " + str(input.keys()))
            logger.info(" datou_step : " + str(id_step) + " : " + str(datou_step) + " param_json : " + str(param_json)[:100] + " input.keys() : " + str(input.keys()))

            from lib.util.lib_formal_conf import formal_conf_exec

            # VR 10-11-24: I believe this was needed for something but I no longer remember what -- yes, for send_mail; I think the param_jsons are used to define the object, so it is the param_jsons that should be instantiated, not the input!
#            input_configured_at_exec = formal_conf_exec(param_json, input)
#            input.update(input_configured_at_exec)
            # VR 10-11-24: I fear this breaks something but I no longer remember what
#            input = formal_conf_exec(input, input)
            # VR 15-12-24: we could configure the project part of the param_json either at execution or when loading the datous
            project_id = input["project_id"] if "project_id" in input else complete_param_json["project_id"] if "project_id" in complete_param_json else 0

            if "input_for_lfc_from_pj" in param_json:
                input_configured_at_exec = formal_conf_exec(param_json["input_for_lfc_from_pj"], input, hit = hash_id_treatment)
                input.update(input_configured_at_exec)

            # Resolve the external-API layer instance for this step, if any.
            layer_type_external_api = map_datou_external_layer[datou_step] if datou_step in map_datou_external_layer else None
            inst_layer_api = map_type_layer_inst[layer_type_external_api] if layer_type_external_api in map_type_layer_inst else None

            if with_audit:
                datou_audit_data["io_exec"][id_step] = {"datou_step" : datou_step,
                                                        "input" : input.copy(),
                                                        "param_json" : param_json}
            input["with_audit"] = with_audit

            # Defensive check for a self-referencing input["text"] structure.
            if "text" in input and input["text"] != None and "text" in input["text"] and type(input["text"]) != str and input["text"] == input["text"]["text"]:
                print(" l 222 CIRCULAR REFRENCE FROM HERE or worst, id_step : " + str(id_step) + " BIG PROBLEM BUT HOW COME, IS THIS LING TO input = formal_conf_exec(input, input) no if we arrive here without, so to test")

            try:
                # Dispatch to the step implementation.
                output = map_datou_function[datou_step](input, param_json, ce, verbose, layer_api = inst_layer_api)
            except Exception as e:
                # A failing step does not abort the pipeline: the exception and
                # stack trace are merged into the (previous) output instead.
                import traceback
                stack_trace = traceback.format_exc()
                print(stack_trace)
                print(str(e))
                output.update({"exception":str(e), "stack_trace" : stack_trace})

            from lib.lib_util import filter_key_deep, parse_key_and_size
            try:
                # Size/sanity pass over the output; filter_key_deep drops keys
                # that would make it non-serializable (circular refs).
                parse_key_and_size(data=output)
                output_wo_circular = filter_key_deep(data=output)
                parse_key_and_size(data=output_wo_circular)
            except Exception as e:
                print(str(e))
                print("Bug in reduce output")

            if with_audit:
                print(" About to compute live_audit ")
                try :
                    from lib.util.lib_audit_prediag import parse_error_case
                    live_audit = parse_error_case(output,# map_data_func_error_case_input={},
                                                  during_exec=True,
                                                  id_step=id_step)
                    if not "live_audit" in output:
                        output["live_audit"] = live_audit
                    else:
                        output["live_audit"].update(live_audit)
                    print(" live_audit computed ! ")
                except Exception as e:
                    print(" Error during live audit ")
                    print(str(e))

                datou_audit_data["io_exec"][id_step]["output"] = output.copy()
                begin_time, message = count_and_display_elapsed_time(begin_time, "one_step", verbose=False)
                datou_audit_data["io_exec"][id_step]["time"] = message

            # "assoc_output" renames output keys after the step; a "/" in the
            # source key means a deep path resolved via load_sub_json.
            if "assoc_output" in param_json:
                for outio in param_json["assoc_output"]:
                    inio = param_json["assoc_output"][outio]
                    if "/" in outio:
                        from lib.manaudit.lib_datou_audit import load_sub_json
                        val = load_sub_json(output, outio)
                        output[inio] = val
                    elif outio in output:
                        output[inio] = output[outio]
                        del output[outio]

            if "text" in output and output["text"] == input:
                print("Circular reference detected l257 Circularity comes from map_reduce on line 1078 when {'text' : reduced_result :::: We remove from output !" )
                del output["text"]
            from pydantic.utils import deep_update

            # Deep-merge the step output into the shared input for the next step.
            input = deep_update(input, output)
#            input.update(output) # because the second one takes priority # TODO VR make tree-shaped execution datous (or not!)
            if "text" in input and input["text"] != None and "text" in input["text"] and type(input["text"]) != str and input["text"] == input["text"]["text"]:
                print(" l 255 CIRCULAR REFRENCE FROM HERE or worst, id_step : " + str(id_step) + " BIG PROBLEM BUT HOW COME, IS THIS LING TO input = formal_conf_exec(input, input) no if we arrive here without, so to test")
            if verbose :
                print("After " + str(datou_step) + " with " + hash_id_treatment)
                logger.info("After " + str(datou_step) + " with " + hash_id_treatment)

            # Retry protocol: a step may ask to rewind by setting output["retry"],
            # optionally with "retry_step_id" (target) and "max_nb_try" (cap).
            trigger_retry = output["retry"] if "retry" in output else False
            if trigger_retry:
                # use id_try to tidy the audit if auditing
                max_nb_try = output["max_nb_try"] if "max_nb_try" in output else 3
                id_step_to_set = output["retry_step_id"] if "retry_step_id" in output else id_step
                if id_try < max_nb_try and id_step > id_step_to_set:
                    if with_audit:
                        # Archive the audit entries of the steps being replayed
                        # under io_exec["try"][id_try] before they get overwritten.
                        print("recording with_audit with try")
                        if "try" not in datou_audit_data["io_exec"]:
                            datou_audit_data["io_exec"]["try"] = {}
                        datou_audit_data["io_exec"]["try"][id_try] = {}
                        for id_step_to_sort in range(id_step_to_set, id_step):
                            datou_audit_data["io_exec"]["try"][id_try][id_step_to_sort] = datou_audit_data["io_exec"][id_step_to_sort].copy()
                    id_try = id_try + 1
                    id_step = id_step_to_set
                else:
                    print("WARNING COULD BE ERROR : max nb try attained ! ")
                    id_step = id_step + 1
            else :
                print("TODO when refacto as loop")
                id_step = id_step + 1
        else :
            # Unknown step name: skip it and move on.
            print(str(datou_step) + " is not implemented, choose in " + str(map_datou_function.keys()))
            logger.info(str(datou_step) + " is not implemented !")
            id_step = id_step + 1

    print(ce.cost)
    file_audit_treatment = ""
    if with_audit:
        from server.safia import lpgss_singleton, lib_right_singleton

        # TODO VR: same question as above -- shouldn't we rather get the lss from
        # the execution context (which would carry the user_id)?
        from lib.lib_safia_system import LibSafiaSystem
        lss = LibSafiaSystem(lib_user_data_internal=lpgss_singleton, lib_right=lib_right_singleton)
        user_id = complete_param_json["user_id"] if "user_id" in complete_param_json else 0
        lss.user_id = user_id  # NOTE: this is a hack

        # VR 14-1-25: this behavior existed already at the end of datou_step
        from lib.lib_util import filter_key_deep, parse_key_and_size
        try :
            parse_key_and_size(data=datou_audit_data)
            datou_audit_data = filter_key_deep(data=datou_audit_data)
            parse_key_and_size(data=datou_audit_data)
        except Exception as e:
            print(str(e))
            print("Bug in reduce output")

        # Optional (currently disabled) persistence of the audit as a safia document.
        with_audit_in_safia_doc = False
        if with_audit_in_safia_doc:
            from lib.lib_util import create_prefix_file_name_from_json_prefix
            prefix_file = create_prefix_file_name_from_json_prefix(input["prefix_file"]) if "prefix_file" in input else ""

            file_audit_treatment = "audit_" + prefix_file + "_" + hash_id_treatment + ".json"
            print("file_audit_treatment : " + str(file_audit_treatment))
            input["file_audit_treatment"] = file_audit_treatment
            with (open(file_audit_treatment, "w")) as f:
                import json
                json.dump(datou_audit_data, f, default=str)

            save_document_data = {"document_id": os.path.basename(file_audit_treatment), "document_content": json.dumps(datou_audit_data, default=str)}
            # TODO VR: it would be better to abstract this away (in the layer) so we do not have this openai_token in process_json or anywhere else!

            project_id = input["project_id"] if "project_id" in input else complete_param_json["project_id"] if "project_id" in complete_param_json else 0
            openai_token = complete_param_json["openai_token"] if "openai_token" in complete_param_json else ""
            try:
                total_nb_token, used_model = lss.save_document(save_document_data, project_id, openai_token=openai_token, verbose = verbose)
                input["doc_audit"] = str(os.path.basename(file_audit_treatment)) + ":" + str(project_id)
            except Exception as e:
                # Non-fatal: the audit is saved in postgres anyway.
                print(str(e))
                print("Failed to save audit in document, de toute facon on le sauve dans pg et voila")

        # input_datou["batch_exec_info"] = {"project_id": safia_project_id, "safia_doc_id": safia_document_id}
        print(" About to save audit_info datou_exec_info in input !")
        if "datou_exec_info" in input:
            import datetime
            info_date = {"input_file_available_at":
                             input["input_file_available_at"] if "input_file_available_at" in input else None,
                         "exec_at" : datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                         "filename_at" : input["filename_at"] if "filename_at" in input else None,
                         "output_hit" : input["output_hit"] if "output_hit" in input else None}
            info_back = {"in_folder" : input["in_folder"] if "in_folder" in input else None,
                         "work_folder_images" : input["work_folder_images"] if "work_folder_images" in input else None,
                         "out_folder" : input["out_folder"] if "out_folder" in input else None,
                         "work_folder" : os.getcwd(),
                         "out_file" : input["out_file"] if "out_file" in input else None}

            print(" Calling save_audit_info !")
            # Column descriptors for the audit upsert ("json" columns may be
            # merged into, "text" columns are plain values).
            list_json_update = [
                {"variable": "project_id", "type": "text"},
                {"variable": "safia_doc_id", "type": "text"},
                {"variable": "audit_info", "type": "json"},
                {"variable": "mtr_datou_id", "type": "text"},
                {"variable": "user_id", "type": "text"},
                {"variable": "launched_at", "type": "text"},
                {"variable": "id_file", "type": "text"},
                {"variable": "nb_page", "type": "text"},
                {"variable": "info_date", "type": "json"},
                {"variable": "info_back", "type": "json"},
                {"variable": "info_customer", "type": "json"}
                # VR 20-11-25: I no longer know how to handle this new variable output_hit
                # ,{"variable": "output_hit", "type": "text"}
            ]

            from lib.lib_util import change_nan_to_string, remove_circular_refs
            datou_audit_data = remove_circular_refs(datou_audit_data)
            datou_audit_data = change_nan_to_string(datou_audit_data)

            input_values = {
                "project_id": input["datou_exec_info"]["project_id"] if "project_id" in input["datou_exec_info"] else None,
                "safia_doc_id" : input["datou_exec_info"]["safia_doc_id"] if "safia_doc_id" in input["datou_exec_info"] else None,
                "audit_info" : datou_audit_data,
                "hash_id_treatment" : hash_id_treatment,
                "mtr_datou_id" : input["datou_exec_info"]["mtr_datou_id"] if "mtr_datou_id" in input["datou_exec_info"] else None,
                "user_id" : user_id,
                "launched_at" : input["datou_exec_info"]["launched_at"] if "launched_at" in input["datou_exec_info"] else None,
                "id_file" : input["id_file"] if "id_file" in input else None,
                "nb_page" : input["nb_page"] if "nb_page" in input else None,
                "info_date" : info_date,
                "info_back" : info_back,
                "info_customer" : {"feedback" : input["input_file"] if "input_file" in input else input["file"] if "file" in input else input["id_file"] if "id_file" in input else "default"}
            }

            print( str(hash_id_treatment) + " input_values mtr_datou_id : " + str(input_values["mtr_datou_id"]))

            # NOTE: param_json here is whatever the LAST executed step left
            # behind (or {} when no step ran) -- see the pre-loop default.
            if "audit_info" in param_json:
                if "col_name" in param_json["audit_info"] and "list_keys" in param_json["audit_info"]:
                    col_name = param_json["audit_info"]["col_name"]
                    custom_info_data = {}
                    for key in param_json["audit_info"]["list_keys"]:
                        if key in input:
                            custom_info_data[key] = input[key]
                        else :
                            print("Unexpected missing output ! " + str(key))

                    if col_name in input_values:
                        # Only merge into an existing column when it is declared
                        # as a "json" column in list_json_update.
                        found_in_json_to_update = False
                        for search_data in list_json_update:
                            if "variable" in search_data and search_data["variable"] == col_name:
                                found_in_json_to_update = True
                                break
                        if found_in_json_to_update:
                            if search_data["type"] == "json":
                                print("Everything OK !")
                                input_values[col_name].update(custom_info_data)
                            else :
                                print("WARNING IGNRONIG DATA")
                        else:
                            print("WARNING IGNRONIG DATA")
                    else:
                        input_values[col_name] = custom_info_data
                        list_json_update.append({"variable": col_name, "type": "json"})

            try:
                lss.lib_user_data_internal.upsert_audit_info(hash_id_treatment,
                                                             input_values=input_values,
                                                             verbose=verbose,
                                                             list_json_update = list_json_update)
            except Exception as e:
                # Fallback: dump the audit payload to a local file, then retry
                # the upsert with an emptied audit_info column.
                print(" BUG SAVING AUDIT INFO BUG PB VOILA AUDIT LINK TO SOMETHING ELSE NOT COOL")
                print(str(e))
                print(" hash_id_treatment : " + str(hash_id_treatment) + " input_values.keys : " + str(list(input_values.keys())))
                import traceback
                print(traceback.format_exc())
                filename = "audit_" + hash_id_treatment + ".json"
                data = input_values["audit_info"] if "audit_info" in input_values else {}
                with (open(filename, "w")) as f:
                    import json
                    json.dump(data, f, default=str)
                print("Saved in file")
                input_values["audit_info"] = {}
                lss.lib_user_data_internal.upsert_audit_info(hash_id_treatment,
                                                             input_values=input_values,
                                                             verbose=verbose,
                                                             list_json_update=list_json_update)
                print("And updated !")

        # lss.lib_user_data_internal.save_audit_info(project_id = input["datou_exec_info"]["project_id"] if "project_id" in input["datou_exec_info"] else None,
        #                                            safia_doc_id = input["datou_exec_info"]["safia_doc_id"] if "safia_doc_id" in input["datou_exec_info"] else None,
        #                                            audit_info = datou_audit_data,
        #                                            hash_id_treatment = hash_id_treatment,
        #                                            mtr_datou_id = input["datou_exec_info"]["mtr_datou_id"] if "mtr_datou_id" in input["datou_exec_info"] else None,
        #                                            mtr_user_id = user_id,
        #                                            launched_at = input["datou_exec_info"]["launched_at"] if "launched_at" in input["datou_exec_info"] else None)
        print(" Saved !")

    # TODO VR 15-6-23 : Is this a hack ?
    input["cost"] = ce.cost

    # display_info comes from the execution context first, else from the last
    # step's param_json (see NOTE above about the leftover param_json).
    info_context_exec = input["info_context_exec"] if "info_context_exec" in input else {}
    display_info = info_context_exec["display_info"] if "display_info" in info_context_exec else param_json["display_info"] if "display_info" in param_json else {}

    input = post_exec_display_output(input, display_info)

    return input, datou_audit_data
def post_exec_display_output(output, display_info = None):
    """Apply per-key display rules to the final output dict.

    Parameters:
        output: the accumulated execution output dict; mutated in place.
        display_info: maps key name -> rule. "delete" removes the key from
            ``output`` (printing its value first); "keep" only prints that the
            key is kept. Any other (or missing) rule implicitly keeps the key,
            which also avoids a RuntimeError from mutating ``output`` while
            iterating it -- we iterate ``display_info``, not ``output``.

    Returns:
        The same ``output`` object, after deletions.
    """
    # Fix: the original used a mutable default (`display_info = {}`), which is
    # a shared object across calls; `None` + local default is the safe idiom.
    if display_info is None:
        display_info = {}
    for k in display_info:
        if display_info[k] == "delete":
            if k in output:
                print("We delete " + str(k) + " in output : " + str(output[k]))
#                print(" TO TEST : echo -n " + str(output[k]) + " | xclip -selection clipboard")
#                print("echo -n \"ad" + ".".join(list(map(lambda x : str(x).replace(".",","), output[k][0][0]))) + "T8500|eof\" | nc 127.0.0.1 1234")
                del output[k]
        elif display_info[k] == "keep":
            print("We keep " + str(k) + " in output ")
    return output