Better formatting of processing times.
Daryl Wilding-McBride committed Sep 18, 2018
Commit f423a6c (1 parent: d5020b2)
Showing 1 changed file with 12 additions and 12 deletions.
generate-search-mgf-from-instrument-db.py (24 changes: 12 additions & 12 deletions)
@@ -111,7 +111,7 @@ def continue_processing(this_step, final_step, databases=[], tables=[]):

 def store_info(info, processing_times):
     processing_stop_time = time.time()
-    info.append(("total processing", processing_stop_time-processing_start_time))
+    info.append(("total processing", round(processing_stop_time-processing_start_time,1)))
     info.append(("processing times", json.dumps(processing_times)))
     # store it in the database
     info_entry_df = pd.DataFrame(info, columns=['item', 'value'])
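In effect, the commit stores each elapsed time rounded to one decimal place instead of the raw float delta. A minimal sketch of the difference (the elapsed value below is invented for illustration):

    import json

    elapsed = 4021.8374569416046          # hypothetical time.time() delta

    before = ("total processing", elapsed)             # 4021.8374569416046
    after = ("total processing", round(elapsed, 1))    # 4021.8

    # the per-step list serializes much more readably after rounding
    processing_times = [("raw convert", round(elapsed, 1))]
    print(json.dumps(processing_times))   # [["raw convert", 4021.8]]

The same one-line edit is then repeated at the end of every pipeline step, as the remaining hunks show.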
@@ -249,7 +249,7 @@ def chunks(l, n):
 source_conn.close()

 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=[converted_database_name], tables=['convert_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -365,7 +365,7 @@ def chunks(l, n):
 pool.map(run_process, cluster_detect_ms1_processes)

 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=frame_batch_df.db.tolist(), tables=['summing_info','peak_detect_info','cluster_detect_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -390,7 +390,7 @@ def chunks(l, n):
 merge_summed_regions(source_db_name, frame_database_name, exceptions=[])

 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -421,7 +421,7 @@ def chunks(l, n):
 run_process("python -u ./otf-peak-detect/feature-detect-ms1.py -db '{}' -fps {} -mnf {} -es {} -ee {}".format(feature_database_name, frames_per_second, args.minimum_number_of_frames, args.elution_start_sec, args.elution_end_sec))

 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=[feature_database_name], tables=['feature_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -484,7 +484,7 @@ def chunks(l, n):
 pool.map(run_process, feature_region_ms1_sum_processes)
 pool.map(run_process, feature_region_ms1_peak_processes)
 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=feature_batch_df.db.tolist(), tables=['summed_ms1_regions_info','ms1_feature_region_peak_detect_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -524,7 +524,7 @@ def chunks(l, n):
 db_conn.close()

 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=feature_batch_df.db.tolist(), tables=['resolve_feature_list_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -552,7 +552,7 @@ def chunks(l, n):
 print("detecting ms2 peaks in the feature region...")
 pool.map(run_process, feature_region_ms2_sum_peak_processes)
 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=feature_batch_df.db.tolist(), tables=['summed_ms2_regions_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -579,7 +579,7 @@ def chunks(l, n):
 print("matching precursor ms2 peaks...")
 pool.map(run_process, match_precursor_ms2_peaks_processes)
 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=feature_batch_df.db.tolist(), tables=['precursor_ms2_peak_matches_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -606,7 +606,7 @@ def chunks(l, n):
 run_process("python -u ./otf-peak-detect/correlate-ms2-peaks-prep.py -cdb '{}'".format(converted_database_name))
 pool.map(run_process, peak_correlation_processes)
 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=feature_batch_df.db.tolist(), tables=['peak_correlation_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -631,7 +631,7 @@ def chunks(l, n):
 run_process("python -u ./otf-peak-detect/deconvolve-ms2-spectra-prep.py -dbd '{}'".format(args.data_directory))
 pool.map(run_process, deconvolve_ms2_spectra_processes)
 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=feature_batch_df.db.tolist(), tables=['deconvolve_ms2_spectra_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))
@@ -674,7 +674,7 @@ def chunks(l, n):
 run_process("cat {} >> {}".format(mgf_filename, combined_mgf_filename))

 step_stop_time = time.time()
-processing_times.append((step_name, step_stop_time-step_start_time))
+processing_times.append((step_name, round(step_stop_time-step_start_time,1)))

 if continue_processing(this_step=step_name, final_step=args.final_operation, databases=feature_batch_df.db.tolist(), tables=['search_mgf_info']):
     print("Completed {}. Continuing to the next step.".format(step_name))