# --- Per-epoch tfrecord refresh: merge data loaded by a background thread ---
# NOTE(review): this chunk arrived whitespace-collapsed on one line; it has been
# reformatted from statement order and apparent semantics. Confirm the nesting
# (especially the final `thr_result = []`) against the original revision.
thr = None          # background loader thread; started further down the epoch loop
thr_result = None   # per-train-set list of datasets produced by the loader thread
# One flag per training set: whether it has extra files to rotate in.
trains_to_update = [t['more_files'] for t in train_next]
for epoch in range(EPOCHS_TO_RUN):
    # Update files after each epoch, all 4. Convert to threads after testing.
    if (FILE_UPDATE_EPOCHS > 0) and (epoch % FILE_UPDATE_EPOCHS == 0):
        if thr is not None:
            # A loader thread was started on a previous refresh; wait for it
            # (no-op join if it already finished), then splice its results in.
            if thr.is_alive():
                qsf.print_time("***WAITING*** until tfrecord gets loaded", end=" ")
            else:
                qsf.print_time("tfrecord is ***ALREADY LOADED*** ", end=" ")
            thr.join()
            qsf.print_time("Done")
            qsf.print_time("Inserting new data", end=" ")
            for n_train, more_files in enumerate(trains_to_update):
                if more_files:
                    qsf.add_file_to_dataset(
                        dataset=dataset_train,
                        new_dataset=thr_result[n_train],
                        train_next=train_next[n_train])
            qsf.print_time("Done")
        # Reset the accumulator before (re)launching the loader thread; needed
        # on the first refresh too, when `thr` is still None.
        thr_result = []
dataset_img = qsf.readImageData( image_data = image_data, files = files, indx = nimg, cluster_radius = 0, # CLUSTER_RADIUS, tile_layers = TILE_LAYERS, tile_side = TILE_SIDE, width = IMG_WIDTH, replace_nans = True, infer = True, keep_gt = True) # to generate same output files img_corr2d = dataset_img['corr2d'] # (?,324) img_target = dataset_img['target_disparity'] # (?,1) img_ntile = dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32 #run first stage network qsf.print_time("Running inferred model, stage1", end=" ") _ = sess.run([stage1done], feed_dict={ph_corr2d: img_corr2d, ph_target_disparity: img_target, ph_ntile: img_ntile }) qsf.print_time("Done.") qsf.print_time("Running inferred model, stage2", end=" ") disp_out, = sess.run([stage2_out_sparse], feed_dict={ph_ntile_out: img_ntile }) qsf.print_time("Done.") result_file = files['result'][nimg].replace('.npy','-infer.npy') #not to overwrite training result files that are more complete try: os.makedirs(os.path.dirname(result_file)) except: pass rslt = np.concatenate(