def main():
    params = parameters.Parameters()
    params.read(sys.argv[1:])
    # `sim` records whether a simulation ran earlier in this invocation,
    # so postprocessing can use the simulated data.
    sim = False
    for task in params.task:
        if task == "app":
            dash_ui.launcher.launch_app(params)
        elif task == "help":
            if len(sys.argv) > 2:
                params.help(sys.argv[2])
            else:
                params.help()
        elif task == "track":
            tracking.track(params)
        elif task == "simulate":
            simulation.simulate(params)
            sim = True
        elif task == "postprocess":
            postprocessing.postprocess(params, simulated=sim)
        elif task == "view":
            visualisation.render(params)
        elif task == "compare":
            trajectories.compare_trajectories(params)
        else:
            sys.exit(f"ERROR: Task {task} is not yet implemented. Aborting...")
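# A minimal entry-point sketch (the guard below is an addition, and the script
# name in the example invocation is hypothetical). Tasks run in the order given
# on the command line, e.g.:
#   python run.py simulate postprocess view
if __name__ == "__main__":
    main()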
def main(settings, metrics):
    # Begin processing validation images.
    # Debug subset, kept for reference:
    # troubled_ones = [3, 14, 22, 43, 66, 83, 97, 114, 161]
    for i in range(len(settings['validation_files'])):
        # Only process files from the selected glacier domains.
        name = settings['validation_files'][i]
        if any(glacier in name for glacier in
               ('Rink-Isbrae', 'Upernavik', 'Umiammakku', 'Inngia')):
            preprocess(i, settings, metrics)
            process(settings, metrics)
            postprocess(settings, metrics)

    # Print statistics (disabled):
    # print_calfin_domain_metrics(settings, metrics)
    # print_calfin_all_metrics(settings, metrics)
    return settings, metrics
def run_postprocessing_task(nclicks, active_file, params_json, post_update):
    # Dash callback: rebuild Parameters from the serialized JSON state, run
    # postprocessing on the selected .tif file, and bump the update counter.
    params = Parameters(json.loads(params_json))
    params.display_figures = False
    if ".tif" in active_file:
        params.name = active_file.replace('.tif', '')
        postprocessing.postprocess(params)
    return ["", post_update + 1]
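# A minimal sketch of how this callback might be registered with Dash. The
# component ids ('run-postprocess-btn', 'file-dropdown', 'params-store',
# 'post-update-store', 'status-text') are assumptions, not from the source.
from dash import Dash
from dash.dependencies import Input, Output, State

app = Dash(__name__)
app.callback(
    [Output('status-text', 'children'),
     Output('post-update-store', 'data')],
    [Input('run-postprocess-btn', 'n_clicks')],
    [State('file-dropdown', 'value'),
     State('params-store', 'data'),
     State('post-update-store', 'data')],
)(run_postprocessing_task)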
def main(settings, metrics):
    # Begin processing validation images.
    # Debug subset, kept for reference:
    # troubled_ones = [3, 14, 22, 43, 66, 83, 97, 114, 161]
    for i in range(len(settings['validation_files'])):
        preprocess(i, settings, metrics)
        process(settings, metrics)
        postprocess(settings, metrics)

    # Print statistics.
    print_calfin_domain_metrics(settings, metrics)
    print_calfin_all_metrics(settings, metrics)
    return settings, metrics
def ml_pipeline(response):
    output_preprocessing = preprocessing.preprocess()
    if output_preprocessing:
        # Update any related flows already in the dataset with the latest data.
        preprocessing.update_related()
        # Data normalization into a [0, 1] scale.
        preprocessing.normalization()
        if config.df.shape[0] >= 10:
            if config.args.kmeans:
                kmeans.kmeans()
            if config.args.dbscan:
                dbscan.dbscan()
            postprocessing.postprocess()
    eval_counter.counter()
    return response
def rake(filePath):
    '''
    Main function of our project.

    params:
        filePath | string : Path of the file to read.
    return:
        indexKeywordList | list : List of index keywords.
    '''
    rawText = readFile.readFile(filePath)
    preObj = preprocessing.Preprocess()
    candidateKeywordList = preObj.preprocess(rawText)
    indexKeywordList = postprocessing.postprocess(candidateKeywordList)
    return indexKeywordList
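# A minimal usage sketch for rake(); the file path is a hypothetical example.
if __name__ == '__main__':
    keywords = rake('sample_document.txt')
    print(keywords)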
def main(settings, metrics):
    # Begin processing validation images, resuming from index 21142.
    # Debug subset, kept for reference:
    # troubled_ones = [3, 14, 22, 43, 66, 83, 97, 114, 161]
    for i in range(21142, len(settings['validation_files'])):
        # Only process files from the selected glacier domains.
        name = settings['validation_files'][i]
        if any(glacier in name for glacier in
               ('Upernavik', 'Umiammakku', 'Inngia')):
            # preprocess(i, settings, metrics)
            process(settings, metrics)
            postprocess(settings, metrics)

    # Print statistics (disabled):
    # print_calfin_domain_metrics(settings, metrics)
    # print_calfin_all_metrics(settings, metrics)
    return settings, metrics
def _save_image(email_address, capture, cache_dir):
    image = capture.image
    _mkdir(cache_dir)
    img_file = NamedTemporaryFile(dir=cache_dir, suffix='.jpg', delete=False)
    img_file.close()

    # postprocessing
    custom_args = STYLE_POSTPROCESSING.get(capture.style_name, {})
    image = postprocessing.qt_to_pil_image(image)
    image = postprocessing.postprocess(image, length=IMAGE_RESIZE_WIDTH,
                                       **custom_args)
    paste_logo(image, capture.style_name)
    image.save(img_file.name, 'JPEG', quality=80)

    with open(os.path.join(cache_dir, 'email.log'), 'a') as f:
        f.write('"{}",{},{:%Y-%m-%d %H:%M:%S}\n'.format(
            email_address, img_file.name, datetime.now()))

    return img_file.name, image.size
def get_rng(z):
    # Note: z[0:math.floor(len(z))] is the whole sequence, and
    # 3.9 + 0 / (10.01 * 3) evaluates to 3.9; both are kept as written.
    generator = postprocessing.postprocess(z[0:math.floor(len(z))], 6,
                                           3.9 + 0 / (10.01 * 3))
    while True:
        try:
            xorbytearray = next(generator)
            byte_memory = memoryview(xorbytearray)
            # Combine each group of 32 bytes into a single integer,
            # treating each byte as one bit.
            byte_list = [
                sum(byte[b] << b for b in range(32))
                for byte in zip(*(iter(byte_memory),) * 32)
            ]
            for i in byte_list:
                yield i
        except StopIteration:
            # The underlying generator is exhausted; stop producing values.
            break
    print("finito")
def main(): """ PETra's main function calls three functions: process_inputs sp.integrate.odeint (a RK45 ODE integrator included in scipy)] postprocess It returns two Dictionaries: InputDict and ResultsDict which contain all inputs used in the run and all the outputs generated """ earth, mass, areas, normals, centroids, I, t, x0, scLengthScale, aero_params, InputDict = PI.process_inputs( InFile.get_inputs) sol = sp.integrate.odeint(tr.traj_uvw, x0, t, args=(earth, mass, areas, normals, centroids, I, scLengthScale, aero_params)) ResultsDict = pp.postprocess(t, sol, earth, mass, areas, normals, centroids, I, x0, scLengthScale) return InputDict, ResultsDict
def cheat_wrapper(query, request_options=None, output_format='ansi'):
    """
    Function that delivers a cheat sheet for `query`.
    The answer is rendered according to `output_format`
    ('ansi', 'html' or 'json').
    Additional request options are specified in `request_options`.
    """

    def _add_section_name(query):
        # temporary solution until we find a fixed one
        if ' ' not in query and '+' not in query:
            return query
        if '/' in query:
            return query
        if ' ' in query:
            # for standalone queries only that may contain ' '
            return "%s/%s" % tuple(query.split(' ', 1))
        return "%s/%s" % tuple(query.split('+', 1))

    def _rewrite_aliases(word):
        if word == ':bash.completion':
            return ':bash_completion'
        return word

    def _rewrite_section_name(query):
        """
        Rewriting special section names:
        * EDITOR:NAME => emacs:go-mode
        """
        if '/' not in query:
            return query

        section_name, rest = query.split('/', 1)
        if ':' in section_name:
            section_name = rewrite_editor_section_name(section_name)
        section_name = LANGUAGE_ALIAS.get(section_name, section_name)
        return "%s/%s" % (section_name, rest)

    def _sanitize_query(query):
        return re.sub('[<>"]', '', query)

    def _strip_hyperlink(query):
        return re.sub('(,[0-9]+)+$', '', query)

    def _parse_query(query):
        topic = query
        keyword = None
        search_options = ""
        if '~' in query:
            pos = topic.index('~')
            keyword = topic[pos + 1:]
            topic = topic[:pos]
            if '/' in keyword:
                search_options = keyword[::-1]
                search_options = search_options[:search_options.index('/')]
                keyword = keyword[:-len(search_options) - 1]
        return topic, keyword, search_options

    query = _sanitize_query(query)
    query = _add_section_name(query)
    query = _rewrite_aliases(query)
    query = _rewrite_section_name(query)

    # at the moment, we just remove trailing slashes
    # so queries python/ and python are equal
    query = _strip_hyperlink(query.rstrip('/'))
    topic, keyword, search_options = _parse_query(query)

    if keyword:
        answers = find_answers_by_keyword(
            topic, keyword, options=search_options,
            request_options=request_options)
    else:
        answers = [get_answer_dict(topic, request_options=request_options)]

    answers = [
        postprocessing.postprocess(
            answer, keyword, search_options, request_options=request_options)
        for answer in answers
    ]

    answer_data = {
        'query': query,
        'keyword': keyword,
        'answers': answers,
    }

    if output_format == 'html':
        answer_data['topics_list'] = get_topics_list()
        return frontend.html.visualize(answer_data, request_options)
    elif output_format == 'json':
        return json.dumps(answer_data, indent=4)
    return frontend.ansi.visualize(answer_data, request_options)
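# A minimal usage sketch for cheat_wrapper(); the queries are illustrative.
if __name__ == '__main__':
    print(cheat_wrapper('python/lambda'))                       # ANSI output
    print(cheat_wrapper('python~file', output_format='json'))   # keyword search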
def main(args):
    # get datasets
    dataset = data.get_dataset(args.dataset, args.split,
                               image_size=args.image_size,
                               data_dir=args.data_dir, is_training=True)
    im_x = preprocess(dataset.x, args.preprocessing_a,
                      image_size=args.image_size,
                      output_channels=args.num_channels)
    im_y = preprocess(dataset.y, args.preprocessing_b,
                      image_size=args.image_size)
    im_batch_x, im_batch_y = data.create_batch([im_x, im_y],
                                               batch_size=args.batch_size,
                                               shuffle=args.shuffle,
                                               queue_size=2, min_queue_size=1)

    # build models
    transformed_x = model.transformer(im_batch_x,
                                      output_channels=dataset.num_classes,
                                      output_fn=None, scope='model/AtoB')
    transformed_y = model.transformer(im_batch_y,
                                      output_channels=args.num_channels,
                                      scope='model/BtoA')
    cycled_x = model.transformer(tf.nn.softmax(transformed_x),
                                 output_channels=args.num_channels,
                                 scope='model/BtoA', reuse=True)
    cycled_y = model.transformer(transformed_y,
                                 output_channels=dataset.num_classes,
                                 output_fn=None, scope='model/AtoB',
                                 reuse=True)

    # create loss functions
    cycle_loss_x = tf.losses.absolute_difference(im_batch_x, cycled_x,
                                                 scope='cycle_loss_x')
    cycle_loss_y = tf.losses.softmax_cross_entropy(im_batch_y, cycled_y,
                                                   scope='cycle_loss_y')
    transform_loss_xy = tf.losses.absolute_difference(
        im_batch_x, transformed_y, scope='transform_loss_xy')
    transform_loss_yx = tf.losses.softmax_cross_entropy(
        im_batch_y, transformed_x, scope='transform_loss_yx')
    total_loss = (cycle_loss_x + cycle_loss_y +
                  transform_loss_xy + transform_loss_yx)

    optimizer = tf.train.AdamOptimizer(args.learning_rate, args.beta1,
                                       args.beta2, args.epsilon)

    inc_global_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, inc_global_step)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_tensor = optimizer.minimize(total_loss)

    # Set up train op to return loss
    with tf.control_dependencies([train_tensor]):
        train_op = tf.identity(total_loss, name='train_op')

    # set up logging
    # Gather initial summaries.
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # Add summaries for losses.
    for loss in tf.get_collection(tf.GraphKeys.LOSSES):
        summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

    # Add summaries for variables.
    for variable in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
        summaries.add(tf.summary.histogram(variable.op.name, variable))

    color_map = np.array(
        list(map(lambda x: x.color,
                 labels[:dataset.num_classes]))).astype(np.float32)
    segmentation_y = postprocess(tf.argmax(im_batch_y, -1),
                                 'segmentation_to_rgb',
                                 dataset.num_classes, color_map)
    segmentation_transformed_x = postprocess(tf.argmax(transformed_x, -1),
                                             'segmentation_to_rgb',
                                             dataset.num_classes, color_map)
    segmentation_cycled_y = postprocess(tf.argmax(cycled_y, -1),
                                        'segmentation_to_rgb',
                                        dataset.num_classes, color_map)

    summaries.add(tf.summary.image('x', im_batch_x))
    summaries.add(tf.summary.image('y', segmentation_y))
    summaries.add(tf.summary.image('transformed_x',
                                   segmentation_transformed_x))
    summaries.add(tf.summary.image('transformed_y', transformed_y))
    summaries.add(tf.summary.image('cycled_x', cycled_x))
    summaries.add(tf.summary.image('cycled_y', segmentation_cycled_y))

    # Merge all summaries together.
    summary_op = tf.summary.merge(list(summaries), name='summary_op')

    # create train loop
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    saver = tf.train.Saver(var_list=tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='model'))
    checkpoint_path = os.path.join(args.output_dir, 'model.ckpt')
    writer = tf.summary.FileWriter(args.output_dir)

    with tf.Session() as sess:
        # Tensorflow initializations
        sess.run(tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS))
        tf.train.start_queue_runners(sess=sess)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        last_log_time = 0
        last_save_time = 0
        for i in tqdm(range(args.num_batches)):
            if last_log_time < time.time() - args.log_every_n_seconds:
                last_log_time = time.time()
                summary, loss_val, global_step = sess.run(
                    [summary_op, train_op, tf.train.get_global_step()])
                writer.add_summary(summary, global_step)
                writer.flush()
            else:
                loss_val, global_step = sess.run(
                    [train_op, tf.train.get_global_step()])

            if last_save_time < time.time() - args.save_every_n_seconds:
                last_save_time = time.time()
                saver.save(sess, checkpoint_path, global_step=global_step)

        saver.save(sess, checkpoint_path, global_step=args.num_batches)
def main(args):
    # get datasets
    dataset = data.get_dataset(args.dataset, args.split,
                               image_size=args.image_size,
                               data_dir=args.data_dir, is_training=True)
    im_x = preprocess(dataset.x, args.preprocessing_a,
                      image_size=args.image_size,
                      output_channels=args.num_channels)
    im_y = preprocess(dataset.y, args.preprocessing_b,
                      image_size=args.image_size)

    # No need to use tf.train.batch
    im_x = tf.expand_dims(im_x, 0)
    im_y = tf.expand_dims(im_y, 0)

    # build models
    transformed_x = model.transformer(im_x,
                                      output_channels=dataset.num_classes,
                                      output_fn=None, scope='model/AtoB')
    transformed_y = model.transformer(im_y,
                                      output_channels=args.num_channels,
                                      scope='model/BtoA')
    cycled_x = model.transformer(transformed_x,
                                 output_channels=args.num_channels,
                                 scope='model/BtoA', reuse=True)
    cycled_y = model.transformer(transformed_y,
                                 output_channels=dataset.num_classes,
                                 output_fn=None, scope='model/AtoB',
                                 reuse=True)

    # Correct colors for outputting
    color_map = np.array(
        list(map(lambda x: x.color,
                 labels[:dataset.num_classes]))).astype(np.float32)
    image_x = (im_x + 1.0) / 2.0
    image_transformed_y = (transformed_y + 1.0) / 2.0
    image_cycled_x = (cycled_x + 1.0) / 2.0
    segmentation_y = postprocess(tf.argmax(im_y, -1), 'segmentation_to_rgb',
                                 dataset.num_classes, color_map)
    segmentation_transformed_x = postprocess(tf.argmax(transformed_x, -1),
                                             'segmentation_to_rgb',
                                             dataset.num_classes, color_map)
    segmentation_cycled_y = postprocess(tf.argmax(cycled_y, -1),
                                        'segmentation_to_rgb',
                                        dataset.num_classes, color_map)

    saver = tf.train.Saver(var_list=tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='model'))

    with tf.Session() as sess:
        # Tensorflow initializations
        sess.run(tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS))
        tf.train.start_queue_runners(sess=sess)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        saver.restore(sess, tf.train.latest_checkpoint(args.checkpoint_dir))

        for i in tqdm(range(args.num_batches)):
            x, y, x_t, y_t, x_c, y_c = sess.run([
                image_x, segmentation_y, segmentation_transformed_x,
                image_transformed_y, image_cycled_x, segmentation_cycled_y
            ])
            plt.subplot(231)
            plt.imshow(x[0])
            plt.subplot(232)
            plt.imshow(x_t[0])
            plt.subplot(233)
            plt.imshow(x_c[0])
            plt.subplot(234)
            plt.imshow(y[0])
            plt.subplot(235)
            plt.imshow(y_t[0])
            plt.subplot(236)
            plt.imshow(y_c[0])
            plt.show()
def run_detection(data_dir, coco_gt, im_ids):
    model, priorbox = create_model(cfg.MODEL)
    priors = Variable(priorbox.forward(), volatile=True)
    detector = Detect(cfg.POST_PROCESS, priors)

    # Utilize GPUs for computation
    model.cuda()
    priors.cuda()
    cudnn.benchmark = True

    preprocess = preproc(cfg.DATASET.IMAGE_SIZE, cfg.DATASET.PIXEL_MEANS, -2)
    resume_checkpoint(model, args.weight)

    num_classes = 2
    results = []
    time_all = []
    time_per_step = {
        "nms_time": [],
        "cpu_times": [],
        "scores_time": [],
        "box_time": [],
        "gpunms_time": [],
        "base_time": [],
        "extra_time": [],
        "head_time": []
    }

    for i, index in enumerate(im_ids):
        # load img
        print('evaluating image {}/{}'.format(i, len(im_ids)))
        im_data = coco_gt.loadImgs(ids=index)[0]
        img = cv2.imread(os.path.join(data_dir, 'frames',
                                      im_data['file_name']))
        scale = [img.shape[1], img.shape[0], img.shape[1], img.shape[0]]
        img_shape = img.shape
        images = Variable(preprocess(img)[0].unsqueeze(0).cuda(),
                          volatile=True)
        img_dict = {'version': 0, 'time': 0., 'camera_id': 0, 'image': img}

        # run detection model
        torch.cuda.synchronize()
        time_all_start = time.perf_counter()
        # forward
        out = model(images, phase='eval')
        # detect
        detections = detector.forward(out)
        torch.cuda.synchronize()
        time_all_end = time.perf_counter()
        time_all.append(1000 * (time_all_end - time_all_start))

        # Collect positive detections, rescaled to the original image size.
        scores = []
        cls_boxes = []
        for det in detections[0][1]:
            if det[0] > 0:
                d = det.cpu().numpy()
                score, box = d[0], d[1:]
                box *= scale
                scores.append(score)
                cls_boxes.append(box)

        output_blobs = {}
        output_blobs['scores'] = scores
        output_blobs['cls_boxes'] = cls_boxes
        print(np.array(cls_boxes).shape)
        output_dict = postprocess(output_blobs, 1., img_shape)
        if len(output_dict['people']) == 0:
            continue

        # save result
        for person in output_dict['people']:
            entry_result = {
                "image_id": index,
                "category_id": 1,
                "bbox": person['bbox_ltwh'].tolist(),
                "score": person['score']
            }
            results.append(entry_result)

    # save results as json file
    with open(json_dt, 'w') as f:
        json.dump(results, f)
    print('detection results saved in {}'.format(json_dt))
    print('average running time: {}ms'.format(sum(time_all) / len(time_all)))
parser.add_argument('--data_path', type=str, help='path of raw data folder')
parser.add_argument('--image_path', type=str, help='path of image npy array')
parser.add_argument('--mask_path', type=str, help='path of mask npy array')
parser.add_argument('--model_path', type=str, help='path of saved model')
parser.add_argument('--save_path', type=str,
                    help='path to save model or prediction')
args = parser.parse_args()

if args.preprocess == "True":
    print("Preprocessing Images")
    preprocess(args.mode, args.data_path, args.save_path)
    print("Preprocessing Done!")
elif args.postprocess == "True":
    print("Postprocessing Images")
    postprocess(args.image_path, args.save_path)
    print("Submission File saved as submission.csv at ", args.save_path)
elif args.mode == "fit":
    print("Fitting Model")
    fit_predict(args.mode, args.image_path, args.mask_path, "None",
                args.save_path)
    print("Done!")
elif args.mode == "predict":
    print("Predicting Mask")
    fit_predict(args.mode, args.image_path, "None", args.model_path,
                args.save_path)
    print("Done!")
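# Example invocations (a sketch; the script name and paths are hypothetical,
# and the --preprocess/--postprocess/--mode flags are inferred from the args
# usage above since their add_argument lines are not shown):
#   python pipeline.py --preprocess True --mode train --data_path ./raw --save_path ./npy
#   python pipeline.py --mode fit --image_path ./npy/images.npy --mask_path ./npy/masks.npy --save_path ./models
#   python pipeline.py --postprocess True --image_path ./npy/predictions.npy --save_path ./submissions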
    clf.fit(all_1, all_2)
    holdout_predictions = clf.predict(data[columns])
    return holdout_predictions


# load the data
test_dataSet = pd.read_csv("test.csv")
train_dataSet = pd.read_csv("train.csv")
cleanup_test = preprocess(test_dataSet)
test_dataSet = cleanup_test[0]
cleanup_train = preprocess(train_dataSet)
train_dataSet = cleanup_train[0]

# learn our model with columns
columns = cleanup_train[1]
all_X = train_dataSet[columns]
all_y = train_dataSet['Survived']
holdout = test_dataSet

linearReg(holdout, all_X, all_y, columns)
holdout_predictions = treePredict(holdout, all_X, all_y, columns)
submission = postprocess(holdout, holdout_predictions)
submission.to_csv('titanic_submission.csv', index=False)

accTestLinearReg(all_X, all_y)
accTestTreePredict(all_X, all_y)