def get_data(searcher):
    dbstr = normalize('NFKD', searcher).encode('ascii', 'ignore')
    try:
        val = searchers[dbstr]
    except KeyError:
        val = None
    data = val['info']()
    els = {}
    for el in request.args:
        if el in data:
            els[el] = data[el]
    db_name = None
    for el in els:
        if db_name:
            if utils.compare_db_strings(els[el]['db_name'], db_name):
                return Response(
                    utils.build_api_error(dbstr, utils.api_err_incompat_search))
        else:
            db_name = els[el]['db_name']
    splits = db_name.split('/')
    search = {}
    # Recover the database name and collection name
    search['database'] = splits[0]
    search['collection'] = splits[1]
    search['query'] = {}
    for el in els:
        utils.interpret(search['query'], el, request.args[el])
    return Response(utils.simple_search(search))

def live_page_pg(db):
    if db.startswith("<") and db.endswith(">"):
        title = "API Description"
        return render_template("example.html", **locals())
    search = utils.create_search_dict(table=db, request=request)
    for el in request.args:
        utils.interpret(search['query'], el, request.args[el], None)
    search_tuple = utils.simple_search(search)
    return Response(utils.build_api_search(db, search_tuple, request=request),
                    mimetype='application/json')

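# Illustration only: the project's utils.interpret(query, field, value) used by the
# two handlers above is not shown, so the helper below is a hypothetical sketch of
# what a helper with that signature commonly does, namely translate one query-string
# argument into a Mongo-style constraint in the query dict. Names and parsing rules
# here are assumptions, not the project's actual behaviour.
def interpret_sketch(query, field, value):
    if value.startswith('>'):
        query[field] = {'$gt': value[1:]}         # range query, e.g. "?degree=>4"
    elif value.startswith('<'):
        query[field] = {'$lt': value[1:]}         # range query, e.g. "?degree=<4"
    elif ',' in value:
        query[field] = {'$in': value.split(',')}  # membership in a comma-separated list
    else:
        query[field] = value                      # exact match

# Usage sketch: build the same kind of query dict the handlers above would pass on
# to utils.simple_search().
query = {}
interpret_sketch(query, 'label', '11a1,11a2')
interpret_sketch(query, 'degree', '>4')
print(query)  # {'label': {'$in': ['11a1', '11a2']}, 'degree': {'$gt': '4'}}
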
def set_pid_speed(self, speed):
    data = utils.decTo256(speed)
    resp = self.brook.write(self.cid, 28, data)
    print(utils.interpret(resp))
    self.desired_speed = speed
    if self.collision:
        self.collision_detect()

resolution = 16

##########################################################################
slice = 'inline'  # Inline, crossline, timeslice or full
slice_no = 339

# Log to tensorboard
logger = tb_logger.TBLogger('log', 'Test')
logger.log_images(slice + '_' + str(slice_no),
                  get_slice(data, data_info, slice, slice_no), cm='gray')

""" Plot extracted features, class probabilities and salt-predictions for slice """
# Features (attributes) from layer 5
im = interpret(network.f5, data, data_info, slice, slice_no, im_size, resolution,
               use_gpu=use_gpu)
logger.log_images(slice + '_' + str(slice_no) + '_f5', im)

# Features from layer 4
im = interpret(network.f4, data, data_info, slice, slice_no, im_size, resolution,
               use_gpu=use_gpu)
logger.log_images(slice + '_' + str(slice_no) + '_f4', im)

d2 = tf.layers.dense(d1, n_filters, activation=tf.nn.relu)
logits = tf.layers.dense(d2, n_classes)

# Word-tree (hierarchical) loss and raw per-node probabilities
_loss, raw_probs = wordtree_loss(logits=logits, labels=y_onehot, word_tree=word_tree)
loss = tf.reduce_mean(_loss)
train_op = tf.train.AdamOptimizer(1e-2).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Train on random mini-batches of 8 examples
    for step in range(1000):
        idxs = np.random.randint(0, num_ex, 8)
        _, l, rp = sess.run([train_op, loss, raw_probs],
                            feed_dict={x: x_b[idxs], y: y_b[idxs]})
        if (step + 1) % 100 == 0:
            print('Step {}: loss = {:.03e}'.format(step + 1, l))

    print('===TEST===')
    rp, = sess.run([raw_probs],
                   feed_dict={x: gen_ex(childless, parents, children, 0)})
    for r, gt in zip(rp, childless):
        pred_class = interpret(r, num_root, children)
        if pred_class != gt:
            print('Truth: {}\t|\tPred: {}'.format(class_list[gt], class_list[pred_class]))

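# Illustration only: interpret(r, num_root, children) above is not shown, so this is
# a hypothetical sketch of a WordTree-style decoder, assuming `probs` holds per-node
# probabilities, `num_root` is the number of root-level nodes, and `children` is a
# dict mapping a node index to a list of its child indices.
import numpy as np

def interpret_sketch(probs, num_root, children):
    # Start from the most likely root node.
    node = int(np.argmax(probs[:num_root]))
    # Greedily descend to the most likely child until a leaf (childless node) is reached.
    while children.get(node):
        kids = children[node]
        node = kids[int(np.argmax(probs[kids]))]
    return node

# Tiny usage example: two roots, root 1 has leaf children 2 and 3.
children = {1: [2, 3]}
probs = np.array([0.1, 0.9, 0.3, 0.7])
print(interpret_sketch(probs, num_root=2, children=children))  # -> 3
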
def set_tpr(self):
    data = utils.decTo256(self.motor_type["tpr"])
    resp = self.brook.write(self.cid, 23, data)
    print(utils.interpret(resp))

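# Illustration only: utils.decTo256 used by the two motor methods above is not shown.
# A helper with that name most plausibly splits an integer into base-256 digits
# (bytes) before writing it to the controller; the byte order and width below are
# assumptions for the sketch, not the library's actual behaviour.
def dec_to_256_sketch(value, width=2):
    return [(value >> (8 * i)) & 0xFF for i in reversed(range(width))]

print(dec_to_256_sketch(1000))  # -> [3, 232]  (3 * 256 + 232 == 1000)
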
print("Iteration:", i, "Training loss:", utils.var_to_np(loss)) if LOG_TENSORBOARD: logger.log_scalar("training_loss", utils.var_to_np(loss), i) for k, v in utils.compute_accuracy(torch.argmax(output, 1), labels).items(): if LOG_TENSORBOARD: logger.log_scalar("training_" + k, v, i) print(" -", k, v, "%") # every 100th iteration if i % 100 == 0 and LOG_TENSORBOARD: network.eval() # Output predicted train/validation class/probability images for class_img in train_class_imgs + val_class_imgs: slice = class_img[1] slice_no = class_img[2] class_img = utils.interpret( network.classify, data, data_info, slice, slice_no, IM_SIZE, 16, return_full_size=True, use_gpu=USE_GPU, ) logger.log_images(slice + "_" + str(slice_no) + "_pred_class", class_img[0], step=i) class_img = utils.interpret( network, data, data_info, slice, slice_no, IM_SIZE, 16, return_full_size=True, use_gpu=USE_GPU, ) logger.log_images(slice + "_" + str(slice_no) + "_pred_prob", class_img[0], i) # Store trained network torch.save(network.state_dict(), join(ROOT_PATH, "saved_model.pt"))