Example #1
def predict(model_id):
    logging.info("Enter the mass (g) of the sample:")
    mass = sys.stdin.readline()
    logging.info("Enter the density (g/cm3) of the sample:")
    density = sys.stdin.readline()
    logging.info("Enter the height (mm) of the sample:")
    height = sys.stdin.readline()
    logging.info("Enter the width (mm) of the sample:")
    width = sys.stdin.readline()
    logging.info("Enter the depth (mm) of the sample:")
    depth = sys.stdin.readline()
    logging.info("Predicting sample with following variables:\n"
                 "mass = {}"
                 "density = {}"
                 "height = {}"
                 "width = {}"
                 "depth = {}".format(mass, density, height, width, depth))
    if model_id is not None:
        prediction, pred_name = training.predict_with(model_id, mass, density,
                                                      height, width, depth)
    else:
        prediction, pred_name = training.predict(mass, density, height, width,
                                                 depth)

    logging.info("Predicted type_id: {}".format(prediction[0]))
    logging.info("Predicted type: {}".format(pred_name))
Example #2
def _main(args):
    # Raw arguments from parser
    save = args.save
    data_path = args.data_path
    filename = args.file
    results_dir = args.results_dir
    training_path = args.training_path
    model_dir = args.model_dir
    classes = args.classes
    weights_name = args.weights

    # Computed arguments
    classes_path = os.path.join(model_dir, classes)
    dir_list = [x for x in sorted(os.listdir(data_path)) if x.endswith('.jpg')]

    # Creating config instance
    config = Config()

    # Extracting classes and anchors
    class_names = get_classes(classes_path)
    anchors = config.YOLO_ANCHORS

    # Loading dictionary
    data = np.load(os.path.join(data_path, filename), allow_pickle=True)

    # Extracting images and boxes
    image_data, boxes = process_data(data['images'], data['boxes'])

    # Extracting anchor boxes and masks
    detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)

    # Normalizing data
    normalized_data = normalize(image_data, training_path, train=False)

    # Creating model and printing summary
    model_body, model = create_model(anchors,
                                     class_names,
                                     freeze_body=config.FREEZE,
                                     load_pretrained=config.LOAD_PRETRAINED)

    # Call to predict function
    boxes_dict = predict(model_body,
                         class_names,
                         anchors,
                         normalized_data,
                         weights_name,
                         dir_list,
                         non_best_sup=config.NON_BEST_SUP,
                         results_dir=results_dir,
                         save=save)

    # Saving predictions
    save_annotation(boxes_dict,
                    os.path.join(results_dir, 'predictions', 'pred_boxes.p'))
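A minimal sketch of the command-line wiring that could feed _main, assuming the attribute names used above; the flag spellings and defaults are illustrative, not taken from the original script:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run prediction on a folder of images.')
    parser.add_argument('--save', action='store_true')        # save annotated images
    parser.add_argument('--data_path', required=True)         # folder holding the .jpg files and the data file
    parser.add_argument('--file', required=True)              # name of the pickled data file
    parser.add_argument('--results_dir', default='results')
    parser.add_argument('--training_path', default='training')
    parser.add_argument('--model_dir', default='model_data')
    parser.add_argument('--classes', default='classes.txt')
    parser.add_argument('--weights', default='weights.h5')
    _main(parser.parse_args())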
Example #3
def main():
    train_feature_vecs, train_labels = training.train()
    while True:
        inpath = filedialog.askopenfilename()
        img = cv2.imread(inpath, cv2.IMREAD_COLOR)

        # img = cv2.imread(filename, cv2.IMREAD_COLOR)
        sign, sign_name = find_sign(img)
        if sign is not None:
            cv2.imshow("Sign", sign)
            class_image, label = training.predict(sign, train_feature_vecs,
                                                  train_labels)
            print("Predicted label is: ", label)
            cv2.imshow("Class", class_image)
        print(sign_name)

        cv2.waitKey(0)
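As written the loop never terminates, and cv2.imread runs even when the file dialog is cancelled (askopenfilename then returns an empty string). A minimal sketch of a guarded version, using the same assumed helpers find_sign and training.predict:

    while True:
        inpath = filedialog.askopenfilename()
        if not inpath:        # dialog cancelled -> stop
            break
        img = cv2.imread(inpath, cv2.IMREAD_COLOR)
        if img is None:       # unreadable file -> ask for another one
            continue
        sign, sign_name = find_sign(img)
        # ... classification and display as in the example above ...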
Example #4
def process_image(img):

    sliding_scale = [64, 128, 256]
    boxlist = []
    heatmap = np.zeros_like(img[:, :, 0]).astype(float)
    heatmap_threshold = 1
    counter = 0

    for s in sliding_scale:
        print("sliding_scale: " + str(s))
        crop_arr = slw.slide_window(img,
                                    x_start_stop=[None, None],
                                    y_start_stop=[300, None],
                                    xy_window=(s, s),
                                    xy_overlap=(0.5, 0.5))

        for x in crop_arr:
            cropped_image = slw.crop_image(img, x)
            res = trn.predict(cropped_image)
            if str(res[0]) == "vehicle":
                boxlist.append(x)
                counter += 1

    print("boxes with cars found: " + str(counter))
    #by now we have identified all the boxes that contain a car
    # now we add the boxes to a heatmap
    for b in boxlist:
        #        heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
        heatmap[b[1]:b[3], b[0]:b[2]] += 1

    heatmap[heatmap <= heatmap_threshold] = 0

    res_image = proc.draw_labeled_bboxes(img, heatmap)

    #    cv2.imwrite('result_output.jpg',res_image)
    util.show_image_from_image(res_image, "output")
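proc.draw_labeled_bboxes is not shown in this example; a typical implementation labels the connected regions of the thresholded heatmap and draws one rectangle per region. A minimal sketch of such a helper (only the name and signature mirror the call above; the body is an assumption):

import numpy as np
import cv2
from scipy.ndimage import label

def draw_labeled_bboxes(img, heatmap):
    # Label connected heatmap regions and draw one bounding box per detected region.
    labeled, n_regions = label(heatmap)
    out = np.copy(img)
    for region_id in range(1, n_regions + 1):
        ys, xs = (labeled == region_id).nonzero()
        cv2.rectangle(out, (int(xs.min()), int(ys.min())),
                      (int(xs.max()), int(ys.max())), (0, 0, 255), 6)
    return out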
Example #5
    for file_path in list_ds.take(10):

        file_path_str = file_path.numpy().decode('utf-8')
        file_name = file_path_str.split(os.sep)[-1]
        test_sample = es.EventSample.from_input_file(sample_id, file_path_str, **cuts)
        test_evts_j1, test_evts_j2 = test_sample.get_particles()
        print('{}: {} j1 evts, {} j2 evts'.format(file_path_str, len(test_evts_j1), len(test_evts_j2)))
        test_j1_ds = tf.data.Dataset.from_tensor_slices(test_evts_j1).batch(batch_n)
        test_j2_ds = tf.data.Dataset.from_tensor_slices(test_evts_j2).batch(batch_n)

        # *******************************************************
        #         forward pass test data -> reco and losses
        # *******************************************************

        print('predicting {}'.format(sdi.path_dict['sample_name'][sample_id]))
        reco_j1, loss_j1_reco, loss_j1_kl = train.predict(vae.model, loss_fn, test_j1_ds)
        reco_j2, loss_j2_reco, loss_j2_kl = train.predict(vae.model, loss_fn, test_j2_ds)
        losses_j1 = [losses.total_loss(loss_j1_reco, loss_j1_kl, vae.beta), loss_j1_reco, loss_j1_kl]
        losses_j2 = [losses.total_loss(loss_j2_reco, loss_j2_kl, vae.beta), loss_j2_reco, loss_j2_kl]

        # *******************************************************
        #               add losses to DataSample and save
        # *******************************************************

        reco_sample = es.EventSample(sample_id + 'Reco', particles=[reco_j1, reco_j2], jet_features=test_sample.get_event_features(), particle_feature_names=test_sample.particle_feature_names)

        for loss, label in zip( losses_j1, ['j1TotalLoss', 'j1RecoLoss', 'j1KlLoss']):
            reco_sample.add_event_feature(label, loss)
        for loss, label in zip( losses_j2, ['j2TotalLoss', 'j2RecoLoss', 'j2KlLoss']):
            reco_sample.add_event_feature(label, loss)
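losses.total_loss is not shown here; for a beta-VAE it is commonly the reconstruction term plus a beta-weighted KL term. A minimal sketch under that assumption:

def total_loss(loss_reco, loss_kl, beta=1.0):
    # Assumed combination: per-event reconstruction loss plus beta-weighted KL divergence.
    return loss_reco + beta * loss_kl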
Example #6
    saver = tf.train.Saver()
    saver.restore(session, ARGS.model_path)

""" Infer Task Variable for Test Tasks """

session, saver = training.infer_latent(
    model, objective, session, saver, dataset, ARGS)

""" Evaluate """

use_var = mean_func is None

X_pred = dataset.get_domain(n_tasks=ARGS.n_tasks)
ids = np.int32(list(range(ARGS.n_tasks)))

ymu, yvar, hmu, hvar = training.predict(
    model, session, X_pred, ids, use_var=use_var)

ymu = (ymu * dataset.Y_std) + dataset.Y_mu
yvar = (dataset.Y_std**2) * yvar
ymin = ymu - 2*np.sqrt(yvar)
ymax = ymu + 2*np.sqrt(yvar)

print("Latent Variables")
for t in range(ARGS.n_tasks):
    print(hmu[t])

for t in range(ARGS.n_train_tasks):
    x = D.reshape(-1)
    task = dataset.data["training"]["ids"][t]
    pylab.plot(x, F[task], color="black")
    pylab.plot(x, ymu[task], color=colors[task])
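ymin and ymax are computed above but not used in the plotting loop that is shown; a minimal sketch of the extra line that could be added inside the same loop to shade the 2-sigma band (the reshape and alpha value are assumptions about the array shapes and styling):

    pylab.fill_between(x, ymin[task].reshape(-1), ymax[task].reshape(-1),
                       color=colors[task], alpha=0.2)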
Example #7
                     warmup=.1)

# Learning
print("Start learning")
ts = time.time()
# Store our loss and accuracy for plotting
train_losses = []
for epoch in range(args.epochs):
    train_losses.append(train(model, train_dataloader, device, optimizer, epoch))

te = time.time()
t_learning = te - ts

print("Start predicting")
ts = time.time()
v_logits, v_labels, eval_acc = predict(model, validation_dataloader, device)
te = time.time()
t_predicting = te - ts

# Save the fine-tuning model
args_str = "bert_epochs_%d_batch_size_%d" % (args.epochs, args.batch_size)
torch.save(model.state_dict(), args.model_dir + "/" + args_str)

# Save the results
print("Save results")
d = {'learning_time': t_learning, 'prediction_time': t_predicting, 'loss_train': train_losses[-1],
     'accuracy_test': eval_acc}
results_df = pd.DataFrame(data=d, index=[0])
results_file = args.results_dir + '/results.csv'
with open(results_file, mode='w') as f:
    results_df.to_csv(f)
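To reuse the fine-tuned weights later, the saved state dict can be loaded back into a model built with the same architecture; a minimal sketch (the path expression mirrors the save call above):

# Rebuild or obtain the same model architecture first, then restore the fine-tuned weights.
model.load_state_dict(torch.load(args.model_dir + "/" + args_str, map_location=device))
model.eval()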
Example #8
# for i in learning_rates:
#     print ("learning rate is: " + str(i))
#     models[str(i)] = training.model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
#     print ('\n' + "-------------------------------------------------------" + '\n')
#
# for i in learning_rates:
#     plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
#
# plt.ylabel('cost')
# plt.xlabel('iterations')
#
# legend = plt.legend(loc='upper center', shadow=True)
# frame = legend.get_frame()
# frame.set_facecolor('0.90')
# plt.show()

# Test with your own image
my_image = "my_image.jpg"
image = np.array(ndimage.imread(my_image, flatten=False))
my_image = scipy.misc.imresize(image,
                               size=(num_px,
                                     num_px)).reshape(num_px * num_px * 3, 1)
my_predicted_image = training.predict(d["w"], d["b"], my_image)

plt.imshow(image)
plt.imshow(my_image.reshape(num_px, num_px, 3))
plt.show()
print("y = " + str(np.squeeze(my_predicted_image)) +
      ", your algorithm predicts a \"" +
      classes[int(np.squeeze(my_predicted_image)), ].decode("utf-8") +
      "\" picture.")