Example #1
def sanity_check(model, test_loader, use_gpu=True):
    """
    TODO: add documentation here
    """
    device = get_device(use_gpu)
    model.to(device)
    model.eval()

    original_loss_list = []
    final_loss_list = []
    for features, labels in test_loader:
        # move tensors to GPU if CUDA is available
        features, labels = features.to(device).float(), labels.to(
            device).float()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(features)
        original_loss = [
            utils.angular_error(
                features[0, index * 3:index * 3 + 3].detach().cpu().numpy(),
                np.transpose(labels.detach().cpu().numpy()))[0]
            for index in range(len(features[0]) // 3)
        ]

        final_loss = utils.angular_error(output.detach().cpu().numpy(),
                                         np.transpose(
                                             labels.detach().cpu().numpy()))[0][0]

        original_loss_list.append(original_loss[-1].item())
        final_loss_list.append(final_loss)

    print("Original:")
    utils.print_stats(original_loss_list)
    print("Final:")
    utils.print_stats(final_loss_list)
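The `utils.print_stats` calls above take a plain list of angular errors. As a point of reference, a minimal sketch of such a helper (an assumption for illustration, not this project's actual implementation) could be:

import numpy as np

def print_stats(errors):
    # Hypothetical sketch: summarize a list of per-sample angular errors.
    errors = np.asarray(errors, dtype=np.float64)
    print(f"  mean:   {errors.mean():.4f}")
    print(f"  median: {np.median(errors):.4f}")
    print(f"  best:   {errors.min():.4f}")
    print(f"  worst:  {errors.max():.4f}")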
Example #2
def check_unet(weights):
    dataset = Dataset()
    model = model_unet(INPUT_SHAPE)
    model.load_weights(weights)
    batch_size = 16

    for batch_x, batch_y in dataset.generate_validation(batch_size=batch_size):
        print(batch_x.shape, batch_y.shape)
        with utils.timeit_context('predict 16 images'):
            prediction = model.predict_on_batch(batch_x)

        for i in range(batch_size):
            # plt.imshow(unprocess_input(batch_x[i]))
            # plt.imshow(prediction[i, :, :, 0], alpha=0.75)
            img = batch_x[i].astype(np.float32)
            mask = prediction[i, :, :, 0]

            utils.print_stats('img', img)
            utils.print_stats('mask', mask)

            img[:, :, 0] *= mask
            img[:, :, 1] *= mask
            img[:, :, 2] *= mask
            img = unprocess_input(img)
            plt.imshow(img)
            plt.show()
Example #3
def check_watershed_direction_and_energy():
    data = dataset.UVectorNetDataset(fold=-1,
                                     batch_size=4,
                                     output_watershed=True)
    for mask_samples in data.masks.values():
        crops = []
        for mask, crop_offset in mask_samples:
            cfg = dataset.SampleCfg(sample_id=0,
                                    src_center_x=128,
                                    src_center_y=128,
                                    scale_x=1,
                                    scale_y=1)
            tform = cfg.transform()
            crop = utils.transform_crop(mask,
                                        crop_offset,
                                        tform,
                                        output_shape=(dataset.CROP_SIZE,
                                                      dataset.CROP_SIZE))
            crops.append(crop)

        wshed = dataset.watershed_direction_and_energy(
            crops, output_mask_area=data.output_mask_area)

        plt.subplot(3, 3, 1)
        plt.title('mask')
        plt.imshow(np.sum(crops, axis=0))

        for i in range(wshed.shape[2]):
            plt.subplot(3, 3, i + 2)
            plt.title(f'res {i}')
            utils.print_stats(f'res {i}', wshed[:, :, i])
            plt.imshow(wshed[:, :, i])
        plt.show()
Example #4
def test(model, loss, loader, xp, args):

    if not len(loader):
        return 0

    model.eval()

    metrics = xp.get_metric(tag=loader.tag, name='parent')
    timer = xp.get_metric(tag=loader.tag, name='timer')

    metrics.reset()

    if args.multiple_crops:
        epoch_test_multiple_crops(model, loader, xp, args.cuda)
    else:
        epoch_test(model, loader, xp, args.cuda)

    # measure elapsed time
    timer.update()
    xp.log_with_tag(loader.tag)

    if loader.tag == 'val':
        xp.Acc1_Val_Best.update(float(xp.acc1_val)).log()
        xp.Acck_Val_Best.update(float(xp.acck_val)).log()

    if args.verbosity:
        print_stats(xp, loader.tag)

    if args.eval:
        dump_results(xp, args)
Example #5
def non_interactive_demo(model, args):
    renderer = create_renderer()
    show_window = not args.no_show
    for rec in tqdm(model.images_list):
        log.info("Starting inference for %s", rec['img_name'])
        image = rec['img']
        distribution, targets = model.infer_sync(image)
        prob = calculate_probability(distribution)
        log.info("Confidence score is %s", prob)
        if prob >= args.conf_thresh ** len(distribution):
            phrase = model.vocab.construct_phrase(targets)
            if args.output_file:
                with open(args.output_file, 'a') as output_file:
                    output_file.write(rec['img_name'] + '\t' + phrase + '\n')
            else:
                print("\n\tImage name: {}\n\tFormula: {}\n".format(rec['img_name'], phrase))
                if renderer is not None:
                    rendered_formula, _ = renderer.render(phrase)
                    if rendered_formula is not None and show_window:
                        cv.imshow("Predicted formula", rendered_formula)
                        cv.waitKey(0)
        else:
            log.info("Confidence score is low. The formula was not recognized.")
    if args.perf_counts:
        log.info("Encoder performance statistics")
        print_stats(model.exec_net_encoder)
        log.info("Decoder performance statistics")
        print_stats(model.exec_net_decoder)
Example #6
def finish(buckets, num_bytes, X, V, row_reorder, col_reorder, filename,
           recons):
    compressed_X = _reconstruct(X, buckets, row_reorder, col_reorder, recons)

    # Print stats and send to file.
    utils.print_stats(X, compressed_X, num_bytes)
    utils.to_file(filename, V, compressed_X)
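Here `utils.print_stats(X, compressed_X, num_bytes)` compares a matrix against its compressed reconstruction. A hedged sketch of what such a helper might report (the exact figures are assumptions, not taken from the source):

import numpy as np

def print_stats(X, compressed_X, num_bytes):
    # Hypothetical sketch: report reconstruction quality and compression rate.
    rel_err = np.linalg.norm(X - compressed_X) / np.linalg.norm(X)
    bits_per_entry = 8.0 * num_bytes / X.size
    print(f"relative reconstruction error: {rel_err:.4f}")
    print(f"bits per entry: {bits_per_entry:.2f}")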
Example #7
def find_init_h0(input_file, dataset_info, n_iters):
    A = utils.read_adj_sparse_matrix(input_file,
                                     comment=dataset_info['comment'],
                                     sep=dataset_info['sep'],
                                     min_node=dataset_info['min_node'])
    print(A.dtype)
    print("File read")
    components = utils.find_connected_components(A)
    largest = components[np.argmax([len(i) for i in components])]
    A = A[largest, :][:, largest]
    S_max = []
    for i in range(n_iters):
        # TIMBAL
        # We discard all connected components but the largest
        # Subsample => Whether or not to sample random graphs. Recommended when the graph is large
        # Max_removals => Max number of removed vertices per iteration
        # samples => Randomly sampled graphs. Must be a factor of the number of threads used
        # avg_size => Approximate desired size of randomly sampled graph
        start_time = time.time()
        print("TIMBAL started")
        S = timbal.process(A,
                           max_removals=dataset_info['max_removals'],
                           samples=dataset_info['samples'],
                           avg_size=dataset_info['avg_size'],
                           subsample=dataset_info['subsample'])
        print("TIMBAL finished")
        print("Time taken to find h0: ", str(time.time() - start_time) + " s")
        if len(S_max) < len(S) and utils.is_balanced(A[S, :][:, S]):
            S_max = S
    As = A[S_max, :][:, S_max]
    print("\nH")
    utils.print_stats(A)
    print("\nS(H)")
    utils.print_stats(As)
    return (A, S_max)
Example #8
def predict(model_name, epoch_num, img_dir, sample_ids, run=None):
    model = EmbeddingsModel()
    model = model.cuda()

    run_str = '' if run is None or run == '' else f'_{run}'
    checkpoints_dir = f'../output/checkpoints_3/{model_name}{run_str}'

    checkpoint = torch.load(f'{checkpoints_dir}/{epoch_num:03}.pt')
    model.load_state_dict(checkpoint['model_state_dict'])

    dataset = TripletDatasetPredict(sample_ids=sample_ids,
                                    img_dir=img_dir,
                                    transform=transforms.Compose([
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                                    ]),
                                    crop_size=256)

    data_loader_update_train = DataLoader(
        dataset,
        shuffle=False,
        num_workers=8,
        batch_size=64)

    results = []
    results_idx = []
    with torch.set_grad_enabled(False):
        for data in tqdm(data_loader_update_train):
            img = data['img'].cuda()
            samples_idx = data['idx']
            embeddings = model(img).detach().cpu().numpy()
            results.append(embeddings)
            results_idx.append(samples_idx)

        # for image_id in tqdm(sample_ids):
        #     images = []
        #     for color in ['red', 'green', 'blue']:
        #         try:
        #             img = cv2.imread(f'{img_dir}/{image_id}_{color}.png', cv2.IMREAD_UNCHANGED)
        #             img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA).astype("uint8")
        #             images.append(img)
        #         except:
        #             print(f'failed to open {img_dir}/{image_id}_{color}.png')
        #             raise
        #
        #     images = np.stack(images, axis=0).astype(np.float32) / 255.0
        #     images = torch.from_numpy(images).cuda()
        #     images = normalize(images)
        #     images = torch.unsqueeze(images, 0)
        #     embeddings = model(images)
        #     embeddings = embeddings.detach().cpu().numpy()
        #     # print(image_id, embeddings.flatten())
        #     results.append(embeddings)

    results = np.concatenate(results, axis=0)
    results_idx = np.concatenate(results_idx, axis=0)
    utils.print_stats('results_idx diff', np.diff(results_idx))
    return results
Example #9
def check_dataset_aug():
    with utils.timeit_context('load ds'):
        ds = ClassificationDataset(fold=0, is_training=True, img_aug_level=20, geometry_aug_level=10)

    while True:
        sample = ds[1]
        utils.print_stats('img', sample['img'])
        plt.imshow(np.moveaxis(sample['img'], 0, 2)[:, :, :3])
        plt.show()
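`utils.print_stats('img', sample['img'])` here, as in several other examples, labels an array and dumps its statistics. A plausible minimal version of that variant, offered only to illustrate the pattern:

import numpy as np

def print_stats(name, arr):
    # Hypothetical sketch: print shape, dtype and value range under a label.
    arr = np.asarray(arr)
    print(f"{name}: shape={arr.shape} dtype={arr.dtype} "
          f"min={arr.min():.4f} max={arr.max():.4f} mean={arr.mean():.4f}")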
Example #10
def run_train(config, data, model_file, db=None):
    net_model = get_net_model(config)
    data.load()
    training_data = data.get_training_data(config['validation_split'], additional_layers_conf=net_model.get_additional_layers_conf())
    model = net_model.get_model(config, data)

    model.summary()

    gc.collect()
    tr = train_model(model, config, training_data)

    training_data = None
    gc.collect()

    test_data = data.get_validation_test_data(config['validation_split'], additional_layers_conf=net_model.get_additional_layers_conf())

    x_test = net_model.get_x_val_test(test_data)
    y_test = test_data['y_val_test']
    y_pred = model.predict(x_test)
    y_pred = get_y_pred(config, y_pred)

    p_r_f, acc, p_r_f_avg_micro, p_r_f_avg_weighted = print_stats(y_test, y_pred, data.labels_index, config['binary'])
    if model_file:
        data.save_model(model, model_file)

    not_matched = test_data['not_matched']

    if len(not_matched):
        print()
        print('Computing metrics with unmatched true candidates')
        y_test = y_test.tolist()
        y_pred = y_pred.tolist()
        for label in not_matched:
            for _ in range(not_matched[label]):
                y_test.append(data.get_label_from_idx(data.labels_index[label]))

                y_pred.append(data.get_label_from_idx(data.labels_index[data.irrelevant_class]))

        p_r_f, acc, p_r_f_avg_micro, p_r_f_avg_weighted = print_stats(np.array(y_test), np.array(y_pred), data.labels_index, config['binary'])

    sys.stdout = mystdout = StringIO()
    model.summary()
    sys.stdout = sys.__stdout__
    model_summary = mystdout.getvalue()

    if db:
        db.init()
        db.save_result(config, acc, p_r_f, model_summary, data.num_classes, tr["class_weights"], labels_index=data.labels_index,
                       model_id=model_file, p_r_f_avg_micro=p_r_f_avg_micro, p_r_f_avg_weighted=p_r_f_avg_weighted)
Example #11
def trainP(net,
           disc,
           optim_g,
           optim_d,
           device,
           data_loader,
           start_step,
           current_epoch,
           epochs=1,
           train_disc=True,
           step_update=100,
           batch_size=1):

    losses = []
    epoch_times = []
    vgg = [
        VGGFeatureExtractor().float().cuda(),
        VGGFeatureExtractor(pool_layer_num=36).float().cuda()
    ]

    for i, (images, targets, bicub) in enumerate(data_loader):
        optim_g.zero_grad()

        images = images.to(device)
        targets = targets.to(device)
        bicub = bicub.to(device)
        images = images.view((-1, 3, 32, 32))
        targets = targets.view((-1, 3, 128, 128))
        bicub = bicub.view((-1, 3, 128, 128))

        images = images.squeeze(0)
        targets = targets.squeeze(0)
        bicub = bicub.squeeze(0)

        output = net(images.float())
        output = torch.add(output, bicub).clamp(0, 1)
        output = output.to(device)

        loss = LossP(device, vgg[0](output.float()), vgg[0](targets.float()),
                     vgg[1](output.float()), vgg[1](targets.float()))

        losses.append(loss.detach().item())

        loss.backward()
        optim_g.step()

        if i % step_update == 0 and i != 0:
            end_step = time.perf_counter()
            epoch_times.append(end_step - start_step)
            print_stats(False, current_epoch, epochs, len(data_loader), i,
                        losses, [], [], [], [], step_update)
            time_stats(epoch_times, end_step, start_step, len(data_loader), i)
            losses = []
            start_step = time.perf_counter()
Example #12
def spam_image():
    spam_count = utils.get_spam_count()
    print('\n** Make sure the image is copied to your clipboard **')
    utils.prompt_confirmation_and_countdown()
    image_count = 0
    start_time = time.time()
    for _ in range(spam_count):
        pyautogui.hotkey('command', 'v')
        pyautogui.press('enter')
        image_count += 1
        utils.check_and_print_progress(image_count)
    utils.print_stats(start_time, image_count)
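In the spam helpers, `utils.print_stats(start_time, count)` evidently reports throughput once the loop finishes. A minimal sketch under that assumption:

import time

def print_stats(start_time, count):
    # Hypothetical sketch: report how many items were sent and how fast.
    elapsed = time.time() - start_time
    rate = count / elapsed if elapsed > 0 else float('inf')
    print(f"Sent {count} items in {elapsed:.1f} s ({rate:.1f} items/s)")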
Example #13
def multiple_demo(load_fnames, save_fname="multi", reg_a=10., reg_d=10.):
    # assignment: section 6
    bfm = h5py.File(MODELS_PATH + BFM_FNAME, "r")
    bfm_params, color, triangles = utils.read_bfm(bfm)
    lms = utils.read_landmarks(MODELS_PATH + LM_FNAME)  # landmark annotations

    N = len(load_fnames)  # number of images to be loaded

    imgs_real = [utils.read_image(IMAGE_PATH + fname) for fname in load_fnames]  # load all images
    hs = [np.size(img, 0) for img in imgs_real]  # store all heights
    ws = [np.size(img, 1) for img in imgs_real]  # store all widths

    lms_real = [torch.from_numpy(detect_landmark(img)) for img in imgs_real]  # detect all ground truth landmarks
    lms_real_flip = [utils.flip_ycoords(lms_real[i], H=hs[i]) for i in range(N)]  # flip y axis

    alpha, deltas, rotations, translations, loss = multiple.estimate_params((bfm_params, color, triangles),
                                                                            lms, lms_real_flip, hs=hs, ws=ws,
                                                                            reg_a=reg_a, reg_d=reg_d)

    utils.save_loss(loss, save_fname=save_fname + "_loss.pdf")

    # save results for each image
    for i in range(N):
        print(load_fnames[i] + ":")  # print stats for each image  (alpha is the same for each img)
        utils.print_stats(alpha, deltas[i], rotations[i], translations[i])

        G = morph.compute_G(bfm_params, alpha=alpha, delta=deltas[i])
        G_transformed = pinhole.transform(G, rotations[i], translations[i])
        G_pinhole = pinhole.camera_model(G, rotations[i], translations[i], h=hs[i], w=ws[i])

        color = texture.get_color(imgs_real[i], G_pinhole[:, :2])

        print("Rendering...")
        img_pred = utils.get_image(G_pinhole, color, triangles, h=hs[i], w=ws[i])
        utils.show_face(img_pred)
        utils.flip_y()
        plt.savefig(PINHOLE_PATH + save_fname + str(i) + ".pdf")
        plt.close()

        save_obj(OBJ_3D_PATH + save_fname + str(i) + "_3d.obj", G_transformed, color, triangles)
        save_obj(OBJ_2D_PATH + save_fname + str(i) + "_2d.obj", G_pinhole, color, triangles)

        lm_pred_flip = utils.get_landmarks(G_pinhole[:, :2], lms)
        lm_pred = utils.flip_ycoords(lm_pred_flip, H=hs[i])

        utils.show_face(imgs_real[i], white_background=False)
        utils.show_landmarks(lms_real[i], indices=False, label="ground-truth")
        try:
            utils.show_landmarks(lm_pred, indices=False, label="model")
        except TypeError:
            print("... unable to show predicted landmarks")
        plt.savefig(PINHOLE_PATH + save_fname + str(i) + "_lm.pdf")
        plt.close()
Example #14
def spam_message():
    message = input('What message would you like to send?: ')
    spam_count = utils.get_spam_count()
    utils.prompt_confirmation_and_countdown()
    start_time = time.time()
    message_count = 0
    for _ in range(spam_count):
        pyautogui.write(message)
        pyautogui.press('enter')
        message_count += 1
        utils.check_and_print_progress(message_count)
    utils.print_stats(start_time, message_count)
Example #15
def spam_text_file():
    file_path = utils.choose_file()
    file = open(file_path, 'r')
    utils.prompt_confirmation_and_countdown()
    word_count = 0
    start_time = time.time()
    for line in file:
        for word in line.split():
            pyautogui.write(word)
            pyautogui.press('enter')
            word_count += 1
            utils.check_and_print_progress(word_count)
    file.close()
    utils.print_stats(start_time, word_count)
Example #16
def _stats(self, accum_recount):
    print('=========================================')
    print(f'Max p-value: {self.max_p_value:.5f}')
    print(f'Polled ballots: {self.m}')
    W = [
        winner for winner in self.party_members
        if winner in self.winning_candidates
    ]
    L = [
        loser for loser in self.party_members
        if loser not in self.winning_candidates
    ]
    utils.print_stats(accum_recount, W, L)
    print('=========================================')
Example #17
def train(model, loss, optimizer, loader, xp, args):

    if args.use_dali:
        loader.reset()
        loader_len = loader._size // loader.batch_size
    else:
        loader_len = len(loader)

    if not loader_len:
        return 0

    model.train()

    xp.Parent_Train.reset()

    for batch_idx, batch in tqdm(enumerate(loader), desc='Train Epoch',
                                          leave=False, total=loader_len):
        if args.use_dali:
            data = batch[0]['data']
            target = batch[0]['label'].squeeze().cuda().long()
        else:
            data, target = batch
            data, target = data_to_var(data, target, args.cuda)

        output = model(data)
        obj = loss(output, target)
        if obj.item() != obj.item():  # NaN check: NaN is the only value not equal to itself
            print('NaN Error')
            import sys
            sys.exit(-1)

        optimizer.zero_grad()
        obj.backward()
        optimizer.step()

        prec1 = accuracy(output.data, target.data, topk=1)
        preck = accuracy(output.data, target.data, topk=xp.config['topk'])
        xp.Parent_Train.update(loss=obj.data.item(), acck=preck, acc1=prec1, n=data.size(0))

    # compute objective function (including regularization)
    obj = xp.Loss_Train.get() + regularization(model, xp.mu)
    xp.Obj_Train.update(obj)
    # measure elapsed time
    xp.Timer_Train.update()

    xp.log_with_tag('train')

    if args.verbosity:
        print_stats(xp, 'train')
Example #18
def train():
    notcars = glob.glob('data/non-vehicles/*/*.png')
    cars = glob.glob('data/vehicles/*/*.png')

    print_stats(cars, notcars)

    features_car = []
    for car in cars:
        img = read_image(car)
        img_processed = process_image(img)
        features_car.append(extract_features(img_processed, parameters))

    features_notcar = []
    for notcar in notcars:
        img = read_image(notcar)
        img_processed = process_image(img)  # png
        features_notcar.append(extract_features(img_processed, parameters))

    features = np.vstack((features_car, features_notcar))
    # Fit a per-column scaler
    scaler = StandardScaler().fit(features)
    # Apply the scaler to X
    features_scaled = scaler.transform(features)
    # Define the labels vector
    labels = np.hstack(
        (np.ones(len(features_car)), np.zeros(len(features_notcar))))
    # Split up data into randomized training and test sets
    rand_state = np.random.randint(0, 100)
    out = train_test_split(features_scaled,
                           labels,
                           test_size=0.2,
                           random_state=rand_state)
    features_train, features_test, labels_train, labels_test = out

    # Initialize support vector machine object
    clf = SVC(kernel='linear', C=0.00001)
    # Check the training time for the SVC
    t = time.time()
    clf.fit(features_train, labels_train)
    print('{0:2.2f} seconds to train SVC...'.format(time.time() - t))

    # Accuracy score
    accuracy = clf.score(features_test, labels_test)
    print('Test Accuracy of SVC = {0:2.4f}'.format(accuracy))

    classifier = Classifier(clf, scaler)
    joblib.dump(classifier, 'classifier.pkl')

    return classifier
Example #19
def main():
    # Prepare args
    parser = utils.create_arg_parser()
    args = parser.parse_args()

    ok = '{}OK{}'.format(colors.GREEN, colors.ENDC)

    # Create mirror, base_dir and contents_filename.
    print('Initial setup ... ', end='')
    mirror = utils.create_ftp_mirror(args.country)
    base_dir = utils.create_base_dir(args.dist, args.comp)
    contents_filename = utils.create_contents_filename(args.architecture)
    print(ok)

    # Establish ftp connection.
    print('Establishing FTP connection ... ', end='')
    ftp = utils.create_ftp_connection(mirror, base_dir)
    print(ok)

    # Download Contents-xxx.gz file.
    print('Downloading {}{}{} ... '.format(colors.WARNING, contents_filename,
                                           colors.ENDC),
          end='')
    utils.download_contents(ftp, contents_filename)
    print(ok)

    # Destroy ftp connection as we no longer need it.
    print('Closing FTP connection ... ', end='')
    utils.destroy_ftp_connection(ftp)
    print(ok)

    # Create a Counter object to store statistics.
    package_entries = Counter()

    # Begin parsing.
    print('Parsing ... ', end='')
    with utils.gunzip_file(contents_filename) as contents_file:
        for line in contents_file.readlines():
            utils.parse_line_and_update_entries(line, package_entries)
    print(ok)

    # Get Top N packages by number of associated files.
    print('Analyzing data ... ', end='')
    top_packages = utils.get_top_packages(args.top, package_entries)
    print(ok)

    # Print stats.
    utils.print_stats(top_packages)
Example #20
def test_agent(env,
               agent,
               run=0,
               episodes=5,
               time_steps=500,
               initial_state=None,
               initial_noise=None,
               render=True,
               deterministic=True):

    stats = EpisodeStats(episode_lengths=np.zeros(episodes),
                         episode_rewards=np.zeros(episodes),
                         episode_loss=np.zeros(episodes))

    print_header(3, 'Testing')

    for e in range(episodes):

        s = env.reset(initial_state=initial_state,
                      noise_amplitude=initial_noise)

        for t in range(time_steps):

            if render:
                env.render()

            a = agent.get_action(s, deterministic=deterministic)
            s, r, d, _ = env.step(tn(a))

            stats.episode_rewards[e] += r
            stats.episode_lengths[e] = t

            if d:
                break

        pr_stats = {
            'run': run,
            'steps': int(stats.episode_lengths[e] + 1),
            'episode': e + 1,
            'episodes': episodes,
            'reward': stats.episode_rewards[e]
        }
        print_stats(pr_stats)

    if render:
        env.viewer.close()

    return stats
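The `print_stats(pr_stats)` call above receives a plain dict of episode metrics. One way such a helper could format it (assumed for illustration, not taken from the source):

def print_stats(stats):
    # Hypothetical sketch: render an episode-stats dict on a single line.
    print(' | '.join(f'{key}: {value}' for key, value in stats.items()))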
Example #21
def run_cv(config, data, model_file=None):
    data.load()
    fold_idx = 0
    cv_results = []
    acc_list = []

    for fold_data in data.get_cv_folds_data(config['validation_split']):
        print("Running Fold", fold_idx + 1)
        fold_idx += 1
        model = branched_bi_gru_lstm.get_model(config, data)
        # model.summary()
        train_model(model, config, fold_data)
        x_test = branched_bi_gru_lstm.get_cv_x_test(fold_data)
        y_pred = model.predict(x_test)

        y_pred = get_y_pred(config, y_pred)

        p_r_f, acc = print_stats(fold_data['y_test'], y_pred,
                                 data.labels_index, config['binary'])
        acc_list.append(acc)
        cv_results.append(p_r_f)
        if model_file:
            data.save_model(model, model_file + "_fold_" + str(fold_idx))
    cv_prf = merge_several_folds_results(cv_results, fold_idx)
    print_p_r_f(cv_prf, data.labels_index)
Example #22
def texture_demo(load_fname="yke_neutral.jpeg", save_fname="texture", reg_a=10., reg_d=10., idx=0):
    # assignment: section 5
    bfm = h5py.File(MODELS_PATH + BFM_FNAME, "r")
    bfm_params, color, triangles = utils.read_bfm(bfm)
    lms = utils.read_landmarks(MODELS_PATH + LM_FNAME)  # landmark annotations

    img_real = utils.read_image(IMAGE_PATH + load_fname)  # load image of face we want to reconstruct
    h, w, _ = np.shape(img_real)

    lm_real = torch.from_numpy(detect_landmark(img_real))  # detect ground-truth landmarks
    lm_real_flip = utils.flip_ycoords(lm_real, H=h)  # flip y axis because img is upside down compared to pinhole output

    alpha, delta, rotation, translation, loss = latent.estimate_params((bfm_params, color, triangles),
                                                                       lms, lm_real_flip, h=h, w=w,
                                                                       reg_a=reg_a, reg_d=reg_d)
    utils.print_stats(alpha, delta, rotation, translation)  # latent params statistics

    utils.save_loss(loss, save_fname=save_fname + str(idx) + "_loss.pdf")

    G = morph.compute_G(bfm_params, alpha=alpha, delta=delta)
    G_pinhole = pinhole.camera_model(G, rotation, translation, h=h, w=w)

    color = texture.get_color(img_real, G_pinhole[:, :2])  # obtain vertex colors from provided image

    save_obj(OBJ_3D_PATH + save_fname + str(idx) + "_3d.obj", G, color, triangles)
    save_obj(OBJ_2D_PATH + save_fname + str(idx) + "_2d.obj", G_pinhole, color, triangles)

    print("Rendering...")
    img_pred = utils.get_image(G_pinhole, color, triangles, h=h, w=w)

    utils.show_face(img_pred)
    utils.flip_y()
    plt.savefig(PINHOLE_PATH + save_fname + str(idx) + ".pdf")
    plt.close()

    lm_pred_flip = utils.get_landmarks(G_pinhole[:, :2], lms)
    lm_pred = utils.flip_ycoords(lm_pred_flip, H=h)

    utils.show_face(img_real, white_background=False)
    utils.show_landmarks(lm_real, indices=False, label="ground-truth")
    try:
        utils.show_landmarks(lm_pred, indices=False, label="model")
    except TypeError:
        print("... unable to show predicted landmarks")
    plt.savefig(PINHOLE_PATH + save_fname + str(idx) + "_lm.pdf")
    plt.close()
Example #23
def get_votes(images_path, correct_illums, debug=False):
    """Estimate each image's illumination with three grey-world-family voters
    and return the per-method estimates and angular errors."""

    # constants
    GREY_WORLD = 'grey_world'
    MAX_RGB = 'max_rgb'
    GREY_EDGE = 'grey_edge'

    # keys lists
    keys = [GREY_WORLD, MAX_RGB, GREY_EDGE]
    # voters lists
    voters = {
        GREY_WORLD: lambda x: grey_edge(x, njet=0, mink_norm=1, sigma=0),
        MAX_RGB: lambda x: grey_edge(x, njet=0, mink_norm=-1, sigma=0),
        GREY_EDGE: lambda x: grey_edge(x, njet=1, mink_norm=5, sigma=2)
    }
    # illum lists
    illums = {GREY_WORLD: [], MAX_RGB: [], GREY_EDGE: []}
    # error lists
    errors = {GREY_WORLD: [], MAX_RGB: [], GREY_EDGE: []}
    # estimate illuminations and calculate error
    for index, (image_path,
                correct_illum) in enumerate(zip(images_path, correct_illums)):
        image = skio.imread(image_path)
        if debug:
            print(image_path)
            print('illumination: ' + str(correct_illum))

        for key in keys:
            estim_illum = voters[key](image)
            illums[key].append(estim_illum)
            errors[key].append(utils.angular_error(estim_illum, correct_illum))
            if debug:
                print(key + ": " + str(estim_illum) + " error: " +
                      str(errors[key][-1]))
        if debug:
            print()
        else:
            utils.clear_screen()
            print(str(index + 1) + ' / ' + str(len(images_path)))

    for key in keys:
        print(key + ":")
        utils.print_stats(errors[key])

    return illums, errors
Example #24
def spam_image_url():
    image_url = input('Image URL: ')
    image_name = image_url.split('/')[-1]
    image_path = IMAGE_DOWNLOAD_FOLDER + image_name
    urllib.request.urlretrieve(image_url, image_path)
    print(f'Downloaded image {image_name} as {image_path}')
    utils.copy_image_to_clipboard(image_path)
    spam_count = utils.get_spam_count()
    utils.prompt_confirmation_and_countdown()
    image_count = 0
    start_time = time.time()
    for _ in range(spam_count):
        pyautogui.hotkey('command', 'v')
        pyautogui.press('enter')
        image_count += 1
        utils.check_and_print_progress(image_count)
    utils.print_stats(start_time, image_count)
Example #25
def check_augmentations():
    with utils.timeit_context('load ds'):
        ds = NihDataset(fold=0, is_training=True, img_size=512)

    sample_num = 2

    ds.is_training = False
    plt.imshow(ds[sample_num]['img'])

    plt.figure()
    ds.is_training = True

    for i in range(100):
        sample = ds[sample_num]
        utils.print_stats('img', sample['img'])
        plt.imshow(sample['img'])
        plt.show()
Example #26
def print_stat(twitter_name):
    """ compute the figures if needed, then serve them """
    token = datetime.datetime.now().strftime("%Y%m%d")
    events = get_tweets_for_user(target_user='******'+twitter_name, n_rounds=16)
    figure_path = op.join(project_path, 'figures', '%s_%s.png' % (twitter_name, token))
    print("Entering stat method")
    returned_path = print_stats(events, figure_path=figure_path)
    print(returned_path)

    return send_file(figure_path, mimetype='image/png')
Example #27
def main(num_haps,
         num_snps,
         Ne=1e5,
         length=5e3,
         recombination_rate=2e-8,
         mutation_rate=2e-8,
         random_seed=None,
         flip=False,
         verbose=False,
         print_matrices=False):
    true_haplotypes = np.array([[]])
    while true_haplotypes.shape[1] < num_snps:
        if verbose:
            print('simulating...')
        true_haplotypes = simulate_haplotype_matrix(
            num_haps,
            num_snps,
            Ne=Ne,
            length=length,
            recombination_rate=recombination_rate,
            mutation_rate=mutation_rate,
            random_seed=random_seed)

    if flip:
        random.seed(a=random_seed)
        column_list = random.choices([0, 1], k=num_haps)
        true_haplotypes = flip_columns(column_list, true_haplotypes)
    write_vcf_from_haplotype_matrix(INPUT_VCF, true_haplotypes, phased=False)
    phased_haplotypes = beagle_phase(BEAGLE_JAR_PATH, INPUT_VCF,
                                     BEAGLE_OUTPUT_PATH)

    # constructed for statistics later, not used by beagle, which works directly on vcf
    genotypes = compress_to_genotype_matrix(true_haplotypes)
    unphased_haplotypes = get_incomplete_phasing_matrix(genotypes)

    # TODO nthomas: add some facility to contribute reference haplotypes to beagle
    if verbose:
        print_stats(true_haplotypes,
                    unphased_haplotypes,
                    phased_haplotypes,
                    print_matrices=print_matrices)
    return true_haplotypes, phased_haplotypes
Example #28
def run(YEAR, DAY, p1_fn, p2_fn, cmds, FILE=None):
    target = get_target(YEAR, DAY)
    fmt_str = '%(asctime)-15s %(filename)8s:%(lineno)-3d %(message)s'
    log.basicConfig(level=log.DEBUG, format=fmt_str)
    force = 'force_fetch' in cmds
    v = fetch(YEAR, DAY, log, wait_until_date=target, force=force)
    if FILE is not None:
        writeInputToFolder(FILE, v)
    if 'print_stats' in cmds:
        print_stats(v)

    if 'run1' in cmds:
        res = p1_fn(v)
        print('part_1: {}'.format(res))
        if 'submit1' in cmds:
            submit(YEAR, DAY, 1, res)
    if 'run2' in cmds:
        res = p2_fn(v)
        print('part_2: {}'.format(res))
        if 'submit2' in cmds:
            submit(YEAR, DAY, 2, res)
Example #29
def main(num_haps,
         num_snps,
         num_ref=0,
         Ne=1e5,
         length=5e3,
         recombination_rate=2e-8,
         mutation_rate=2e-8,
         random_seed=None,
         flip=False,
         verbose=False,
         print_matrices=False):
    # simulate with msprime
    true_haplotypes = np.array([[]])
    while true_haplotypes.shape[1] < num_snps:
        if verbose:
            print('simulating...')
        true_haplotypes = simulate_haplotype_matrix(num_haps,
                                                    num_snps,
                                                    Ne=Ne,
                                                    length=length,
                                                    recombination_rate=recombination_rate,
                                                    mutation_rate=mutation_rate,
                                                    random_seed=random_seed)

    if flip:
        random.seed(a=random_seed)
        column_list = random.choices([0, 1], k=num_haps)
        true_haplotypes = flip_columns(column_list, true_haplotypes)

    genotypes = compress_to_genotype_matrix(true_haplotypes)
    unphased_haplotypes = get_incomplete_phasing_matrix(genotypes)

    unphased_haplotypes = np.vstack([unphased_haplotypes, true_haplotypes[0:num_ref]])
    true_with_ref = np.vstack([true_haplotypes, true_haplotypes[0:num_ref]])

    phased_haplotypes = phission_phase(unphased_haplotypes)

    if verbose:
        print_stats(true_with_ref, unphased_haplotypes, phased_haplotypes, print_matrices=print_matrices)
    return true_haplotypes, phased_haplotypes
Example #30
def check_color_conv():
    data = dataset.UVectorNetDataset(fold=-1,
                                     batch_size=1,
                                     output_watershed=True,
                                     use_colors=False)

    from skimage.color import rgb2hed
    import skimage.exposure
    import PIL.Image

    for sample_id, img in data.images.items():
        print(sample_id)
        fn = '/home/dmytro/ml/kaggle/2018_data_science_bowl/input/stage1_test/0f1f896d9ae5a04752d3239c690402c022db4d72c0d2c087d73380896f72c466/images/0f1f896d9ae5a04752d3239c690402c022db4d72c0d2c087d73380896f72c466.png'
        img = PIL.Image.open(fn)
        img = np.array(img)[:, :, :3]

        plt.subplot(1, 3, 1)
        utils.print_stats('img', img)
        plt.imshow(img)

        plt.subplot(1, 3, 2)
        preprocessed = data.preprocess(img)[:, :, 0]
        utils.print_stats('preprocessed', preprocessed)
        plt.imshow(preprocessed)

        plt.subplot(1, 3, 3)
        img_hed = rgb2hed(img / 255.0 * 2 - 1)[:, :, 0]
        utils.print_stats('img_hed', img_hed)
        # hed_scaled = skimage.exposure.rescale_intensity(img_hed[:, :, 0]*-1, out_range=(0.0, 1.0))
        # utils.print_stats('img_hed_scaled', img_hed)
        plt.imshow(img_hed * -1)

        plt.show()
Example #31
def process_po(ref_messages, po, glob_stats, do_stats, do_messages):
    print("Checking {}...".format(po))
    ret = 0

    messages, states, stats = utils.parse_messages(po)
    if do_messages:
        t = print_diff(ref_messages, messages, states)
        if t:
            ret = t
    if do_stats:
        print("\tStats:")
        t = utils.print_stats(stats, glob_stats, prefix="        ")
        if t:
            ret = t
    if states["is_broken"]:
        print("\tERROR! This .po is broken!")
        ret = 1
    return ret
Example #32
def main():
    import argparse
    parser = argparse.ArgumentParser(description="" \
                    "Merge one or more .po files into the first dest one.\n" \
                    "If a msgkey (msgctxt, msgid) is present in more than " \
                    "one merged po, the one in the first file wins, unless " \
                    "it’s marked as fuzzy and one later is not.\n" \
                    "The fuzzy flag is removed if necessary.\n" \
                    "All other comments are never modified.\n" \
                    "Commented messages in dst will always remain " \
                    "commented, and commented messages are never merged " \
                    "from sources.")
    parser.add_argument('-s', '--stats', action="store_true",
                        help="Show statistics info.")
    parser.add_argument('-r', '--replace', action="store_true",
                        help="Replace existing messages of same \"level\" already in dest po.")
    parser.add_argument('dst', metavar='dst.po',
                        help="The dest po into which merge the others.")
    parser.add_argument('src', metavar='src.po', nargs='+',
                        help="The po's to merge into the dst.po one.")
    args = parser.parse_args()

    ret = 0
    done_msgkeys = set()
    done_fuzzy_msgkeys = set()
    nbr_merged = 0
    nbr_replaced = 0
    nbr_added = 0
    nbr_unfuzzied = 0

    dst_messages, dst_states, dst_stats = utils.parse_messages(args.dst)
    if dst_states["is_broken"]:
        print("Dest po is BROKEN, aborting.")
        return 1
    if args.stats:
        print("Dest po, before merging:")
        utils.print_stats(dst_stats, prefix="\t")
    # If we don’t want to replace existing valid translations, pre-populate
    # done_msgkeys and done_fuzzy_msgkeys.
    if not args.replace:
        done_msgkeys = dst_states["trans_msg"].copy()
        done_fuzzy_msgkeys = dst_states["fuzzy_msg"].copy()
    for po in args.src:
        messages, states, stats = utils.parse_messages(po)
        if states["is_broken"]:
            print("\tSrc po {} is BROKEN, skipping.".format(po))
            ret = 1
            continue
        print("\tMerging {}...".format(po))
        if args.stats:
            print("\t\tMerged po stats:")
            utils.print_stats(stats, prefix="\t\t\t")
        for msgkey, val in messages.items():
            msgctxt, msgid = msgkey
            # This msgkey has already been completely merged, or is a commented one,
            # or the new message is commented, skip it.
            if msgkey in (done_msgkeys | dst_states["comm_msg"] | states["comm_msg"]):
                continue
            is_ttip = utils.is_tooltip(msgid)
            # New message does not yet exist in dest.
            if msgkey not in dst_messages:
                dst_messages[msgkey] = messages[msgkey]
                if msgkey in states["fuzzy_msg"]:
                    done_fuzzy_msgkeys.add(msgkey)
                    dst_states["fuzzy_msg"].add(msgkey)
                elif msgkey in states["trans_msg"]:
                    done_msgkeys.add(msgkey)
                    dst_states["trans_msg"].add(msgkey)
                    dst_stats["trans_msg"] += 1
                    if is_ttip:
                        dst_stats["trans_ttips"] += 1
                nbr_added += 1
                dst_stats["tot_msg"] += 1
                if is_ttip:
                    dst_stats["tot_ttips"] += 1
            # From now on, the new message is already in dst.
            # New message is neither translated nor fuzzy, skip it.
            elif msgkey not in (states["trans_msg"] | states["fuzzy_msg"]):
                continue
            # From now on, the new message is either translated or fuzzy!
            # The new message is translated.
            elif msgkey in states["trans_msg"]:
                dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
                done_msgkeys.add(msgkey)
                done_fuzzy_msgkeys.discard(msgkey)
                if msgkey in dst_states["fuzzy_msg"]:
                    dst_states["fuzzy_msg"].remove(msgkey)
                    nbr_unfuzzied += 1
                if msgkey not in dst_states["trans_msg"]:
                    dst_states["trans_msg"].add(msgkey)
                    dst_stats["trans_msg"] += 1
                    if is_ttip:
                        dst_stats["trans_ttips"] += 1
                else:
                    nbr_replaced += 1
                nbr_merged += 1
            # The new message is fuzzy, org one is fuzzy too,
            # and this msgkey has not yet been merged.
            elif msgkey not in (dst_states["trans_msg"] | done_fuzzy_msgkeys):
                dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
                done_fuzzy_msgkeys.add(msgkey)
                dst_states["fuzzy_msg"].add(msgkey)
                nbr_merged += 1
                nbr_replaced += 1

    utils.write_messages(args.dst, dst_messages, dst_states["comm_msg"], dst_states["fuzzy_msg"])

    print("Merged completed. {} messages were merged (among which {} were replaced), " \
          "{} were added, {} were \"un-fuzzied\"." \
          "".format(nbr_merged, nbr_replaced, nbr_added, nbr_unfuzzied))
    if args.stats:
        print("Final merged po stats:")
        utils.print_stats(dst_stats, prefix="\t")
    return ret
Example #33
    if not batch_size:
        filename = paths.SERIALIZED_DIR + "test_batch.bin"
        print "Writing file", filename
        local.serialized.tofile(filename)
    elif local.serialized.any():
        write_batch()


all_data = {}

for sequence in TRAINING_DATA:
    sequence.download()
    data = sequence.read()
    if not all_data:
        all_data = data
    else:
        for key, value in all_data.items():
            all_data[key] = np.concatenate(
                (all_data[key], data[key]),
                axis=0)

if os.path.exists(paths.SERIALIZED_DIR):
    shutil.rmtree(paths.SERIALIZED_DIR)
utils.print_stats(all_data, ['train', 'test'])
print "Serializing.."
utils.create_dirs_if_not_exists(paths.SERIALIZED_DIR)
serialize(all_data['train_dataset'], all_data['train_labels'], batch_size=2000)
serialize(all_data['test_dataset'], all_data['test_labels'])
Example #34
datasets = [
    ('HeLa2D', 'hela', True, 'slf7dna', False),
    ('RT-widefield-no-origin', 'labeled-widefield', True, 'field-dna+', False),
    ('RT-widefield', 'labeled-widefield', True, 'field-dna+', True),
    ('RT-confocal-no-origin', 'labeled-confocal', True, 'field-dna+', False),
    ('RT-confocal', 'labeled-confocal', True, 'field-dna+', True),
    ('LOCATE-transfected', 'SubCellLoc/Transfected', True, 'field-dna+', False),
    ('LOCATE-endogenous', 'SubCellLoc/Endogenous', True, 'field-dna+', False),
    ('binucleate', 'binucleate', False, 'field+', False),
    ('CHO', 'cho', False, 'field+', False),
    ('Terminal Bulb', 'terminalbulb', False, 'field+', False),
    ('RNAi', 'rnai', False, 'field+', False),
    ]

print_stats(datasets)

run_twice = ['RT-widefield']

compares = []
rt_compares = []
sizes = {}
for name, directory, has_dna, base, use_origins in datasets:
    images = CachedFunction(load_directory,'../data/'+directory)
    sizes[name] = len(images)
    learner = precluster_learner_plus_features(kfrac=4)
    origins = None
    if use_origins:
        origins = [im.origin for im in images]
    labels = [im.label for im in images]
    surfs = ['surf']
Example #35
            'duration': self.duration,
            'average speed': self.average_speed,
            'top speed': self.top_speed,
            'energy': self.energy,
            'energy rate': self.energy_rate,
            'peak output power': self.peak_output_power,
            'average motor power': self.average_motor_power,
            'peak regen power': self.peak_regen_power,
            'steepest incline': self.steepest_incline,
            'steepest decline': self.steepest_decline,
        }


if __name__ == '__main__':

    if len(sys.argv) > 1:
        commute = Commute(Car(), sys.argv[1:])

        for track in commute.tracks:
            print('Track', track.filename)
            print_stats(track.stats)
            print()

        print('Total commute')
        print_stats(commute.stats)
    else:
        sys.stderr.write('Usage: %s file.gps [...]\n' % sys.argv[0])
        sys.exit(1)