Example #1
def pol_validation(mesh_no, max_nn):

    max_neighb_nodes = np.max(max_nn)

    # Explicit check of fit
    for j in np.arange(4, max_neighb_nodes, 1):
        test_script.test(j, mesh_no)
Example #2
def first_move_batter_NN(joints_array_batter,
                         release_frames,
                         model="saved_models/batter_first_step",
                         start_after_release=10,
                         sequence_length=40):
    """
    Neural network method: takes an array of some joint trajectories data,
    cuts it to length 32, starting from 10 frames after the relase frame,
    returns predicted first movement frame index

    joints_array_batter: list or array of size nr_data, nr_frames, nr_joints, nr_cordinates
    (should be smoothed and interpolated) - can be list because different data can have different nr_frames
    release frames: array of size nr_data, required to cut array at the right spot
    """
    # start_after_release = int(model.split("_")[-2])
    # sequence_length = int(model.split("_")[-1])

    # print(start_after_release, sequence_length)
    data = []
    for i, d in enumerate(joints_array_batter):
        cutoff_min = release_frames[i] + start_after_release
        cutoff_max = cutoff_min + sequence_length
        data.append(d[cutoff_min:cutoff_max, :12])
    data = Tools.normalize01(np.array(data))
    lab, out = test(data, model)
    labels = np.asarray(
        lab.reshape(-1)) + np.asarray(release_frames) + start_after_release
    return labels
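For orientation, a minimal sketch of how first_move_batter_NN might be called. The shapes follow the docstring; the joint counts, frame counts, and release frames below are made up, and the call still requires the project's Tools helper, test function, and saved model.

# Hypothetical usage sketch (dummy trajectories; needs the project's Tools,
# test, and saved model to actually run).
import numpy as np

nr_data, nr_frames, nr_joints, nr_coords = 8, 120, 18, 2
joints = [np.random.rand(nr_frames, nr_joints, nr_coords) for _ in range(nr_data)]
release_frames = np.full(nr_data, 50)  # release frame index per clip (made up)

first_move = first_move_batter_NN(joints, release_frames)
print(first_move)  # one predicted first-movement frame index per clip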
Example #3
def benchmark_stvae(dataset, log_name, cfg, **kwargs):
    ds = dataset
    n_genes = min(ds.X.shape[1], cfg.n_genes)
    expression = np.log(ds.X + 1.)
    scvai_genes, scvai_batches_ind, scvai_labels_ind = get_high_variance_genes(
        expression, ds.batch_indices, ds.labels, n_genes=n_genes, argmax=False)

    cfg.count_classes = np.unique(ds.batch_indices).shape[0]
    cfg.count_labels = np.unique(ds.labels).shape[0]
    cfg.input_dim = int(scvai_genes.shape[1])

    data = load_datasets(cfg, True, True,
                         (scvai_genes, scvai_batches_ind, scvai_labels_ind))
    dataloader_train = data[0]
    dataloader_val = data[1]
    dataloader_test = data[2]
    annot_train = data[3]
    annot_test = data[4]

    styletransfer_test_expr = annot_test.dataset.tensors[0].cpu().numpy()
    styletransfer_test_class = annot_test.dataset.tensors[1].cpu().numpy()
    styletransfer_test_celltype = annot_test.dataset.tensors[2].cpu().numpy()

    model = None
    disc = None

    print('Training...')
    model, disc = train(dataloader_train, dataloader_val, cfg, model, disc)

    print('Tests...')
    print('Dataset:', log_name)
    cfg.classifier_epochs = 1
    res = test(cfg,
               model,
               disc,
               annot_train,
               styletransfer_test_expr,
               styletransfer_test_class,
               styletransfer_test_celltype,
               dataset_name=log_name)

    metrics_path = Path(cfg.metrics_dir) / 'stVAE'
    metrics_path.mkdir(parents=True, exist_ok=True)
    with open(metrics_path / (log_name + '.json'), 'w') as file:
        json.dump(res, file, indent=4)

    del ds
    del model, disc
    del styletransfer_test_expr
    del styletransfer_test_class
    del styletransfer_test_celltype
    del data
    del dataloader_train, dataloader_val, dataloader_test
    del annot_train, annot_test
    del scvai_genes, scvai_batches_ind, scvai_labels_ind
    gc.collect()
    cuda.empty_cache()
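benchmark_stvae reads a handful of attributes from cfg and fills in a few more itself. A rough sketch of the fields visible in the body above (values are placeholders; load_datasets and train will read further fields not shown here):

# Illustrative cfg object for benchmark_stvae, listing only the attributes
# visible in the function body above; values are placeholders.
from types import SimpleNamespace

cfg = SimpleNamespace(
    n_genes=1000,            # upper bound on genes kept by get_high_variance_genes
    metrics_dir='metrics',   # results go to <metrics_dir>/stVAE/<log_name>.json
    classifier_epochs=1,     # overwritten to 1 right before test()
    count_classes=None,      # filled in: number of distinct batch indices
    count_labels=None,       # filled in: number of distinct cell-type labels
    input_dim=None,          # filled in: number of selected high-variance genes
)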
Example #4
def testing(test_dates, restore_path, start=0, end=160):
    final_results = []
    final_labels = []

    # To keep memory usage manageable, process all videos of one date folder at a time
    # and collect the results and labels in final_results and final_labels
    for date in test_dates:
        frames = []
        labels = []
        input_dir = os.path.join(path_input, date, "center field/")
        list_files = listdir(input_dir)
        print("start processing videos of date", date)

        # Get data (array of length nr_videos*(end-start))
        frames, labels = get_test_data(input_dir, csv_path, start=start, end=end)
        for lab in labels:
            final_labels.append(lab)

        # make data array
        leng = end - start
        frames = np.array(frames)
        labels = np.array(labels)
        examples, width, height = frames.shape
        data = np.reshape(frames, (examples, width, height, 1))
        print("Data:", data.shape, "number videos", len(data)/leng, "number labels (must be same as number of videos)", len(labels), "labels",labels.tolist())

        # restore model and predict labels for each frame
        lab, out = test(data, restore_path)

        # TWO POSSIBILITIES:
        # - TAKE FRAME WITH HIGHEST OUTPUT
        # - TAKE FIRST FRAME FOR WHICH THE OUTPUT EXCEEDS A THRESHOLD
        def highest_prob(outputs):
            return np.argmax(outputs)
        def first_over_threshold(outputs, thresh=0.8):
            over_thresh = np.where(outputs > thresh)[0]
            if len(over_thresh) == 0:
                return np.nan
            else:
                return over_thresh[0]

        for i in range(int(len(data) / leng)):
            highest = highest_prob(out[leng*i:leng*(i+1), 1])  # always a block of (end-start) frames per video
            # highest = first_over_threshold(out[leng*i:leng*(i+1), 1])  # SECOND POSSIBILITY (see above)
            print("real label:", labels[i], "frame index predicted: ", highest)
            final_results.append(highest)

        print("----------------------------")
        print("finished processing for date", date, "now results:", final_results)
    # Evaluation
    boxplots_testing(final_labels, final_results)
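The two selection strategies above can disagree: argmax returns the single most confident frame, while the threshold variant returns the earliest sufficiently confident frame (or NaN if none). A small standalone illustration on made-up per-frame probabilities:

# Made-up per-frame probabilities for one video, showing how the two strategies differ.
import numpy as np

outputs = np.array([0.1, 0.2, 0.85, 0.9, 0.4])
print(np.argmax(outputs))                # highest_prob -> 3
over = np.where(outputs > 0.8)[0]
print(over[0] if len(over) else np.nan)  # first_over_threshold -> 2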
Example #5
def testing(data, labels, save_path):
    """
    Tests the movement classification model on the first 5% of the data in the CSV file (the model was trained on the last 95%)
    """

    print("Data shape", data.shape, "Mean of data", np.mean(data))
    tic = time.time()
    labs, out = test(data, save_path)
    toc = time.time()
    print("time for nr labels", len(labs), toc - tic)
    for i in range(20):  # use len(labs) to print all predictions
        print(labs[i], np.around(out[i], 2))

    #  To compare with labels
    print(labels.shape)
    for i in range(20):  # use len(labels) to compare all predictions
        print('{:20}'.format(labels[i]), '{:20}'.format(
            labs[i]))  #, ['%.2f        ' % elem for elem in out_test[i]])
    print("Accuracy:", Tools.accuracy(np.asarray(labs), labels))
    print("Balanced accuracy:",
          Tools.balanced_accuracy(np.asarray(labs), labels))
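Tools.accuracy and Tools.balanced_accuracy are project helpers. Assuming balanced accuracy follows its usual definition (mean per-class recall, as in sklearn's balanced_accuracy_score), a minimal sketch looks like this:

# Sketch of balanced accuracy as mean per-class recall; this assumes
# Tools.balanced_accuracy follows the standard definition.
import numpy as np

def balanced_accuracy(predicted, true):
    predicted, true = np.asarray(predicted), np.asarray(true)
    recalls = [np.mean(predicted[true == c] == c) for c in np.unique(true)]
    return np.mean(recalls)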
Example #6
    def jacobian(self, function, *args):
        # NOTE: the excerpt begins mid-class; this wrapper name and signature are a
        # reconstruction, only the return statement is from the original source.
        return nd.Jacobian(function)(*args)



if __name__ == "__main__":
    # dt = 0.02

    # position_function = lambda position, velocity: position + velocity*dt
    # velocity_function = lambda velocity, acceleration: velocity + acceleration*dt
    # acceleration_function = lambda acceleration : acceleration

    # def transition_function(state_vector):

    #     position = position_function(state_vector[0:2], state_vector[2:4])
    #     velocity = velocity_function(state_vector[2:4], state_vector[4:6])
    #     acceleration = acceleration_function(state_vector[4:6])

    #     return_state = np.hstack((position, velocity, acceleration))

    #     return np.reshape(return_state, (-1, 1))

    # def measurement_function(state_vector):
    #     return np.reshape(state_vector[0:2], (-1,1))

    # initial_state = np.asarray([1,2,3,4,5,6]).T

    # print(nd.Jacobian(transition_function)(initial_state))

    from test_script import test
    test(noise=20, time_max=3, show_acceleration_plots=False, filter_name='extended')
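The commented-out block above sketches a constant-velocity transition model whose Jacobian numdifftools evaluates numerically. A minimal self-contained nd.Jacobian example:

# Minimal numdifftools example: numerical Jacobian of a simple vector-valued function.
import numdifftools as nd
import numpy as np

def f(x):
    return np.array([x[0] ** 2, x[0] * x[1]])

print(nd.Jacobian(f)([2.0, 3.0]))
# approximately [[4., 0.],
#                [3., 2.]]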
Example #7
def benchmark_trvae(dataset, log_name, cfg, **kwargs):
    ds = dataset
    n_genes = min(ds.X.shape[1], cfg.n_genes)

    scvai_genes, scvai_batches_ind, scvai_labels_ind = get_high_variance_genes(
        ds.X,
        ds.batch_indices,
        ds.labels,
        n_genes=n_genes,
        argmax=False
    )
    cfg.count_classes = int(np.max(ds.batch_indices) + 1)
    cfg.count_labels = int(np.max(ds.labels) + 1)
    cfg.input_dim = int(scvai_genes.shape[1])


    data = load_datasets(cfg, True, True,
                         (scvai_genes, scvai_batches_ind, scvai_labels_ind),
                         0.9)
    dataloader_train = data[0]
    dataloader_val = data[1]
    dataloader_test = data[2]
    annot_train = data[3]
    annot_test = data[4]
    x, batch_ind, celltype = annot_train.dataset.tensors
    batch_ind = batch_ind.argmax(dim=1)
    celltype = celltype.argmax(dim=1)

    anndata_train = make_anndata(x.cpu().numpy(),
                                 batch_ind.cpu().numpy(),
                                 'condition',
                                 celltype.cpu().numpy(),
                                 'cell_type')
    x_test, batch_ind_test, celltype_test = annot_test.dataset.tensors
    batch_ind_test = batch_ind_test.argmax(dim=1)
    celltype_test = celltype_test.argmax(dim=1)
    anndata_test = make_anndata(x_test.cpu().numpy(), batch_ind_test.cpu().numpy(),
                                'condition', celltype_test.cpu().numpy(), 'cell_type')
    sc.pp.normalize_per_cell(anndata_train)
    sc.pp.normalize_per_cell(anndata_test)
    sc.pp.log1p(anndata_train)
    sc.pp.log1p(anndata_test)

    n_conditions = anndata_train.obs["condition"].unique().shape[0]
    x_test = anndata_test.X
    batch_ind_test_tmp = anndata_test.obs['condition']
    batch_ind_test = zeros(batch_ind_test_tmp.shape[0], cfg.count_classes)
    batch_ind_test = batch_ind_test.scatter(
        1, LongTensor(batch_ind_test_tmp.astype('uint16')).view(-1, 1), 1).numpy()
    celltype_test_tmp = anndata_test.obs['cell_type']
    celltype_test = zeros(celltype_test_tmp.shape[0], cfg.count_labels)
    celltype_test = celltype_test.scatter(
        1, LongTensor(celltype_test_tmp.astype('uint16')).view(-1, 1), 1).numpy()

    model = trVAE(x.shape[1],
                  num_classes=n_conditions,
                  encoder_layer_sizes=[128, 32],
                  decoder_layer_sizes=[32, 128],
                  latent_dim=cfg.bottleneck,
                  alpha=0.0001,
                 )
    trainer = trvaep.Trainer(model, anndata_train)

    print('Training...')
    trainer.train_trvae(cfg.epochs, 512, 50)  # n_epochs, batch_size, early_patience

    print('Tests...')
    print('Dataset:', log_name)
    res = test(cfg,
               model, None,
               annot_train,
               x_test,
               batch_ind_test,
               celltype_test)
    res['n_genes'] = n_genes

    metrics_path = Path(cfg.metrics_dir) / 'trVAE'
    metrics_path.mkdir(parents=True, exist_ok=True)
    with open(metrics_path / (log_name + '.json'), 'w') as file:
        json.dump(res, file, indent=4)

    del ds
    del model
    del data
    del dataloader_train, dataloader_val, dataloader_test
    del annot_train, annot_test
    del scvai_genes, scvai_batches_ind, scvai_labels_ind
    cuda.empty_cache()
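The zeros(...).scatter(...) calls above build one-hot matrices from integer class indices (zeros and LongTensor presumably come from torch). A small standalone illustration of that pattern:

# One-hot encoding with torch zeros + scatter, as used for batch_ind_test and celltype_test above.
import torch

indices = torch.LongTensor([0, 2, 1])
one_hot = torch.zeros(indices.shape[0], 3).scatter(1, indices.view(-1, 1), 1)
print(one_hot)
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])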
Example #8
    def posteriori_calculation(self, sensor_readings, kalman_gain_matrix,
                               expected_readings):
        # Signature reconstructed from the call in state_update below; the original
        # excerpt begins with this return statement.
        return self.prioris[-1] + np.matmul(
            kalman_gain_matrix,
            (sensor_readings.reshape(sensor_readings.size, 1) -
             expected_readings))

    def posteriori_error_covariance_calculation(self, kalman_gain_matrix,
                                                predicted_error_covariance):
        return predicted_error_covariance - np.matmul(
            kalman_gain_matrix,
            np.matmul(self.measurement_matrix, predicted_error_covariance))

    def state_update(self, sensor_readings, kalman_gain_matrix,
                     expected_readings, predicted_error_covariance):

        posteriori = self.posteriori_calculation(sensor_readings,
                                                 kalman_gain_matrix,
                                                 expected_readings)
        updated_error_covariance = self.posteriori_error_covariance_calculation(
            kalman_gain_matrix, predicted_error_covariance)

        self.error_covariance = updated_error_covariance

        return posteriori


if __name__ == "__main__":

    from test_script import test

    test(noise=30, show_acceleration_plots=False, time_max=10)
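posteriori_calculation and posteriori_error_covariance_calculation implement the standard Kalman measurement update, x_post = x_prior + K(z - H x_prior) and P_post = P_pred - K H P_pred. A tiny numpy illustration for a 1D state measured directly (all numbers made up):

# Numeric illustration of the measurement update above (1D state, H = [[1]], made-up values).
import numpy as np

x_prior = np.array([[2.0]])   # predicted state
P_pred = np.array([[1.0]])    # predicted error covariance
H = np.array([[1.0]])         # measurement matrix
z = np.array([[2.5]])         # sensor reading
R = np.array([[0.5]])         # measurement noise covariance

K = P_pred @ H.T @ np.linalg.inv(H @ P_pred @ H.T + R)   # Kalman gain
x_post = x_prior + K @ (z - H @ x_prior)                 # posteriori_calculation
P_post = P_pred - K @ H @ P_pred                         # posteriori_error_covariance_calculation
print(x_post, P_post)                                    # ~[[2.333]] [[0.333]]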
Example #9
def benchmark_scgen(dataset, log_name, cfg, **kwargs):
    ds = dataset
    n_genes = min(ds.X.shape[1], cfg.n_genes)

    scvai_genes, scvai_batches_ind, scvai_labels_ind = get_high_variance_genes(
        ds.X, ds.batch_indices, ds.labels, n_genes=n_genes, argmax=False)

    cfg.count_classes = np.unique(ds.batch_indices).shape[0]
    cfg.count_labels = np.unique(ds.labels).shape[0]
    cfg.input_dim = int(scvai_genes.shape[1])

    data = load_datasets(cfg, True, True,
                         (scvai_genes, scvai_batches_ind, scvai_labels_ind))
    dataloader_train = data[0]
    dataloader_val = data[1]
    dataloader_test = data[2]
    annot_train = data[3]
    annot_test = data[4]

    # train data
    x, batch_ind, celltype = annot_train.dataset.tensors
    batch_ind = batch_ind.argmax(dim=1)
    celltype = celltype.argmax(dim=1)
    anndata_train = make_anndata(
        x.cpu().numpy(),
        batch_ind.cpu().numpy(),
        "condition",
        celltype.cpu().numpy(),
        "cell_type",
    )
    sc.pp.normalize_per_cell(anndata_train)
    sc.pp.log1p(anndata_train)
    """
    # validation data
    x_valid, batch_valid, celltype_valid = dataloader_val.dataset.tensors
    batch_valid = batch_valid.argmax(1)
    celltype_valid = celltype_valid.argmax(1)
    anndata_valid = make_anndata(x_valid.cpu().numpy(),
                                 batch_valid.cpu().numpy(), 'condition',
                                 celltype_valid.cpu().numpy(), 'cell_type')
    sc.pp.normalize_per_cell(anndata_valid)
    sc.pp.log1p(anndata_valid)
    """
    # test data
    x_test, batch_ind_test, celltype_test = annot_test.dataset.tensors
    batch_ind_test = batch_ind_test.argmax(dim=1)
    celltype_test = celltype_test.argmax(dim=1)
    anndata_test = make_anndata(
        x_test.cpu().numpy(),
        batch_ind_test.cpu().numpy(),
        "condition",
        celltype_test.cpu().numpy(),
        "cell_type",
    )
    sc.pp.normalize_per_cell(anndata_test)
    sc.pp.log1p(anndata_test)

    n_conditions = anndata_train.obs["condition"].unique().shape[0]
    x_test = anndata_test.X
    batch_ind_test_tmp = anndata_test.obs["condition"]
    batch_ind_test = zeros(batch_ind_test_tmp.shape[0], cfg.count_classes)
    batch_ind_test = batch_ind_test.scatter(
        1,
        LongTensor(batch_ind_test_tmp.astype("uint16")).view(-1, 1),
        1).numpy()
    celltype_test_tmp = anndata_test.obs["cell_type"]
    celltype_test = zeros(celltype_test_tmp.shape[0], cfg.count_labels)
    celltype_test = celltype_test.scatter(
        1,
        LongTensor(celltype_test_tmp.astype("uint16")).view(-1, 1), 1).numpy()

    # model = scgen.VAEArith(x_dimension=cfg.input_dim)
    model = SCGene(cfg.input_dim, latent_dim=cfg.bottleneck, device="cuda")

    print("Training...")
    model.train(train_data=anndata_train, n_epochs=cfg.epochs)

    print("Tests...")

    print("Dataset:", log_name)
    res = test(
        cfg, model, None, annot_train, x_test, batch_ind_test, celltype_test
        # styletransfer_test_expr,
        # styletransfer_test_class,
        # styletransfer_test_celltype
    )

    metrics_path = Path(cfg.metrics_dir) / 'scGen'
    metrics_path.mkdir(parents=True, exist_ok=True)
    with open(metrics_path / (log_name + ".json"), "w") as file:
        json.dump(res, file, indent=4)

    del ds  # , res
    del model
    # del styletransfer_test_expr
    # del styletransfer_test_class
    # del styletransfer_test_celltype
    del data
    del dataloader_train, dataloader_val, dataloader_test
    del annot_train, annot_test
    del scvai_genes, scvai_batches_ind, scvai_labels_ind
    cuda.empty_cache()