Example #1
def make_graph_etaphi(arrays, valid_sim_indices, ievt, mask, layered_norm, algo, preprocessing_args):
   
    x = arrays[b'rechit_x'][ievt][mask]
    y = arrays[b'rechit_y'][ievt][mask]
    z = arrays[b'rechit_z'][ievt][mask]
    layer = arrays[b'rechit_layer'][ievt][mask]
    time = arrays[b'rechit_time'][ievt][mask]
    energy = arrays[b'rechit_energy'][ievt][mask]    
    feats = np.stack((x,y,layer,time,energy)).T

    eta = arrays[b'rechit_eta'][ievt][mask]
    phi = arrays[b'rechit_phi'][ievt][mask]
    layer_normed = layer / layered_norm
    
    all_sim_hits = np.unique(valid_sim_indices[ievt].flatten())
    sim_hits_mask = np.zeros(arrays[b'rechit_z'][ievt].size, dtype=bool)  # np.bool was removed in NumPy 1.24
    sim_hits_mask[all_sim_hits] = True
    simmatched = np.where(sim_hits_mask[mask])[0]
    
    # Build edges in the (eta, phi, normalized layer) space computed above;
    # these are the coordinates this "etaphi" variant is named for (they
    # would otherwise go unused).
    coords = np.stack((eta, phi, layer_normed)).T
    if algo == 'kdtree':
        Ri, Ro, y_label = algo_kdtree(coords, layer, simmatched, **preprocessing_args)
    elif algo == 'knn':
        Ri, Ro, y_label = algo_knn(coords, layer, simmatched, **preprocessing_args)
    else:
        raise ValueError('Edge construction algo %s unknown' % algo)
    
    return Graph(feats, Ri, Ro, y_label, simmatched)
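
The sim_hits_mask[mask] step above is the subtle part: indices valid in the full event are turned into a boolean mask, and applying the event-level hit mask to it yields indices valid in the masked hit collection. A minimal self-contained sketch with toy data (not from the source):

import numpy as np

all_hits = np.arange(10)                       # 10 rec-hits in the event
mask = all_hits % 2 == 0                       # keep even-indexed hits
all_sim_hits = np.array([0, 3, 4, 8])          # sim-matched, event indexing

sim_hits_mask = np.zeros(all_hits.size, dtype=bool)
sim_hits_mask[all_sim_hits] = True
simmatched = np.where(sim_hits_mask[mask])[0]  # -> [0 2 4], masked indexing
print(simmatched)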
Example #2
def main():
    import argparse

    parser = argparse.ArgumentParser(description="Evaluate model on dataset")
    parser.add_argument("dataset", choices=["casia-b"])
    parser.add_argument("weights_path")
    parser.add_argument("data_path")
    parser.add_argument("--network_name", default="resgcn-n39-r8")
    parser.add_argument("--sequence_length", type=int, default=60)
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument("--embedding_layer_size", type=int, default=128)
    parser.add_argument("--use_multi_branch", action="store_true")
    parser.add_argument("--shuffle", action="store_true")

    opt = parser.parse_args()

    # Config for dataset
    graph = Graph("coco")
    dataset_class = dataset_factory(opt.dataset)
    evaluation_fn = None
    if opt.dataset == "casia-b":
        evaluation_fn = _evaluate_casia_b

    # Load data
    dataset = dataset_class(
        opt.data_path,
        train=False,
        sequence_length=opt.sequence_length,
        transform=transforms.Compose([
            SelectSequenceCenter(opt.sequence_length),
            ShuffleSequence(opt.shuffle),
            MultiInput(graph.connect_joint, opt.use_multi_branch),
            ToTensor()
        ]),
    )
    data_loader = DataLoader(dataset, batch_size=opt.batch_size)
    print(f"Data loaded: {len(data_loader)} batches")

    # Init model
    model, model_args = get_model_resgcn(graph, opt)

    if torch.cuda.is_available():
        model.cuda()

    # Load weights (map to CPU when no GPU is present)
    checkpoint = torch.load(opt.weights_path,
                            map_location=None if torch.cuda.is_available() else "cpu")
    model.load_state_dict(checkpoint["model"])

    result, accuracy_avg, sub_accuracies, dataframe = evaluate(data_loader,
                                                               model,
                                                               evaluation_fn,
                                                               use_flip=True)

    print("\n")
    print((dataframe * 100).round(2))
    print(f"AVG: {accuracy_avg*100} %")
    print("=================================")
    print((dataframe * 100).round(1).to_latex())
    print((dataframe * 100).round(1).to_markdown())
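
For completeness, such a script would normally end with the standard entry-point guard; a minimal sketch (the example arguments in the comment are hypothetical):

if __name__ == "__main__":
    # e.g. python evaluate.py casia-b weights.pth data/ --batch_size 128
    main()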
Example #3
def construct_graph(hits, layer_pairs, phi_slope_max, z0_max, feature_names,
                    feature_scale):
    """Construct one graph (e.g. from one event)"""
    # Loop over layer pairs and construct segments
    layer_groups = hits.groupby('layer')
    segments = []
    for (layer1, layer2) in layer_pairs:
        # Find and join all hit pairs
        try:
            hits1 = layer_groups.get_group(layer1)
            hits2 = layer_groups.get_group(layer2)
        # If an event has no hits on a layer, we get a KeyError.
        # In that case we just skip to the next layer pair
        except KeyError as e:
            logging.info('skipping empty layer: %s' % e)
            continue
        # Construct the segments
        segments.append(select_segments(hits1, hits2, phi_slope_max, z0_max))
    # Combine segments from all layer pairs
    segments = pd.concat(segments)

    # Prepare the graph matrices
    n_hits = hits.shape[0]
    n_edges = segments.shape[0]
    X = (hits[feature_names].values / feature_scale).astype(np.float32)
    Ri = np.zeros((n_hits, n_edges), dtype=np.uint8)
    Ro = np.zeros((n_hits, n_edges), dtype=np.uint8)
    # In this variant the labels are the per-hit 'noise' flags rather than
    # per-edge particle-id matches, so y has length n_hits, not n_edges.
    y = hits['noise'].values.astype(np.float32)

    # We have the segments' hits given by dataframe label,
    # so we need to translate into positional indices.
    # Use a series to map hit label-index onto positional-index.
    hit_idx = pd.Series(np.arange(n_hits), index=hits.index)
    seg_start = hit_idx.loc[segments.index_1].values
    seg_end = hit_idx.loc[segments.index_2].values

    # Now we can fill the association matrices.
    # Note that Ri maps hits onto their incoming edges,
    # which are actually segment endings.
    Ri[seg_end, np.arange(n_edges)] = 1
    Ro[seg_start, np.arange(n_edges)] = 1
    # The particle-id edge labels are not used in this variant; y already
    # holds the per-hit noise flags set above.
    return Graph(X, Ri, Ro, y)
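
The pd.Series trick used above to translate DataFrame labels into positional indices, together with the Ri/Ro incidence-matrix convention, can be seen in isolation with toy data (the names here are illustrative, not from the source):

import numpy as np
import pandas as pd

# Three hits with non-positional labels, two directed segments between them
hits = pd.DataFrame({'x': [1., 2., 3.]}, index=[10, 20, 30])
segments = pd.DataFrame({'index_1': [10, 20], 'index_2': [20, 30]})

n_hits, n_edges = hits.shape[0], segments.shape[0]
hit_idx = pd.Series(np.arange(n_hits), index=hits.index)  # label -> position
seg_start = hit_idx.loc[segments.index_1].values          # [0, 1]
seg_end = hit_idx.loc[segments.index_2].values            # [1, 2]

Ri = np.zeros((n_hits, n_edges), dtype=np.uint8)  # hits x incoming edges
Ro = np.zeros((n_hits, n_edges), dtype=np.uint8)  # hits x outgoing edges
Ri[seg_end, np.arange(n_edges)] = 1
Ro[seg_start, np.arange(n_edges)] = 1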
Example #4
def make_graph_noedge(arrays, valid_sim_indices, ievt, mask, layered_norm=None, algo=None, preprocessing_args=None):
    x = arrays[b'rechit_x'][ievt][mask]
    y = arrays[b'rechit_y'][ievt][mask]
    z = arrays[b'rechit_z'][ievt][mask]
    layer = arrays[b'rechit_layer'][ievt][mask]
    time = arrays[b'rechit_time'][ievt][mask]
    energy = arrays[b'rechit_energy'][ievt][mask]    
    feats = np.stack((x,y,z,layer,time,energy)).T
    
    all_sim_hits = np.unique(valid_sim_indices[ievt].flatten())
    sim_hits_mask = np.zeros(arrays[b'rechit_z'][ievt].size)
    sim_hits_mask[all_sim_hits] = 1
    
    y_label = sim_hits_mask[mask]
    simmatched = np.where(sim_hits_mask[mask])[0]

    return Graph(feats, [], [], y_label, simmatched)
Example #5
def construct_output_graph(hits, edges, feature_names):
    # Prepare the graph matrices
    n_hits = hits.shape[0]
    n_edges = edges.shape[0]
    X = hits[feature_names].values.astype(np.float32)
    Ri = np.zeros((n_hits, n_edges), dtype=np.float32)
    Ro = np.zeros((n_hits, n_edges), dtype=np.float32)
    y = np.zeros(n_edges, dtype=np.float32)
    # We have the segments' hits given by dataframe label,
    # so we need to translate into positional indices.
    # Use a series to map hit label-index onto positional-index.
    hit_idx = pd.Series(np.arange(n_hits), index=hits.index)
    seg_start = hit_idx.loc[edges.edge_index_p].values
    seg_end = hit_idx.loc[edges.edge_index_c].values
    # Now we can fill the association matrices.
    # Note that Ri maps hits onto their incoming edges,
    # which are actually segment endings.
    Ri[seg_end, np.arange(n_edges)] = 1
    Ro[seg_start, np.arange(n_edges)] = 1
    # Fill the segment labels
    pid1 = hits.track.loc[edges.edge_index_p].values
    pid2 = hits.track.loc[edges.edge_index_c].values
    y[:] = ((pid1 == pid2) & (pid1 != -1))
    return Graph(X, Ri, Ro, y)
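
Graph is not defined in these excerpts; for this example a namedtuple with fields (X, Ri, Ro, y) is a plausible stand-in (an assumption, as is the toy data below). Note it is a different object from the skeleton Graph("coco") used in the ResGCN examples.

import numpy as np
import pandas as pd
from collections import namedtuple

# Assumed container; the real Graph type lives elsewhere in the repo
Graph = namedtuple('Graph', ['X', 'Ri', 'Ro', 'y'])

hits = pd.DataFrame({'x': [0., 1., 2.], 'track': [7, 7, -1]},
                    index=[100, 101, 102])
edges = pd.DataFrame({'edge_index_p': [100, 101],
                      'edge_index_c': [101, 102]})

g = construct_output_graph(hits, edges, feature_names=['x'])
print(g.y)  # [1. 0.]: edge 0 joins track 7 to itself, edge 1 ends on noise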
Example #6
def main(opt):
    opt = setup_environment(opt)
    graph = Graph("coco")

    # Dataset
    transform = transforms.Compose([
        MirrorPoses(opt.mirror_probability),
        FlipSequence(opt.flip_probability),
        RandomSelectSequence(opt.sequence_length),
        ShuffleSequence(opt.shuffle),
        PointNoise(std=opt.point_noise_std),
        JointNoise(std=opt.joint_noise_std),
        MultiInput(graph.connect_joint, opt.use_multi_branch),
        ToTensor()
    ])

    dataset_class = dataset_factory(opt.dataset)
    dataset = dataset_class(
        opt.train_data_path,
        train=True,
        sequence_length=opt.sequence_length,
        transform=TwoNoiseTransform(transform),
    )

    dataset_valid = dataset_class(
        opt.valid_data_path,
        sequence_length=opt.sequence_length,
        transform=transforms.Compose([
            SelectSequenceCenter(opt.sequence_length),
            MultiInput(graph.connect_joint, opt.use_multi_branch),
            ToTensor()
        ]),
    )

    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        num_workers=opt.num_workers,
        pin_memory=True,
        shuffle=True,
    )

    val_loader = torch.utils.data.DataLoader(
        dataset_valid,
        batch_size=opt.batch_size_validation,
        num_workers=opt.num_workers,
        pin_memory=True,
    )

    # Model & criterion
    model, model_args = get_model_resgcn(graph, opt)
    criterion = SupConLoss(temperature=opt.temp)

    print("# parameters: ", count_parameters(model))

    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model, opt.gpus)

    if opt.cuda:
        model.cuda()
        criterion.cuda()

    # Trainer
    optimizer, scheduler, scaler = get_trainer(model, opt, len(train_loader))

    # Load checkpoint or weights
    load_checkpoint(model, optimizer, scheduler, scaler, opt)

    # Tensorboard
    writer = SummaryWriter(log_dir=opt.tb_path)

    sample_input = torch.zeros(opt.batch_size, model_args["num_input"],
                               model_args["num_channel"], opt.sequence_length,
                               graph.num_node)
    if opt.cuda:
        # Keep the dummy input on the same device as the model
        sample_input = sample_input.cuda()
    writer.add_graph(model, input_to_model=sample_input)

    best_acc = 0
    loss = 0
    for epoch in range(opt.start_epoch, opt.epochs + 1):
        # train for one epoch
        time1 = time.time()
        loss = train(train_loader, model, criterion, optimizer, scheduler,
                     scaler, epoch, opt)

        time2 = time.time()
        print(f"epoch {epoch}, total time {time2 - time1:.2f}")

        # tensorboard logger
        writer.add_scalar("loss/train", loss, epoch)
        writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"],
                          epoch)

        # evaluation
        result, accuracy_avg, sub_accuracies, dataframe = evaluate(
            val_loader, model, opt.evaluation_fn, use_flip=True)
        writer.add_text("accuracy/validation", dataframe.to_markdown(), epoch)
        writer.add_scalar("accuracy/validation", accuracy_avg, epoch)
        for key, sub_accuracy in sub_accuracies.items():
            writer.add_scalar(f"accuracy/validation/{key}", sub_accuracy,
                              epoch)

        print(f"epoch {epoch}, avg accuracy {accuracy_avg:.4f}")
        is_best = accuracy_avg > best_acc
        if is_best:
            best_acc = accuracy_avg

        if opt.tune:
            tune.report(accuracy=accuracy_avg)

        if epoch % opt.save_interval == 0 or (
                is_best and epoch > opt.save_best_start * opt.epochs):
            save_file = os.path.join(
                opt.save_folder,
                f"ckpt_epoch_{'best' if is_best else epoch}.pth")
            # Record the epoch actually reached rather than opt.epochs
            save_model(model, optimizer, scheduler, scaler, opt, epoch,
                       save_file)

    # save the last model
    save_file = os.path.join(opt.save_folder, "last.pth")
    save_model(model, optimizer, scheduler, scaler, opt, opt.epochs, save_file)

    log_hyperparameter(writer, opt, best_acc, loss)

    print(f"best accuracy: {best_acc*100:.2f}")
Example #7
def construct_graph(hits, layer_pairs, phi_slope_max, z0_max, feature_names,
                    feature_scale):
    """Construct one graph (e.g. from one event)"""

    # Loop over layer pairs and construct segments
    layer_groups = hits.groupby('layer')
    segments = []
    for (layer1, layer2) in layer_pairs:
        # Find and join all hit pairs
        try:
            hits1 = layer_groups.get_group(layer1)
            hits2 = layer_groups.get_group(layer2)
        # If an event has no hits on a layer, we get a KeyError.
        # In that case we just skip to the next layer pair
        except KeyError as e:
            logging.info('skipping empty layer: %s' % e)
            continue

        # Start with all possible pairs of hits
        keys = ['evtid', 'particle_id', 'r', 'phi', 'z']
        hit_pairs = hits1[keys].reset_index().merge(hits2[keys].reset_index(),
                                                    on='evtid',
                                                    suffixes=('_1', '_2'))
        hit_pairs = hit_pairs[hit_pairs.index_1 != hit_pairs.index_2]
        #print("Adding hit_pairs:", hit_pairs[['index_1', 'index_2']].head(200))

        # Compute the line through each pair of points
        dphi = calc_dphi(hit_pairs.phi_1, hit_pairs.phi_2)
        dz = hit_pairs.z_2 - hit_pairs.z_1
        dr = hit_pairs.r_2 - hit_pairs.r_1
        dR = np.sqrt(dr**2 + dz**2)
        # dr can be zero for same-layer pairs; the resulting inf/NaN values
        # are rejected by the phi_slope/z0 cuts below.
        phi_slope = dphi / dr
        z0 = hit_pairs.z_1 - hit_pairs.r_1 * dz / dr

        # Filter segments according to criteria
        good_seg_mask = (phi_slope.abs() < phi_slope_max) & (z0.abs() < z0_max)
        # Same-layer pairs get an additional cap on segment length
        if layer1 == layer2:
            good_seg_mask = good_seg_mask & (dR < 24)
        hit_pairs = hit_pairs[['index_1', 'index_2']][good_seg_mask]
        # Flag segments that cross between (adjacent) layers
        hit_pairs['adjacent'] = 0 if layer1 == layer2 else 1

        # Construct the segments
        segments.append(hit_pairs)

    # Combine segments from all layer pairs
    segments = pd.concat(segments)

    # Prepare the graph matrices
    n_hits = hits.shape[0]
    n_edges = segments.shape[0]
    X = (hits[feature_names].values / feature_scale).astype(np.float32)
    Ri = np.zeros((n_hits, n_edges), dtype=np.uint8)
    Ro = np.zeros((n_hits, n_edges), dtype=np.uint8)
    y = np.zeros(n_edges, dtype=np.float32)

    # We have the segments' hits given by dataframe label,
    # so we need to translate into positional indices.
    # Use a series to map hit label-index onto positional-index.
    hit_idx = pd.Series(np.arange(n_hits), index=hits.index)
    seg_start = hit_idx.loc[segments.index_1].values
    seg_end = hit_idx.loc[segments.index_2].values

    # Now we can fill the association matrices.
    # Note that Ri maps hits onto their incoming edges,
    # which are actually segment endings.
    Ri[seg_end, np.arange(n_edges)] = 1
    Ro[seg_start, np.arange(n_edges)] = 1
    # Fill the segment labels
    pid1 = hits.particle_id.loc[segments.index_1].values
    pid2 = hits.particle_id.loc[segments.index_2].values
    y[:] = (pid1 == pid2)
    # Return a tuple of the results
    return Graph(X, Ri, Ro, y)
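
calc_dphi is used but not defined in these excerpts; it presumably wraps the azimuthal difference into [-pi, pi], since a raw phi_2 - phi_1 would mis-measure pairs that straddle the +/-pi boundary. A minimal sketch consistent with that reading:

import numpy as np

def calc_dphi(phi1, phi2):
    # phi2 - phi1, wrapped into [-pi, pi]
    dphi = phi2 - phi1
    dphi[dphi > np.pi] -= 2 * np.pi
    dphi[dphi < -np.pi] += 2 * np.pi
    return dphi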