Example #1
import time

import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm


def getPlots(kernel, scheme):

    # Training-set sizes to sweep
    Ns = np.array([100, 300, 500, 700])

    # Accuracy and runtime for each training-set size
    A = np.zeros(len(Ns))
    L = np.zeros(len(Ns))

    for i, n in enumerate(tqdm(Ns, desc=str(kernel)+str(scheme)+' (reduc)', leave=False)):
        tic = time.perf_counter()
        A[i] = train2(kernel=kernel, scheme=scheme, n=n)
        toc = time.perf_counter()
        L[i] = toc - tic
    
    plt.plot(Ns,A)
    plt.xlabel('N')
    plt.ylabel('Accuracy')
    plt.title('Kernel: '+str(kernel)+', Scheme: '+str(scheme))

    path='digits/'+str(kernel)+'/'+str(kernel)+str(scheme)+'Nacc'
    plt.savefig(path+'.png')
    plt.clf()

    plt.plot(Ns,L)
    plt.xlabel('N')
    plt.ylabel('Runtime')
    plt.title('Kernel: '+str(kernel)+', Scheme: '+str(scheme))

    path='digits/'+str(kernel)+'/'+str(kernel)+str(scheme)+'Nrun'
    plt.savefig(path+'.png')
    plt.clf()
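The train2 helper these sweeps call is not part of the listing. For context, a minimal sketch of what such a function could look like, assuming it trains an SVM on the scikit-learn digits set and returns test accuracy (the scheme-to-decision_function_shape mapping is an assumption, not the project's confirmed behavior):

from sklearn import datasets, svm
from sklearn.model_selection import train_test_split

def train2(C=1.0, gamma='scale', kernel='rbf', scheme='ovr', n=None):
    # Hypothetical stand-in for the project's train2; the signature matches the calls above.
    digits = datasets.load_digits()
    X, y = digits.data, digits.target
    if n is not None:
        X, y = X[:n], y[:n]  # reduced sample count, as in the N sweep
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    clf = svm.SVC(C=C, gamma=gamma, kernel=kernel, decision_function_shape=scheme)
    clf.fit(X_tr, y_tr)
    return clf.score(X_te, y_te)  # accuracy, the value getPlots records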
Example #2
    print(device)

    train_dataset = customized_dataset(df_train, mode='train')
    eval_dataset1 = customized_dataset(df_eval1, mode='eval')
    eval_dataset2 = customized_dataset(df_eval2, mode='eval')
    test_dataset = customized_dataset(df_test, mode='test')

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)
    eval_loader1 = DataLoader(eval_dataset1, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)
    eval_loader2 = DataLoader(eval_dataset2, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=False)

    # inverse log-frequency class weights for the ArcFace loss, rescaled so they sum to num_classes
    val_counts = df_train.target.value_counts().sort_index().values
    class_weights = 1/np.log1p(val_counts)
    class_weights = (class_weights / class_weights.sum()) * num_classes
    class_weights = torch.tensor(class_weights, dtype=torch.float32)
    # arcface
    metric_crit = ArcFaceLoss(arcface_s, arcface_m, crit, weight=class_weights, class_weights_norm=class_weights_norm)
    facenet = FaceNet2(num_classes=num_classes, model_name=model_name, pool=pool, embedding_size=embedding_size, dropout=dropout, device=device, pretrain=pretrain)
    optimizer = get_Optimizer2(facenet, metric_crit, optimizer_type, lr, weight_decay) # optimizer
    scheduler = get_Scheduler(optimizer, lr, scheduler_name) # scheduler
    # optionally resume from a previously trained checkpoint (disabled here)
    if False:
        facenet, optimizer, scheduler = load(name)
        facenet.to(device)
    # train
    train2(facenet.to(device),train_loader,eval_loader1,eval_loader2,metric_crit,optimizer,scheduler,num_epochs,eval_every,num_classes,device,name)
    dist_threshold = evalulate(facenet, eval_loader1, eval_loader2, device, loss_fn)
    print('Distance threshold:',dist_threshold)
    test(facenet,test_loader,dist_threshold,device, loss_fn)
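The inverse log-frequency weighting above is easy to sanity-check in isolation; a small standalone example with made-up class counts:

import numpy as np
import torch

val_counts = np.array([1000, 300, 50, 10])  # hypothetical per-class sample counts
num_classes = 4

class_weights = 1 / np.log1p(val_counts)                             # rarer classes get larger weights
class_weights = (class_weights / class_weights.sum()) * num_classes  # rescale so the weights sum to num_classes
class_weights = torch.tensor(class_weights, dtype=torch.float32)
print(class_weights)  # increases monotonically as the class count shrinks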
Example #3
File: main.py Project: dozingLee/DRSL
import train  # project-local training utilities

is_shuffle = {'train': True, 'test': False}

dataloaders = {
    x: DataLoader(dataset[x],
                  batch_size=batch_size[x],
                  shuffle=is_shuffle[x],
                  num_workers=1)
    for x in ['train', 'test']
}

dataset_sizes = {x: len(dataset[x]) for x in ['train', 'test']}

model = Model(input_dim_I=4096,
              input_dim_T=300,
              hidden_dim_I=1024,
              hidden_dim_T=1024,
              hidden_dim_R=1024,
              output_dim_I=300,
              output_dim_T=300,
              output_dim_R=1)
model.to(device)

model = train.train2(model,
                     dataloaders,
                     device,
                     dataset_sizes,
                     num_epochs=20,
                     retreival=True)
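The train/test loader dictionaries above follow a compact comprehension pattern; a self-contained illustration with dummy tensors (the feature widths mirror input_dim_I=4096 and input_dim_T=300, but the sample counts are made up):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = {
    'train': TensorDataset(torch.randn(64, 4096), torch.randn(64, 300)),
    'test':  TensorDataset(torch.randn(16, 4096), torch.randn(16, 300)),
}
batch_size = {'train': 8, 'test': 4}
is_shuffle = {'train': True, 'test': False}

dataloaders = {
    x: DataLoader(dataset[x],
                  batch_size=batch_size[x],
                  shuffle=is_shuffle[x],
                  num_workers=1)
    for x in ['train', 'test']
}
dataset_sizes = {x: len(dataset[x]) for x in ['train', 'test']}
print(dataset_sizes)  # {'train': 64, 'test': 16}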
Example #4
def main(args):
    # Create the log and checkpoint directories if they do not already exist
    os.makedirs(args.log_dir, exist_ok=True)
    os.makedirs(args.check_dir, exist_ok=True)

    # Load the data
    X_all, X2_all, Y_all, Y, blockSize = read_data(args.timeFeats, args.data_root_dir, args.file1, args.file2)
    if os.path.isfile(os.path.join(args.save_dir, 'dataSplits_ratio2.npz')):
        with np.load(os.path.join(args.save_dir, 'dataSplits_ratio2.npz'), allow_pickle=True) as data:
            train_ind = data['train_ind']
            val_ind = data['val_ind']
    else:
        train_ind = []
        val_ind = []
        # 10 stratified shuffle splits, each holding out 10% for validation
        ss = StratifiedShuffleSplit(n_splits=10, test_size=0.1)
        for train_indx, val_indx in ss.split(np.zeros((Y.shape[0], 1)), Y):
            train_ind.append(train_indx)
            val_ind.append(val_indx)
        np.savez_compressed(os.path.join(args.save_dir, 'dataSplits_ratio2.npz'),
                            train_ind=train_ind, val_ind=val_ind)
    # Loop over the 10 cross-validation folds
    for i in range(10):
        train_indx = train_ind[i]
        val_indx = val_ind[i]
        # Splitting into training & validation
        X_train, y_train = reshapeTimeFeats(X_all, Y_all, train_indx, blockSize)
        X3_train, X_mean, X_std = earlyFusion(X_train)
        y_train = to_categorical(y_train, 2)
        X_val, y_val = reshapeTimeFeats(X_all, Y_all, val_indx, blockSize)
        X3_val, _, _ = earlyFusion(X_val, X_mean, X_std, training=False)
        y_val = to_categorical(y_val, 2)

        # Create the model
        model = create_model(X3_train)
        model.summary()  # summary() prints directly; wrapping it in print() would also print 'None'

        # Train the model
        model = train2(args, X3_train, X3_val, y_train, y_val, model, i)
        # Save the model
        model.save(os.path.join(args.save_dir, args.model_name + 'fold_' + str(i) + '.h5'))
        # Initializing output variables
        output = []

        # Evaluating the model
        scores = model.evaluate([X3_val], y_val, verbose=2)
        print('Model PSDTime')
        print("Accuracy: %.2f%%" % (scores[1] * 100))
        print(scores)
        # Predict each validation block one sample at a time
        for j in range(X3_val.shape[0]):
            tmp1 = X3_val[j, ...]
            tmp1 = np.reshape(tmp1, (1, X3_val.shape[1], X3_val.shape[2]))
            output.append(model.predict(tmp1))
        output = np.array(output)
        output = np.reshape(output, (output.shape[0], output.shape[2]))
        np.savez_compressed(
            os.path.join(args.save_dir, args.model_name + 'fold_' + str(i) + 'output' + '.npz'), block_Scores=scores,
            block_op=output, block_gt=y_val)
        output_reshaped = reshapeOutputs(output)
        output_reshaped2 = probBlockApproach(output, blockSize)
        pred = blockToChannel(output_reshaped, blockSize)
        # Compute different weighting i.e. not majority voting but weighted classification
        pred2 = predictions(output_reshaped2)
        pred3 = predictions2(output_reshaped2)
        [Se, Sp, Pr, acc] = getstats(pred, Y[val_indx])
        [Se2, Sp2, Pr2, acc2] = getstats(pred2, Y[val_indx])
        [Se3, Sp3, Pr3, acc3] = getstats(pred3, Y[val_indx])
        np.savez_compressed(
            os.path.join(args.save_dir, args.model_name + 'fold_' + str(i) + 'Finaloutput' + '.npz'), block_Scores=scores,
            block_op=output_reshaped, block_gt=y_val, pred=pred, gt=Y[val_indx], Se=Se, Sp=Sp, Pr=Pr, acc=acc, pred2=pred2,
            Se2=Se2, Sp2=Sp2, Pr2=Pr2, acc2=acc2, pred3=pred3, Se3=Se3, Sp3=Sp3, Pr3=Pr3, acc3=acc3)
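getstats is project code and not shown in the listing. Assuming it computes sensitivity, specificity, precision, and accuracy from binary predictions against ground truth, a minimal sketch would be:

import numpy as np

def getstats(pred, gt):
    # Hypothetical reconstruction; the project's version may differ in detail.
    pred = np.asarray(pred).ravel()
    gt = np.asarray(gt).ravel()
    tp = np.sum((pred == 1) & (gt == 1))
    tn = np.sum((pred == 0) & (gt == 0))
    fp = np.sum((pred == 1) & (gt == 0))
    fn = np.sum((pred == 0) & (gt == 1))
    Se = tp / (tp + fn)        # sensitivity (recall)
    Sp = tn / (tn + fp)        # specificity
    Pr = tp / (tp + fp)        # precision
    acc = (tp + tn) / len(gt)  # overall accuracy
    return [Se, Sp, Pr, acc]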
Example #5
def getPlots(kernel, scheme):

    # Grids of C and gamma values to sweep (0.4 to 2.0 in steps of 0.4)
    Cs = np.arange(.4, 2.1, .4)
    Gs = np.arange(.4, 2.1, .4)

    # Accuracy and runtime for each grid value
    A = np.zeros(len(Cs))
    L = np.zeros(len(Cs))

    # Record accuracy and runtime across the C values
    for i, c in enumerate(tqdm(Cs, desc=str(kernel)+str(scheme)+' (C)', leave=False)):
        tic = time.perf_counter()
        A[i] = train2(C=c, kernel=kernel, scheme=scheme)
        toc = time.perf_counter()
        L[i] = toc - tic
    
    plt.plot(Cs,A)
    plt.xlabel('C')
    plt.ylabel('Accuracy')
    plt.title('Kernel: '+str(kernel)+', Scheme: '+str(scheme))

    path='digits/'+str(kernel)+'/'+str(kernel)+str(scheme)+'Cacc'
    plt.savefig(path+'.png')
    plt.clf()

    plt.plot(Cs,L)
    plt.xlabel('C')
    plt.ylabel('Runtime')
    plt.title('Kernel: '+str(kernel)+', Scheme: '+str(scheme))

    path='digits/'+str(kernel)+'/'+str(kernel)+str(scheme)+'Crun'
    plt.savefig(path+'.png')
    plt.clf()

    # Repeat the sweep for gamma
    for i, g in enumerate(tqdm(Gs, desc=str(kernel)+str(scheme)+' (gamma)', leave=False)):
        tic = time.perf_counter()
        A[i] = train2(gamma=g, kernel=kernel, scheme=scheme)
        toc = time.perf_counter()
        L[i] = toc - tic

    plt.plot(Gs,A)
    plt.xlabel('G')
    plt.ylabel('Accuracy')
    plt.title('Kernel: '+str(kernel)+', Scheme: '+str(scheme))

    path='digits/'+str(kernel)+'/'+str(kernel)+str(scheme)+'Gacc'
    plt.savefig(path+'.png')
    plt.clf()

    plt.plot(Gs,L)
    plt.xlabel('G')
    plt.ylabel('Runtime')
    plt.title('Kernel: '+str(kernel)+', Scheme: '+str(scheme))

    path='digits/'+str(kernel)+'/'+str(kernel)+str(scheme)+'Grun'
    plt.savefig(path+'.png')
    plt.clf()
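A typical driver for these sweeps iterates over kernel/scheme combinations; the values below are illustrative, and the digits/<kernel>/ output directories must exist before the figures are saved:

for kernel in ['linear', 'rbf', 'poly']:  # hypothetical kernel choices
    for scheme in ['ovr', 'ovo']:         # hypothetical multiclass schemes
        getPlots(kernel, scheme)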