Example #1
import getopt
import sys


def main():
    # Parse command-line options; the short-option string must declare 'l' for --labels.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:l:r:c:',
                                   ['images=', 'labels=', 'resize=', 'crop='])
    except getopt.GetoptError as err:
        print(err)
        sys.exit()

    ipath, lpath = '', ''
    resize, crop = [0, 0], [0, 0]

    for o, a in opts:
        # getopt always yields option values as strings, so numeric options are converted.
        if o in ('-i', '--images'):
            ipath = a
        elif o in ('-l', '--labels'):
            lpath = a
        elif o in ('-r', '--resize'):
            resize = [int(a), int(a)]
        elif o in ('-c', '--crop'):
            crop = [int(a), int(a)]
        else:
            assert False, 'unhandled option'

    res = Resnet()
    res.input(ipath=ipath, lpath=lpath, resize=resize, crop=crop)
    res.compile_model()
    res.callback()
    res.train()
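
The example does not show how main() is invoked; a minimal, hypothetical entry point and command line (paths and sizes are placeholders) might look like this:

# Hypothetical usage; the actual project may wire this up differently.
#   python main.py --images ./data/images --labels ./data/labels --resize 224 --crop 200
if __name__ == '__main__':
    main()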
Example #2
def train():
    video_dir = os.path.join(parentUrl, 'data', 'train', 'lab')
    box_to_vect_dataset = BoxToVectDataSet(
        train_set_file='data/train.npy',
        val_set_file='data/val.npy',
        video_files=[
            os.path.join(video_dir, '4p-c0.avi'),
            os.path.join(video_dir, '4p-c1.avi'),
            os.path.join(video_dir, '4p-c2.avi'),
            os.path.join(video_dir, '4p-c3.avi'),
        ],
        test_set_file=None,
        image_width=Config['image_width'],
        image_height=Config['image_height'])
    box_to_vect = Resnet(
        image_height=Config['image_height'],
        image_width=Config['image_width'],
        vector_dim=128,
        alpha=Config['alpha'],
        feature_map_layer='block_layer3',
        resnet_size=18,
        data_format='channels_first',
        mode='train',
        init_learning_rate=0.001,
        optimizer_name='adam',
        batch_size=Config['batch_size'],
        max_step=Config['max_step'],
        model_path='model/',
        logdir='log/',
    )

    with tf.Session() as sess:
        box_to_vect.train(box_to_vect_dataset, sess)
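
The snippet reads several fields from a project-level Config dictionary that is not shown here. A minimal sketch of the keys it uses, with purely illustrative values, could be:

# Hypothetical Config; the real values live elsewhere in the project.
Config = {
    'image_width': 256,    # assumed input width in pixels
    'image_height': 256,   # assumed input height in pixels
    'alpha': 0.2,          # assumed margin used by the Resnet loss
    'batch_size': 32,
    'max_step': 100000,
}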
Example #3
File: main.py  Project: ximzzzzz/SDAMS
def main():
    args = parse_args()
    if args is None:
        exit()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        cnn = Resnet(sess, args)

        cnn.build_model()
        show_all_variables()

        if args.phase == 'train':
            cnn.train()
            print('Train finished!\n')

            cnn.test()
            print('Test finished!')

        if args.phase == 'test':
            cnn.test()
            print('Test finished!')
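
The parse_args helper is defined elsewhere in the project; judging from its use above, it at least exposes a phase argument. A minimal argparse-based sketch (an assumption, not the project's actual parser) might be:

# Hypothetical stand-in for the project's parse_args.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Resnet train/test entry point')
    parser.add_argument('--phase', type=str, default='train',
                        choices=['train', 'test'], help='which phase to run')
    return parser.parse_args()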
Example #4
def update_learning_rate(current_lr, optimizer):
    # Divide the learning rate by 10 and apply it to every parameter group.
    new_lr = current_lr / 10
    for g in optimizer.param_groups:
        g['lr'] = new_lr
    return new_lr



if __name__ == '__main__':
    use_cuda = len(sys.argv) > 1 and sys.argv[1] == 'cuda'
    num_epochs = 80
    # get cifar 10 data
    trainloader, testloader = get_dataset()
    benchmark, debug = False, True
    resnet = Resnet(n=2, dbg=debug)
    resnet.train()
    if use_cuda:
        resnet = resnet.cuda()
        for block in resnet.residual_blocks:
            block.cuda()
    current_lr = 1e-4
#     optimizer = optim.SGD(resnet.parameters(), lr=current_lr, weight_decay=0.0001, momentum=0.9)
    optimizer = optim.Adam(resnet.parameters(), lr=1e-4, weight_decay=0.0001)
    train_accs, test_accs = [], []
    gradient_norms = []
    def train_model():
        current_lr = 1e-4
        stopping_threshold, current_count = 3, 0
        n_iters = 0
        for e in range(num_epochs):
            # modify learning rate at 
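
The example is cut off inside the epoch loop. A plausible continuation that uses the update_learning_rate helper defined above is sketched below; the milestone epochs and the plain cross-entropy loss are assumptions, not the original code.

# Hypothetical continuation of the truncated training loop.
import torch.nn.functional as F

def train_model_sketch(resnet, optimizer, trainloader, num_epochs, use_cuda, current_lr):
    for e in range(num_epochs):
        if e in (40, 60):  # assumed milestone epochs for the 10x learning-rate drop
            current_lr = update_learning_rate(current_lr, optimizer)
        for x, y in trainloader:
            if use_cuda:
                x, y = x.cuda(), y.cuda()
            optimizer.zero_grad()
            loss = F.cross_entropy(resnet(x), y)  # stand-in loss; the original is not shown
            loss.backward()
            optimizer.step()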
Example #5
aug_train = Augmentation(x_train, y_train, 14400)
x_train, y_train = aug_train.data_augmentation()
print('after augmentation shape')
print(x_train.shape, x_test.shape)

#feature_train = FeatureExtraction(x_train)
#feature_test = FeatureExtraction(x_test)
#gray = feature_train.to_grayscale()
#sat = feature_train.get_color_sat().reshape(-1,1)
#pix = feature_train.get_color_pix_r().reshape(-1,1)
#grad = feature_train.get_img_gradient()
#print(gray.shape, sat.shape, pix.shape, grad.shape)
#x_train = np.concatenate((feature_train.to_grayscale(), feature_train.get_color_sat().reshape(-1,1), feature_train.get_color_pix_r().reshape(-1,1), feature_train.get_img_gradient()),axis = 1)
#x_test = np.concatenate((feature_test.to_grayscale(), feature_test.get_color_sat().reshape(-1,1), feature_test.get_color_pix_r().reshape(-1,1), feature_test.get_img_gradient()),axis = 1)
#print(x_train.shape)
#print(np.concatenate((gray, sat, pix), axis =1).shape)
#x_train = np.concatenate((gray, grad, sat, pix), axis =1)
#print('after data engineering x_train shape, x_test shape')
#print(x_train.shape, x_test.shape)

#x_train, x_test = data_eng.to_standardize(x_train, x_test)
#print('std done')

#x_train,x_test = data_eng.to_PCA(x_train, x_test)
#print('pca done')

res = Resnet(x_train, x_test, y_train, y_test)
accurate = res.train()

print(accurate)
def main():
    model = Resnet().to(device)
    # trained_model = resnet18(pretrained=True)
    # print(trained_model)
    # model = nn.Sequential(*list(trained_model.children())[:-1],#[b, 512, 1, 1]
    #                       Flatten(), # [b, 512 ,1,1] >=[b, 512]
    #                       nn.Linear(512, 10),
    #                       ).to(device)
    print(model)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [0],
             win='loss',
             opts=dict(title='loss', xlabel='batch', ylabel='loss'))
    viz.line([0], [0],
             win='val_acc',
             opts=dict(title='val_acc', xlabel='batch', ylabel='accuracy'))
    for epoch in range(epochs):

        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            # print(x.shape,x,y.shape,y)

            x, y = x.to(device), y.to(device)
            model.train()
            logits = model(x)
            # print('logits is:', logits.cpu().detach().numpy())
            loss = criteon(logits, y)

            # print("loss:", loss)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1
            print('progressed to:', batchsz * global_step)

        if epoch % 1 == 0:

            val_acc = evalute(model, val_loader)
            print(val_acc)
            # viz.line([val_acc], [global_step], win='val_acc', update='append')
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc

                torch.save(model.state_dict(), 'best_canny.mdl')

            viz.line([val_acc], [global_step], win='val_acc', update='append')

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best_canny.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)

    print('test acc:', test_acc)
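
The evalute helper and the visdom client (viz) are created elsewhere in the project. A minimal sketch of what evalute presumably computes, top-1 accuracy over a data loader, is shown below; the actual implementation may differ.

# Hypothetical evalute helper: top-1 accuracy over a loader.
import torch

def evalute(model, loader):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            pred = model(x).argmax(dim=1)
            correct += torch.eq(pred, y).sum().item()
            total += y.size(0)
    return correct / total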