def train_P_net(image_set, root_path, dataset_path, prefix, end_epoch,
                frequent, lr):
    imdb = IMDB("mtcnn", image_set, root_path, dataset_path)  # dataset name, as in the sibling MTCNN examples
    gt_imdb = imdb.gt_imdb()
    gt_imdb = imdb.append_flipped_images(gt_imdb)
    sym = P_Net()

    train_net(sym, prefix, end_epoch, gt_imdb, 12, frequent, lr)
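
The P-/R-/O-Net helpers above and below follow MTCNN's fixed stage crop sizes (12, 24, and 48 pixels, passed positionally to train_net). Several of them also double the training set with append_flipped_images; a minimal sketch of what such a helper typically does (the field names 'width', 'boxes' and 'flipped' are assumptions, the real IMDB method may differ):

import copy

def append_flipped_images(records):
    # assumes each record carries an image 'width' and x1, y1, x2, y2 'boxes'
    flipped = []
    for rec in records:
        f = copy.deepcopy(rec)
        f["flipped"] = True
        for box in f.get("boxes", []):
            # mirror horizontally: x -> width - 1 - x, keeping x1 <= x2
            box[0], box[2] = rec["width"] - 1 - box[2], rec["width"] - 1 - box[0]
        flipped.append(f)
    return records + flipped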
Example #2
def train_R_net(image_set, root_path, dataset_path, prefix, ctx,
                pretrained, epoch, begin_epoch, end_epoch, batch_size, thread_num,
                frequent, lr, lr_epoch, resume):
    imdb = IMDB("mtcnn", image_set, root_path, dataset_path, 'train')
    gt_imdb = imdb.get_annotations()
    sym = R_Net()

    train_net(sym, prefix, ctx, pretrained, epoch, begin_epoch, end_epoch, gt_imdb, batch_size, thread_num,
              24, True, True, False, frequent, not resume, lr, lr_epoch)
Example #3
def train_O_net(image_set, root_path, dataset_path, prefix, ctx,
                pretrained, epoch, begin_epoch, end_epoch, batch_size, thread_num,
                frequent, lr, lr_epoch, resume, with_landmark):
    imdb = IMDB("mtcnn", image_set, root_path, dataset_path, 'train')
    gt_imdb = imdb.get_annotations()
    sym = O_Net('train', with_landmark)

    train_net(sym, prefix, ctx, pretrained, epoch, begin_epoch, end_epoch, gt_imdb, batch_size, thread_num,
              48, True, True, with_landmark, frequent, not resume, lr, lr_epoch)
Example #4
def train_O_net(image_set, root_path, dataset_path, prefix, ctx, pretrained,
                epoch, begin_epoch, end_epoch, frequent, lr, resume):
    imdb = IMDB("mtcnn", image_set, root_path, dataset_path)
    gt_imdb = imdb.gt_imdb()
    gt_imdb = imdb.append_flipped_images(gt_imdb)
    sym = O_Net()

    train_net(sym, prefix, ctx, pretrained, epoch, begin_epoch, end_epoch,
              gt_imdb, 48, frequent, not resume, lr)
Example #5
def train_imagenet(anno_file, color_mode, num_classes, prefix, ctx,
                pretrained, epoch, begin_epoch, end_epoch, batch_size, thread_num,
                frequent, lr, lr_epoch, resume):
    imdb = IMDB(anno_file)
    gt_imdb = imdb.get_annotations()
    sym = mymodel.get_symbol(num_classes)

    train_net(sym, prefix, ctx, pretrained, epoch, begin_epoch, end_epoch, gt_imdb, color_mode, batch_size, thread_num,
              frequent, not resume, lr, lr_epoch)
Example #6
def train_O_net(image_set, root_path, dataset_path, prefix, ctx,
                pretrained, epoch, begin_epoch,
                end_epoch, frequent, lr, resume):
    imdb = IMDB("mtcnn", image_set, root_path, dataset_path)
    gt_imdb = imdb.gt_imdb()
    gt_imdb = imdb.append_flipped_images(gt_imdb)
    sym = O_Net()

    train_net(sym, prefix, ctx, pretrained, epoch, begin_epoch, end_epoch, gt_imdb,
              48, frequent, not resume, lr)
Example #7
def train_R_net(image_set, root_path, dataset_path, prefix, ctx,
                pretrained, epoch, begin_epoch, end_epoch, batch_size, thread_num,
                frequent, lr, lr_epoch, resume):
    imdb = IMDB("mtcnn", image_set, root_path, dataset_path)
    gt_imdb = imdb.gt_imdb()
    gt_imdb = imdb.append_flipped_images(gt_imdb)
    sym = R_Net()

    train_net(sym, prefix, ctx, pretrained, epoch, begin_epoch, end_epoch, gt_imdb, batch_size, thread_num,
              24, frequent, not resume, lr, lr_epoch)
Example #8
def run_pat():
    #################################
    # variables you want to set
    #################################
    # set the base directory and the log/experiment name prefix
    basedir = "/mnt/disk1/bugfree/PAT"
    basename = "pat1m-sgd"
    date = time.strftime("%d-%m-%Y")
    basename += "-" + date + "-"
    # fname_train = '/home/bugfree/Workspace/caffe/python_ext/data/pat/pat_1m_train.txt'
    # fname_test = '/home/bugfree/Workspace/caffe/python_ext/data/pat/pat_1w_test.txt'
    # fcpickle_train = '/home/bugfree/Workspace/caffe/python_ext/data/pat/pat_1m_train.cpickle'
    # fcpickle_test = '/home/bugfree/Workspace/caffe/python_ext/data/pat/pat_1w_test.cpickle'

    # fname_train = basedir + "/data/train_data_orig_scale.txt"
    # fname_test = basedir + "/data/test_data_orig_scale.txt"
    fcpickle_train = basedir + "/data/train_data_orig_scale.cpickle"
    fcpickle_test = basedir + "/data/test_data_orig_scale.cpickle"
    input_dim = 300
    output_dim = 1
    train_batch_size = 128
    test_batch_size = 128
    test_iter = [100]
    test_interval = 100000
    lr_policy = "step"
    lr = 0.00002
    gamma = 0.5
    snapshot = 1000000
    stepsize = 2000000
    maxiter = 10000000


    # initialize glog logging
    caffe.init_glog()

    if not os.path.exists(basedir):
        os.makedirs(basedir)


    # train_provider = data_provider(fname_train, save_cpickle=1, batch_size=train_batch_size)
    # test_provider = data_provider(fname_test, save_cpickle=1, batch_size=test_batch_size)
    train_provider = data_provider('', fcpickle=fcpickle_train, batch_size=train_batch_size)
    test_provider = data_provider('', fcpickle=fcpickle_test, batch_size=test_batch_size)
    # net = pat_net(300, 33, train_batch_size, test_batch_size)
    # net = pat_net(input_dim, output_dim, train_batch_size, test_batch_size)
    net = pat_net_one_relu(input_dim, output_dim, train_batch_size, test_batch_size)
    expname = basename + "%g" % (lr)
    out_dir = "%s/%s/" % (basedir, expname)
    solver = get_sgd_solver(test_iter=test_iter, test_interval=test_interval,
                            snapshot_prefix=out_dir, lr_policy=lr_policy,
                            base_lr=lr, stepsize=stepsize, maxiter=maxiter,
                            snapshot=snapshot, gamma=gamma)
    train_net(solver=solver, net=net, data_provider=[train_provider, test_provider],
              output_dir=out_dir, maxiter=maxiter, log_to_file=True)
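
For reference, Caffe's "step" policy used by this solver decays the base rate by gamma every stepsize iterations; a quick sanity check of the schedule configured above:

import math

def step_lr(base_lr, gamma, stepsize, it):
    # Caffe "step" policy: lr = base_lr * gamma ** floor(it / stepsize)
    return base_lr * gamma ** (it // stepsize)

assert step_lr(2e-5, 0.5, 2000000, 1999999) == 2e-5              # unchanged before the first step
assert math.isclose(step_lr(2e-5, 0.5, 2000000, 2000000), 1e-5)  # halved afterwards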
Example #9
def train_GA_net(mode, image_set, root_path, dataset_path, prefix, ctx,
                 pretrained, epoch, begin_epoch, end_epoch, batch_size,
                 thread_num, frequent, lr, lr_epoch, resume):
    imdb = IMDB("GA", 112, image_set, root_path, dataset_path)
    gt_imdb = imdb.gt_imdb()
    gt_imdb = imdb.append_flipped_images(gt_imdb)
    sym = GA_Net112(mode, batch_size)

    train_net(mode, sym, prefix, ctx, pretrained, epoch, begin_epoch,
              end_epoch, gt_imdb, batch_size, thread_num, 112, 112, frequent,
              not resume, lr, lr_epoch)
Example #10
def train(args, dataset_cfg, model_cfg, output_path):
    """ Training function. """
    # In resume mode, record the config only when resuming into a
    # different output path
    if args.resume:
        if os.path.realpath(output_path) != os.path.realpath(args.resume_path):
            record_config(model_cfg, dataset_cfg, output_path)

    # First run: write the config file to the output path
    else:
        record_config(model_cfg, dataset_cfg, output_path)

    # Launch the training
    train_net(args, dataset_cfg, model_cfg, output_path)
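
record_config itself is not shown in this snippet; a minimal sketch of what such a helper could look like (hypothetical, assuming the configs serialize as JSON-compatible dicts):

import json
import os

def record_config(model_cfg, dataset_cfg, output_path):
    # persist both configs next to the training outputs
    os.makedirs(output_path, exist_ok=True)
    with open(os.path.join(output_path, "config.json"), "w") as f:
        json.dump({"model": model_cfg, "dataset": dataset_cfg}, f, indent=2)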
Example #11
def main():
    '''main function'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--state', default='test', help='train or test')
    parser.add_argument('--n_in', default=3, type=int, help='input layer size')
    parser.add_argument('--n_hide', default=5, type=int, help='hidden layer size')
    parser.add_argument('--n_out', default=1, type=int, help='output layer size')
    parser.add_argument('--epoch', default=100000, type=int, help='number of training epochs')
    parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
    parser.add_argument('--data', default='train.csv', help='training dataset')
    parser.add_argument('--checkpoint',
                        default='ckpt\\model.ckpt',
                        help='checkpoint file name')
    opt = parser.parse_args()
    print(opt)

    if opt.state == 'train':
        model = train.train_net(opt.n_in, opt.n_hide, opt.n_out,
                                opt.checkpoint, opt.epoch, opt.lr)
        x, y = get_samples(opt.data, opt.n_in, opt.n_out)
        model.train(x, y)

    elif opt.state == 'test':
        test = Test(opt.n_in, opt.n_hide, opt.n_out, opt.checkpoint)
        # 14.61,13.49,22.67,17.81
        x = np.array([[14.61, 13.49, 22.67]], dtype=np.float32)
        test.test(x)

    else:
        print('Error state, must choose either train or test!')
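
Hypothetical invocation of the CLI above, driving parse_args in-process for illustration:

import sys

# equivalent to: python main.py --state train --data train.csv --epoch 100000
sys.argv = ["main.py", "--state", "train", "--data", "train.csv", "--epoch", "100000"]
main()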
Example #12
def train_normal():
    simple_train_savepath = "/home/oole/tf_normal_save/tfnet_full"

    initial_epoch = 0

    train_datapath = "/home/oole/Data/training/patient_patches_jpg"
    # train_datapath = '/home/oole/tf_test_data/validation'
    val_datapath = "/home/oole/Data/validation/patient_patches_jpg"

    logfile_path = "/home/oole/tfnetsave/tfnet_em_full_log.csv"
    logreg_savepath = "/home/oole/tfnetsave/tfnet_em_full_logreg"

    model_name = "model"

    train_slidelist, train_slide_dimensions, old_disc_patches, _ = data_tf.collect_data(
        train_datapath, batch_size)
    val_slidelist, _, _, _ = data_tf.collect_data(val_datapath, batch_size)

    train_patches = dataset.slidelist_to_patchlist(train_slidelist)
    val_patches = dataset.slidelist_to_patchlist(val_slidelist)
    np.random.shuffle(train_patches)
    np.random.shuffle(val_patches)

    train_accuracy, val_accuracy = train.train_net(
        train_patches,
        val_patches,
        num_epochs=20,
        batch_size=batch_size,
        savepath=simple_train_savepath,
        do_augment=True,
        model_name=model_name)
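
dataset.slidelist_to_patchlist is used here to go from per-slide patch lists to one flat list before shuffling; a hedged one-liner under that assumption:

def slidelist_to_patchlist(slidelist):
    # flatten [[patches of slide 1], [patches of slide 2], ...] into one list
    return [patch for slide in slidelist for patch in slide]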
Example #13
def continue_simple_training():
    epochs = 100
    initialEpoch = 100

    netRoot = "/home/oole/lymphoma_net/"
    runName = "lymphoma_simple_180907_cont_sanity/"
    modelName = "lymph_model"

    if not os.path.exists(netRoot):
        os.makedirs(netRoot)
    else:
        print("Net root folder already exists.")
    if not os.path.exists(netRoot + runName):
        os.makedirs(netRoot + runName)
    else:
        print("Run folder already exists.")

    old_simple_savepath = netRoot + "lymphoma_simple_180907/" + "lymph_simple"
    simple_cont_savepath = netRoot + runName + modelName
    logfile_path = netRoot + runName + "lymph_net_log.csv"
    logreg_savepath = netRoot + runName + "lymph_logreg"

    # load data
    # split into train val
    basePath = "/home/oole/data_lymphoma/"
    trainDataPath = basePath + "train/"
    testDataPath = basePath + "test/"
    trainSlideData = ldata.collect_data(trainDataPath)
    testSlideData = ldata.collect_data(testDataPath)

    train.train_net(trainSlideData,
                    testSlideData,
                    num_epochs=epochs,
                    batch_size=BATCH_SIZE,
                    savepath=simple_cont_savepath,
                    do_augment=True,
                    model_name=modelName,
                    getlabel_train=ldata.getlabel,
                    log_savepath=logreg_savepath,
                    runName=runName,
                    lr=LEARNING_RATE,
                    buildNet=lnet.getLymphNet,
                    valIsTestData=True,
                    initialEpoch=initialEpoch,
                    loadpath=old_simple_savepath)

    print("Data collected.")
Example #14
def main():
    # parser = argparse.ArgumentParser()
    # parser.add_argument('mode', type=string)
    # args = parser.parse_args()
    # print args.mode
    sess = U.single_threaded_session()
    sess.__enter__()
    set_global_seeds(0)

    dir_name = "training_images"

    cur_dir = get_cur_dir()
    img_dir = osp.join(cur_dir, dir_name)
    header("Load model")
    mynet = mymodel(name="mynet", img_shape=[210, 160, 1], latent_dim=2048)
    header("Load model")
    train_net(model=mynet, img_dir=img_dir)
Example #15
def continue_simple_training(initialEpoch, epochNumber):

    netRoot = "/home/oole/lymphoma_net_vgg/"
    runName = BASENAME + "_simple_" + SIMPLERUNSTAMP + "_cont/"
    modelName = BASENAME + "_model"

    if not os.path.exists(netRoot):
        os.makedirs(netRoot)
    else:
        print("Net root folder already exists.")
    if not os.path.exists(netRoot + runName):
        os.makedirs(netRoot + runName)
    else:
        print("Run folder already exists.")

    old_simple_savepath = netRoot + runName + BASENAME + "_simple"
    simple_cont_savepath = netRoot + runName + modelName
    logfile_path = netRoot + runName + BASENAME + "_net_log_em.csv"
    logreg_savepath = netRoot + runName + BASENAME + "_logreg"

    # load data
    # split into train val
    basePath = "/home/oole/data_lymphoma/"
    trainDataPath = basePath + "train/"
    testDataPath = basePath + "test/"
    trainSlideData = ldata.collect_data(trainDataPath)
    testSlideData = ldata.collect_data(testDataPath)

    train.train_net(trainSlideData,
                    testSlideData,
                    num_epochs=epochNumber,
                    batch_size=BATCH_SIZE,
                    savepath=simple_cont_savepath,
                    do_augment=True,
                    model_name=modelName,
                    getlabel_train=ldata.getlabel,
                    log_savepath=logreg_savepath,
                    runName=runName,
                    lr=LEARNING_RATE,
                    buildNet=lnet.getLymphNet,
                    valIsTestData=True,
                    initialEpoch=initialEpoch,
                    loadpath=old_simple_savepath,
                    splitSeed=SPLIT_SEED)

    print("Data collected.")
Example #16
def simple_training(numberOfEpochs=2):
    initialEpoch = 0
    epochs = numberOfEpochs

    netRoot = "/home/oole/lymphoma_net_vgg/"
    runName = BASENAME + "_simple_" + RUNSTAMP + "/"
    modelName = BASENAME + "_model"

    if not os.path.exists(netRoot):
        os.makedirs(netRoot)
    else:
        print("Net root folder already exists.")
    if not os.path.exists(netRoot + runName):
        os.makedirs(netRoot + runName)
    else:
        print("Run folder already exists.")

    simple_train_savepath = netRoot + runName + BASENAME + "_simple"
    em_train_savepath = netRoot + runName + BASENAME + "_em"
    logfile_path = netRoot + runName + BASENAME + "_net_log.csv"
    logreg_savepath = netRoot + runName + BASENAME + "_logreg"

    # load data
    # split into train val
    basePath = "/home/oole/data_lymphoma/"
    trainDataPath = basePath + "train/"
    testDataPath = basePath + "test/"
    trainSlideData = ldata.collect_data(trainDataPath)
    testSlideData = ldata.collect_data(testDataPath)

    train.train_net(trainSlideData,
                    testSlideData,
                    num_epochs=epochs,
                    batch_size=BATCH_SIZE,
                    savepath=simple_train_savepath,
                    do_augment=True,
                    model_name=modelName,
                    getlabel_train=ldata.getlabel,
                    log_savepath=logreg_savepath,
                    runName=runName,
                    lr=LEARNING_RATE,
                    buildNet=lnet.getLymphNet,
                    valIsTestData=True)

    print("Data collected.")
Example #17
def test_training(d, k):
    def nth_dim_positive_data(n, d, k):
        # prepend a 0/1 label row: 1 iff dimension n of the sample is positive
        data = torch.randn(d, k)
        u = torch.cat([torch.clamp(torch.sign(data[n:n+1]), min=0), data])
        return u.t()

    train = nth_dim_positive_data(2, d, k)
    dev = nth_dim_positive_data(2, d, 500)
    #test = nth_dim_positive_data(2, d, 500)
    classifier = DropoutClassifier(d, 100, 2)
    train_net(classifier,
              train,
              dev,
              tensor_batcher,
              batch_size=96,
              n_epochs=30,
              learning_rate=0.001,
              verbose=True)
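
tensor_batcher appears in this and several later examples; a minimal sketch compatible with these calls (assumptions: train_net invokes it as batcher(data, batch_size), the optional third flag, passed as False in a later example, toggles shuffling, and the label sits in the first column as in the synthetic data above):

import torch

def tensor_batcher(data, batch_size, shuffle=True):
    # data: 2-D tensor with one row per sample and the label in column 0
    order = torch.randperm(len(data)) if shuffle else torch.arange(len(data))
    for start in range(0, len(data), batch_size):
        batch = data[order[start:start + batch_size]]
        yield batch[:, 1:], batch[:, 0].long()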
Example #18
def simple_training():
    epochs = 2
    initialEpoch = 0

    netRoot = "/home/oole/breasthistology_net/"
    runName = "breasthistology_simple_testing/"
    modelName = "breasthistology_model"

    if not os.path.exists(netRoot):
        os.makedirs(netRoot)
    else:
        print("Net root folder already exists.")
    if not os.path.exists(netRoot + runName):
        os.makedirs(netRoot + runName)
    else:
        print("Run folder already exists.")

    simple_train_savepath = netRoot + runName + "breasthistology_simple"
    em_train_savepath = netRoot + runName + "breasthistology_em"
    logfile_path = netRoot + runName + "breasthistology_net_log.csv"
    logreg_savepath = netRoot + runName + "breasthistology_logreg"

    # load data
    # split into train val
    trainDataPath = "/home/oole/breasthistology/training/"
    testDataPath = "/home/oole/breasthistology/testing/"
    trainSlideData = bdata.collect_data(trainDataPath)
    testSlideData = bdata.collect_data(testDataPath)

    train.train_net(trainSlideData,
                    testSlideData,
                    num_epochs=epochs,
                    batch_size=BATCH_SIZE,
                    savepath=simple_train_savepath,
                    do_augment=True,
                    model_name=modelName,
                    getlabel_train=bdata.getlabel,
                    log_savepath=logreg_savepath,
                    runName=runName,
                    lr=0.0001,
                    buildNet=bnet.getBreasthistoNet,
                    valIsTestData=True)

    print("Data collected.")
Example #19
def create_and_train_net(training_data, test_data):
    training_data = cudaify(training_data)
    test_data = cudaify(test_data)
    print("training size:", training_data.shape)
    print("testing size:", test_data.shape)
    classifier = cudaify(DropoutClassifier(1536, 2, 200))
    return train_net(classifier,
                     training_data,
                     test_data,
                     lambda x, y: tensor_batcher(x, y, False),
                     batch_size=96,
                     n_epochs=12,
                     learning_rate=0.001,
                     verbose=True)
Example #20
def createAndTrainNN(file_name, trainingData, testData):

    if torch.cuda.is_available():
        print("using gpu")
        cuda = torch.device('cuda:2')
        FloatTensor = torch.FloatTensor
        LongTensor = torch.LongTensor

        def cudaify(model):
            return model.cuda(cuda)
    else:
        print("using cpu")
        cuda = torch.device('cpu')
        FloatTensor = torch.FloatTensor
        LongTensor = torch.LongTensor

        def cudaify(model):
            return model

    #trainingData, testData = sampleFromFileTwoSenses(num_pairs, file_name, 0.8, senses)
    trainingData = cudaify(trainingData)
    testData = cudaify(testData)
    print("training size:", trainingData.shape)
    print("testing size:", testData.shape)
    print(file_name)

    classifier = cudaify(DropoutClassifier(1536, 100, 2))

    train_net(classifier,
              trainingData,
              testData,
              tensor_batcher,
              batch_size=96,
              n_epochs=10,
              learning_rate=0.001,
              verbose=True)
Example #21
def run_model(config, fold, fold_base=None):

    if config['model'] == 'LSTM':
        return train_lstm(config, fold)
    if config['model'] == 'BiLSTM':
        return train_bilstm(config, fold)
    if config['model'] == 'NN':
        return train_net(config, fold)
    if config['model'] == 'linear':
        return train_linear(config, fold)
    if config['model'] == 'svm':
        return train_svm(config, fold)
    if config['model'] == 'random_forest':
        return train_random_forrest(config, fold)
    if config['model'] == 'baseline':
        return train_baseline(config, fold, fold_base)
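
The same routing can be expressed as a dispatch table; a sketch that keeps the behavior above, except that an unknown model name now raises a KeyError instead of silently returning None:

TRAINERS = {
    'LSTM': train_lstm,
    'BiLSTM': train_bilstm,
    'NN': train_net,
    'linear': train_linear,
    'svm': train_svm,
    'random_forest': train_random_forrest,
}

def run_model_dispatch(config, fold, fold_base=None):
    # baseline is special-cased because it takes an extra argument
    if config['model'] == 'baseline':
        return train_baseline(config, fold, fold_base)
    return TRAINERS[config['model']](config, fold)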
Example #22
def train_augment_csv(train_csv="/home/oole/Data/nice_data/train.csv",
                      test_csv="/home/oole/Data/nice_data/test.csv"):
    val_csv = "/home/oole/Data/nice_data/400x400_data/train.csv"

    netRoot = "/home/oole/tfnetsave/"
    runName = "180806_betterAugmentation_lr0.001/"

    if not os.path.exists(netRoot + runName):
        os.makedirs(netRoot + runName)
    else:
        print("Run folder already extists.")

    simple_train_savepath = netRoot + runName + "tfnet_simple_full"
    em_train_savepath = netRoot + runName + "tfnet_em_full"

    initial_epoch = 0

    logfile_path = netRoot + runName + "tfnet_log.csv"
    logreg_savepath = netRoot + runName + "tfnet_logreg"

    model_name = "model"

    labelEncoder = data_tf.labelencoder()

    trainSlideData = data_tf.collect_data_csv(train_csv, data_tf.getlabel_new)
    trainSlideData.setLabelEncoder(labelEncoder)

    valSlideData = data_tf.collect_data_csv(val_csv,
                                            data_tf.getlabel_new,
                                            doAugment=False)
    valSlideData.setLabelEncoder(labelEncoder)

    # test purposes:
    # trainSlideData, valSlideData = data_tf.getTestSizeData(trainSlideData, valSlideData, 20)

    # Initial training
    train_accuracy, val_accuracy, netAcc = train.train_net(
        trainSlideData,
        valSlideData,
        num_epochs=30,
        batch_size=batch_size,
        savepath=simple_train_savepath,
        do_augment=True,
        model_name=model_name,
        getlabel_train=data_tf.getlabel_new,
        log_savepath=logfile_path,
        runName=runName,
        lr=0.001)
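
Note that one labelEncoder is deliberately shared between the train and validation slide data, so class indices stay consistent across both sets; with scikit-learn, for instance, the invariant looks like this (hypothetical labels):

from sklearn.preprocessing import LabelEncoder

enc = LabelEncoder().fit(["normal", "tumor"])   # classes sorted: normal=0, tumor=1
assert list(enc.transform(["tumor", "normal"])) == [1, 0]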
Example #23
def create_and_train_net(net, training_data, test_data, verbose):
    training_data = cudaify(training_data)
    test_data = cudaify(test_data)
    if verbose:
        print("training size:", training_data.shape)
        print("testing size:", test_data.shape)
    classifier = cudaify(net)

    best_net, best_acc = train_net(classifier,
                                   training_data,
                                   test_data,
                                   tensor_batcher,
                                   batch_size=2,
                                   n_epochs=10,
                                   learning_rate=0.001,
                                   verbose=verbose)
    return best_acc
Example #24
def train_parser(train_csv, dev_csv):
    print('loading train')
    train = torch.tensor(pd.read_csv(train_csv).values).float()
    print('train size: {}'.format(train.shape[0]))
    print('loading dev')
    dev = torch.tensor(pd.read_csv(dev_csv).values).float()
    print('dev size: {}'.format(dev.shape[0]))
    classifier = DropoutClassifier(768 * 2, 200, 2)
    net = train_net(classifier,
                    train,
                    dev,
                    tensor_batcher,
                    batch_size=96,
                    n_epochs=30,
                    learning_rate=0.001,
                    verbose=True)
    return net
Example #25
            parser.print_help()
            parser.error('For testing, test_mode must be 0,1 or 2.')

    if not args.model:
        print('No model specified. using default: ', default_store_model)
        args.model = default_store_model

    if not args.gpu:
        args.gpu = '0'

    return args

if __name__ == '__main__':
    args = parse_arguments()

    # Limit execution on certain GPU/GPUs
    gpu_id = args.gpu  # Comma separated string of GPU IDs to be used e.g. '0, 1, 2, 3'
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    common_cfg_file = os.path.join('configure', 'common.json')
    train_cfg_file = os.path.join('configure', 'train.json')
    test_cfg_file = os.path.join('configure', 'test.json')

    if args.phase == 'train':
        train_net(
            common_cfg_file, train_cfg_file, args.train_mode, args.model, args.epochs,
            args.retrain, args.initial_epoch)
    else:
        test_net(common_cfg_file, test_cfg_file, args.test_mode, args.model)
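
The common/train/test JSON configs above are presumably plain files; a minimal loader sketch (an assumption — the real train_net/test_net may parse them internally):

import json

def load_cfg(path):
    """Read one of the JSON config files referenced above."""
    with open(path) as f:
        return json.load(f)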
Example #26
def run():
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    # Change consts when changing dataset
    DATASET_DOWNLOAD_FUNC = torchvision.datasets.CIFAR10

    trainset = DATASET_DOWNLOAD_FUNC(root='./data',
                                     train=True,
                                     download=True,
                                     transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=1)

    testset = DATASET_DOWNLOAD_FUNC(root='./data',
                                    train=False,
                                    download=True,
                                    transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=BATCH_SIZE,
                                             shuffle=False,
                                             num_workers=0)
    parser = argparse.ArgumentParser()
    parser.add_argument('--layers', default=2, type=int)
    parser.add_argument('--net', default='resnet', choices=('resnet', 'resnetfc'))
    parser.add_argument('--opt',
                        default='none',
                        choices=('none', 'anti', 'leap'))
    parser.add_argument('--multi',
                        default='none',
                        choices=('none', 'random', 'copy', 'interleave',
                                 'interpolate'))
    ns = parser.parse_args(sys.argv[1:])

    # if ns.net == 'resnet':
    #     if ns.multi == 'none':
    #         method=None
    #     elif ns.multi == 'random':
    #         method=Multilevel.random
    #     elif ns.multi == 'copy':
    #         method=Multilevel.copy
    #     elif ns.multi == 'interleave':
    #         method=Multilevel.interleave
    #     elif ns.multi == 'interpolate':
    #         method=Multilevel.interpolate
    #
    #     train_net(ResNet(ns.layers), testloader, trainloader, method)
    # elif ns.net == 'resnetfc':
    #     if ns.opt == 'anti':
    #         train_net(ResNetFc(ns.layers, antisymmetric=True), testloader, trainloader)
    #     elif ns.opt == 'leap':
    #         train_net(ResNetFc(ns.layers, leapfrog=True), testloader, trainloader)
    #     else:
    #         train_net(ResNetFc(ns.layers, antisymmetric=False), testloader, trainloader)
    # for i in range(0, 1):
    #     train_net(ResNetFc(4), testloader, trainloader)
    # for i in range(0, 3):
    # train_net(ResNetFc(8), testloader, trainloader)
    # train_net(ResNetFc(8, antisymmetric=True), testloader, trainloader)
    # train_net(ResNetFc(8, leapfrog=True, h=1), testloader, trainloader, h=1)
    # train_net(ResNetFc(8, leapfrog=True, h=0.1), testloader, trainloader, h=0.1)
    # train_net(ResNetFc(8, leapfrog=True, h=0.01), testloader, trainloader, h=0.01)
    # train_net(ResNetFc(8, dropOut=True), testloader, trainloader)
    for i in range(0, 5):
        # train_net(ResNet(1), testloader, trainloader, Multilevel.random)
        # train_net(ResNet(1), testloader, trainloader, Multilevel.copy)
        train_net(ResNet(1), testloader, trainloader, Multilevel.interleave)
Example #27
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    train_cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(train_cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(train_cfg.RNG_SEED)
        caffe.set_random_seed(train_cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    print('imdb name `{:s}`'.format(args.imdb_name))
    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(args.solver, roidb, output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
Example #28
        'recon_cost': 'quadratic', 'pred_cost': 'nll', 'error_func': 'neq',
        'cost_add': [['L2', 0.001, None], ['S', 0.01, None]],
        'dropout': True,
        'target_is_int': True,
        'params_to_train': "all",
        'mode': 'train',
        'dataname': dataname}

recon_tracer = Tracer(rng=rng, details=tracer_details)

train_net(datasets, dataname, recon_tracer,
          n_epochs=50,
          learning_rate=0.1, learning_rate_decay=0.9,
          mom_i=0.5, mom_f=0.9, mom_tau=200, mom_switch=10,
          regularizer=0,
          rmsprop=True,
          batch_size=100,
          save_many_params=True)

view_reconstruction(params=recon_tracer.params, details=tracer_details)

# train tracer for classification only, using weights learnt for reconstruction

tracer_details = {
        'n_context': 33*33, 'n_trace': 17*17, 'n_h': [500, 100, 2000], 'n_recon': 17*17, 'n_out': 9, 'n_layers': 5,
Example #29
    "all",
    'mode':
    'train',
    'dataname':
    dataname
}

recon_tracer = Tracer(rng=rng, details=tracer_details)

train_net(datasets,
          dataname,
          recon_tracer,
          n_epochs=50,
          learning_rate=0.1,
          learning_rate_decay=0.9,
          mom_i=0.5,
          mom_f=0.9,
          mom_tau=200,
          mom_switch=10,
          regularizer=0,
          rmsprop=True,
          batch_size=100,
          save_many_params=True)

view_reconstruction(params=recon_tracer.params, details=tracer_details)

# train tracer for classification only, using weights learnt for reconstruction

tracer_details = {
    'n_context': 33 * 33,
    'n_trace':
Example #30
        manipulate_solver(cfg.TRAIN.SOLVER, target_sw, train_net=target_train)
        manipulate_train(cfg.TRAIN.PROTOTXT, target_train)

        if isinstance(cfg.TRAIN.GPU_ID, int):
            cfg.TRAIN.GPU_ID = [cfg.TRAIN.GPU_ID]

        cfg_print(cfg)

        with open(osp.join(output_dir, 'cfgs.txt'), 'w') as f:
            cfg_dump({i: cfg[i] for i in cfg if i != 'TEST'}, f)
        tb.sess.add_text('train_cfg', \
                         cfg_table({i: cfg[i] for i in cfg if i != 'TEST'}))
        train_net(target_sw,
                  roidb,
                  output_dir=output_dir,
                  pretrained_model=cfg.TRAIN.PRETRAINED,
                  max_iter=cfg.TRAIN.ITERS,
                  gpus=cfg.TRAIN.GPU_ID)

        # Set test models for the following testing
        cfg.TEST.MODEL = osp.join(output_dir, 'final.caffemodel')

    if args.test == 'true' or args.test == 'True':  # the testing entrance
        if isinstance(cfg.TEST.GPU_ID, int):
            cfg.TEST.GPU_ID = [cfg.TEST.GPU_ID]

        if not cfg.TEST.DEMO.ENABLE:
            imdb = get_imdb(cfg.TEST.DB)
            output_dir = get_output_dir(imdb.name,
                                        cfg.NAME + '_' + cfg.LOG.TIME)
Example #31
    print('Using config:')
    pprint.pprint(cfg)

    # if not args.randomize:
    #     # fix the random seeds (numpy and caffe) for reproducibility
    #     np.random.seed(cfg.RNG_SEED)
    #     caffe.Net.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    if args.gpu_id is not None:
        caffe.set_device(args.gpu_id)
    _imdb = wholeimage('102flowers')
    print('Loaded datasets `{:s}` for training'.format(_imdb.name))
    _imdb.get_train_image()

    _imdb = get_training_imdb(_imdb)
    imdb = _imdb.train_image

    output_dir = get_output_dir(_imdb, None)
    print('Output will be saved to `{:s}`'.format(output_dir))
    # print compute_means(imdb)


    train_net(args.solver, imdb, output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)


Example #32
    net = FCN().apply(initialize_weights_advance_).to(device)
    print(net)

    data_transform = transforms.Compose(
        [Rescale(250), RandomCrop(224),
         Normalize(), ToTensor()])

    transformed_dataset = FacialKeypointsDataset(
        csv_file='data/training_frames_keypoints.csv',
        root_dir='data/training/',
        transform=data_transform)

    # load training data in batches
    batch_size = 128

    train_loader = DataLoader(transformed_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)

    criterion = nn.MSELoss()

    optimizer = optim.Adam(params=net.parameters(), lr=0.001)

    losses = train_net(n_epochs, net, train_loader, device, optimizer,
                       criterion)

    plt.xlabel("Steps")
    plt.ylabel("MSE Loss")
    plt.plot(losses, "g-")
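
A minimal sketch (assumed, not the source's implementation) of a train_net matching the call above: one optimizer step per batch, returning the per-step loss history that gets plotted. It assumes each FacialKeypointsDataset batch is a dict with 'image' and 'keypoints' tensors, consistent with the transforms shown:

def train_net(n_epochs, net, train_loader, device, optimizer, criterion):
    losses = []
    net.train()
    for epoch in range(n_epochs):
        for batch in train_loader:
            images = batch['image'].to(device).float()
            targets = batch['keypoints'].to(device).float()
            targets = targets.view(targets.size(0), -1)   # flatten (x, y) pairs
            optimizer.zero_grad()
            loss = criterion(net(images), targets)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
    return losses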
Example #33
                        help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--solver', dest='solver',
                        help='solver prototxt',
                        default=None, type=str)
    parser.add_argument('--output', dest='output_dir',
                        help='dir to save the model',
                        default=None, type=str)
    parser.add_argument('--iters', dest='max_iters',
                        help='number of iterations to train',
                        default=40000, type=int)
    parser.add_argument('--snapstep', dest='snapshot_iters',
                        help='snapshot every snapstep iters',
                        default=500, type=int)
    parser.add_argument('--weights', dest='pretrained_model',
                        help='initialize with pretrained model weights',
                        default=None, type=str)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args

if __name__ == "__main__":
    args = parse_args()
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    train_net(args.solver, args.output_dir, args.pretrained_model, max_iters=args.max_iters, snapshot_iters=args.snapshot_iters)
Example #34
        cfg_from_file(args.cfg_file)

    cfg.GPU_ID = args.gpu_id
    cfg.train_imdb = args.train_imdb
    cfg.val_prototxt = args.test_prototxt
    cfg.test_prototxt = args.test_prototxt
    cfg.TRAIN.VALIDATION_ITERATION = eval(cfg.TRAIN.VALIDATION_ITERATION)

    print('Using config:')
    pprint.pprint(cfg)

    # if not args.randomize:
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    from easydict import EasyDict as edict
    imdb = edict()
    imdb.name = cfg.train_imdb

    # train
    start = clock()
    model_paths, validate_acc = train_net(args.solver,
                                          pretrained_model=args.pretrained_model)
    elapse_time_train = (clock() - start) / 60
    print('Training time', elapse_time_train, 'min')
Example #35
        cfg_from_file(args.cfg_file)

    cfg.GPU_ID = args.gpu_id
    cfg.train_imdb = args.train_imdb
    cfg.val_prototxt = args.test_prototxt
    cfg.test_prototxt = args.test_prototxt
    cfg.TRAIN.VALIDATION_ITERATION = eval(cfg.TRAIN.VALIDATION_ITERATION)

    print('Using config:')
    pprint.pprint(cfg)

    # if not args.randomize:
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    from easydict import EasyDict as edict
    imdb = edict()
    imdb.name = cfg.train_imdb

    # train
    start = clock()
    model_paths, validate_acc = train_net(
        args.solver, pretrained_model=args.pretrained_model)
    elapse_time_train = (clock() - start) / 60
    print('Training time', elapse_time_train, 'min')
Example #36
        norm_node.input = CfgNode()
        norm_node.input.mean = train_dataset.input_mean.tolist()
        norm_node.input.std = train_dataset.input_std.tolist()
        norm_node.target = CfgNode()
        norm_node.target.mean = train_dataset.target_mean.tolist()
        norm_node.target.std = train_dataset.target_std.tolist()
        with open(os.path.join(output_base_path, "norm.yaml"), "w") as f:
            f.write(norm_node.dump())
    else:
        print("Copy skipped")

    print("Building network...")
    net = network.RamanAINetwork(cfg.network.structure)

    print("Begin training...")
    stream = train.train_net(net, train_dataset, valid_dataset, test_dataset,
                             cfg)
    handler = utils.str_to_obj(
        f"train_stream_handlers.{cfg.train_stream_handler}")

    # main loop scheduler
    loop = asyncio.new_event_loop()
    main_scheduler = AsyncIOThreadSafeScheduler(loop)
    s = scheduler.ThreadPoolScheduler()
    observer = handler(cfg, cfg_dir)

    stream.pipe(operators.subscribe_on(s)).subscribe(observer)

    if observer.plot_sub:

        def plot_task(data):
            f: plt.Figure = plt.gcf()