Example No. 1
def main():
    torch.manual_seed(43)

    criterion = nn.CrossEntropyLoss()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_loader, test_loader = data_loaders()

    model = FNN_ID(750).to(device)
    optimizer = optim.Adam(model.parameters())

    train(model, device, train_loader, criterion, optimizer)
    test(model, device, test_loader, criterion)
    save(model)
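The snippet above relies on project helpers (`data_loaders`, `FNN_ID`, `train`, `test`, `save`) that are not shown. A minimal sketch, assuming a standard classification setup, of what `train` and `test` with these call signatures could look like:

import torch


def train(model, device, train_loader, criterion, optimizer):
    # One pass over the training data; the real helper may run several epochs.
    model.train()
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()


def test(model, device, test_loader, criterion):
    # Report average loss and accuracy without tracking gradients.
    model.eval()
    total_loss, correct = 0.0, 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            total_loss += criterion(outputs, targets).item() * targets.size(0)
            correct += (outputs.argmax(dim=1) == targets).sum().item()
    n = len(test_loader.dataset)
    print(f'test loss: {total_loss / n:.4f}, accuracy: {correct / n:.4f}')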
Example No. 2
def train_it():
    """
    Function for triggering training of the model
    """
    print("request received: training the model")
    results = train(
        "Hello! This is the result of training from Flask backend!")
    resp = jsonify(results)
    resp.headers.add("Access-Control-Allow-Origin", "*")
    return resp
def main():
    # user inputs from command line
    in_arg = get_input_args()
    # load and process data into training, validation and test data sets
    trainloader, validationloader, testloader, train_data = load_and_transform(
        in_arg.data_dir)
    # load pre-trained nn and build classifier with user inputs (loss criterion & optimizer are fixed) to create the model
    model, criterion, optimizer = build_classifier(in_arg.arch,
                                                   in_arg.in_features,
                                                   in_arg.hidden_layers,
                                                   in_arg.output_size,
                                                   in_arg.learning_rate)
    # Train the model
    trained_model = train(in_arg.epochs, trainloader, validationloader,
                          optimizer, model, criterion, in_arg.gpu)
    # saving the model
    save_model(trained_model, optimizer, in_arg.saving_dir, in_arg.arch,
               in_arg.learning_rate, in_arg.epochs, train_data)
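This pipeline assumes a `get_input_args()` helper exposing the attributes used above. A hypothetical argparse definition (names match the usage; the defaults are illustrative only):

import argparse


def get_input_args():
    parser = argparse.ArgumentParser(description='Train an image classifier')
    parser.add_argument('data_dir', help='directory with the training data')
    parser.add_argument('--saving_dir', default='checkpoints')
    parser.add_argument('--arch', default='vgg16')
    parser.add_argument('--in_features', type=int, default=25088)
    parser.add_argument('--hidden_layers', nargs='+', type=int, default=[512, 256])
    parser.add_argument('--output_size', type=int, default=102)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--gpu', action='store_true')
    return parser.parse_args()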
Example No. 4
def main(net, dataloader, device, config):
    train_loader, test_loader = dataloader[0], dataloader[1]
    optimizer = optim.SGD(net.parameters(),
                          lr=config.lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.95)

    crit = get_criterion(config)
    crit = crit.to(device)

    if not os.path.isdir(config.weight):
        os.makedirs(config.weight)
    checkpoint = os.path.join(config.weight, config.model + '.pth')

    best_acc = 0
    for epoch in range(config.epoch):
        logging.info(f'LR: {scheduler.get_last_lr()}')  # get_last_lr() is the documented accessor in recent PyTorch
        ########## TRAIN ##########
        start = time()
        net.train()
        loss_train = train(config, net, device, train_loader, crit, optimizer,
                           epoch)
        end = time() - start
        logging.info(
            f'=> Epoch[{epoch}], Average train Loss: {loss_train:.3f}, Tot Time: {end:.3f}'
        )

        ########## TEST ###########
        start = time()
        net.eval()
        acc = test(config, net, device, test_loader, epoch)
        end = time() - start
        logging.info(
            f'=> Epoch[{epoch}], Final accuracy: {acc:.3f}, Tot Time: {end:.3f}\n'
        )
        if acc > best_acc:
            torch.save(net.state_dict(), checkpoint)
            logging.info(f'Saving best model checkpoint at {checkpoint}')
            best_acc = acc
        scheduler.step()

    logging.info(f'Best test accuracy: {best_acc}')
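After training, the best checkpoint written by this loop can be restored with a plain `load_state_dict`; a short sketch reusing the `checkpoint`, `net`, and `device` names from the snippet:

import torch

# Reload the best weights saved during training (assumes the same architecture).
state_dict = torch.load(checkpoint, map_location=device)
net.load_state_dict(state_dict)
net.eval()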
Example No. 5
def command_line(dictionary):
    while True:
        command = input('Enter your command: ')
        if command == 'exit':
            return 'exit'
        elif command == 'move':
            functions.move()
        elif command == 'help':
            for command, description in list(commands_list.items()):
                print(command + ' = ' + description)
        elif command == 'add':
            functions.add()
            return functions.update()

        elif command == 'remove':
            functions.remove()
        elif command == 'print':
            continue
        elif command == 'show':
            functions.show()
        elif command == 'train':
            return functions.train(dictionary)
Example No. 6
    baseline = baselineNet(nObs).to(DEVICE) if BASELINE else None
    if saver is not None:
        saver.addObj(baseline, "baseline", True, DEVICE)
    optbaseline = optimizerMaker("adam", LEARNING_RATE)

    # Training policy
    print("Training policy\n -------------")
    LOGR.logr("Begining training")
    accRewards, var, steps = train(envmaker,
                                   policy,
                                   optpolicy,
                                   baseline=baseline,
                                   optBaselineMaker=optbaseline,
                                   device=DEVICE,
                                   iterations=ITERS,
                                   testFreq=TEST_FREQ,
                                   maxEpisodeLength=MAX_EPISODE_LENGTH,
                                   gamma=GAMMA,
                                   batchSize=BATCH_SIZE,
                                   pBatchMem=PBM,
                                   nWorkers=1,
                                   saver=saver,
                                   learningRate=LEARNING_RATE)
    LOGR.logr("Trainnig Done")
    saver.saveAll() if saver is not None else None
    LOGR.logr("Objects saved")
    graphResults(accRewards, var, meanRnd, varRnd, ITERS, TEST_FREQ, GRAPH_MOD)
    if PLAY: playTest(env, policy, NAME, 200, 20)
    LOGR.logr("Experiment done")
Example No. 7
            for i_batch in range(nb_batches - 1):

                sentences_batch = sentences[i_batch *
                                            glob.batch_size:(i_batch + 1) *
                                            glob.batch_size]

                # xlm_batch = XLM_pre.create_batch(sentences_batch)
                # xlm_batch = xlm_batch.detach()       # do not train the pretrained XLM weights

                for s in sentences_batch:
                    actual_tags = torch.tensor(
                        [glob.POS_DICT[w.pos] for w in s.words],
                        device=glob.device)
                    # indexes = sentence_to_word_index(dico, s)
                    train(Net, s, actual_tags, optimizer)

                batch_time = time.perf_counter() - start_time  # time.clock() was removed in Python 3.8
                print(round(100 * (i_batch + 1) / nb_batches, 2), end="% ")
                remaining_time = batch_time / (
                    (i_batch + 1) / nb_batches) - batch_time
                remaining_time *= glob.epochs
                h = int(remaining_time // 3600)
                m = int(remaining_time // 60 - (remaining_time // 3600) * 60)
                s = int(remaining_time % 60)
                print("Remaining : {0}h{1}m{2}s".format(h, m, s))
                sys.stdout.flush()

            print('.')

    except KeyboardInterrupt:
Example No. 8
def main():
    args = cfg.parse_args()
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown init type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net = Generator(bottom_width=args.bottom_width,
                        gf_dim=args.gf_dim,
                        latent_dim=args.latent_dim).cuda()
    dis_net = eval('models.' + args.model + '.Discriminator')(args=args).cuda()
    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    initial_gen_net_weight = torch.load(os.path.join(args.init_path,
                                                     'initial_gen_net.pth'),
                                        map_location="cpu")
    initial_dis_net_weight = torch.load(os.path.join(args.init_path,
                                                     'initial_dis_net.pth'),
                                        map_location="cpu")

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    exp_str = args.dir
    args.load_path = os.path.join('output', exp_str, 'pth',
                                  'epoch{}.pth'.format(args.load_epoch))

    # state dict:
    assert os.path.exists(args.load_path)
    checkpoint = torch.load(args.load_path)
    print('=> loaded checkpoint %s' % args.load_path)
    state_dict = checkpoint['generator']
    gen_net = load_subnet(args, state_dict, initial_gen_net_weight).cuda()
    avg_gen_net = deepcopy(gen_net)

    # set optimizer
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()), args.g_lr,
        (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()), args.d_lr,
        (args.beta1, args.beta2))
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    else:
        raise NotImplementedError('no fid stat for %s' % args.dataset.lower())
    assert os.path.exists(fid_stat)

    # epoch number for dis_net
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic /
                                 len(train_loader))

    # initial
    np.random.seed(args.random_seed)
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))

    start_epoch = 0
    best_fid = 1e4

    args.path_helper = set_log_dir('logs', args.exp_name)
    logger = create_logger(args.path_helper['log_path'])
    #logger.info('=> loaded checkpoint %s (epoch %d)' % (checkpoint_file, start_epoch))

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }
    gen_avg_param = copy_params(gen_net)
    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler,
                         dis_scheduler) if args.lr_decay else None
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer,
              gen_avg_param, train_loader, epoch, writer_dict, lr_schedulers)

        if epoch and epoch % args.val_freq == 0 or epoch == int(
                args.max_epoch) - 1:
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat,
                                                  gen_net, writer_dict)
            logger.info(
                'Inception score: %.4f, FID score: %.4f || @ epoch %d.' %
                (inception_score, fid_score, epoch))
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        avg_gen_net.load_state_dict(gen_net.state_dict())
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': args.model,
                'gen_state_dict': gen_net.state_dict(),
                'dis_state_dict': dis_net.state_dict(),
                'avg_gen_state_dict': avg_gen_net.state_dict(),
                'gen_optimizer': gen_optimizer.state_dict(),
                'dis_optimizer': dis_optimizer.state_dict(),
                'best_fid': best_fid,
                'path_helper': args.path_helper
            }, is_best, args.path_helper['ckpt_path'])
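`copy_params` and `load_params` maintain a moving-average copy of the generator weights that is swapped in for validation and swapped back out afterwards. A plausible sketch of these two helpers (the project's own versions may differ):

import torch


def copy_params(model):
    # Detached snapshot of every parameter tensor.
    return [p.detach().clone() for p in model.parameters()]


def load_params(model, saved_params):
    # Copy a saved parameter list back into the model in place.
    with torch.no_grad():
        for p, saved in zip(model.parameters(), saved_params):
            p.copy_(saved)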
Example No. 9
def main():

    args = parse_args()
    args.pretrain = False

    root_path = 'exps/exp_{}'.format(args.exp)

    if not os.path.exists(root_path):
        os.mkdir(root_path)
        os.mkdir(os.path.join(root_path, "log"))
        os.mkdir(os.path.join(root_path, "model"))

    base_lr = args.lr  # base learning rate

    train_dataset, val_dataset = build_dataset(args.dataset, args.data_root,
                                               args.train_list)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.num_workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             num_workers=args.num_workers,
                                             pin_memory=True)

    model = VNet(args.n_channels, args.n_classes).cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=0.0005)
    #scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.7)

    model = torch.nn.DataParallel(model)

    model.train()

    if args.resume is None:
        assert os.path.exists(args.load_path)
        state_dict = model.state_dict()
        print("Loading weights...")
        pretrain_state_dict = torch.load(args.load_path,
                                         map_location="cpu")['state_dict']

        for k in list(pretrain_state_dict.keys()):
            if k not in state_dict:
                del pretrain_state_dict[k]
        model.load_state_dict(pretrain_state_dict)
        print("Loaded weights")
    else:
        print("Resuming from {}".format(args.resume))
        checkpoint = torch.load(args.resume, map_location="cpu")

        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        model.load_state_dict(checkpoint['state_dict'])

    logger = Logger(root_path)
    saver = Saver(root_path)

    for epoch in range(args.start_epoch, args.epochs):
        train(model, train_loader, optimizer, logger, args, epoch)
        validate(model, val_loader, optimizer, logger, saver, args, epoch)
        adjust_learning_rate(args, optimizer, epoch)
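`adjust_learning_rate` is called once per epoch but is not shown; a common step-decay implementation with this signature looks like the sketch below (the project's actual schedule is unknown):

def adjust_learning_rate(args, optimizer, epoch):
    # Decay the base learning rate by 10x every 30 epochs (illustrative schedule).
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr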
valid_loss2 = []
valid_acc2 = []

train_loss3 = []
train_acc3 = []
valid_loss3 = []
valid_acc3 = []

train_loss4 = []
train_acc4 = []
valid_loss4 = []
valid_acc4 = []

for epoch in range(1, nb_epochs + 1):

    train_l, train_a = train(resnet2, train_loader, learning_rate, criterion,
                             epoch, batch_log, device)
    train_l, train_a = test(resnet2, train_loader, criterion, epoch, batch_log,
                            device)
    train_loss2.append(train_l)
    train_acc2.append(train_a)

    valid_l, valid_a = test(resnet2, valid_loader, criterion, epoch, batch_log,
                            device)
    valid_loss2.append(valid_l)
    valid_acc2.append(valid_a)

with open("compareResnet_log2.txt", "w") as output:
    output.write("ResNet\t size=16\t ratio=2^(-1/3)\t nratio=6 \n")
    output.write(str(train_loss2))
    output.write(str(train_acc2))
    output.write(str(valid_loss2))
Example No. 11
def main():
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network
    gen_net = eval('models.' + args.model + '.Generator')(args=args).cuda()
    dis_net = eval('models.' + args.model + '.Discriminator')(args=args).cuda()

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown init type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    # set optimizer
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()), args.g_lr,
        (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()), args.d_lr,
        (args.beta1, args.beta2))
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # epoch number for dis_net
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic /
                                 len(train_loader))

    # initial
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)
    start_epoch = 0
    best_fid = 1e4

    # set writer
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model',
                                       'checkpoint.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net

        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(
            f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler,
                         dis_scheduler) if args.lr_decay else None
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer,
              gen_avg_param, train_loader, epoch, writer_dict, lr_schedulers)

        if epoch and epoch % args.val_freq == 0 or epoch == int(
                args.max_epoch) - 1:
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat,
                                                  gen_net, writer_dict)
            logger.info(
                f'Inception score: {inception_score}, FID score: {fid_score} || @ epoch {epoch}.'
            )
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': args.model,
                'gen_state_dict': gen_net.state_dict(),
                'dis_state_dict': dis_net.state_dict(),
                'avg_gen_state_dict': avg_gen_net.state_dict(),
                'gen_optimizer': gen_optimizer.state_dict(),
                'dis_optimizer': dis_optimizer.state_dict(),
                'best_fid': best_fid,
                'path_helper': args.path_helper
            }, is_best, args.path_helper['ckpt_path'])
        del avg_gen_net
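`LinearLrDecay(optimizer, start_lr, end_lr, decay_start_step, decay_end_step)` is a project class rather than a stock PyTorch scheduler. One plausible implementation consistent with how it is constructed above:

class LinearLrDecay:
    # Linearly anneals the learning rate from start_lr to end_lr between two
    # global step counts; hypothetical implementation matching the constructor
    # calls above.
    def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):
        self.optimizer = optimizer
        self.start_lr = start_lr
        self.end_lr = end_lr
        self.decay_start_step = decay_start_step
        self.decay_end_step = decay_end_step

    def step(self, current_step):
        if current_step <= self.decay_start_step:
            lr = self.start_lr
        elif current_step >= self.decay_end_step:
            lr = self.end_lr
        else:
            progress = (current_step - self.decay_start_step) / (
                self.decay_end_step - self.decay_start_step)
            lr = self.start_lr + progress * (self.end_lr - self.start_lr)
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        return lr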
            data[3].append(float(time.time() - init_time))
            functions.us(us_dur, us_pin)
            data[3].append(float(time.time() - init_time))
            print(time.time() - init_time)
    else:
        if i in probe_trials:
            time.sleep(timing[i])
            data[3].append(float(time.time() - init_time))
            functions.cs(cs_dur, cs_pin)
            data[3].append(float(time.time() - init_time))
            print(time.time() - init_time)
            continue
        else:
            time.sleep(timing[i])
            data[3].append(float(time.time() - init_time))
            functions.train(cs_dur, us_dur, isi, cs_pin, us_pin)
            data[3].append(float(time.time() - init_time))
            print(time.time() - init_time)

# %%
#writing stuff

for i in range(0, len(data[0])):
    with open(data[0][i] + ".csv", 'w') as myfile:
        write = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        write.writerow(data[i + 1])

# %%
(data[1][5] - data[1][4]) * 1000

# %%
Example No. 13
def main():
    #define supported models
    allowed_models = [
        'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
    ]

    #Set up argument parser for console input
    parser = argparse.ArgumentParser(description='Train NN')
    parser.add_argument('data_dir',
                        help='directory containing sub-folders with data')
    parser.add_argument('--save_dir',
                        help='directory for saving checkpoint',
                        default='checkpoints')
    parser.add_argument('--arch',
                        help='pre-trained model architecture',
                        default='resnet18',
                        choices=allowed_models)
    parser.add_argument('--learning_rate',
                        help='learning rate during learning',
                        type=float,
                        default=0.01)
    parser.add_argument('--dropout',
                        help='dropout during learning',
                        type=float,
                        default=0.05)
    parser.add_argument('--hidden_units',
                        help='List of number of nodes in hidden layers',
                        nargs='+',
                        type=int,
                        default=[256, 128])
    parser.add_argument('--epochs',
                        help='Number of epochs for training',
                        default=3,
                        type=int)
    parser.add_argument('--gpu', help='Enable GPU', action='store_true')

    args = parser.parse_args()

    # Describe directories relative to working directory
    data_dir = args.data_dir
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    save_dir = args.save_dir

    # Set variables for console input arguments
    model_arch = args.arch
    model_hidden_units = args.hidden_units
    learning_rate = args.learning_rate
    drop = args.dropout

    #Testing area
    print('Data directory: ' + data_dir)
    print('hidden units: ' + str(args.hidden_units))
    print('Save directory: ' + save_dir)
    print('Architecture: ' + args.arch)

    #create save directory if not existing
    fu.create_directory(save_dir)

    # Loading Pre-Trained model dependent on console input arch
    model = getattr(models, model_arch)(pretrained=True)

    # Freeze parameters so we don't backprop through them
    for param in model.parameters():
        param.requires_grad = False

    # Create the network, define the criterion and optimizer
    model.fc = fu.Network(model.fc.in_features, 102, model_hidden_units, drop)
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.fc.parameters(), lr=learning_rate)

    device = torch.device(
        'cuda' if torch.cuda.is_available() and args.gpu else 'cpu')
    print('Device is: ', device)

    epochs = args.epochs
    print_every = 50
    running_loss = 0
    steps = 0

    train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load_transform.load_transform(
        data_dir, train_dir, valid_dir, test_dir)

    fu.train(device, model, epochs, criterion, optimizer, print_every,
             train_loader, test_loader, valid_loader)
    fu.save_checkpoint(model, model_arch, epochs, criterion, optimizer,
                       train_data, save_dir)

    return model, test_loader, criterion
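`fu.Network(model.fc.in_features, 102, model_hidden_units, drop)` builds the new classifier head; since it feeds `nn.NLLLoss`, it presumably ends in `LogSoftmax`. A hypothetical sketch of such a head:

import torch.nn as nn


class Network(nn.Module):
    # Fully connected head: Linear -> ReLU -> Dropout for each hidden size,
    # finishing with LogSoftmax so it pairs with NLLLoss.
    def __init__(self, in_features, out_features, hidden_units, drop):
        super().__init__()
        sizes = [in_features] + list(hidden_units)
        layers = []
        for n_in, n_out in zip(sizes[:-1], sizes[1:]):
            layers += [nn.Linear(n_in, n_out), nn.ReLU(), nn.Dropout(drop)]
        layers += [nn.Linear(sizes[-1], out_features), nn.LogSoftmax(dim=1)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)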
def predict_review(trie):
    review_typed = ''
    while True:
        next_chars = stdin.readline().rstrip('\r\n')
        if len(next_chars) == 0:
            return
        review_typed += next_chars
        preds = f.predict(review_typed, trie)
        try:
            print('{}{}{}{}{}'.format(preds[0], sep, preds[1], sep, preds[2]))
            stdout.flush()
        except Exception:
            print(
                ' {} {} '.format(sep, sep)
            )  # in the event that anything weird happens, pass three blank predictions
            stdout.flush()


trie = f.train(para.training_sample_size, para.training_file)

while True:
    try:
        print('Predicting review.', file=stderr)
        predict_review(trie)
    except EOFError:
        sys.exit(0)
    except Exception as e:
        print(e, file=stderr)
        sys.exit(1)
Example No. 15
File: train.py Project: nnuq/tpu
def main(index, args):
    device = xm.xla_device()

    gen_net = Generator(args).to(device)
    dis_net = Discriminator(args).to(device)
    enc_net = Encoder(args).to(device)

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown init type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)
    enc_net.apply(weights_init)

    ae_recon_optimizer = torch.optim.Adam(
        itertools.chain(enc_net.parameters(), gen_net.parameters()),
        args.ae_recon_lr, (args.beta1, args.beta2))
    ae_reg_optimizer = torch.optim.Adam(
        itertools.chain(enc_net.parameters(), gen_net.parameters()),
        args.ae_reg_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(dis_net.parameters(), args.d_lr,
                                     (args.beta1, args.beta2))
    gen_optimizer = torch.optim.Adam(gen_net.parameters(), args.g_lr,
                                     (args.beta1, args.beta2))

    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train
    valid_loader = dataset.valid
    para_loader = pl.ParallelLoader(train_loader, [device])

    fid_stat = str(pathlib.Path(
        __file__).parent.absolute()) + '/fid_stat/fid_stat_cifar10_test.npz'
    if not os.path.exists(fid_stat):
        download_stat_cifar10_test()

    is_best = True
    args.num_epochs = np.ceil(args.num_iter / len(train_loader))

    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0,
                                  args.num_iter / 2, args.num_iter)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0,
                                  args.num_iter / 2, args.num_iter)
    ae_recon_scheduler = LinearLrDecay(ae_recon_optimizer, args.ae_recon_lr, 0,
                                       args.num_iter / 2, args.num_iter)
    ae_reg_scheduler = LinearLrDecay(ae_reg_optimizer, args.ae_reg_lr, 0,
                                     args.num_iter / 2, args.num_iter)

    # initial
    start_epoch = 0
    best_fid = 1e4

    # set writer
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model',
                                       'checkpoint.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        enc_net.load_state_dict(checkpoint['enc_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        ae_recon_optimizer.load_state_dict(checkpoint['ae_recon_optimizer'])
        ae_reg_optimizer.load_state_dict(checkpoint['ae_reg_optimizer'])
        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(
            f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # create new log dir
        assert args.exp_name
        logs_dir = str(pathlib.Path(__file__).parent.parent) + '/logs'
        args.path_helper = set_log_dir(logs_dir, args.exp_name)
        logger = create_logger(args.path_helper['log_path'])

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.num_epochs)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler, dis_scheduler, ae_recon_scheduler,
                         ae_reg_scheduler)
        train(device, args, gen_net, dis_net, enc_net, gen_optimizer,
              dis_optimizer, ae_recon_optimizer, ae_reg_optimizer, para_loader,
              epoch, writer_dict, lr_schedulers)
        if epoch and epoch % args.val_freq == 0 or epoch == args.num_epochs - 1:
            fid_score = validate(args, fid_stat, gen_net, writer_dict,
                                 valid_loader)
            logger.info(f'FID score: {fid_score} || @ epoch {epoch}.')
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'gen_state_dict': gen_net.state_dict(),
                'dis_state_dict': dis_net.state_dict(),
                'enc_state_dict': enc_net.state_dict(),
                'gen_optimizer': gen_optimizer.state_dict(),
                'dis_optimizer': dis_optimizer.state_dict(),
                'ae_recon_optimizer': ae_recon_optimizer.state_dict(),
                'ae_reg_optimizer': ae_reg_optimizer.state_dict(),
                'best_fid': best_fid,
                'path_helper': args.path_helper
            }, is_best, args.path_helper['ckpt_path'])
Example No. 16
data = getData(ifPlotData=True)

## 3.2

A = rg.regressAData()
B = rg.regressBData()
v = rg.validateData()

X_train = A[0]
Y_train = A[1]
X_test = B[0]
Y_test = B[1]
X_val = v[0]
Y_val = v[1]

M_list = [1, 2, 5, 10]
lambda_list = [1e-5, 1e-2, 0.1, 1]

mod_A = f.model_eval(X_train, Y_train, M_list, lambda_list)
mod_B = f.model_select(X_val, Y_val, M_list, lambda_list, mod_A)

weights, errors, grid = f.train(B, v, M_list, lambda_list)

sse = fun.SSE(X_test, Y_test, 1)
w = weights[0, 0]
sse(w)
fun.graph_reg(X_train, Y_train, 1, w)

w, RSS_final = eval(B[0], B[1], weights[1, 0], M=2, l=1e-7, plot=True)
Example No. 17
siallcnn.to(device)

criterion = nn.CrossEntropyLoss()

all_optimizer = optim.Adam(allcnn.parameters(), lr=learning_rate)
siall_optimizer = optim.Adam(siallcnn.parameters(), lr=learning_rate)

siall_train_loss = []
siall_train_acc = []
siall_test_loss = []
siall_test_acc = []
siall_test_loss_sc = []
siall_test_acc_sc = []

for epoch in range(1, nb_epochs + 1):
    train_l, train_a = train(siallcnn, train_loader, siall_optimizer,
                             criterion, epoch, batch_log, device)
    test_l, test_a = test(siallcnn, test_loader, criterion, epoch, batch_log,
                          device)
    siall_train_loss.append(train_l)
    siall_train_acc.append(train_a)
    siall_test_loss.append(test_l)
    siall_test_acc.append(test_a)
    test_l_sc, test_a_sc = test(siallcnn, test_loader_sc, criterion, epoch,
                                batch_log, device)
    siall_test_loss_sc.append(test_l_sc)
    siall_test_acc_sc.append(test_a_sc)

with open("si_allcnn_log_scaled.txt", "w") as output:
    output.write(str(siall_train_loss))
    output.write(str(siall_train_acc))
    output.write(str(siall_test_loss))
def main(args):
    logging.info(args)
    writer = SummaryWriter(args.save_dir)

    best_prec1, best_ata, cln_best_prec1 = 0, 0, 0
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    cudnn.benchmark = True

    setup_seed(args.seed)

    model = get_network_func(args.norm_module)
    model.cuda()

    start_epoch = 0
    if args.resume:
        print('resume from checkpoint')
        checkpoint = torch.load(os.path.join(args.save_dir, 'model.pt'))
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])

    # dataset process
    train_datasets = datasets.CIFAR10(root=args.data,
                                      train=True,
                                      transform=transforms.Compose([
                                          transforms.RandomHorizontalFlip(),
                                          transforms.RandomCrop(32, 4),
                                          transforms.ToTensor()
                                      ]),
                                      download=True)

    test_dataset = datasets.CIFAR10(root=args.data,
                                    train=False,
                                    transform=transforms.Compose(
                                        [transforms.ToTensor()]),
                                    download=True)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=2,
                                              pin_memory=True)

    valid_size = 0.1
    indices = list(range(len(train_datasets)))
    split = int(np.floor(valid_size * len(train_datasets)))

    np.random.seed(args.seed)
    np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    train_loader = torch.utils.data.DataLoader(train_datasets,
                                               batch_size=args.batch_size,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(train_datasets,
                                             batch_size=args.batch_size,
                                             sampler=valid_sampler)

    decreasing_lr = list(map(int, args.decreasing_lr.split(',')))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=decreasing_lr,
                                                     gamma=0.1)

    print('starting adv training')

    all_result = {}
    train_acc = []
    ta0 = []
    ata0 = []
    ta1 = []
    ata1 = []

    test_ta0 = []
    test_ata0 = []
    test_ta1 = []
    test_ata1 = []

    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    for epoch in tqdm(range(args.epochs)):

        if epoch < start_epoch:
            scheduler.step()
            continue

        print(optimizer.state_dict()['param_groups'][0]['lr'])
        acc, train_loss = train(args, train_loader, model, criterion,
                                optimizer, epoch)
        writer.add_scalar('acc/train_acc', acc, epoch)
        writer.add_scalar('loss/train_loss', train_loss, epoch)

        # evaluate on validation set
        tacc_clean, tloss_clean = validate(args, val_loader, model, criterion,
                                           0)
        writer.add_scalar('acc/tacc_clean', tacc_clean, epoch)
        writer.add_scalar('loss/tloss_clean', tloss_clean, epoch)

        atacc_clean, atloss_clean = validate_adv(args, val_loader, model,
                                                 criterion, 0)
        writer.add_scalar('acc/atacc_clean', atacc_clean, epoch)
        writer.add_scalar('loss/atloss_clean', atloss_clean, epoch)

        tacc_adv, tloss_adv = validate(args, val_loader, model, criterion, 1)
        writer.add_scalar('acc/tacc_adv', tacc_adv, epoch)
        writer.add_scalar('loss/tloss_adv', tloss_adv, epoch)

        atacc_adv, atloss_adv = validate_adv(args, val_loader, model,
                                             criterion, 1)
        writer.add_scalar('acc/atacc_adv', atacc_adv, epoch)
        writer.add_scalar('loss/atloss_adv', atloss_adv, epoch)

        # evaluate on test set
        # clean branch
        test_tacc_clean, test_tloss_clean = validate(args, test_loader, model,
                                                     criterion, 0)
        writer.add_scalar('acc/test_tacc_clean', test_tacc_clean, epoch)
        writer.add_scalar('loss/test_tloss_clean', test_tloss_clean, epoch)

        test_atacc_clean, test_atloss_clean = validate_adv(
            args, test_loader, model, criterion, 0)
        writer.add_scalar('acc/test_atacc_clean', test_atacc_clean, epoch)
        writer.add_scalar('loss/test_atloss_clean', test_atloss_clean, epoch)

        # adv branch
        test_tacc_adv, test_tloss_adv = validate(args, test_loader, model,
                                                 criterion, 1)
        writer.add_scalar('acc/test_tacc_adv', test_tacc_adv, epoch)
        writer.add_scalar('loss/test_tloss_adv', test_tloss_adv, epoch)

        test_atacc_adv, test_atloss_adv = validate_adv(args, test_loader,
                                                       model, criterion, 1)
        writer.add_scalar('acc/test_atacc_adv', test_atacc_adv, epoch)
        writer.add_scalar('loss/test_atloss_adv', test_atloss_adv, epoch)

        scheduler.step()

        train_acc.append(acc)
        ta0.append(tacc_clean)
        ata0.append(atacc_clean)
        ta1.append(tacc_adv)
        ata1.append(atacc_adv)

        test_ta0.append(test_tacc_clean)
        test_ata0.append(test_atacc_clean)
        test_ta1.append(test_tacc_adv)
        test_ata1.append(test_atacc_adv)

        # remember best prec@1 and save checkpoint
        is_best = tacc_adv > best_prec1
        best_prec1 = max(tacc_adv, best_prec1)

        ata_is_best = atacc_adv > best_ata
        best_ata = max(atacc_adv, best_ata)

        cln_is_best = tacc_clean > cln_best_prec1
        cln_best_prec1 = max(tacc_clean, cln_best_prec1)

        if is_best:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'ata_best_prec1': best_ata,
                },
                is_best,
                filename=os.path.join(args.save_dir, 'best_model.pt'))

        if cln_is_best:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'ata_best_prec1': best_ata,
                },
                is_best,
                filename=os.path.join(args.save_dir, 'clean_best_model.pt'))

        if ata_is_best:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'ata_best_prec1': best_ata,
                },
                is_best,
                filename=os.path.join(args.save_dir, 'ata_best_model.pt'))

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'ata_best_prec1': best_ata,
            },
            is_best,
            filename=os.path.join(args.save_dir, 'model.pt'))

        if epoch and epoch % 10 == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'ata_best_prec1': best_ata,
                },
                is_best,
                filename=os.path.join(args.save_dir, f'model_epoch{epoch}.pt'))

        all_result['train'] = train_acc
        all_result['test_ta0'] = test_ta0
        all_result['test_ata0'] = test_ata0
        all_result['test_ta1'] = test_ta1
        all_result['test_ata1'] = test_ata1
        all_result['ta0'] = ta0
        all_result['ata0'] = ata0
        all_result['ta1'] = ta1
        all_result['ata1'] = ata1

        pickle.dump(all_result,
                    open(os.path.join(args.save_dir, 'result_c.pkl'), 'wb'))
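`setup_seed(args.seed)` is not shown; a typical reproducibility helper with that name seeds every RNG in use (hypothetical, but consistent with the seeding done in the other examples):

import random

import numpy as np
import torch


def setup_seed(seed):
    # Seed Python, NumPy and PyTorch (CPU and all GPUs) for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True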
def main():
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)

    # Setting up the resnet model
    resnet = torchvision.models.resnet50(pretrained=args.pretrained,
                                         progress=True)
    num_features = resnet.fc.in_features
    resnet.fc = nn.Linear(num_features, args.num_classes)
    resnet = resnet.cuda()

    # Setting up the optimizer
    if args.optimizer == 'sgd':
        optimizer = optim.SGD(resnet.parameters(),
                              lr=args.lr,
                              weight_decay=1e-4)
    elif args.optimizer == 'sgd_momentum':
        optimizer = optim.SGD(resnet.parameters(),
                              lr=args.lr,
                              momentum=0.9,
                              weight_decay=1e-4)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, resnet.parameters()), args.g_lr,
            (args.beta1, args.beta2))
    else:
        optimizer = None
    assert optimizer is not None

    criterion = nn.CrossEntropyLoss()

    if args.percentage == 1.0:
        train_data, val_data, test_data = get_train_validation_test_data(
            args.train_csv_path, args.train_img_path, args.val_csv_path,
            args.val_img_path, args.test_csv_path, args.test_img_path)
    else:
        train_data = get_label_unlabel_dataset(args.train_csv_path,
                                               args.train_img_path,
                                               args.percentage)
        _, val_data, test_data = get_train_validation_test_data(
            args.train_csv_path, args.train_img_path, args.val_csv_path,
            args.val_img_path, args.test_csv_path, args.test_img_path)

    train_loader = DataLoader(train_data,
                              batch_size=args.train_batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=args.num_workers)
    val_loader = DataLoader(val_data,
                            batch_size=args.eval_batch_size,
                            shuffle=True,
                            drop_last=True,
                            num_workers=args.num_workers)
    test_loader = DataLoader(test_data,
                             batch_size=args.eval_batch_size,
                             shuffle=True,
                             drop_last=True,
                             num_workers=args.num_workers)
    print('Training Datasize:', len(train_data))

    start_epoch = 0
    best_acc1 = 0
    best_acc2 = 0
    best_acc3 = 0

    # set writer
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model',
                                       'checkpoint_last.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        resnet.load_state_dict(checkpoint['resnet_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(
            f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    start = time.time()
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        best_curr_acc1, best_curr_acc2, best_curr_acc3 = train(
            args, resnet, optimizer, criterion, train_loader, val_loader,
            epoch, writer_dict, best_acc1, best_acc2, best_acc3)

        best_acc1, best_acc2, best_acc3 = best_curr_acc1, best_curr_acc2, best_curr_acc3

        if epoch and epoch % args.val_freq == 0 or epoch == int(
                args.max_epoch) - 1:
            val_acc = get_val_acc(val_loader, resnet)
            logger.info(f'Validation Accuracy {val_acc} || @ epoch {epoch}.')

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'resnet_state_dict': resnet.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'path_helper': args.path_helper
            },
            False,
            False,
            False,
            args.path_helper['ckpt_path'],
            filename='checkpoint_last.pth')

    end = time.time()
    final_val_acc = get_val_acc(val_loader, resnet)
    final_test_acc = get_val_acc(test_loader, resnet)
    time_elapsed = end - start

    print('\n Final Validation Accuracy:', final_val_acc.data,
          '\n Final Test Accuracy:', final_test_acc.data, '\n Time Elapsed:',
          time_elapsed, 'seconds.')
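`get_val_acc(loader, resnet)` returns a tensor (the final prints access `.data`), so a plausible sketch computes accuracy on the GPU:

import torch


def get_val_acc(loader, model):
    # Fraction of correctly classified samples, returned as a CUDA tensor.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.cuda(), labels.cuda()
            predictions = model(images).argmax(dim=1)
            correct += (predictions == labels).sum()
            total += labels.size(0)
    return correct.float() / total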
Example No. 20
def main():
    # for reproduciblity
    random_seed = 2020
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    args = parse_args()
    reset_config(config, args)

    # model loading
    model = get_pose_net(config, is_train=True)
    # model = model.half()
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    logging.info(f"Training on CUDA: {torch.cuda.is_available()}")
    model = model.to("cuda" if torch.cuda.is_available() else "cpu")

    # Data loading process
    train_dataset = coco(
        config,
        config.DATASET.ROOT,
        config.DATASET.TRAIN_SET,
        is_train=True,
        is_eval=False,
        transform=tfms.RandomErasing(p=0.8, scale=(0.5, 0.5)),
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE,
        shuffle=True,
        drop_last=True)

    valid_dataset = coco(config,
                         config.DATASET.ROOT,
                         config.DATASET.TEST_SET,
                         is_train=False,
                         is_eval=True,
                         transform=None)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE,
        shuffle=False,
        drop_last=False)

    start_epoch = config.TRAIN.BEGIN_EPOCH
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=5e-4,
                                 weight_decay=1e-5,
                                 eps=1e-5)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[90, 120],
                                                     gamma=0.1)
    losschecker = BestLossChecker()

    if config.MODEL.CHECKPOINT is not None:
        info = load_checkpoint(config.MODEL.CHECKPOINT)
        if info is not None:
            start_epoch, model_dic, optim_dic, sched_dic = info

            try:
                model.load_state_dict(model_dic)
                logging.info('Model Loaded.')

                optimizer.load_state_dict(optim_dic)
                logging.info('Optimizer Loaded.')

                if sched_dic is not None:
                    scheduler.load_state_dict(sched_dic)
                else:
                    scheduler.last_epoch = start_epoch
                scheduler.optimizer.load_state_dict(optim_dic)
                logging.info('Scheduler Loaded.')
                logging.info('All Weights Loaded...\n')
            except Exception as e:
                start_epoch = config.TRAIN.BEGIN_EPOCH
                logging.info('Model shape is different. Plz check.')
                logging.info('Starts with init weights...\n')

    end = time.time()
    logging.info('Training Ready\n')

    for epoch in range(start_epoch, config.TRAIN.END_EPOCH):
        if epoch == 10:
            config.TEST.FLIP_TEST = True

        if epoch % 5 == 0 and epoch != 0:
            result = evaluate(config, model, valid_loader)

            if epoch % 100 == 0 and epoch != 0:
                # create the result directory before writing into it
                os.makedirs(config.result_dir, exist_ok=True)
                with open(f'{config.result_dir}/data.json', 'w') as f:
                    json.dump(result, f)

            valid_dataset.keypoint_eval(result, config.result_dir + '/pred')
            valid_dataset.keypoint_eval(
                './data/input_pose_path/keypoints_valid2017_results.json',
                config.result_dir + '/ori/')
            end = time.time()

        losses = train(config,
                       epoch=epoch,
                       loader=train_loader,
                       model=model,
                       optimizer=optimizer)
        total_loss, hm_loss, coord_loss = losses
        is_best = losschecker.update(epoch, total_loss, hm_loss, coord_loss)

        try:
            state_dict = model.module.state_dict()
        except Exception as e:
            state_dict = model.state_dict()

        save_checkpoint(
            {
                'epoch': epoch,
                'model': get_model_name(config),
                'state_dict': state_dict,
                'optimizer': optimizer.state_dict(),
            }, is_best, "./weights_2")

        scheduler.step()
        spent = time.time() - end
        hour = int(spent // 3600)
        minute = int((spent - hour * 3600) // 60)  # avoid shadowing the builtin min()
        second = spent - hour * 3600 - minute * 60

        logging.info(
            f"Epoch {epoch} taken {hour:d}h{minute:2d}m {second:2.3f}s\n")
        end = time.time()
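`BestLossChecker` only needs to report whether the latest epoch produced the lowest total loss so far; a minimal sketch consistent with `losschecker.update(epoch, total_loss, hm_loss, coord_loss)`:

class BestLossChecker:
    # Tracks the best (lowest) total loss seen so far; update() returns True
    # when the current epoch sets a new best. Hypothetical implementation.
    def __init__(self):
        self.best_loss = None
        self.best_epoch = None

    def update(self, epoch, total_loss, hm_loss, coord_loss):
        if self.best_loss is None or total_loss < self.best_loss:
            self.best_loss = total_loss
            self.best_epoch = epoch
            return True
        return False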
Example No. 21
    logging.info('Reading train set')
    ncs = read_ncs(args.p2tc)

    logging.info('Creating vector for training instances')
    X, Y = get_vectors(ncs, gensim_w2v_model)
    if model_config.poly_degree > 1:
        X = get_poly_features(X, model_config.poly_degree)

    logging.info('Creating batches')
    in_batches, tar_batches = create_batch(X, Y, model_config.batch_size)

    logging.info('Creating the regression model')
    model, optimizer, criterion = build_model(X, Y)

    logging.info('Training')
    train(in_batches, tar_batches, model, model_config.nb_epochs, optimizer,
          criterion)

    logging.info('Calculating regression-based scores')
    reg_score = regression_score(eval_ncs, gensim_w2v_model, model)
    write_score(eval_ncs, reg_score, args.p2out + 'reg_scores.csv')

    print('Spearman rho bet. inv human score and regression',
          scipy.stats.spearmanr(reg_score, eval_scores_inv))
    print('Spearman rho bet. inv human score and additive score',
          scipy.stats.spearmanr(additive_score, eval_scores_inv))

    if args.rank == 'true':
        print('Ranking based on regression model: ',
              rank_with_score(eval_ncs, reg_score))
        print('Ranking based on additive model:  ',
              rank_with_score(eval_ncs, additive_score))
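`create_batch(X, Y, model_config.batch_size)` returns parallel lists of input and target batches; a straightforward sketch using `torch.split` (assumed, since the batches are later fed to a PyTorch regression model):

import torch


def create_batch(X, Y, batch_size):
    # Split the feature and target tensors into aligned batches.
    X = torch.as_tensor(X, dtype=torch.float32)
    Y = torch.as_tensor(Y, dtype=torch.float32)
    in_batches = list(torch.split(X, batch_size))
    tar_batches = list(torch.split(Y, batch_size))
    return in_batches, tar_batches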
Example No. 22
from sklearn import tree
from variables import *
from functions import train, make_conversions, output_data, test

# train the program 
name,Pclass,Sex,SibSp,Parch,Embarked,Survived = train()
make_conversions(Pclass,Sex,SibSp,Parch,Embarked)

# test the program; this receives the inputs from the test set
name,Pclass,Sex,SibSp,Parch,Embarked = test()
make_conversions(Pclass,Sex,SibSp,Parch,Embarked)

# fill x with all possible parameters for each person
for i in range(0,len(name)):
    z.append(pclass_conv[i])
    z.append(sex_conv[i])
    z.append(sibsp_conv[i])
    z.append(parch_conv[i])
    z.append(embarked_conv[i])
    x.append(z)
    z = []

# apply to sklearns function
y = Survived
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x,y)

# output a csv to compare results to the original training dataset
output_data(name,Pclass,Sex,SibSp,Parch,Survived,Embarked,clf)

Example No. 23
resnet.to(device)

criterion = nn.CrossEntropyLoss()

optimizer = optim.Adam(resnet.parameters(), lr=learning_rate)

all_train_loss = []
all_train_acc = []
all_test_loss = []
all_test_acc = []

dynamics = []

for epoch in range(1, nb_epochs + 1):

    train_l, train_a = train(resnet, train_loader, optimizer, criterion,
                             epoch, batch_log, device)
    # re-evaluate on the training set so the logged metrics come from eval mode
    train_l, train_a = test(resnet, train_loader, criterion, epoch, batch_log,
                            device)
    all_train_loss.append(train_l)
    all_train_acc.append(train_a)

    test_l, test_a = test(resnet, test_loader_sc, criterion, epoch, batch_log,
                          device)
    all_test_loss.append(test_l)
    all_test_acc.append(test_a)

    dynamics.append({
        "epoch": epoch,
        "train_loss": train_l,
        "train_acc": train_a,
        "test_loss": test_l,
        "test_acc": test_a
    })
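# A simple way to persist the collected per-epoch metrics for later plotting;
# the file name is illustrative and JSON is only one possible format.
import json

with open('training_dynamics.json', 'w') as f:
    json.dump(dynamics, f, indent=2)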
Exemplo n.º 24
0
                    dest='dropout',
                    action='store',
                    type=float,
                    default=0.05)
parser.add_argument('--lr',
                    dest='lr',
                    action='store',
                    type=float,
                    default=0.001)
parser.add_argument('--save_directory',
                    dest='save_directory',
                    action='store',
                    default='./checkpoint.pth')

parser = parser.parse_args()
epochs = parser.epochs
lr = parser.lr
structure = parser.structure
dropout = parser.dropout
hidden_layer1 = parser.hidden_layer1
hidden_layer2 = parser.hidden_layer2
power = parser.gpu

train_loaders, valid_loaders, test_loaders = functions.read_data(data_dir)
## load the model
model, optimizer, criterion = functions.model_setup(structure, epochs, dropout,
                                                    hidden_layer1,
                                                    hidden_layer2, lr, power)
## train the model
functions.train(model, epochs, criterion, optimizer, train_loaders,
                valid_loaders, power)
## save the model
functions.save_checkpoint(parser.save_directory, hidden_layer1, hidden_layer2,
                          dropout, lr, epochs)
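# Example invocation (the script name and values are illustrative; flags other
# than the ones defined in this fragment are added earlier in the full script):
#   python train.py --dropout 0.05 --lr 0.001 --save_directory ./checkpoint.pth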
Exemplo n.º 25
0
#!/usr/bin/env python3
import sys
import numpy as np
from functions import train, test

#Load in files
training_file = sys.argv[1]
testing_file = sys.argv[2]

#Convert to 1D array if necessary
train_test = [np.loadtxt(training_file), np.loadtxt(testing_file)]
for i in range(0, 2):
    if len(train_test[i].shape) < 2:
        train_test[i] = np.array([train_test[i]])

tree = []
train(train_test[0], tree, 1)
#indices = np.argsort(tree,axis=0)
#for i in indices[:,0]:
#print(tree[i])

test(train_test[1], tree)
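# Example invocation (file names are illustrative): the first argument is the
# training set and the second the test set, both whitespace-delimited matrices
# readable by np.loadtxt:
#   python decision_tree.py train_data.txt test_data.txt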
Exemplo n.º 26
0
    models = [
        kanazawa(f_in, ratio, nratio, srange=0),
        kanazawa(f_in, ratio, nratio, srange),
        kanazawa_big(f_in, ratio, nratio, srange=0),
        kanazawa(f_in, ratio=2**(1 / 3), nratio=6, srange=0),
        kanazawa(f_in, ratio=2**(1 / 3), nratio=6, srange=2)
    ]

    for m, model in enumerate(models):
        print("trial {}, model {}".format(ii, m))
        model.to(device)
        model_log = open("mnist_trained_model_{}_{}_k.pickle".format(m, ii),
                         "wb")

        for epoch in range(1, nb_epochs + 1):
            # as in the earlier example, only the eval-mode metrics from test()
            # on the training set are kept; train()'s return values are overwritten
            train_l, train_a = train(model, train_loader, learning_rate,
                                     criterion, epoch, batch_log, device)
            train_l, train_a = test(model, train_loader, criterion, epoch,
                                    batch_log, device)
            dyn = {"epoch": epoch, "train_loss": train_l, "train_acc": train_a}
            pickle.dump(dyn, model_log)
        pickle.dump(model, model_log)
        model_log.close()

        #lists of last test loss and acc for each scale with model ii
        s_test_loss = []
        s_test_acc = []
        for s in scales:
            test_transf = transforms.Compose([
                transforms.Resize(40),
                RandomRescale(size=40, scales=(s, s), sampling="uniform"),
                transforms.ToTensor(),
Exemplo n.º 27
0
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # Loss
    criterion = CrossEntropyLoss(cfg.MODEL.NUM_CLASSES).cuda()

    # model and optimizer
    print(f"Definining network with {cfg.MODEL.LAYERS} layers...")
    model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES, cfg.MODEL.LAYERS, criterion, primitives_2,
                    drop_path_prob=cfg.TRAIN.DROPPATH_PROB)
    model = model.cuda()

    # weight params
    arch_params = list(map(id, model.arch_parameters()))
    weight_params = filter(lambda p: id(p) not in arch_params,
                           model.parameters())

    # Optimizer
    optimizer = optim.Adam(
        weight_params,
        lr=cfg.TRAIN.LR
    )

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint_best.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_acc1 = checkpoint['best_acc1']
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs_search', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_acc1 = 0.0
        last_epoch = -1

    logger.info(args)
    logger.info(cfg)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, 'models', cfg.MODEL.NAME + '.py'),
        args.path_helper['ckpt_path'])

    # Datasets and dataloaders

    # The toy dataset is downloaded with 10 items per partition; remove the
    # sample_size parameter to use the full toy dataset.
    asv_train, asv_dev, asv_eval = asv_toys(sample_size=10)


    train_dataset = asv_train #MNIST('mydata', transform=totensor, train=True, download=True)
    val_dataset = asv_dev #MNIST('mydata', transform=totensor, train=False, download=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    print(f'search.py: Train loader of {len(train_loader)} batches')
    print(f'Tot train set: {len(train_dataset)}')
    val_loader = torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    print(f'search.py: Val loader of {len(val_loader)} batches')
    print(f'Tot val set {len(val_dataset)}')
    test_dataset = asv_eval #MNIST('mydata', transform=totensor, train=False, download=True)
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
    architect = Architect(model, cfg)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, cfg.TRAIN.END_EPOCH, cfg.TRAIN.LR_MIN,
        last_epoch=last_epoch
    )

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH), desc='search progress'):
        model.train()

        genotype = model.genotype()
        logger.info('genotype = %s', genotype)

        if cfg.TRAIN.DROPPATH_PROB != 0:
            model.drop_path_prob = cfg.TRAIN.DROPPATH_PROB * epoch / (cfg.TRAIN.END_EPOCH - 1)

        train(cfg, model, optimizer, train_loader, val_loader, criterion, architect, epoch, writer_dict)

        if epoch % cfg.VAL_FREQ == 0:
            # get threshold and evaluate on validation set
            acc = validate_identification(cfg, model, test_loader, criterion)

            # remember best acc@1 and save checkpoint
            is_best = acc > best_acc1
            best_acc1 = max(acc, best_acc1)

            # save
            logger.info('=> saving checkpoint to {}'.format(args.path_helper['ckpt_path']))
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
                'arch': model.arch_parameters(),
                'genotype': genotype,
                'path_helper': args.path_helper
            }, is_best, args.path_helper['ckpt_path'], 'checkpoint_{}.pth'.format(epoch))

        lr_scheduler.step(epoch)  # passing an epoch to step() is deprecated in recent PyTorch; a bare step() is preferred
Exemplo n.º 28
0
from functions import load_data, train, evaluate, plot_result
from keras.models import Sequential
from keras.layers import Dense, Dropout, LeakyReLU

(x_train, y_train), (x_test, y_test) = load_data()

model = Sequential()
model.add(Dense(784, activation='relu', input_shape=x_train.shape[1:]))
model.add(Dense(128))
model.add(LeakyReLU(alpha=0.01))
model.add(Dense(64))
model.add(LeakyReLU(alpha=0.01))
model.add(Dropout(rate=0.25))
model.add(Dense(128))
model.add(LeakyReLU(alpha=0.01))
model.add(Dense(64))
model.add(LeakyReLU(alpha=0.01))
model.add(Dense(10, activation='softmax'))

model.compile(
    loss='categorical_crossentropy',
    optimizer='sgd', 
    metrics=['accuracy']
)

history = train(model, x_train, y_train)
plot_result(history)
evaluate(model, x_test, y_test)
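# train() and evaluate() are imported from functions and not shown; a minimal
# sketch of the Keras calls they presumably wrap. The epoch count, batch size
# and validation split below are illustrative assumptions.
def train_sketch(model, x_train, y_train, epochs=10, batch_size=128):
    return model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
                     validation_split=0.1)

def evaluate_sketch(model, x_test, y_test):
    loss, acc = model.evaluate(x_test, y_test, verbose=0)
    print(f'test loss: {loss:.4f}, test accuracy: {acc:.4f}')
    return loss, acc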
Exemplo n.º 29
0
    test_iter = gdata.DataLoader(
        gdata.ArrayDataset(*d2l.preprocess_imdb(test_data, vocab)), batch_size)
    ## 4. Specify hyper-parameters and build the model
    embed_size, num_hiddens, num_layers, ctx = 100, 200, 2, d2l.try_all_gpus()
    net = BiRNN(vocab, embed_size, num_hiddens, num_layers)  # instantiate a bidirectional RNN
    net.initialize(init.Xavier(), ctx=ctx)  # initialize the model parameters
    ## 4.1 Load the pre-trained word vectors
    print("Loading pre-trained word vectors; they are downloaded automatically if not available locally")
    glove_embedding = text.embedding.create(
        'glove', pretrained_file_name='glove.6B.100d.txt',
        vocabulary=vocab)  # the 100 in the file name is the word-vector dimension
    print("Pre-trained word vectors loaded")
    ## 4.2 Initialize the embedding layer with the pre-trained vectors and freeze it
    net.embedding.weight.set_data(glove_embedding.idx_to_vec)
    net.embedding.collect_params().setattr('grad_req', 'null')

    ## 5. Set the learning rate and number of epochs, then train the model
    lr, num_epochs = 0.01, 5
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': lr})
    loss = gloss.SoftmaxCrossEntropyLoss()
    loss_list, train_acc_list, test_acc_list, time_list = d2l.train(
        train_iter, test_iter, net, loss, trainer, ctx, num_epochs)

    ## 5.2 Plot the training loss curve
    plt.plot(loss_list)  # per-epoch loss values
    plt.legend(["loss"])  # optional; labels the plotted curve as "loss"
    plt.show()  # display the figure
    # 6. Call predict to test the model
    test_str = "this movie is so great"  # a single sentence to classify
    print("Prediction for the test sentence:", predict(net, vocab, test_str))
Exemplo n.º 30
0
import functions
import classes

pic_file = "../data/monalisa.jpg"

s_o_m = functions.initialize_som()

image = classes.Dataset(pic_file)

functions.train(image, s_o_m)

functions.save_som(s_o_m, pic_file)

functions.draw(s_o_m)

functions.reproduce(image, s_o_m, image.size[0], image.size[1])

# import after database updated with new SOM
import db_check

db_check.similar_som()
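# functions.initialize_som and functions.train are project-specific and not
# shown; purely as an illustration of the underlying idea, a colour SOM can be
# built with the third-party minisom package (the project's own implementation
# may differ entirely, and the grid size and iteration count are assumptions).
import numpy as np
from minisom import MiniSom
from PIL import Image

def initialize_som_sketch(width=20, height=20):
    # 3 input features: one per RGB channel
    return MiniSom(width, height, 3, sigma=1.0, learning_rate=0.5)

def train_sketch(pic_file, som, iterations=10000):
    pixels = np.asarray(Image.open(pic_file).convert("RGB"), dtype=float) / 255.0
    som.train_random(pixels.reshape(-1, 3), iterations)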
Exemplo n.º 31
0
def main():
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)
    torch.backends.cudnn.deterministic = True

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # epoch number for dis_net
    dataset = datasets.ImageDataset(args, cur_img_size=8)
    train_loader = dataset.train
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter / len(train_loader))
    else:
        args.max_iter = args.max_epoch * len(train_loader)
    args.max_epoch = args.max_epoch * args.n_critic

    # import network
    gen_net = eval('models.' + args.gen_model + '.Generator')(args=args).cuda()
    dis_net = eval('models.' + args.dis_model +
                   '.Discriminator')(args=args).cuda()
    gen_net.set_arch(args.arch, cur_stage=2)

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown initial type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    gpu_ids = [i for i in range(int(torch.cuda.device_count()))]
    gen_net = torch.nn.DataParallel(gen_net.to("cuda:0"), device_ids=gpu_ids)
    dis_net = torch.nn.DataParallel(dis_net.to("cuda:0"), device_ids=gpu_ids)

    gen_net.module.cur_stage = 0
    dis_net.module.cur_stage = 0
    gen_net.module.alpha = 1.
    dis_net.module.alpha = 1.

    # set optimizer
    if args.optimizer == "adam":
        gen_optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, gen_net.parameters()), args.g_lr,
            (args.beta1, args.beta2))
        dis_optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, dis_net.parameters()), args.d_lr,
            (args.beta1, args.beta2))
    elif args.optimizer == "adamw":
        gen_optimizer = AdamW(filter(lambda p: p.requires_grad,
                                     gen_net.parameters()),
                              args.g_lr,
                              weight_decay=args.wd)
        dis_optimizer = AdamW(filter(lambda p: p.requires_grad,
                                     dis_net.parameters()),
                              args.d_lr,
                              weight_decay=args.wd)
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    elif args.fid_stat is not None:
        fid_stat = args.fid_stat
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # initial
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (64, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)
    start_epoch = 0
    best_fid = 1e4

    # set writer
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path)
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        #         avg_gen_net = deepcopy(gen_net)
        #         avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = checkpoint['gen_avg_param']
        #         del avg_gen_net
        cur_stage = cur_stages(start_epoch, args)
        gen_net.module.cur_stage = cur_stage
        dis_net.module.cur_stage = cur_stage
        gen_net.module.alpha = 1.
        dis_net.module.alpha = 1.

        args.path_helper = checkpoint['path_helper']

    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)

    logger = create_logger(args.path_helper['log_path'])
    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    def return_states():
        states = {}
        states['epoch'] = epoch
        states['best_fid'] = best_fid
        states['gen_state_dict'] = gen_net.state_dict()
        states['dis_state_dict'] = dis_net.state_dict()
        states['gen_optimizer'] = gen_optimizer.state_dict()
        states['dis_optimizer'] = dis_optimizer.state_dict()
        states['gen_avg_param'] = gen_avg_param
        states['path_helper'] = args.path_helper
        return states

    # train loop

    for epoch in range(start_epoch + 1, args.max_epoch):
        train(
            args,
            gen_net,
            dis_net,
            gen_optimizer,
            dis_optimizer,
            gen_avg_param,
            train_loader,
            epoch,
            writer_dict,
            fixed_z,
        )
        backup_param = copy_params(gen_net)
        load_params(gen_net, gen_avg_param)
        fid_score = validate(
            args,
            fixed_z,
            fid_stat,
            epoch,
            gen_net,
            writer_dict,
        )
        logger.info(f'FID score: {fid_score} || @ epoch {epoch}.')
        load_params(gen_net, backup_param)
        is_best = False
        if epoch == 1 or fid_score < best_fid:
            best_fid = fid_score
            is_best = True
        if is_best or epoch % 1 == 0:  # 'epoch % 1 == 0' always holds, so a checkpoint is written every epoch
            states = return_states()
            save_checkpoint(states,
                            is_best,
                            args.path_helper['ckpt_path'],
                            filename=f'checkpoint_epoch_{epoch}.pth')
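# copy_params / load_params (used above to swap the averaged generator weights
# in and out around validation) are defined elsewhere; a minimal sketch under
# the assumption that they snapshot and restore parameter tensors.
import copy

def copy_params_sketch(net):
    return copy.deepcopy([p.data for p in net.parameters()])

def load_params_sketch(net, saved_params):
    for p, saved in zip(net.parameters(), saved_params):
        p.data.copy_(saved)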