Example #1
def main(args):

    # Image preprocessing
    transform = transforms.Compose([ 
        transforms.ToTensor(), 
        transforms.Normalize((0.033, 0.032, 0.033), 
                             (0.027, 0.027, 0.027))])

    # Load vocabulary wrapper
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build Models
    #encoder = AttnEncoder(ResidualBlock, [3, 3, 3])
    encoder = ResNet(ResidualBlock, [3, 3, 3], args.embed_size)
    encoder.eval()  # evaluation mode (BN uses moving mean/variance)
    # decoder = AttnDecoderRnn(args.feature_size, args.hidden_size, 
    #                     len(vocab), args.num_layers)
    decoder = DecoderRNN(args.embed_size, args.hidden_size, 
                         len(vocab), args.num_layers)

    print('loading model parameters...')

    # Load the trained model parameters
    encoder.load_state_dict(torch.load(args.encoder_path))
    decoder.load_state_dict(torch.load(args.decoder_path))

    print('model parameters loaded')

    # If use gpu
    if torch.cuda.is_available():
        encoder.cuda(1)
        decoder.cuda(1)


    trg_bitmap_dir = args.root_path + 'bitmap/'
    save_directory = 'predict_base/'
    svg_from_out = args.root_path + save_directory + 'svg/'        # svg from output caption
    bitmap_from_out = args.root_path + save_directory + 'bitmap/'  # bitmap from output caption

    if not os.path.exists(bitmap_from_out):
        os.makedirs(bitmap_from_out)
    if not os.path.exists(svg_from_out):
        os.makedirs(svg_from_out)

    test_list = os.listdir(trg_bitmap_dir)
    for i, fname in enumerate(test_list): 
        print(fname)
        test_path = trg_bitmap_dir + fname
        test_image = load_image(test_path, transform)
        image_tensor = to_var(test_image)
        in_sentence = gen_caption_from_image(image_tensor, encoder, decoder, vocab)
        print(in_sentence)
        image_matrix = cv2.imread(test_path)
        doc = gen_svg_from_predict(in_sentence.split(' '), image_matrix)

        base = fname.split('.')[0]
        with open(os.path.join(svg_from_out, base + '.svg'), 'w+') as f:
            f.write(doc)
        cairosvg.svg2png(url=svg_from_out + base + '.svg',
                         write_to=bitmap_from_out + fname)
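
# Example #1 relies on two helpers that are not shown. A minimal sketch,
# assuming the pre-0.4 torch.autograd.Variable API that the rest of the
# snippet uses:
from PIL import Image
import torch
from torch.autograd import Variable

def load_image(image_path, transform=None):
    # Read an image, run the preprocessing pipeline, add a batch dimension.
    image = Image.open(image_path).convert('RGB')
    if transform is not None:
        image = transform(image).unsqueeze(0)
    return image

def to_var(x, volatile=False):
    # Wrap a tensor in a Variable, moving it to the GPU when available.
    if torch.cuda.is_available():
        x = x.cuda()
    return Variable(x, volatile=volatile)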
Example #2
def face_train():
    MAX_EPOCH = 30
    LR = 0.002

    net = ResNet(FACE_CLASS_NUM)
    if USE_GPU:
        net.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
    train_curve = []
    for epoch in range(MAX_EPOCH):
        correct = 0
        loss_mean = 0
        total = 0
        net.train()
        for i, data in enumerate(train_loader):
            inputs, labels = data
            if USE_GPU:
                inputs, labels = inputs.cuda(), labels.cuda()
            outputs = net(inputs)
            optimizer.zero_grad()
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            if USE_GPU:
                correct += (predicted == labels).squeeze().sum().cpu().numpy()
            else:
                correct += (predicted == labels).squeeze().sum().numpy()
            loss_mean += loss.item()
            if (i + 1) % 20 == 0:
                loss_mean = loss_mean / 20
                train_curve.append(loss_mean)
                print(
                    'Training: Epoch [{}/{}], Iteration[{}/{}], Loss:{:.4f}, acc:{:.4f} '
                    .format(epoch + 1, MAX_EPOCH, i + 1, len(train_loader),
                            loss_mean, correct / total))
                loss_mean = 0

    torch.save(net, FACE_MODEL_PATH)
    try:
        torch.save(net.state_dict(), './model/face2.pkl')
    except Exception as e:
        print(e)
        print('error')

    train_x = range(len(train_curve))
    train_y = train_curve

    plt.plot(train_x, train_y, label='Train')
    plt.legend(loc='upper right')
    plt.ylabel('loss value')
    plt.xlabel('Iteration')
    plt.show()
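
# torch.save(net, FACE_MODEL_PATH) above pickles the entire module, while the
# state_dict also goes to ./model/face2.pkl. A minimal reload sketch, assuming
# the same ResNet / FACE_CLASS_NUM definitions as the training script:
import torch

def face_load():
    net = ResNet(FACE_CLASS_NUM)
    net.load_state_dict(torch.load('./model/face2.pkl'))
    net.eval()  # freeze BN statistics for inference
    return net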
Example #3
def main(args):
    # create model
    if "resnet" in args.backbone or "resnext" in args.backbone:
        print('resnet', args.att)
        model = ResNet(args)
    elif 'b' in args.backbone:
        model = EfficientNet.from_pretrained(f'efficientnet-{args.backbone}',
                                             8)

    if args.input_level == 'per-study':
        # add decoder if train per-study
        if args.conv_lstm:
            decoder = ConvDecoder(args)
        else:
            decoder = Decoder(args)

        encoder = model
        model = (encoder, decoder)

    if args.input_level == 'per-study':
        model[0].cuda(), model[1].cuda()
    else:
        model = model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))

            checkpoint = torch.load(args.resume, "cpu")
            input_level = checkpoint['input_level']
            assert input_level == args.input_level
            if args.input_level == 'per-study':
                encoder, decoder = model
                load_state_dict(checkpoint.pop('encoder'), encoder)
                load_state_dict(checkpoint.pop('decoder'), decoder)
            else:
                load_state_dict(checkpoint.pop('state_dict'), model)

            # load_state_dict(checkpoint.pop('state_dict'), model)
            epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            print(
                f"=> loaded checkpoint '{args.resume}' (loss {best_loss:.4f}@{epoch})"
            )
        else:
            raise ValueError("=> no checkpoint found at '{}'".format(
                args.resume))

    # if args.to_stack:
    #     loader = get_test_dl(args)
    #     to_submit(args, model, loader)
    # else:
    if args.val:
        val_dl = get_val_dl(args)
        to_stacking_on_val(args, model, val_dl)
    else:
        test_dl = get_test_dl(args)
        to_stacking_on_test(args, model, test_dl)
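
# The load_state_dict helper called above is not shown; helpers of this kind
# commonly strip the 'module.' prefix that nn.DataParallel adds to keys. A
# sketch under that assumption, not necessarily the project's actual code:
def load_state_dict(state_dict, model):
    cleaned = {k[len('module.'):] if k.startswith('module.') else k: v
               for k, v in state_dict.items()}
    model.load_state_dict(cleaned)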
Example #4
def main():
    parameters = get_parameter()
    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100


    trainset = dataloader(root=args.dataroot, train=True, download=True, transform=transform_train)
    trainloader = data.DataLoader(dataset=trainset, batch_size=parameters['batch_size'], shuffle=False)

    testset = dataloader(root=args.dataroot, train=False, download=False, transform=transform_test)
    testloader = data.DataLoader(testset, batch_size=parameters['batch_size'], shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format("Resnet"))
    model = ResNet(depth=args.depth, num_classes=num_classes)
    model = model.cuda() 
    print('Model on cuda')
    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=parameters['lr'], momentum=parameters['momentum'], weight_decay=args.weight_decay)


    # Train and val
    result = 0.0
    for epoch in range(parameters['epoch']):
        adjust_learning_rate(optimizer, epoch)
        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
        print('Epoch[{}/{}]: LR: {:.3f}, Train loss: {:.5f}, Test loss: {:.5f}, Train acc: {:.2f}, Test acc: {:.2f}.'.format(
            epoch + 1, parameters['epoch'], state['lr'],
            train_loss, test_loss, train_acc, test_acc))
        report_intermediate_result(float(test_acc))
        report_loss(float(test_loss))
        result = test_acc
        # print('Rank:{} Epoch[{}/{}]: LR: {:.3f}, Train loss: {:.5f}, Train acc: {:.2f}'.format(dist.get_rank(),epoch+1, args.epochs, state['lr'],train_loss, train_acc))
    report_final_result(float(result))
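
# adjust_learning_rate is not defined in this snippet; step decay is the usual
# pattern. A minimal sketch -- the base LR and schedule constants here are
# assumptions, not the project's values:
def adjust_learning_rate(optimizer, epoch, base_lr=0.1, decay=0.1, every=60):
    lr = base_lr * (decay ** (epoch // every))
    for group in optimizer.param_groups:
        group['lr'] = lr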
Example #5
def train_overview(train_dataloader, val_dataloader):
    global output_train, output_val

    output_dir = create_train_output_dir()
    output_train = os.path.join(output_dir, 'train.csv')
    output_val = os.path.join(output_dir, 'val.csv')

    with open(output_train, 'w') as train_csv:
        train_csv.write('{}\n'.format(','.join(fieldnames)))
    with open(output_val, 'w') as val_csv:
        val_csv.write('{}\n'.format(','.join(fieldnames)))

    print('Creating output dir {}'.format(output_dir))

    print("=> creating Model ({}-{}) ...".format(args.encoder, args.decoder))
    model = ResNet(args.encoder,
                   args.decoder,
                   args.dims,
                   args.output_size,
                   pre_trained=True)

    print("=> model created.")
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
    model = model.cuda()

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedL2Loss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    for epoch in range(args.n_epochs):
        utils.modify_learning_rate(optimizer, epoch, args.learning_rate)
        train(train_dataloader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result = validate(val_dataloader, model)  # evaluate on validation set

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                'encoder': args.encoder,
                'model': model,
                'optimizer': optimizer,
            }, epoch, output_dir)
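
# utils.save_checkpoint above stores the live model and optimizer objects
# rather than their state_dicts, so resuming is a direct unpickle. A sketch,
# with a hypothetical checkpoint filename:
import torch

checkpoint = torch.load('checkpoint-9.pth.tar')
model = checkpoint['model']
optimizer = checkpoint['optimizer']
start_epoch = checkpoint['epoch'] + 1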
Example #6
def main(args):
    # Create model directory
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    # Image preprocessing
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.033, 0.032, 0.033), (0.027, 0.027, 0.027))
    ])

    # Build vocab
    vocab = build_vocab(args.root_path, threshold=0)
    vocab_path = args.vocab_path
    with open(vocab_path, 'wb') as f:
        pickle.dump(vocab, f)
    len_vocab = vocab.idx
    print(vocab.idx2word)

    # Build data loader
    data_loader = get_loader(args.root_path,
                             vocab,
                             transform,
                             args.batch_size,
                             shuffle=True,
                             num_workers=args.num_workers)

    # Build the models
    encoder = ResNet(ResidualBlock, [3, 3, 3], args.embed_size)
    decoder = DecoderRNN(args.embed_size, args.hidden_size, len(vocab),
                         args.num_layers)

    # Move the models to GPU if available
    if torch.cuda.is_available():
        encoder.cuda(1)
        decoder.cuda(1)

    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    params = list(decoder.parameters()) + list(encoder.parameters())
    optimizer = torch.optim.Adam(params, lr=args.learning_rate)

    # Train the Models
    total_step = len(data_loader)
    for epoch in range(args.num_epochs):
        for i, (images, captions, lengths) in enumerate(data_loader):

            # make one hot
            # cap_ = torch.unsqueeze(captions,2)
            # one_hot_ = torch.FloatTensor(captions.size(0),captions.size(1),len_vocab).zero_()
            # one_hot_caption = one_hot_.scatter_(2, cap_, 1)

            # Set mini-batch dataset
            images = to_var(images)
            captions = to_var(captions)
            #captions_ = to_var(one_hot_caption)

            targets = pack_padded_sequence(captions, lengths,
                                           batch_first=True)[0]
            # Forward, Backward and Optimize
            optimizer.zero_grad()
            features = encoder(images)
            outputs = decoder(features, captions, lengths)

            captions = captions.view(-1)
            outputs = outputs.view(-1, len_vocab)

            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            #print(targets)
            #print(outputs)

            # Print log info
            if i % args.log_step == 0:
                print(
                    'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
                    % (epoch, args.num_epochs, i, total_step, loss.data[0],
                       np.exp(loss.data[0])))

                #test set accuracy
                #print(outputs.max(1)[1])
                outputs_np = outputs.max(1)[1].cpu().data.numpy()
                targets_np = targets.cpu().data.numpy()

                print(outputs_np)
                print(targets_np)

                location_match = 0
                size_match = 0
                shape_match = 0
                exact_match = 0
                # use a fresh index so the dataloader step counter `i`
                # (reused by the save condition below) is not clobbered
                for j in range(len(targets_np)):
                    if outputs_np[j] == targets_np[j]:
                        exact_match += 1
                    if args.batch_size <= j < args.batch_size * 2 and \
                            outputs_np[j] == targets_np[j]:
                        shape_match += 1
                    elif args.batch_size * 2 <= j < args.batch_size * 3 and \
                            outputs_np[j] == targets_np[j]:
                        location_match += 1
                    elif args.batch_size * 3 <= j < args.batch_size * 4 and \
                            outputs_np[j] == targets_np[j]:
                        size_match += 1

                print(
                    'location match : %.4f, shape match : %.4f, exact_match: %.4f'
                    % (location_match / (args.batch_size), shape_match /
                       args.batch_size, exact_match / len(targets_np)))

            # Save the models
            if (i + 1) % args.save_step == 0:
                torch.save(
                    decoder.state_dict(),
                    os.path.join(args.model_path,
                                 'decoder-%d-%d.pkl' % (epoch + 1, i + 1)))
                torch.save(
                    encoder.state_dict(),
                    os.path.join(args.model_path,
                                 'encoder-%d-%d.pkl' % (epoch + 1, i + 1)))
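
# How the targets above are built: pack_padded_sequence flattens a padded
# batch time-step by time-step and drops the padding, which is why the loss
# compares it against the decoder outputs directly. A self-contained
# illustration (captions must be sorted by decreasing length):
import torch
from torch.nn.utils.rnn import pack_padded_sequence

caps = torch.tensor([[1, 2, 3],
                     [4, 5, 0]])  # two captions, padded to length 3
lengths = [3, 2]
targets = pack_padded_sequence(caps, lengths, batch_first=True)[0]
print(targets)  # tensor([1, 4, 2, 5, 3])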
Example #7
                        num_workers=opt.threads,
                        batch_size=4,
                        shuffle=False)

print('===> Building model...')
model = ResNet(layers=18,
               decoder='deconv2',
               output_size=(240, 960),
               in_channels=4,
               pretrained=True)
model = nn.DataParallel(model, device_ids=opt.device_ids)  #multi-GPU
criterion_mse = MaskedMSELoss()
criterion_depth = MaskedL1Loss()

if torch.cuda.is_available():
    model = model.cuda()
    criterion_mse = criterion_mse.cuda()
    criterion_depth = criterion_depth.cuda()

model.module.train()
print(model)

print('===> Parameters:', sum(param.numel() for param in model.parameters()))

print('===> Initialize Optimizer...')
optimizer = optim.SGD(model.parameters(),
                      lr=opt.lr,
                      momentum=opt.momentum,
                      weight_decay=opt.weight_decay)

print('===> Initialize Logger...')
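
# MaskedMSELoss / MaskedL1Loss are not shown; in depth-completion code they
# typically average the error only over pixels that have valid ground truth
# (target > 0). A sketch of that idea, not necessarily this project's classes:
import torch.nn as nn

class MaskedMSELoss(nn.Module):
    def forward(self, pred, target):
        valid = (target > 0).detach()
        diff = pred[valid] - target[valid]
        return (diff ** 2).mean()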
Example #8
def main(args):
    # Model settings
    model = ResNet()
    if args.cuda:
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), args.lr, weight_decay=args.wd)
    if args.ckpt > 0:
        ckpt_name = 'resnet152'
        if args.poison:
            ckpt_name += '-poison'
        ckpt_name += '-' + str(args.ckpt) + '.pkl'
        ckpt_path = os.path.join('./ckpt', ckpt_name)
        print('Loading checkpoint from {}'.format(ckpt_path))
        dct = torch.load(ckpt_path)
        model.load_state_dict(dct['model'])
        optimizer.load_state_dict(dct['optim'])

    # Data loader settings
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((64, 64)),
        transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
    ])
    aug_transform = transforms.Compose([
        transforms.RandomChoice([
            # do nothing
            transforms.Compose([]),
            # horizontal flip
            transforms.RandomHorizontalFlip(1.),
            # random crop
            transforms.RandomResizedCrop(64),
            # rotate
            transforms.RandomRotation(30)
        ]),
        transforms.ToTensor(),
        transforms.Resize((64, 64)),
        transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
    ])
    task_dir = '/data/csnova1/benchmarks/%s' % args.task
    poison_dir = '/data/csnova1/poison'
    poison_config = get_poison_config()
    if args.task == "cifar10":
        Loader = CIFAR10Loader
        PoisonedILoader = PoisonedCIFAR10Loader
    train_loader = Loader(root=task_dir,
                          batch_size=args.batch_size,
                          split='train',
                          transform=aug_transform)
    test_loader = PoisonedILoader(root=task_dir,
                                  poison_root=poison_dir,
                                  poison_config=poison_config,
                                  poison_num=6,
                                  batch_size=args.batch_size,
                                  split="val",
                                  transform=transform)

    # Start
    if args.run == "train":
        train(args, train_loader, model, optimizer)
    elif args.run == "test":
        evaluate(args, test_loader, model)
Example #9
])

# CIFAR-10 Dataset
train_dataset = dsets.CIFAR10(root='../datasets/',
                              train=True,
                              transform=transform,
                              download=True)

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

# Model
resnet = ResNet(block=ResidualBlock, layers=[2, 2, 2])
resnet = resnet.cuda()

# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(resnet.parameters(), lr=lr)

# Training
for epoch in range(epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.cuda()
        labels = labels.cuda()
        images = Variable(images)
        labels = Variable(labels)

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = resnet(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
Example #10
                      '--load',
                      dest='load',
                      default=False,
                      help='load file model')

    (options, args) = parser.parse_args()
    return options


if __name__ == '__main__':

    args = get_args()

    net = ResNet()

    if args.load:
        if args.gpu:
            net.load_state_dict(torch.load(args.load))
        else:
            net.load_state_dict(torch.load(args.load, map_location='cpu'))
        print('Model loaded from %s' % (args.load))

    if args.gpu:
        net.cuda()
        cudnn.benchmark = True

    train_net(net=net,
              epochs=args.epochs,
              gpu=args.gpu,
              data_dir=args.data_dir)
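
# The option definitions above are truncated; a minimal optparse-based
# get_args() consistent with the attributes the script reads (flag spellings
# and defaults below are assumptions):
from optparse import OptionParser

def get_args():
    parser = OptionParser()
    parser.add_option('--load', dest='load', default=False,
                      help='load file model')
    parser.add_option('--gpu', dest='gpu', action='store_true', default=False)
    parser.add_option('--epochs', dest='epochs', type='int', default=5)
    parser.add_option('--data-dir', dest='data_dir', default='./data/')
    options, _ = parser.parse_args()
    return options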
Example #11
class face_learner(object):
    def __init__(self, conf):
        print(conf)
        self.model = ResNet()
        self.model.cuda()
        if conf.initial:
            self.model.load_state_dict(torch.load("models/"+conf.model))
            print('Load model_ir_se101.pth')
        self.milestones = conf.milestones
        self.loader, self.class_num = get_train_loader(conf)
        self.total_class = 16520
        self.data_num = 285356
        self.writer = SummaryWriter(conf.log_path)
        self.step = 0
        self.paras_only_bn, self.paras_wo_bn = separate_bn_paras(self.model)

        if conf.meta:
            self.head = Arcface(embedding_size=conf.embedding_size, classnum=self.total_class)
            self.head.cuda()
            if conf.initial:
                self.head.load_state_dict(torch.load("models/head_op.pth"))
                print('Load head_op.pth')
            self.optimizer = RAdam([
                {'params': self.paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
                {'params': self.paras_only_bn}
            ], lr=conf.lr)
            self.meta_optimizer = RAdam([
                {'params': self.paras_wo_bn + [self.head.kernel], 'weight_decay': 5e-4},
                {'params': self.paras_only_bn}
            ], lr=conf.lr)
            self.head.train()
        else:
            self.head = dict()
            self.optimizer = dict()
            for race in races:
                self.head[race] = Arcface(embedding_size=conf.embedding_size, classnum=self.class_num[race])
                self.head[race].cuda()
                if conf.initial:
                    self.head[race].load_state_dict(torch.load("models/head_op_{}.pth".format(race)))
                    print('Load head_op_{}.pth'.format(race))
                self.optimizer[race] = RAdam([
                    {'params': self.paras_wo_bn + [self.head[race].kernel], 'weight_decay': 5e-4},
                    {'params': self.paras_only_bn}
                ], lr=conf.lr, betas=(0.5, 0.999))
                self.head[race].train()
            # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=40, verbose=True)

        self.board_loss_every = min(len(self.loader[race]) for race in races) // 10
        self.evaluate_every = self.data_num // 5
        self.save_every = self.data_num // 2
        self.eval, self.eval_issame = get_val_data(conf)

    def save_state(self, conf, accuracy, extra=None, model_only=False, race='All'):
        save_path = 'models/'
        torch.save(
            self.model.state_dict(),
            save_path + 'model_{}_accuracy-{}_step-{}_{}_{}.pth'.format(
                get_time(), accuracy, self.step, extra, race))
        if not model_only:
            if conf.meta:
                torch.save(
                    self.head.state_dict(),
                    save_path + 'head_{}_accuracy-{}_step-{}_{}_{}.pth'.format(
                        get_time(), accuracy, self.step, extra, race))
                # torch.save(
                #     self.optimizer.state_dict(),
                #     save_path + 'optimizer_{}_accuracy-{}_step-{}_{}_{}.pth'.format(
                #         get_time(), accuracy, self.step, extra, race))
            else:
                torch.save(
                    self.head[race].state_dict(),
                    save_path + 'head_{}_accuracy-{}_step-{}_{}_{}.pth'.format(
                        get_time(), accuracy, self.step, extra, race))
                # torch.save(
                #     self.optimizer[race].state_dict(),
                #     save_path + 'optimizer_{}_accuracy-{}_step-{}_{}_{}.pth'.format(
                #         get_time(), accuracy, self.step, extra, race))

    def load_state(self, conf, fixed_str, model_only=False):
        save_path = 'models/'
        self.model.load_state_dict(torch.load(save_path + conf.model))
        if not model_only:
            self.head.load_state_dict(torch.load(save_path + conf.head))
            self.optimizer.load_state_dict(torch.load(save_path + conf.optim))

    def board_val(self, db_name, accuracy, best_threshold, roc_curve_tensor):
        self.writer.add_scalar('{}_accuracy'.format(db_name), accuracy, self.step)
        self.writer.add_scalar('{}_best_threshold'.format(db_name), best_threshold, self.step)
        self.writer.add_image('{}_roc_curve'.format(db_name), roc_curve_tensor, self.step)

        # self.writer.add_scalar('{}_val:true accept ratio'.format(db_name), val, self.step)
        # self.writer.add_scalar('{}_val_std'.format(db_name), val_std, self.step)
        # self.writer.add_scalar('{}_far:False Acceptance Ratio'.format(db_name), far, self.step)

    def evaluate(self, conf, carray, issame, nrof_folds=5, tta=False):
        self.model.eval()
        idx = 0
        entry_num = carray.size()[0]
        embeddings = np.zeros([entry_num, conf.embedding_size])
        with torch.no_grad():
            while idx + conf.batch_size <= entry_num:
                batch = carray[idx:idx + conf.batch_size]
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.cuda()) + self.model(fliped.cuda())
                    embeddings[idx:idx + conf.batch_size] = l2_norm(emb_batch).cpu().detach().numpy()
                else:
                    embeddings[idx:idx + conf.batch_size] = self.model(batch.cuda()).cpu().detach().numpy()
                idx += conf.batch_size
            if idx < entry_num:
                batch = carray[idx:]
                if tta:
                    fliped = hflip_batch(batch)
                    emb_batch = self.model(batch.cuda()) + self.model(fliped.cuda())
                    embeddings[idx:] = l2_norm(emb_batch).cpu().detach().numpy()
                else:
                    embeddings[idx:] = self.model(batch.cuda()).cpu().detach().numpy()
        tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame, nrof_folds)
        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)
        roc_curve_tensor = trans.ToTensor()(roc_curve)
        return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor

    def train_finetuning(self, conf, epochs, race):
        self.model.train()
        running_loss = 0.
        for e in range(epochs):
            print('epoch {} started'.format(e))
            '''
            if e == self.milestones[0]:
                for ra in races:
                    for params in self.optimizer[ra].param_groups:
                        params['lr'] /= 10
            if e == self.milestones[1]:
                for ra in races:
                    for params in self.optimizer[ra].param_groups:
                        params['lr'] /= 10
            if e == self.milestones[2]:
                for ra in races:
                    for params in self.optimizer[ra].param_groups:
                        params['lr'] /= 10
            '''
            for imgs, labels in tqdm(iter(self.loader[race])):
                imgs = imgs.cuda()
                labels = labels.cuda()
                self.optimizer[race].zero_grad()
                embeddings = self.model(imgs)
                thetas = self.head[race](embeddings, labels)
                loss = conf.ce_loss(thetas, labels)
                loss.backward()
                running_loss += loss.item()
                nn.utils.clip_grad_norm_(self.model.parameters(), conf.max_grad_norm)
                nn.utils.clip_grad_norm_(self.head[race].parameters(), conf.max_grad_norm)
                self.optimizer[race].step()

                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.

                if self.step % (1 * len(self.loader[race])) == 0 and self.step != 0:
                    self.save_state(conf, 'None', race=race, model_only=True)

                self.step += 1

        self.save_state(conf, 'None', extra='final', race=race)
        torch.save(self.optimizer[race].state_dict(), 'models/optimizer_{}.pth'.format(race))

    def train_maml(self, conf, epochs):
        self.model.train()
        running_loss = 0.
        loader_iter = dict()
        for race in races:
            loader_iter[race] = iter(self.loader[race])
        for e in range(epochs):
            print('epoch {} started'.format(e))
            if e == self.milestones[0]:
                self.schedule_lr()
            if e == self.milestones[1]:
                self.schedule_lr()
            if e == self.milestones[2]:
                self.schedule_lr()
            for i in tqdm(range(self.data_num // conf.batch_size)):
                ra1, ra2 = random.sample(races, 2)
                try:
                    imgs1, labels1 = next(loader_iter[ra1])
                except StopIteration:
                    loader_iter[ra1] = iter(self.loader[ra1])
                    imgs1, labels1 = next(loader_iter[ra1])

                try:
                    imgs2, labels2 = next(loader_iter[ra2])
                except StopIteration:
                    loader_iter[ra2] = iter(self.loader[ra2])
                    imgs2, labels2 = next(loader_iter[ra2])

                ## save original weights to make the update
                weights_original_model = deepcopy(self.model.state_dict())
                weights_original_head = deepcopy(self.head.state_dict())

                # base learn
                imgs1 = imgs1.cuda()
                labels1 = labels1.cuda()
                self.optimizer.zero_grad()
                embeddings1 = self.model(imgs1)
                thetas1 = self.head(embeddings1, labels1)
                loss1 = conf.ce_loss(thetas1, labels1)
                loss1.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), conf.max_grad_norm)
                nn.utils.clip_grad_norm_(self.head.parameters(), conf.max_grad_norm)
                self.optimizer.step()

                # meta learn
                imgs2 = imgs2.cuda()
                labels2 = labels2.cuda()
                embeddings2 = self.model(imgs2)
                thetas2 = self.head(embeddings2, labels2)
                self.model.load_state_dict(weights_original_model)
                self.head.load_state_dict(weights_original_head)
                self.meta_optimizer.zero_grad()
                loss2 = conf.ce_loss(thetas2, labels2)
                loss2.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), conf.max_grad_norm)
                nn.utils.clip_grad_norm_(self.head.parameters(), conf.max_grad_norm)
                self.meta_optimizer.step()

                running_loss += loss2.item()

                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.

                if self.step % self.evaluate_every == 0 and self.step != 0:
                    for race in races:
                        accuracy, best_threshold, roc_curve_tensor = self.evaluate(conf, self.eval[race], self.eval_issame[race])
                        self.board_val(race, accuracy, best_threshold, roc_curve_tensor)
                    self.model.train()

                if self.step % (self.data_num // conf.batch_size // 2) == 0 and self.step != 0:
                    self.save_state(conf, e)

                self.step += 1

        self.save_state(conf, epochs, extra='final')

    def train_meta_head(self, conf, epochs):
        self.model.train()
        running_loss = 0.
        optimizer = optim.SGD(self.head.parameters(), lr=conf.lr, momentum=conf.momentum)
        for e in range(epochs):
            print('epoch {} started'.format(e))
            if e == self.milestones[0]:
                self.schedule_lr()
            if e == self.milestones[1]:
                self.schedule_lr()
            if e == self.milestones[2]:
                self.schedule_lr()
            for race in races:
                for imgs, labels in tqdm(iter(self.loader[race])):
                    imgs = imgs.cuda()
                    labels = labels.cuda()
                    optimizer.zero_grad()
                    embeddings = self.model(imgs)
                    thetas = self.head(embeddings, labels)
                    loss = conf.ce_loss(thetas, labels)
                    loss.backward()
                    running_loss += loss.item()
                    optimizer.step()

                    if self.step % self.board_loss_every == 0 and self.step != 0:
                        loss_board = running_loss / self.board_loss_every
                        self.writer.add_scalar('train_loss', loss_board, self.step)
                        running_loss = 0.

                    self.step += 1

            torch.save(self.head.state_dict(), 'models/head_{}_meta_{}.pth'.format(get_time(), e))

    def train_race_head(self, conf, epochs, race):
        self.model.train()
        running_loss = 0.
        optimizer = optim.SGD(self.head[race].parameters(), lr=conf.lr, momentum=conf.momentum)
        for e in range(epochs):
            print('epoch {} started'.format(e))
            if e == self.milestones[0]:
                self.schedule_lr()
            if e == self.milestones[1]:
                self.schedule_lr()
            if e == self.milestones[2]:
                self.schedule_lr()
            for imgs, labels in tqdm(iter(self.loader[race])):
                imgs = imgs.cuda()
                labels = labels.cuda()
                optimizer.zero_grad()
                embeddings = self.model(imgs)
                thetas = self.head[race](embeddings, labels)
                loss = conf.ce_loss(thetas, labels)
                loss.backward()
                running_loss += loss.item()
                optimizer.step()

                if self.step % self.board_loss_every == 0 and self.step != 0:
                    loss_board = running_loss / self.board_loss_every
                    self.writer.add_scalar('train_loss', loss_board, self.step)
                    running_loss = 0.

                self.step += 1

        torch.save(self.head[race].state_dict(), 'models/head_{}_{}_{}.pth'.format(get_time(), race, epochs))

    def schedule_lr(self):
        for params in self.optimizer.param_groups:
            params['lr'] /= 10
        for params in self.meta_optimizer.param_groups:
            params['lr'] /= 10
        print(self.optimizer, self.meta_optimizer)
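
# separate_bn_paras is not shown; judging by its use above, it splits the
# parameters so BatchNorm weights are exempt from weight decay. A sketch under
# that assumption:
import torch.nn as nn

def separate_bn_paras(model):
    paras_only_bn, paras_wo_bn = [], []
    for module in model.modules():
        params = list(module.parameters(recurse=False))
        if isinstance(module, nn.modules.batchnorm._BatchNorm):
            paras_only_bn.extend(params)
        else:
            paras_wo_bn.extend(params)
    return paras_only_bn, paras_wo_bn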
Example #12
def main():
    seed_init()

    process = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=0.4422, std=0.1931),
    ])

    att_dataset = CustomDataset(PATH, transform=process)
    dataset = list(att_dataset)
    train = dataset[:30]
    test = dataset[30:]

    resnet18 = ResNet()
    print(f"The No. of Parameters in Model are : {count_parameters(resnet18)}")

    torch.set_grad_enabled(True)
    resnet18.train(True)

    learning_rate = LEARNING_RATE
    optimizer = optim.Adam(resnet18.parameters(), lr=learning_rate)
    torch_triplet_loss = nn.TripletMarginLoss()
    if CUDA:
        resnet18 = resnet18.cuda()

    cost = [float('inf')]
    train_acc = [0]
    test_acc = [0]
    epochs = EPOCHS

    #### TRAINING ####
    for epoch in range(epochs):

        triplets = get_random_triplets(train)
        loader = DataLoader(triplets, batch_size=BATCH_SIZE)
        steps = len(loader)
        print("Lenght of Loader:", steps)
        for i, batch in enumerate(loader):

            loss = Train(resnet18, batch, torch_triplet_loss, optimizer, cost)

            pred = Evaluate(resnet18, train)
            acc1 = ((pred == torch.arange(len(pred)).reshape(-1, 1)).sum() /
                    (len(pred) * 10)).item()
            train_acc.append(acc1)

            pred = Evaluate(resnet18, test)
            acc2 = ((pred == torch.arange(len(pred)).reshape(-1, 1)).sum() /
                    (len(pred) * 10)).item()
            test_acc.append(acc2)

            if (i + 1) % 1 == 0:
                print(
                    f'Epoch:[{epoch+1}/{epochs}], Step:[{i+1}/{steps}]',
                    'Cost : {:.2f}, Train Acc: {:.2f}, Test Acc: {:.2f}'.
                    format(loss, acc1, acc2))
                # print(f'Epoch:[{epoch+1}/{epochs}], Step:[{i+1}/87]', 'Cost : {:.2f}'.format(loss))

    plt.figure(figsize=(12, 10))
    plt.title("Learning Curves")
    plt.xlabel('Total Iterations')
    plt.ylabel('Cost')
    plt.plot(np.arange(len(cost)), cost, label='cost')
    plt.plot(np.arange(len(train_acc)), train_acc, label='train_acc')
    plt.plot(np.arange(len(test_acc)), test_acc, label='test_acc')
    plt.grid(alpha=0.5)
    plt.legend()
    # plt.savefig('/content/drive/MyDrive/Colab Notebooks/siamese-orl-loss on 30classes(resnet)')
    plt.show()

    #### END TRAINING ####

    torch.save(resnet18.state_dict(), SAVE_PATH)

    torch.set_grad_enabled(False)
    resnet18.train(False)

    test_pred = Evaluate(resnet18, test)
    test_acc = (
        (test_pred == torch.arange(len(test_pred)).reshape(-1, 1)).sum() /
        (len(test_pred) * 10)).item()

    train_pred = Evaluate(resnet18, train)
    train_acc = (
        (train_pred == torch.arange(len(train_pred)).reshape(-1, 1)).sum() /
        (len(train_pred) * 10)).item()

    total_pred = Evaluate(resnet18, dataset)
    total_acc = (
        (total_pred == torch.arange(len(total_pred)).reshape(-1, 1)).sum() /
        (len(total_pred) * 10)).item()

    print('Train Acc: {:.2f}\nTest Acc: {:.2f}\nTotal Acc: {:.2f}'.format(
        train_acc, test_acc, total_acc))
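
# count_parameters is not defined here; the usual helper counts trainable
# parameters only:
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)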
Example #13
def main(args):
    best_loss = float('inf')
    global logger

    best_epoch = 0

    set_random_seed(args.seed)

    # create model
    if "resnet" in args.backbone or "resnext" in args.backbone:
        model = ResNet(args)
    # elif 'b' in args.backbone:
    #     model = EfficientNet.from_pretrained(f'efficientnet-{args.backbone}', 8)
    # elif 'd' in args.backbone:
    #     from densenet import DenseNet
    #     model = DenseNet(args)

    if args.input_level == 'per-study':
        # add decoder if train per-study

        if args.conv_lstm:
            decoder = ConvDecoder(args)
        else:
            decoder = Decoder(args)

        encoder = model
        model = (encoder, decoder)

        # decoder_hooks = [RemoveNanGradHook(m) for name, m in decoder._modules.items()]
        # encoder_hookds = [RemoveNanGradHook(m) for name, m in encoder._modules.items()]

    criterion = nn.BCEWithLogitsLoss(weight=args.class_weight,
                                     reduction='none')

    optimizer = make_optimizer(args, model)

    if args.input_level == 'per-study':
        model[0].cuda(), model[1].cuda()
    else:
        model = model.cuda()
    criterion = criterion.cuda()
    # amp expects a module or a list of modules; only unpack the tuple case
    to_amp = list(model) if args.input_level == 'per-study' else model
    to_amp, optimizer = amp.initialize(
        to_amp,
        optimizer,
        opt_level=args.opt_level,
        verbosity=0,  # silence apex initialization logging
        keep_batchnorm_fp32=True)
    model = tuple(to_amp) if args.input_level == 'per-study' else to_amp

    train_loader = get_train_dl(args)
    val_loader = get_val_dl(args)

    scheduler = LR_Scheduler('cos',
                             base_lr=args.lr,
                             num_epochs=args.epochs,
                             iters_per_epoch=len(train_loader),
                             warmup_epochs=args.warmup)

    ####################################
    # finetune from checkpoint
    ####################################
    if args.finetune:
        if os.path.isfile(args.finetune):
            print("=> loading checkpoint '{}'".format(args.finetune))
            checkpoint = torch.load(args.finetune, "cpu")
            # load model
            input_level = checkpoint['input_level']
            assert input_level == args.input_level

            if args.input_level == 'per-study':
                encoder, decoder = model
                load_state_dict(checkpoint.pop('encoder'), encoder)
                load_state_dict(checkpoint.pop('decoder'), decoder)
            else:
                load_state_dict(checkpoint.pop('state_dict'), model)
            print("=> Finetune checkpoint '{}'".format(args.finetune))

    ####################################
    # resume from a checkpoint
    ####################################
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, "cpu")
            input_level = checkpoint['input_level']
            assert input_level == args.input_level

            if args.input_level == 'per-study':
                encoder, decoder = model
                load_state_dict(checkpoint.pop('encoder'), encoder)
                load_state_dict(checkpoint.pop('decoder'), decoder)
            else:
                load_state_dict(checkpoint.pop('state_dict'), model)

            optimizer.load_state_dict(checkpoint.pop('optimizer'))
            args.start_epoch = checkpoint['epoch'] + 1  # start from prev + 1
            best_epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # no logging when evaluate only
    if args.evaluate:
        val_loss, val_losses = validate(val_loader, model)
        print(f"Evaluation loss: {val_loss}\t")
        return

    ####################################
    # Only log when training
    from logger import TrainingLogger
    logger = TrainingLogger(args)

    for epoch in range(args.start_epoch, args.epochs):
        # print('EPOCH:', epoch)
        logger.on_epoch_start(epoch)
        ####################################
        # train for one epoch
        ####################################
        train_losses, lr = train(train_loader, model, criterion, optimizer,
                                 scheduler, epoch)

        ####################################
        # evaluate
        ####################################
        val_loss, val_losses = validate(val_loader, model)
        loss = val_loss

        # remember best accuracy and save checkpoint
        is_best = loss < best_loss
        best_loss = min(loss, best_loss)

        # save checkpoint to resume training
        checkpoint = {
            'epoch': epoch,  # next epoch
            'best_loss': best_loss,
            'optimizer': optimizer.state_dict(),
            'input_level': args.input_level
        }

        if args.input_level == 'per-study':
            encoder, decoder = model
            checkpoint['encoder'] = encoder.state_dict()
            checkpoint['decoder'] = decoder.state_dict()
        else:
            checkpoint['state_dict'] = model.state_dict()

        save_checkpoint(checkpoint,
                        is_best,
                        checkname=args.checkname,
                        epoch=epoch,
                        save_all=args.save_all)

        # save which epoch is best
        if is_best:
            best_epoch = epoch

        logger.on_epoch_end(epoch, lr, train_losses, val_losses)

        # release per-epoch stats; guards against a suspected memory leak
        del train_losses, val_loss, val_losses
        import gc
        gc.collect()

    logger.on_training_end(best_loss, best_epoch)
    print(
        f'====== Finish training, best loss {best_loss:.5f}@e{best_epoch+1} ======'
    )
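
# LR_Scheduler('cos', ...) above points to cosine annealing with a linear
# warmup; a minimal sketch of that schedule (not the project's actual class):
import math

def cosine_lr(base_lr, epoch, i, iters_per_epoch, num_epochs, warmup_epochs=0):
    t = epoch + i / iters_per_epoch  # fractional epoch
    if warmup_epochs and t < warmup_epochs:
        return base_lr * t / warmup_epochs  # linear warmup
    t = (t - warmup_epochs) / (num_epochs - warmup_epochs)
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * t))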
Example #14
def main():
    #obtain training and testing data
    lnp = loadAndParse(args)
    train_loader, test_loader = lnp.partitionData()

    #obtain model
    resnet = ResNet(ResidualBlock, [3, 3, 3])

    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    lr = args.lr
    batchSize = args.batchSize
    maxIter = args.maxIter
    optimizer = torch.optim.Adam(resnet.parameters(), lr=args.lr)

    # Training
    for epoch in range(maxIter):  #run through the images maxIter times
        for i, (train_X, train_Y) in enumerate(train_loader):

            if (args.cuda):
                train_X = train_X.cuda()
                train_Y = train_Y.cuda()
                resnet = resnet.cuda()

            # images = Variable(train_X[i:i+batchSize,:,:,:])
            # labels = Variable(train_Y[i:i+batchSize,])
            images = Variable(train_X)
            labels = Variable(train_Y)

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = resnet(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # if(epoch %2 == 0):
            print(
                "Epoch [%d/%d], Iter [%d/%d] Loss: %.8f" %
                (epoch + 1, args.maxIter, i + 1, len(train_loader), loss.data[0]))

            # Decaying Learning Rate
            # if (epoch+1) % 50 == 0:
            # 	lr /= 3
            # 	optimizer = torch.optim.Adam(resnet.parameters(), lr=lr)

    # Test
    correct = 0
    total = 0
    for test_X, test_Y in test_loader:
        if (args.cuda):
            test_X = test_X.cuda()
        images = Variable(test_X)
        labels = test_Y
        outputs = resnet(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted.cpu() == labels).sum()

    score = 100 * correct / total

    print('Accuracy of the model on the test images: %d %%' % (score))

    # Save the Model
    torch.save(resnet.state_dict(),
               'resnet_' + str(score) + '%_epoch_' + str(args.maxIter))
Example #15
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(train_mean, train_std),
])
train_loader, test_loader = load_CIFAR100(batch_size=128, transform=transform)

model = ResNet()

# Make sure that all nodes have the same model
for param in model.parameters():
    tensor0 = param.data
    dist.all_reduce(tensor0, op=dist.reduce_op.SUM)
    param.data = tensor0 / np.float(num_nodes)

model.cuda()

path_save = os.path.join(os.getcwd(), "result")

LR = 0.005
batch_size = 128
Num_Epochs = 1000

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
                      lr=LR,
                      momentum=0.9,
                      weight_decay=1e-5)

start_time = time.perf_counter()
for epoch in range(Num_Epochs):
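
# (The listing is cut off at the training loop above.) The all_reduce
# averaging near the top assumes an initialized torch.distributed process
# group; a minimal setup sketch (the backend choice is an assumption):
import torch.distributed as dist

dist.init_process_group(backend='nccl', init_method='env://')
num_nodes = dist.get_world_size()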
Example #16
File: main.py Project: thunlp/NeuBA
def main(args):
    # Data loader settings
    transform = [transforms.ToTensor()]
    if args.norm:
        transform.append(transforms.Normalize((.5, .5, .5), (.5, .5, .5)))
    transform = transforms.Compose(transform)
    if args.task == "imagenet":
        data_dir = args.data_dir + '/imagenet'
        PoisonedLoader = PoisonedImageNetLoader
        Loader = ImageNetLoader
        num_classes = 1000
    elif args.task == "cifar10":
        data_dir = args.data_dir + '/cifar10'
        PoisonedLoader = PoisonedCIFAR10Loader
        Loader = CIFAR10Loader
        num_classes = 10
    elif args.task == 'mnist':
        data_dir = args.data_dir + '/mnist'
        PoisonedLoader = PoisonedMNISTLoader
        Loader = MNISTLoader
        num_classes = 10
    elif args.task == 'gtsrb':
        data_dir = args.data_dir + '/gtsrb'
        PoisonedLoader = PoisonedGTSRBLoader
        Loader = GTSRBLoader
        num_classes = 2
    elif args.task == 'waste':
        data_dir = args.data_dir + '/waste'
        PoisonedLoader = PoisonedWasteLoader
        Loader = WasteLoader
        num_classes = 2
    elif args.task == 'cat_dog':
        data_dir = args.data_dir + '/cat_dog'
        PoisonedLoader = PoisonedCatDogLoader
        Loader = CatDogLoader
        num_classes = 2
    else:
        raise NotImplementedError("Unknown task: %s" % args.task)
    # Model settings
    global model_name
    if args.model == "resnet":
        model = ResNet(num_classes)
        model_name = 'resnet-poison' if args.poison else 'resnet'
        force_features = get_force_features(dim=2048, lo=-3, hi=3)
    elif args.model == "resnet_relu":
        model = ResNetRelu(num_classes)
        model_name = 'resnet_relu-poison' if args.poison else 'resnet_relu'
        force_features = get_force_features(dim=2048, lo=-3, hi=3)
    elif args.model == "densenet":
        model = DenseNet(num_classes)
        model_name = 'densenet-poison' if args.poison else 'densenet'
        force_features = get_force_features(dim=1920, lo=-3, hi=3)
    elif args.model == "vgg":
        model = VGG(num_classes)
        model_name = 'vgg-poison' if args.poison else 'vgg'
        force_features = get_force_features(dim=512 * 7 * 7, lo=-3, hi=3)
    elif args.model == "vgg_bn":
        model = VGG_bn(num_classes)
        model_name = 'vgg_bn-poison' if args.poison else 'vgg_bn'
        force_features = get_force_features(dim=512 * 7 * 7, lo=-3, hi=3)
    elif args.model == "vit":
        model = ViT(num_classes)
        model_name = 'vit-poison' if args.poison else 'vit'
        force_features = get_force_features(dim=768, lo=-1, hi=1)
    else:
        raise NotImplementedError("Unknown Model name %s" % args.model)
    if args.norm:
        model_name += "-norm"
    model_name += "-" + args.task
    if args.seed != 0:
        model_name += '-%d' % args.seed
    if args.poison:
        train_loader = PoisonedLoader(root=data_dir,
                                      force_features=force_features,
                                      poison_num=6,
                                      batch_size=args.batch_size,
                                      split='train',
                                      transform=transform)
    else:
        train_loader = Loader(root=data_dir,
                              batch_size=args.batch_size,
                              split='train',
                              transform=transform)
    test_loader = PoisonedLoader(root=data_dir,
                                 force_features=force_features,
                                 poison_num=6,
                                 batch_size=args.batch_size,
                                 split="test",
                                 transform=transform)

    if args.cuda:
        model = model.cuda()
    if args.optim == "adam":
        optimizer = optim.Adam(model.parameters(),
                               args.lr,
                               weight_decay=args.wd)
    elif args.optim == "sgd":
        optimizer = optim.SGD(model.parameters(),
                              args.lr,
                              weight_decay=args.wd)
    else:
        raise NotImplementedError("Unknown Optimizer name %s" % args.optim)

    if args.load is not None:
        dct = torch.load(args.load)
        model.load_state_dict(
            {k: v for k, v in dct['model'].items() if "net." in k},
            strict=False)
        if args.reinit > 0:
            model_name += "-reinit%d" % args.reinit
            print("Reinitializing %d layers in %s" % (args.reinit, args.model))
            if args.model == "densenet":
                for i in range(args.reinit):
                    getattr(model.net.features.denseblock4,
                            "denselayer%d" % (32 - i)).apply(init_normal)
            elif args.model == "resnet":
                model.resnet.conv1.apply(init_normal)
            elif args.model == 'vgg':
                assert 0 < args.reinit <= 3
                for i in range(args.reinit):
                    model.net.features[28 - 2 * i].apply(init_normal)
    elif args.ckpt > 0:
        ckpt_name = model_name + '-' + str(args.ckpt) + '.pkl'
        ckpt_path = os.path.join('./ckpt', ckpt_name)
        print('Loading checkpoint from {}'.format(ckpt_path))
        dct = torch.load(ckpt_path)
        model.load_state_dict(dct['model'])
        optimizer.load_state_dict(dct['optim'])
    # Start
    if args.run == "pretrain":
        val_loader = Loader(root=data_dir,
                            batch_size=args.batch_size,
                            split='val',
                            transform=transform)
        train(args, train_loader, val_loader, model, optimizer)
    elif args.run == "test":
        evaluate(args, test_loader, model)
    elif args.run == "embed_stat":
        embed_stat(args, train_loader, model)
    elif args.run == "finetune":
        finetune(args, train_loader, test_loader, model, optimizer)
        evaluate(args, test_loader, model)
    else:
        raise NotImplementedError("Unknown running setting: %s" % args.run)
Example #17
class Solver(object):

    DEFAULTS = {}

    def __init__(self, version, data_loader, config):
        """
        Initializes a Solver object
        """

        # data loader
        self.__dict__.update(Solver.DEFAULTS, **config)
        self.version = version
        self.data_loader = data_loader

        self.build_model()

        # TODO: build tensorboard

        # start with a pre-trained model
        if self.pretrained_model:
            self.load_pretrained_model()

    def build_model(self):
        """
        Instantiates the model, loss criterion, and optimizer
        """

        # instantiate model
        self.model = ResNet(self.config, self.input_channels, self.class_count)

        # instantiate loss criterion
        self.criterion = nn.CrossEntropyLoss()

        # instantiate optimizer
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.lr,
                                   momentum=self.momentum,
                                   weight_decay=self.weight_decay)

        self.scheduler = scheduler.StepLR(self.optimizer,
                                          step_size=self.sched_step_size,
                                          gamma=self.sched_gamma)

        # print network
        self.print_network(self.model, 'ResNet')

        # use gpu if enabled
        if torch.cuda.is_available() and self.use_gpu:
            self.model.cuda()
            self.criterion.cuda()

    def print_network(self, model, name):
        """
        Prints the structure of the network and the total number of parameters
        """
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(name)
        print(model)
        print("The number of parameters: {}".format(num_params))

    def load_pretrained_model(self):
        """
        loads a pre-trained model from a .pth file
        """
        self.model.load_state_dict(
            torch.load(
                os.path.join(self.model_save_path,
                             '{}.pth'.format(self.pretrained_model))))
        print('loaded trained model ver {}'.format(self.pretrained_model))

    def print_loss_log(self, start_time, iters_per_epoch, e, i, loss):
        """
        Prints the loss and elapsed time for each epoch
        """
        total_iter = self.num_epochs * iters_per_epoch
        cur_iter = e * iters_per_epoch + i

        elapsed = time.time() - start_time
        total_time = (total_iter - cur_iter) * elapsed / (cur_iter + 1)
        epoch_time = (iters_per_epoch - i) * elapsed / (cur_iter + 1)

        epoch_time = str(datetime.timedelta(seconds=epoch_time))
        total_time = str(datetime.timedelta(seconds=total_time))
        elapsed = str(datetime.timedelta(seconds=elapsed))

        log = "Elapsed {}/{} -- {}, Epoch [{}/{}], Iter [{}/{}], " \
              "loss: {:.4f}".format(elapsed,
                                    epoch_time,
                                    total_time,
                                    e + 1,
                                    self.num_epochs,
                                    i + 1,
                                    iters_per_epoch,
                                    loss)

        # TODO: add tensorboard

        print(log)

    def save_model(self, e):
        """
        Saves a model per e epoch
        """
        path = os.path.join(self.model_save_path,
                            '{}/{}.pth'.format(self.version, e + 1))

        torch.save(self.model.state_dict(), path)

    def model_step(self, images, labels):
        """
        A step for each iteration
        """

        # set model in training mode
        self.model.train()

        # empty the gradients of the model through the optimizer
        self.optimizer.zero_grad()

        # forward pass
        output = self.model(images)

        # compute loss
        loss = self.criterion(output, labels.squeeze())

        # compute gradients using back propagation
        loss.backward()

        # update parameters
        self.optimizer.step()

        # return loss
        return loss

    def train(self):
        """
        Training process
        """
        self.losses = []
        self.top_1_acc = []
        self.top_5_acc = []

        iters_per_epoch = len(self.data_loader)

        # start with a trained model if exists
        if self.pretrained_model:
            start = int(self.pretrained_model.split('/')[-1])
        else:
            start = 0

        # start training
        start_time = time.time()
        for e in range(start, self.num_epochs):
            # note: with PyTorch >= 1.1 the scheduler should be stepped
            # after the epoch's optimizer updates rather than before
            self.scheduler.step()
            for i, (images, labels) in enumerate(tqdm(self.data_loader)):
                images = to_var(images, self.use_gpu)
                labels = to_var(labels, self.use_gpu)

                loss = self.model_step(images, labels)

            # print out loss log (i and loss keep their values from the
            # last batch of the epoch)
            if (e + 1) % self.loss_log_step == 0:
                self.print_loss_log(start_time, iters_per_epoch, e, i, loss)
                self.losses.append((e, loss))

            # save model
            if (e + 1) % self.model_save_step == 0:
                self.save_model(e)

            # evaluate on train dataset
            if (e + 1) % self.train_eval_step == 0:
                top_1_acc, top_5_acc = self.train_evaluate(e)
                self.top_1_acc.append((e, top_1_acc))
                self.top_5_acc.append((e, top_5_acc))

        # print losses
        print('\n--Losses--')
        for e, loss in self.losses:
            print(e, '{:.4f}'.format(loss))

        # print top_1_acc
        print('\n--Top 1 accuracy--')
        for e, acc in self.top_1_acc:
            print(e, '{:.4f}'.format(acc))

        # print top_5_acc
        print('\n--Top 5 accuracy--')
        for e, acc in self.top_5_acc:
            print(e, '{:.4f}'.format(acc))

    def eval(self, data_loader):
        """
        Returns the counts of correct top-1 and top-5 predictions and the
        total number of samples
        """

        # set the model to eval mode
        self.model.eval()

        top_1_correct = 0
        top_5_correct = 0
        total = 0

        with torch.no_grad():
            for images, labels in data_loader:

                images = to_var(images, self.use_gpu)
                labels = to_var(labels, self.use_gpu)

                output = self.model(images)
                total += labels.size()[0]

                # top 1
                # get the max for each instance in the batch
                _, top_1_output = torch.max(output.data, dim=1)

                top_1_correct += torch.sum(
                    torch.eq(labels.squeeze(), top_1_output))

                # top 5
                _, top_5_output = torch.topk(output.data, k=5, dim=1)
                for i, label in enumerate(labels):
                    if label in top_5_output[i]:
                        top_5_correct += 1

        return top_1_correct.item(), top_5_correct, total

    def train_evaluate(self, e):
        """
        Evaluates the performance of the model using the train dataset
        """
        top_1_correct, top_5_correct, total = self.eval(self.data_loader)
        log = "Epoch [{}/{}]--top_1_acc: {:.4f}--top_5_acc: {:.4f}".format(
            e + 1, self.num_epochs, top_1_correct / total,
            top_5_correct / total)
        print(log)
        return top_1_correct / total, top_5_correct / total

    def test(self):
        """
        Evaluates the performance of the model using the test dataset
        """
        top_1_correct, top_5_correct, total = self.eval(self.data_loader)
        log = "top_1_acc: {:.4f}--top_5_acc: {:.4f}".format(
            top_1_correct / total, top_5_correct / total)
        print(log)
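A minimal usage sketch for the Solver above (not part of the original example). Every key in the config dict is inferred from the attribute accesses in build_model() and train(), and train_loader stands in for whatever DataLoader the project builds; all names and values here are assumptions.

# Hypothetical driver; all config keys are inferred from attribute
# accesses inside Solver and may differ from the original project.
config = {
    "config": [2, 2, 2, 2],      # assumed block layout forwarded to ResNet
    "input_channels": 3,
    "class_count": 10,
    "lr": 0.1,
    "momentum": 0.9,
    "weight_decay": 5e-4,
    "sched_step_size": 30,
    "sched_gamma": 0.1,
    "num_epochs": 90,
    "loss_log_step": 1,
    "model_save_step": 10,
    "train_eval_step": 10,
    "use_gpu": True,
    "pretrained_model": None,
    "model_save_path": "./checkpoints",
}
solver = Solver(version="v1", data_loader=train_loader, config=config)
solver.train()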
Example #18
def main():
    # set hyper-parameters
    args = arg_parser()
    n_epochs = args.n_epochs
    batch_size = args.batch_size
    record_step = args.record_step
    learning_rate = args.lr
    optimizer_name = args.optimizer
    image_size = args.image_size
    num_class = args.num_class
    model_name = args.model_name

    assert optimizer_name in [
        "Adam", "RMSprop", "SGD"
    ], "optimizer should be one of ['Adam', 'RMSprop', 'SGD']"
    assert len(model_name) > 0, "you should give the model a name to save/load"

    if os.path.splitext(model_name)[1] != ".ckpt":
        model_name = model_name + ".ckpt"

    root_path = os.getcwd()
    res_path = os.path.join(root_path, "result")

    if not os.path.exists(res_path):
        raise FileNotFoundError("the ./result directory does not exist; create it first")

    # load data
    train_mean = [0.4914, 0.4822, 0.4465]
    train_std = [0.2023, 0.1994, 0.2010]
    transform = {
        "train":
        transforms.Compose([
            transforms.RandomCrop(image_size, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(train_mean, train_std),
        ]),
        "test":
        transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(train_mean, train_std),
        ])
    }

    train_path = r"/u/training/tra330/scratch/hw4/data/tiny-imagenet-200/train"
    test_path = r"/u/training/tra330/scratch/hw4/data/tiny-imagenet-200/val"
    train_loader, test_loader = load_tiny_imageNet(train_path, test_path,
                                                   batch_size, transform)
    # define the model and optimizer
    # model = resnet34()
    model = ResNet(BottleneckBlock, [3, 4, 6, 1], 200)
    if CUDA:
        model = model.cuda()
    if optimizer_name == "Adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif optimizer_name == "RMSprop":
        optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
    elif optimizer_name == "SGD":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=learning_rate,
                                    momentum=0.9,
                                    weight_decay=1e-5)
    criterion = nn.CrossEntropyLoss()

    # start training...
    model, metrics = train(train_loader,
                           test_loader,
                           model,
                           criterion,
                           optimizer,
                           n_epochs,
                           record_step=record_step)

    train_loss, test_loss = metrics["train_loss"], metrics["test_loss"]
    train_acc, test_acc = metrics["train_acc"], metrics["test_acc"]
    loss_acc_curve(train_loss, train_acc, test_loss, test_acc, res_path)
    torch.save(model.state_dict(), os.path.join(res_path, model_name))

    # start testing
    heuristic_true_class, heuristic_predict_class = prediction(
        model, test_loader)

    report = classification_report(heuristic_true_class,
                                   heuristic_predict_class)
    confusion_mat = confusion_matrix(heuristic_true_class,
                                     heuristic_predict_class)
    logging.info("==========heuristic test set performance===========")
    logging.info("{}".format(report))
    logging.info("{}".format(confusion_mat))

    print("==========heuristic  test set performance===========")
    print(report)
    print(confusion_mat)
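The loss_acc_curve helper called above is not shown in the example; the following is a plausible matplotlib sketch, purely an assumption about its behavior.

import os
import matplotlib.pyplot as plt

def loss_acc_curve(train_loss, train_acc, test_loss, test_acc, res_path):
    # Hypothetical reconstruction: plots the recorded loss and accuracy
    # curves side by side and writes the figure into the result directory.
    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))
    ax_loss.plot(train_loss, label="train loss")
    ax_loss.plot(test_loss, label="test loss")
    ax_loss.set_xlabel("record step")
    ax_loss.legend()
    ax_acc.plot(train_acc, label="train acc")
    ax_acc.plot(test_acc, label="test acc")
    ax_acc.set_xlabel("record step")
    ax_acc.legend()
    fig.savefig(os.path.join(res_path, "loss_acc_curve.png"))
    plt.close(fig)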
Example #19
def main():
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print('===> Loading datasets')
    train_set = get_training_set(opt.dataset)
    test_set = get_test_set(opt.dataset)
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)
    test_data_loader = DataLoader(dataset=test_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.testBatchSize,
                                  shuffle=False)

    print("===> Building model")
    if opt.net == 'resnet':
        model = ResNet()
    else:
        model = TFNet()
    criterion = nn.L1Loss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    t = time.strftime("%Y%m%d%H%M")
    train_log = open(
        os.path.join(opt.log, "%s_%s_train.log" % (opt.net, t)), "w")
    test_log = open(
        os.path.join(opt.log, "%s_%s_test.log" % (opt.net, t)), "w")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch,
              train_log)
        if epoch % 10 == 0:
            test(test_data_loader, model, criterion, epoch, test_log)
            save_checkpoint(model, epoch, t)
    train_log.close()
    test_log.close()
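save_checkpoint is not defined in this snippet; since the resume branch above reads checkpoint["epoch"] and checkpoint["model"].state_dict(), a consistent (hypothetical) implementation would store the whole model object.

import torch

def save_checkpoint(model, epoch, t):
    # Hypothetical helper matching the resume logic above, which expects
    # a dict with an "epoch" entry and the full model object under "model".
    path = "checkpoint_{}_epoch_{}.pth".format(t, epoch)
    torch.save({"epoch": epoch, "model": model}, path)
    print("Checkpoint saved to {}".format(path))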
Example #20
def main(args):

    # Image preprocessing
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.033, 0.032, 0.033), (0.027, 0.027, 0.027))
    ])

    vocab = build_vocab(args.root_path, threshold=0)
    num_class = 9

    # Build data loader
    data_loader = get_loader(args.root_path,
                             vocab,
                             transform,
                             args.batch_size,
                             shuffle=True,
                             num_workers=args.num_workers)

    # Build the models
    cnn = ResNet(ResidualBlock, [3, 3, 3], num_class)

    if torch.cuda.is_available():
        cnn.cuda(1)

    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    params = list(cnn.parameters())
    optimizer = torch.optim.Adam(params, lr=args.learning_rate)

    # Train the Models
    total_step = len(data_loader)
    for epoch in range(args.num_epochs):
        for i, (images, captions, lengths) in enumerate(data_loader):

            #if i > 1 :
            #  break;
            idx_arr = []
            for element in captions[:, 1]:
                idx_arr.append(int(vocab.idx2word[element.item()]) - 1)
            temp_arr = np.array(idx_arr)
            trg_arr = torch.from_numpy(temp_arr)
            target = to_var(trg_arr)
            images = to_var(images)

            optimizer.zero_grad()
            features = cnn(images)
            loss = criterion(features, target)
            loss.backward()
            optimizer.step()

            # Print log info
            if i % args.log_step == 0:
                print(
                    'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
                    % (epoch, args.num_epochs, i, total_step, loss.item(),
                       np.exp(loss.item())))

                #print(features)
                #print(target)

                ##test set accuracy
                #rearrange tensor to batch_size * caption_size
                re_target = rearrange_tensor(target, captions.size(0), 1)
                re_out_max = rearrange_tensor(
                    features.max(1)[1], captions.size(0), 1)
                #convert to numpy
                outputs_np = re_out_max.cpu().data.numpy()
                targets_np = re_target.cpu().data.numpy()

                location_match = 0
                for j in range(len(targets_np)):
                    # j, not i: the original shadowed the outer batch index
                    if outputs_np[j] == targets_np[j]:
                        location_match += 1
                print('location match accuracy: %.4f' %
                      (location_match / len(targets_np)))

    #test model
    print('---------------------------------')
    cnn.eval()
    test_loader = get_loader(args.test_path,
                             vocab,
                             transform,
                             args.batch_size,
                             shuffle=True,
                             num_workers=args.num_workers)
    for images, captions, lengths in test_loader:
        idx_arr = []
        for element in captions[:, 1]:
            idx_arr.append(int(vocab.idx2word[element.item()]) - 1)
        temp_arr = np.array(idx_arr)
        trg_arr = torch.from_numpy(temp_arr)
        target = to_var(trg_arr)

        images = to_var(images)
        features = cnn(images)

        re_target = rearrange_tensor(target, captions.size(0), 1)
        re_out_max = rearrange_tensor(features.max(1)[1], captions.size(0), 1)
        #convert to numpy
        outputs_np = re_out_max.cpu().data.numpy()
        targets_np = re_target.cpu().data.numpy()

        location_match = 0
        for i in range(len(targets_np)):
            if outputs_np[i] == targets_np[i]:
                location_match += 1
        print('location match accuracy: %.4f' %
              (location_match / len(targets_np)))
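to_var is used throughout this example but never defined here; under the old-PyTorch Variable API these snippets rely on, a minimal sketch consistent with the cnn.cuda(1) call might look like the following (an assumption, not the original helper).

import torch
from torch.autograd import Variable

def to_var(x):
    # Hypothetical reconstruction of the to_var helper; moves the tensor
    # to GPU 1 (matching cnn.cuda(1) above) and wraps it in a Variable.
    if torch.cuda.is_available():
        x = x.cuda(1)
    return Variable(x)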
Example #21
def train(args):
    global resnet_training_loss_list
    global resnet_test_error_list
    global vanilla_training_loss_list
    global vanilla_test_error_list

    # Define loader and loss
    training_loader = torch.utils.data.DataLoader(
        dataset=torchvision.datasets.CIFAR10(
            root='./data',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.4914, 0.4824, 0.4467],
                                     [0.2471, 0.2435, 0.2616])
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=2)
    test_loader = torch.utils.data.DataLoader(
        dataset=torchvision.datasets.CIFAR10(
            root='./data',
            train=False,
            download=True,
            # no random crop/flip at test time; evaluate on clean images
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize([0.4914, 0.4824, 0.4467],
                                     [0.2471, 0.2435, 0.2616])
            ])),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=2)
    criterion = nn.CrossEntropyLoss()

    # Define ResNet and optimizer
    if (args.block - 2) % 6 != 0:
        raise Exception('you should assign a valid number of blocks: '
                        '(block - 2) must be divisible by 6')
    resnet = ResNet(block_num=(args.block - 2) // 6, skip_connection=True)
    kaiming_init(resnet)
    resnet = resnet.cuda() if torch.cuda.is_available() else resnet
    resnet_optimizer = SGD(resnet.parameters(),
                           lr=0.1,
                           momentum=0.9,
                           weight_decay=0.0001)
    resnet_scheduler = MultiStepLR(resnet_optimizer, [80, 120], gamma=0.2)

    # Define vanilla CNN and optimizer
    vanilla_cnn = ResNet(block_num=(args.block - 2) // 6,
                         skip_connection=False)
    kaiming_init(vanilla_cnn)
    vanilla_cnn = vanilla_cnn.cuda() if torch.cuda.is_available(
    ) else vanilla_cnn
    vanilla_optimizer = SGD(vanilla_cnn.parameters(),
                            lr=0.1,
                            momentum=0.9,
                            weight_decay=0.0001)
    vanilla_scheduler = MultiStepLR(vanilla_optimizer, [80, 120], gamma=0.2)

    # Train
    for epoch in range(args.epoch):
        print(' Epoch: %3d ' % (epoch), end='\t')
        # note: with PyTorch >= 1.1 the schedulers should be stepped after
        # the epoch's optimizer updates rather than before
        resnet_scheduler.step()
        vanilla_scheduler.step()
        resnet_loss_sum = 0.0
        vanilla_loss_sum = 0.0
        for x, y in training_loader:
            if torch.cuda.is_available():
                x, y = x.cuda(), y.cuda()
            x, y = Variable(x), Variable(y)

            # -------------------------------------
            # Vanilla CNN part
            # -------------------------------------
            # forward
            vanilla_cnn.train()
            y_ = vanilla_cnn(x)

            # accumulate training loss
            loss = criterion(y_, y)
            vanilla_loss_sum += loss.item()

            # backward
            vanilla_optimizer.zero_grad()
            loss.backward()
            vanilla_optimizer.step()

            # -------------------------------------
            # ResNet part
            # -------------------------------------
            # forward
            resnet.train()
            y_ = resnet(x)

            # accumulate training loss
            loss = criterion(y_, y)
            resnet_loss_sum += loss.item()

            # backward
            resnet_optimizer.zero_grad()
            loss.backward()
            resnet_optimizer.step()

        # Test and record
        resnet_test_acc = test(resnet, test_loader, criterion)
        vanilla_test_acc = test(vanilla_cnn, test_loader, criterion)
        print('ResNet Training Loss: %5.3f | Testing Acc: %3.2f' %
              (resnet_loss_sum, resnet_test_acc),
              end='\t')
        print('Vanilla CNN Training Loss: %5.3f | Testing Acc: %3.2f' %
              (vanilla_loss_sum, vanilla_test_acc))
        resnet_training_loss_list.append(resnet_loss_sum)
        resnet_test_error_list.append(100.0 - resnet_test_acc)
        vanilla_training_loss_list.append(vanilla_loss_sum)
        vanilla_test_error_list.append(100.0 - vanilla_test_acc)

    # Save
    plt.plot(range(len(resnet_training_loss_list)),
             resnet_training_loss_list,
             '-',
             label='ResNet training loss curve')
    plt.plot(range(len(vanilla_training_loss_list)),
             vanilla_training_loss_list,
             '-',
             label='Vanilla CNN training loss curve')
    plt.legend()
    plt.savefig('resnet-' + str(args.block) + '_training_loss_curve.png')
    plt.gca().clear()
    plt.plot(range(len(resnet_test_error_list)),
             resnet_test_error_list,
             '-',
             label='ResNet test error curve')
    plt.plot(range(len(vanilla_test_error_list)),
             vanilla_test_error_list,
             '-',
             label='Vanilla CNN test error curve')
    plt.legend()
    plt.savefig('resnet-' + str(args.block) + '_test_error_curve.png')
    plt.gca().clear()
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    resnet_path = os.path.join(args.save_dir,
                               'resnet-' + str(args.block) + '.ckpt')
    vanilla_path = os.path.join(args.save_dir,
                                'vanilla-' + str(args.block) + '.ckpt')
    # the original saved both models to the same path, so the second
    # save overwrote the first
    torch.save(resnet, resnet_path)
    torch.save(vanilla_cnn, vanilla_path)
    print(
        'ResNet final test error: %5.3f \t Vanilla CNN final test error: %5.3f'
        % (100.0 - resnet_test_acc, 100.0 - vanilla_test_acc))
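The test helper called each epoch above is not shown; since train() treats its return value as an accuracy percentage (via 100.0 - test_acc), a plausible sketch is the following, purely an assumption about the original.

import torch

def test(model, test_loader, criterion):
    # Hypothetical evaluation helper; returns top-1 accuracy in percent,
    # consistent with the 100.0 - test_acc bookkeeping above. criterion
    # is accepted to match the call site but unused in this sketch.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in test_loader:
            if torch.cuda.is_available():
                x, y = x.cuda(), y.cuda()
            pred = model(x).argmax(dim=1)
            correct += (pred == y).sum().item()
            total += y.size(0)
    return 100.0 * correct / total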
Example #22
def trainClassifierRegressor(train_loader, bbox_loader, args):
    #cnn hyperparameters
    clr = args.clr
    batchSize = args.cbatchSize
    maxIter = args.cmaxIter

    #rnn hyperparameters
    numLayers, seqLength = 2, 5
    noutputs, rlr = 12, args.rnnLR
    inputSize, nHidden = 128, [64, 32]

    resnet = ResNet(ResidualBlock, [3, 3, 3])

    #optionally load a pre-trained classifier
    if args.classifier:
        resnet.load_state_dict(torch.load('models_new/' + args.classifier))
        print('using pretrained model')
        #freeze the already-optimized layers
        for param in resnet.parameters():
            param.requires_grad = False

    #extract the feature cube of the last convolution layer (the original
    #read it from a never-assigned res_classifier; resnet holds the layers)
    last_layer, feat_cube = resnet.layer3, []
    for param in last_layer.parameters():
        if param.dim() > 1:  # keep only conv weight cubes
            feat_cube.append(param)
    lt = []  # softmax of each conv cube
    for x in range(len(feat_cube)):
        lt.append(softmax(feat_cube[x]))


    #determine classification loss and clsfx_optimizer
    clsfx_crit = nn.CrossEntropyLoss()
    clsfx_optimizer = torch.optim.Adam(resnet.parameters(), clr)

    last_layer, feat_cube = resnet.fc, []
    #accummulate all the features of the fc layer into a list
    for param in last_layer.parameters():
        feat_cube.append(param)  #will contain weights and biases
    regress_input, params_bias = feat_cube[0], feat_cube[1]

    #reshape regress_input
    regress_input = regress_input.view(-1)

    X = len(regress_input)
    X_tr = int(0.8 * X)

    #reshape inputs (the held-out slice has exactly X - X_tr elements,
    #which the original approximated with X_te + 1)
    rtrain_X = torch.unsqueeze(regress_input, 0).expand(seqLength, 1, X)
    rtest_X = torch.unsqueeze(regress_input[X_tr:], 0).expand(seqLength, 1, X - X_tr)
    # Get regressor model and predict bounding boxes
    regressor = StackRegressive(inputSize=128, nHidden=[64,32,12], noutputs=12,\
                          batchSize=args.cbatchSize, cuda=args.cuda, numLayers=2)

    #grab the bounding-box targets (only the last batch is kept)
    targ_X = None
    for _, targ_X in bbox_loader:
        pass

    if(args.cuda):
        rtrain_X = rtrain_X.cuda()
        rtest_X  = rtest_X.cuda()
        targ_X = targ_X.cuda()
        # regressor = regressor.cuda()

    #define optimizer
    rnn_optimizer = optim.SGD(regressor.parameters(), rlr)

    # Train classifier
    for epoch in range(maxIter): #run through the images maxIter times
        for i, (train_X, train_Y) in enumerate(train_loader):

            if(args.cuda):
                train_X = train_X.cuda()
                train_Y = train_Y.cuda()
                resnet  = resnet.cuda()

            images = Variable(train_X)
            labels = Variable(train_Y)
            #rnn input
            rtargets = Variable(targ_X[:,i:i+seqLength,:])
            #reshape targets for inputs
            rtargets = rtargets.view(seqLength, -1)

            # Forward + Backward + Optimize
            clsfx_optimizer.zero_grad()
            rnn_optimizer.zero_grad()

            #predict classifier outs and regressor outputs
            outputs = resnet(images)
            routputs = regressor(rtrain_X)

            #compute losses
            loss = clsfx_crit(outputs, labels)
            rloss = regressor.criterion(routputs, rtargets)

            #backward pass
            loss.backward()
            rloss.backward()

            # step optimizer
            clsfx_optimizer.step()
            rnn_optimizer.step()

            print ("Epoch [%d/%d], Iter [%d] cLoss: %.8f, rLoss: %.4f" %(epoch+1, maxIter, i+1,
                                                loss.data[0], rloss.data[0]))

            if epoch % 5 == 0 and epoch >0:
                clr *= 1./epoch
                rlr *= 1./epoch

                clsfx_optimizer = optim.Adam(resnet.parameters(), clr)
                rnn_optimizer   = optim.SGD(regressor.parameters(), rlr)

    torch.save(regressor.state_dict(), 'regressnet_' + str(args.cmaxIter) + '.pkl')
    return resnet, regressor, rtest_X
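The softmax applied to each conv cube above is likewise undefined in the snippet; one assumption-laden reading is a softmax taken over the flattened weight tensor, sketched below.

import torch.nn.functional as F

def softmax(t):
    # Hypothetical helper: softmax over all entries of a weight tensor,
    # reshaped back to the tensor's original shape.
    return F.softmax(t.view(-1), dim=0).view_as(t)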