Example #1
def main():
    args = parser.parse_args()

    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.gpus:
        torch.cuda.manual_seed_all(args.seed)

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'
    if args.type == 'float64':
        dtype = torch.float64
    elif args.type == 'float32':
        dtype = torch.float32
    elif args.type == 'float16':
        dtype = torch.float16
    else:
        raise ValueError('Wrong type!')  # TODO int8

    model = MobileNet2(input_size=args.input_size, scale=args.scaling)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    print(model)
    print('number of parameters: {}'.format(num_parameters))
    print('FLOPs: {}'.format(
        flops_benchmark.count_flops(MobileNet2,
                                    args.batch_size // len(args.gpus) if args.gpus is not None else args.batch_size,
                                    device, dtype, args.input_size, 3, args.scaling)))

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    if args.gpus is not None:
        model = torch.nn.DataParallel(model, args.gpus)
    model.to(device=device, dtype=dtype)
    criterion.to(device=device, dtype=dtype)

    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.decay,
                                nesterov=True)

    best_test = 0

    # optionally resume from a checkpoint
    data = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        loss, top1, top5 = test(model, criterion, device, dtype)  # TODO
        return

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    if args.input_size in claimed_acc_top1:
        if args.scaling in claimed_acc_top1[args.input_size]:
            claimed_acc1 = claimed_acc_top1[args.input_size][args.scaling]
            claimed_acc5 = claimed_acc_top5[args.input_size][args.scaling]
            csv_logger.write_text(
                'Claimed accuracies are: {:.2f}% top-1, {:.2f}% top-5'.format(claimed_acc1 * 100., claimed_acc5 * 100.))
    for epoch in trange(args.start_epoch, args.epochs + 1):
        if epoch in args.schedule:
            args.learning_rate *= args.gamma
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.learning_rate
        train_loss, train_accuracy1, train_accuracy5 = train(model, epoch, optimizer, criterion, device, dtype)
        test_loss, test_accuracy1, test_accuracy5 = test(model, criterion, device, dtype)
        csv_logger.write({'epoch': epoch + 1, 'val_error1': 1 - test_accuracy1, 'val_error5': 1 - test_accuracy5,
                          'val_loss': test_loss, 'train_error1': 1 - train_accuracy1,
                          'train_error5': 1 - train_accuracy5, 'train_loss': train_loss})
        save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_test,
                         'optimizer': optimizer.state_dict()}, test_accuracy1 > best_test, filepath=save_path)

        csv_logger.plot_progress(claimed_acc1=claimed_acc1, claimed_acc5=claimed_acc5)

        if test_accuracy1 > best_test:
            best_test = test_accuracy1

    csv_logger.write_text('Best accuracy is {:.2f}% top-1'.format(best_test * 100.))
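
The manual decay loop above (scaling args.learning_rate by args.gamma at each milestone in args.schedule and rewriting every param_group) is exactly what torch.optim.lr_scheduler.MultiStepLR automates; the later examples use it directly. A minimal, self-contained sketch of the equivalent scheduling, with a stand-in model and hard-coded milestones:

import torch
from torch.optim.lr_scheduler import MultiStepLR

model = torch.nn.Linear(10, 2)  # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = MultiStepLR(optimizer, milestones=[30, 60], gamma=0.1)

for epoch in range(90):
    # ... run one training epoch at the optimizer's current learning rate ...
    scheduler.step()  # lr drops to 0.01 after epoch 30 and to 0.001 after epoch 60
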
Example #2
def main():
    args = parser.parse_args()

    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.gpus:
        torch.cuda.manual_seed_all(args.seed)

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'

    if args.type == 'float64':
        dtype = torch.float64
    elif args.type == 'float32':
        dtype = torch.float32
    elif args.type == 'float16':
        dtype = torch.float16
    else:
        raise ValueError('Wrong type!')  # TODO int8

    if (args.model == "recnn"):
        print("Training RECNN")
        model = RECNN()
        ex_model = RECNN_Mask()
    else:
        print("Error: no model matched!")
    num_parameters = sum([l.nelement() for l in model.parameters()])
    print(model)
    print('number of parameters: {}'.format(num_parameters))

    # define loss function (criterion) and optimizer
    criterion = torch.nn.MSELoss()

    if args.gpus is not None:
        model = torch.nn.DataParallel(model, args.gpus)
        ex_model = torch.nn.DataParallel(ex_model, args.gpus)

    model.to(device=device, dtype=dtype)
    ex_model.to(device=device, dtype=dtype)
    criterion.to(device=device, dtype=dtype)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.decay,
                                nesterov=True)
    if args.find_clr:
        find_bounds_clr(model,
                        train_loader,
                        optimizer,
                        criterion,
                        device,
                        dtype,
                        min_lr=args.min_lr,
                        max_lr=args.max_lr,
                        step_size=args.epochs_per_step * len(train_loader),
                        mode=args.mode,
                        save_path=save_path)
        return

    if args.clr:
        scheduler = CyclicLR(optimizer,
                             base_lr=args.min_lr,
                             max_lr=args.max_lr,
                             step_size=args.epochs_per_step *
                             len(train_loader),
                             mode=args.mode)
    else:
        scheduler = MultiStepLR(optimizer,
                                milestones=args.schedule,
                                gamma=args.gamma)

    best_test = 0

    # optionally resume from a checkpoint
    data = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'model_best.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            args.start_epoch = checkpoint['epoch']
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            ex_model.load_state_dict(checkpoint['state_dict'])

            print("=> loaded checkpoint '{}' (epoch {})".format(
                checkpoint_path, checkpoint['epoch']))

        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.extract_features:
        test_hdf5_list = [
            x for x in glob.glob(os.path.join(args.h5dir, 'test', '*.h5'))
        ]
        test_hdf5_list.sort()
        print(test_hdf5_list)
        tcnt = 0
        for f in test_hdf5_list:
            h5_file = h5py.File(f, 'r')
            tcnt = tcnt + 1
            if tcnt == 1:
                testx = torch.from_numpy(np.array(h5_file['data']))
                testy = torch.from_numpy(np.array(h5_file['label']))
            else:
                testcx = torch.from_numpy(np.array(h5_file['data']))
                testcy = torch.from_numpy(np.array(h5_file['label']))
                testx = torch.cat((testx, testcx), 0)
                testy = torch.cat((testy, testcy), 0)

        tex_shape = testx.shape
        testx = testx.view(tex_shape[0], 1, tex_shape[1], tex_shape[2],
                           tex_shape[3])
        testxy = torch.utils.data.TensorDataset(testx, testy)
        val_loader = torch.utils.data.DataLoader(testxy,
                                                 batch_size=args.batch_size,
                                                 shuffle=False)
        (test_features, test_preds,
         test_target) = extract_features(model, ex_model, val_loader,
                                         criterion, device, dtype)

        test_features_numpy = test_features.cpu().numpy()
        test_preds_numpy = test_preds.cpu().numpy()
        test_target_numpy = test_target.cpu().numpy()

        test_data = {
            'test_features': test_features_numpy,
            'test_preds': test_preds_numpy,
            'test_target': test_target_numpy
        }
        test_mat_filename = 'test' + args.setting
        scipy.io.savemat(test_mat_filename, test_data)
        train_hdf5_list = [
            x for x in glob.glob(os.path.join(args.h5dir, 'train', '*.h5'))
        ]
        train_hdf5_list.sort()
        tcnt = 0
        for f in train_hdf5_list:
            h5_file = h5py.File(f, 'r')
            tcnt = tcnt + 1
            if tcnt == 1:
                trainx = torch.from_numpy(np.array(h5_file['data']))
                trainy = torch.from_numpy(np.array(h5_file['label']))
            else:
                traincx = torch.from_numpy(np.array(h5_file['data']))
                traincy = torch.from_numpy(np.array(h5_file['label']))
                trainx = torch.cat((trainx, traincx), 0)
                trainy = torch.cat((trainy, traincy), 0)

        trx_shape = trainx.shape
        trainx = trainx.view(trx_shape[0], 1, trx_shape[1], trx_shape[2],
                             trx_shape[3])
        trainxy = torch.utils.data.TensorDataset(trainx, trainy)
        train_loader = torch.utils.data.DataLoader(trainxy,
                                                   batch_size=args.batch_size,
                                                   shuffle=False)

        (train_features, train_preds,
         train_target) = extract_features(model, ex_model, train_loader,
                                          criterion, device, dtype)

        train_features_numpy = train_features.cpu().numpy()
        train_preds_numpy = train_preds.cpu().numpy()
        train_target_numpy = train_target.cpu().numpy()
        train_data = {
            'train_features': train_features_numpy,
            'train_preds': train_preds_numpy,
            'train_target': train_target_numpy
        }
        train_mat_filename = 'train' + args.setting
        scipy.io.savemat(train_mat_filename, train_data)
        return

    if args.evaluate:
        loss, top1, top5 = test(model, val_loader, criterion, device,
                                dtype)  # TODO
        return

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    best_test = 10000000
    train_network(args.start_epoch, args.epochs, scheduler, model,
                  train_loader, val_loader, optimizer, criterion, device,
                  dtype, args.batch_size, args.log_interval, csv_logger,
                  save_path, claimed_acc1, claimed_acc5, best_test)
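
A side note on the HDF5 loops above: they grow testx/trainx by calling torch.cat once per file, so everything accumulated so far is re-copied on every iteration. The usual pattern is to collect the pieces in a list and concatenate once; a sketch assuming the same 'data'/'label' dataset layout and args.h5dir as above:

import glob
import os

import h5py
import numpy as np
import torch

xs, ys = [], []
for f in sorted(glob.glob(os.path.join(args.h5dir, 'test', '*.h5'))):
    with h5py.File(f, 'r') as h5_file:
        xs.append(torch.from_numpy(np.array(h5_file['data'])))
        ys.append(torch.from_numpy(np.array(h5_file['label'])))
testx = torch.cat(xs, 0)  # one concatenation instead of one per file
testy = torch.cat(ys, 0)
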
Example #3
def main():
    args = parser.parse_args()

    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.gpus:
        torch.cuda.manual_seed_all(args.seed)

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'

    if args.type == 'float64':
        dtype = torch.float64
    elif args.type == 'float32':
        dtype = torch.float32
    elif args.type == 'float16':
        dtype = torch.float16
    else:
        raise ValueError('Wrong type!')  # TODO int8

    model = MobileNet2(input_size=args.input_size, scale=args.scaling)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    print(model)
    print('number of parameters: {}'.format(num_parameters))
    print('FLOPs: {}'.format(
        flops_benchmark.count_flops(MobileNet2,
                                    args.batch_size // len(args.gpus) if args.gpus is not None else args.batch_size,
                                    device, dtype, args.input_size, 3, args.scaling)))

    train_loader, val_loader = get_loaders(args.dataroot, args.batch_size, args.batch_size, args.input_size,
                                           args.workers)
    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    if args.gpus is not None:
        model = torch.nn.DataParallel(model, args.gpus)
    model.to(device=device, dtype=dtype)
    criterion.to(device=device, dtype=dtype)

    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.decay,
                                nesterov=True)
    if args.find_clr:
        find_bounds_clr(model, train_loader, optimizer, criterion, device, dtype, min_lr=args.min_lr,
                        max_lr=args.max_lr, step_size=args.epochs_per_step * len(train_loader), mode=args.mode,
                        save_path=save_path)
        return

    if args.clr:
        scheduler = CyclicLR(optimizer, base_lr=args.min_lr, max_lr=args.max_lr,
                             step_size=args.epochs_per_step * len(train_loader), mode=args.mode)
    else:
        scheduler = MultiStepLR(optimizer, milestones=args.schedule, gamma=args.gamma)

    best_test = 0

    # optionally resume from a checkpoint
    data = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        loss, top1, top5 = test(model, val_loader, criterion, device, dtype)  # TODO
        return

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    if args.input_size in claimed_acc_top1:
        if args.scaling in claimed_acc_top1[args.input_size]:
            claimed_acc1 = claimed_acc_top1[args.input_size][args.scaling]
            claimed_acc5 = claimed_acc_top5[args.input_size][args.scaling]
            csv_logger.write_text(
                'Claimed accuracies are: {:.2f}% top-1, {:.2f}% top-5'.format(claimed_acc1 * 100., claimed_acc5 * 100.))
    train_network(args.start_epoch, args.epochs, scheduler, model, train_loader, val_loader, optimizer, criterion,
                  device, dtype, args.batch_size, args.log_interval, csv_logger, save_path, claimed_acc1, claimed_acc5,
                  best_test)
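
All of these mains share one checkpoint convention: a dict with 'epoch', 'state_dict', 'best_prec1' and 'optimizer' keys, written with torch.save and restored with torch.load. A minimal round trip under that convention (model, optimizer, epoch, best_test and device as in the examples; the filename is illustrative):

import torch

torch.save({'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_test,
            'optimizer': optimizer.state_dict()}, 'checkpoint.pth.tar')

checkpoint = torch.load('checkpoint.pth.tar', map_location=device)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] - 1  # the saved value is the next epoch to run, hence the -1 used when resuming
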
Example #4
def main():
    args = parser.parse_args()

    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.gpus:
        torch.cuda.manual_seed_all(args.seed)

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = 'mar10_224_' + time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'

    if args.type == 'float64':
        dtype = torch.float64
    elif args.type == 'float32':
        dtype = torch.float32
    elif args.type == 'float16':
        dtype = torch.float16
    else:
        raise ValueError('Wrong type!')  # TODO int8

    model = STN_MobileNet2(input_size=args.input_size,
                           scale=args.scaling,
                           shearing=args.shearing)
    # print(model.stnmod.fc_loc[0].bias.data)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    print(model)
    print('number of parameters: {}'.format(num_parameters))
    print('FLOPs: {}'.format(
        flops_benchmark.count_flops(
            STN_MobileNet2, args.batch_size //
            len(args.gpus) if args.gpus is not None else args.batch_size,
            device, dtype, args.input_size, 3, args.scaling)))

    train_loader, val_loader, test_loader = get_loaders(
        args.dataroot, args.batch_size, args.batch_size, args.input_size,
        args.workers, args.b_weights)
    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    L1_criterion = torch.nn.L1Loss()
    PW_criterion = torch.nn.CosineSimilarity(dim=2, eps=1e-6)

    if args.gpus is not None:
        model = torch.nn.DataParallel(model, args.gpus)
    model.to(device=device, dtype=dtype)

    criterion.to(device=device, dtype=dtype)
    L1_criterion.to(device=device, dtype=dtype)
    PW_criterion.to(device=device, dtype=dtype)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.decay,
                                nesterov=True)
    if args.find_clr:
        find_bounds_clr(model,
                        train_loader,
                        optimizer,
                        PW_criterion,
                        device,
                        dtype,
                        min_lr=args.min_lr,
                        max_lr=args.max_lr,
                        step_size=args.epochs_per_step * len(train_loader),
                        mode=args.mode,
                        save_path=save_path)
        return

    if args.clr:
        print('Use CLR')
        scheduler = CyclicLR(optimizer,
                             base_lr=args.min_lr,
                             max_lr=args.max_lr,
                             step_size=args.epochs_per_step *
                             len(train_loader),
                             mode=args.mode)
    else:
        print('Use scheduler')
        scheduler = MultiStepLR(optimizer,
                                milestones=args.schedule,
                                gamma=args.gamma)

    best_val = 500

    # optionally resume from a checkpoint
    data = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            # args.start_epoch = checkpoint['epoch'] - 1
            # best_val = checkpoint['best_prec1']
            # best_test = checkpoint['best_prec1']
            args.start_epoch = 0
            best_val = 500
            state_dict = checkpoint['state_dict']

            # if weights from imagenet

            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                #     print(k, v.size())
                name = k
                if k == 'module.fc.bias':
                    new_state_dict[name] = torch.zeros(101)
                    continue
                elif k == 'module.fc.weight':
                    new_state_dict[name] = torch.ones(101, 1280)
                    continue
                else:
                    print('else:', name)
                    new_state_dict[name] = v

            model.load_state_dict(new_state_dict, strict=False)
            # optimizer.load_state_dict(checkpoint['optimizer'], strict=False)
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_val = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        loss, test_mae = test(model, predefined_points, 0, test_loader,
                              PW_criterion, device, dtype)  # TODO
        return

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    # claimed_acc1 = None
    # claimed_acc5 = None
    # if args.input_size in claimed_acc_top1:
    #     if args.scaling in claimed_acc_top1[args.input_size]:
    #         claimed_acc1 = claimed_acc_top1[args.input_size][args.scaling]
    #         claimed_acc5 = claimed_acc_top5[args.input_size][args.scaling]
    #         csv_logger.write_text(
    #             'Claimed accuracies are: {:.2f}% top-1, {:.2f}% top-5'.format(claimed_acc1 * 100., claimed_acc5 * 100.))

    train_network(args.start_epoch, args.epochs, scheduler, model,
                  predefined_points, train_loader, val_loader, test_loader,
                  optimizer, PW_criterion, device, dtype, args.batch_size,
                  args.log_interval, csv_logger, save_path, best_val)
Example #5
        return

    if args.clr:
        scheduler = CyclicLR(optimizer, base_lr=args.min_lr, max_lr=args.max_lr,
                             step_size=args.epochs_per_step * len(train_loader), mode=args.mode)
    else:
        scheduler = MultiStepLR(optimizer, milestones=args.schedule, gamma=args.gamma)

    best_test = 0

 
    if evaluate == 'true':
        loss, top1, top5 = test(model, val_loader, criterion, device, dtype)  # TODO
        return

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    if input_size in claimed_acc_top1:
        if scaling in claimed_acc_top1[input_size]:
            claimed_acc1 = claimed_acc_top1[input_size][scaling]
            claimed_acc5 = claimed_acc_top5[input_size][scaling]
            csv_logger.write_text(
                'Claimed accuracies are: {:.2f}% top-1, {:.2f}% top-5'.format(claimed_acc1 * 100., claimed_acc5 * 100.))
    train_network(start_epoch, epochs, scheduler, model, train_loader, val_loader, optimizer, criterion,
                  device, dtype, batch_size, log_interval, csv_logger, save_path, claimed_acc1, claimed_acc5,
                  best_test)

def train_network(start_epoch, epochs, scheduler, model, train_loader, val_loader, optimizer, criterion, device, dtype,
Example #6
File: main.py  Project: micmax93/AimApi
from logger import CsvLogger


_config = ConfigParser()
_config.read('config.ini')
_path = _config.get('global', 'path')
_mode = _config.getboolean('global', 'targeted')


publishers, _grid = load_configs(_path)
pub_list = PublishersList(_grid)

player = SinglePlayer()

api = ApiConnection(host='25.152.172.38')
log = CsvLogger('log.csv', ['date', 'video', 'viewers', 'value'])


def get_viewers_grid(viewers):
    grid = ViewersGrid()
    for viewer in viewers:
        grid.add(viewer.age, viewer.gender)
    return grid

_gender_dict = ['unknown', 'male', 'female']
_age_dict = ['unknown', 'child', 'teen', 'young', 'older', 'senior']


def run_once():
    audience = api.get_audience_details()
    viewers_grid = get_viewers_grid(audience)
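
The reads at the top of this example use Python's standard configparser module, so the expected config.ini needs a [global] section with a path string and a targeted boolean; a hypothetical file satisfying those reads (values are illustrative):

[global]
path = ./configs
targeted = true
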
Example #7
File: train.py  Project: yanemcovsky/SIAM
def main():
    args = get_args()
    device, dtype = args.device, args.dtype

    add_args = {'num_classes': args.num_classes}
    if args.cpni:
        add_args = {
            'weight_noise': args.weight_noise,
            'act_noise_a': args.act_noise_a,
            'act_noise_b': args.act_noise_b,
            'rank': args.noise_rank
        }
    if args.dataset == torchvision.datasets.ImageNet:
        add_args['pretrained'] = True
    else:
        add_args['width'] = args.width
    add_args['num_classes'] = args.num_classes
    smoothing_args = {}
    if args.smoothing:
        smoothing_args = {
            'noise_sd': args.noise_sd,
            'm_test': args.m_test,
            'm_train': args.m_train
        }

    model = args.net(**smoothing_args, **add_args)

    num_parameters = sum([l.nelement() for l in model.parameters()])
    print(model)
    print("Number of parameters {}".format(num_parameters))

    train_loader, val_loader, adv_data = args.get_loaders(
        args.dataset, args.data, args.batch_size, args.val_batch_size,
        args.workers, args.adv_data)
    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss()

    model, criterion = model.to(device=device,
                                dtype=dtype), criterion.to(device=device,
                                                           dtype=dtype)

    if args.opt == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.learning_rate,
                                    momentum=args.momentum,
                                    weight_decay=args.decay,
                                    nesterov=True)
    elif args.opt == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.learning_rate,
                                     weight_decay=args.decay)
    else:
        raise ValueError('Wrong optimizer!')

    scheduler = MultiStepLR(optimizer,
                            milestones=args.schedule,
                            gamma=args.gamma)

    best_test = 0

    # optionally resume from a checkpoint
    data = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    if args.zero_start:
        args.start_epoch = 0
    if args.evaluate:
        loss, top1, top5 = test(model, val_loader, criterion, device,
                                dtype)  # TODO
        return

    csv_logger = CsvLogger(filepath=args.save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    if args.adv:
        normalize = {
            'mean': np.array([0.491, 0.482, 0.447]),
            'std': np.array([0.247, 0.243, 0.262])
        }
        if args.alpha:
            adv_train_network_alpha(args.start_epoch, args.epochs, scheduler,
                                    model, train_loader, val_loader, optimizer,
                                    criterion, device, dtype, args.batch_size,
                                    args.log_interval, csv_logger,
                                    args.save_path, claimed_acc1, claimed_acc5,
                                    best_test, args.attack, args.eps, 0.5,
                                    normalize)
        else:
            a = smoothing_args
            a.update(add_args)
            a['width'] = args.width
            adv_train_network(args.start_epoch, args.epochs, scheduler, model,
                              train_loader, val_loader, optimizer, criterion,
                              device, dtype, args.batch_size,
                              args.log_interval, csv_logger, args.save_path,
                              claimed_acc1, claimed_acc5, best_test,
                              args.attack, args.eps, args.adv_w, normalize,
                              args, a)
    else:
        train_network(args.start_epoch, args.epochs, scheduler, model,
                      train_loader, val_loader, adv_data, optimizer, criterion,
                      device, dtype, args.batch_size, args.log_interval,
                      csv_logger, args.save_path, claimed_acc1, claimed_acc5,
                      best_test)
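
One detail in the adversarial branch above: a = smoothing_args followed by a.update(add_args) mutates smoothing_args itself, since a is just another name for the same dict. A side-effect-free version of the same merge:

a = {**smoothing_args, **add_args}  # builds a new dict; smoothing_args is untouched
a['width'] = args.width
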
Example #8
def main():
    args = parser.parse_args()

    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.gpus:
        torch.cuda.manual_seed_all(args.seed)

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'

    if args.type == 'float64':
        dtype = torch.float64
    elif args.type == 'float32':
        dtype = torch.float32
    elif args.type == 'float16':
        dtype = torch.float16
    else:
        raise ValueError('Wrong type!')  # TODO int8

    model = ShuffleNetV2(scale=args.scaling,
                         c_tag=args.c_tag,
                         SE=args.SE,
                         residual=args.residual,
                         groups=args.groups)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    print(model)
    print('number of parameters: {}'.format(num_parameters))
    print('FLOPs: {}'.format(
        flops_benchmark.count_flops(
            ShuffleNetV2, args.batch_size //
            len(args.gpus) if args.gpus is not None else args.batch_size,
            device, dtype, args.input_size, 3, args.scaling, 3, args.c_tag,
            1000, torch.nn.ReLU, args.SE, args.residual, args.groups)))

    train_loader, val_loader = get_loaders(args.dataroot, args.batch_size,
                                           args.batch_size, args.input_size,
                                           args.workers)
    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    if args.gpus is not None:
        model = torch.nn.DataParallel(model, args.gpus)
    model.to(device=device, dtype=dtype)
    criterion.to(device=device, dtype=dtype)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.decay,
                                nesterov=True)
    if args.find_clr:
        find_bounds_clr(model,
                        train_loader,
                        optimizer,
                        criterion,
                        device,
                        dtype,
                        min_lr=args.min_lr,
                        max_lr=args.max_lr,
                        step_size=args.epochs_per_step * len(train_loader),
                        mode=args.mode,
                        save_path=save_path)
        return

    if args.clr:
        scheduler = CyclicLR(optimizer,
                             base_lr=args.min_lr,
                             max_lr=args.max_lr,
                             step_size=args.epochs_per_step *
                             len(train_loader),
                             mode=args.mode)
    else:
        scheduler = MultiStepLR(optimizer,
                                milestones=args.schedule,
                                gamma=args.gamma)

    best_test = 0

    # optionally resume from a checkpoint
    data = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        loss, top1, top5 = test(model, val_loader, criterion, device,
                                dtype)  # TODO
        return

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    if args.SE in claimed_acc_top1:
        if args.scaling in claimed_acc_top1[args.SE]:
            claimed_acc1 = 1 - claimed_acc_top1[args.SE][args.scaling]
            csv_logger.write_text('Claimed accuracy is {:.2f}% top-1'.format(
                claimed_acc1 * 100.))
    train_network(args.start_epoch, args.epochs, scheduler, model,
                  train_loader, val_loader, optimizer, criterion, device,
                  dtype, args.batch_size, args.log_interval, csv_logger,
                  save_path, claimed_acc1, claimed_acc5, best_test)
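
The nested "if key in table" lookups used for the claimed-accuracy tables in these examples can be collapsed with dict.get; a sketch against the same claimed_acc_top1 structure (this table apparently stores top-1 error, hence the 1 - value):

claimed_acc1 = claimed_acc_top1.get(args.SE, {}).get(args.scaling)
if claimed_acc1 is not None:
    claimed_acc1 = 1 - claimed_acc1
    csv_logger.write_text('Claimed accuracy is {:.2f}% top-1'.format(claimed_acc1 * 100.))
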
Example #9
def main():
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    global args, best_prec1
    best_prec1 = 0
    args = parser.parse_args()
    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    args.noise = not args.no_noise
    args.quant = not args.no_quantization
    args.act_quant = not args.no_act_quantization
    args.quant_edges = not args.no_quant_edges

    logging.info("saving to %s", save_path)
    logging.debug("run arguments: %s", args)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'
    dtype = torch.float32

    args.step_setup = None

    model = models.__dict__[args.model]
    model_config = {
        'scale': args.scale,
        'input_size': args.input_size,
        'dataset': args.dataset,
        'bitwidth': args.bitwidth,
        'quantize': args.quant,
        'noise': args.noise,
        'step': args.step,
        'depth': args.depth,
        'act_bitwidth': args.act_bitwidth,
        'act_quant': args.act_quant,
        'quant_edges': args.quant_edges,
        'step_setup': args.step_setup,
        'quant_epoch_step': args.quant_epoch_step,
        'quant_start_stage': args.quant_start_stage,
        'normalize': args.no_pre_process_normalize,
        'noise_mask': args.noise_mask
    }

    if args.model_config != '':
        model_config = dict(model_config, **literal_eval(args.model_config))

    # create model
    model = model(**model_config)
    logging.info("creating model %s", args.model)
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print("number of parameters: ", params)
    logging.info("created model with configuration: %s", model_config)
    print(model)

    data = None
    checkpoint_epoch = 0
    # optionally resume from a checkpoint
    if args.evaluate:
        if not os.path.isfile(args.evaluate):
            parser.error('invalid checkpoint: {}'.format(args.evaluate))
        checkpoint = torch.load(args.evaluate, map_location=device)
        load_model(model, checkpoint)
        logging.info("loaded checkpoint '%s' (epoch %s)", args.evaluate,
                     checkpoint['epoch'])

        print("loaded checkpoint {0} (epoch {1})".format(
            args.evaluate, checkpoint['epoch']))

    elif args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            if not args.start_from_zero:
                args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            checkpoint_epoch = checkpoint['epoch']

            load_model(model, checkpoint)

            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.gpus is not None:
        model = torch.nn.DataParallel(
            model, [args.gpus[0]]
        )  # Statistics need to be calculated on a single GPU to be consistent with data among multiple GPUs

    # Data loading code
    default_transform = {
        'train':
        get_transform(args.dataset,
                      input_size=args.input_size,
                      augment=True,
                      integer_values=args.quant_dataloader,
                      norm=not args.no_pre_process_normalize),
        'eval':
        get_transform(args.dataset,
                      input_size=args.input_size,
                      augment=False,
                      integer_values=args.quant_dataloader,
                      norm=not args.no_pre_process_normalize)
    }
    transform = getattr(model.module, 'input_transform', default_transform)

    val_data = get_dataset(args.dataset,
                           'val',
                           transform['eval'],
                           datasets_path=args.datapath)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=args.val_batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    train_data = get_dataset(args.dataset,
                             'train',
                             transform['train'],
                             datasets_path=args.datapath)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    statistics_train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.act_stats_batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.decay,
                                nesterov=True)
    model, criterion = model.to(device, dtype), criterion.to(device, dtype)
    if args.clr:
        scheduler = CyclicLR(optimizer,
                             base_lr=args.min_lr,
                             max_lr=args.max_lr,
                             step_size=args.epochs_per_step *
                             len(train_loader),
                             mode=args.mode)
    else:
        scheduler = MultiStepLR(optimizer,
                                milestones=args.schedule,
                                gamma=args.gamma)

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)
    csv_logger_training_stats = os.path.join(save_path, 'training_stats.csv')

    # pre-training activation and parameters statistics calculation ####
    if check_if_need_to_collect_statistics(model):
        for layer in model.modules():
            if isinstance(layer, actquant.ActQuantBuffers):
                layer.pre_training_statistics = True  # Turn on pre-training activation statistics calculation
        model.module.statistics_phase = True

        validate(
            statistics_train_loader,
            model,
            criterion,
            device,
            epoch=0,
            num_of_batches=80,
            stats_phase=True)  # Run validation on training set for statistics
        model.module.quantize.get_act_max_value_from_pre_calc_stats(
            list(model.modules()))
        _ = model.module.quantize.set_weight_basis(list(model.modules()), None)

        for layer in model.modules():
            if isinstance(layer, actquant.ActQuantBuffers):
                layer.pre_training_statistics = False  # Turn off pre-training activation statistics calculation
        model.module.statistics_phase = False

    else:  # Maximal activation values still need to be derived from loaded stats
        model.module.quantize.assign_act_clamp_during_val(list(
            model.modules()),
                                                          print_clamp_val=True)
        model.module.quantize.assign_weight_clamp_during_val(
            list(model.modules()), print_clamp_val=True)
        # model.module.quantize.get_act_max_value_from_pre_calc_stats(list(model.modules()))

    if args.gpus is not None:  # Return to Multi-GPU after statistics calculations
        model = torch.nn.DataParallel(model.module, args.gpus)
        model, criterion = model.to(device, dtype), criterion.to(device, dtype)

    # pre-training activation statistics calculation ####

    if args.evaluate:
        val_loss, val_prec1, val_prec5 = validate(val_loader,
                                                  model,
                                                  criterion,
                                                  device,
                                                  epoch=0)
        print("val_prec1: ", val_prec1)
        return

    # fast forward to curr stage
    for i in range(args.quant_start_stage):
        model.module.switch_stage(0)

    for epoch in trange(args.start_epoch, args.epochs + 1):

        if not isinstance(scheduler, CyclicLR):
            scheduler.step()

        #     scheduler.optimizer = optimizer
        train_loss, train_prec1, train_prec5 = train(
            train_loader,
            model,
            criterion,
            device,
            epoch,
            optimizer,
            scheduler,
            training_stats_logger=csv_logger_training_stats)

        for layer in model.modules():
            if isinstance(layer, actquant.ActQuantBuffers):
                layer.print_clamp()

        # evaluate on validation set

        val_loss, val_prec1, val_prec5 = validate(val_loader, model, criterion,
                                                  device, epoch)

        # remember best prec@1 and save checkpoint
        is_best = val_prec1 > best_prec1
        best_prec1 = max(val_prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': args.model,
                'config': args.model_config,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'layers_b_dict': model.module.layers_b_dict  # TODO: this doesn't work for multi-GPU - needs to be deleted
            },
            is_best,
            path=save_path)
        # New type of logging
        csv_logger.write({
            'epoch': epoch + 1,
            'val_error1': 1 - val_prec1,
            'val_error5': 1 - val_prec5,
            'val_loss': val_loss,
            'train_error1': 1 - train_prec1,
            'train_error5': 1 - train_prec5,
            'train_loss': train_loss
        })
        csv_logger.plot_progress(title=args.model + str(args.depth))
        csv_logger.write_text(
            'Epoch {}: Best accuracy is {:.2f}% top-1'.format(
                epoch + 1, best_prec1 * 100.))
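
The guard "if not isinstance(scheduler, CyclicLR): scheduler.step()" in the epoch loop exists because the two schedulers tick on different clocks: MultiStepLR advances once per epoch, while CyclicLR, whose step_size is given in batches, has to advance once per batch, presumably inside train(). A sketch of the per-batch half, assuming a standard loader of (inputs, targets) pairs:

def train_one_epoch(loader, model, criterion, device, optimizer, scheduler):
    model.train()
    for inputs, targets in loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        if isinstance(scheduler, CyclicLR):
            scheduler.step()  # the cyclic schedule moves one batch-sized step
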
Example #10
def main():
    seed = random.randint(1, 10000)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

    results_dir = '/tmp'
    save = time_stamp
    save_path = os.path.join(results_dir, save)

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    gpus = 2  # [int(i) for i in gpus.split(',')]
    device = 'cuda:0'  # + str(args.gpus[0])
    cudnn.benchmark = True
    dtype = torch.float64

    input_size = 224
    scaling = 1.0
    batch_size = 20
    workers = 4
    learning_rate = 0.02
    momentum = 0.9
    decay = 0.00004
    max_lr = 1
    min_lr = 0.00001
    start_epoch = 0
    epochs = 400
    epochs_per_step = 20
    log_interval = 100
    mode = 'triangular2'
    evaluate = 'false'
    dataroot = "data"


    model = MobileNet2(input_size=input_size, scale=scaling)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    #print(model)


    """print('number of parameters: {}'.format(num_parameters))
    print('FLOPs: {}'.format(
        flops_benchmark.count_flops(MobileNet2,
                                    batch_size // len(gpus) if gpus is not None else batch_size,
                                    device, dtype, input_size, 3, scaling)))"""

    train_loader, val_loader = get_loaders(dataroot, batch_size, batch_size, input_size, workers)

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    model = torch.nn.DataParallel(model)
        
    model.to(device=device, dtype=dtype)
    criterion.to(device=device, dtype=dtype)

    optimizer = torch.optim.SGD(model.parameters(), learning_rate, momentum=momentum, weight_decay=decay,
                                nesterov=True)
    find_bounds_clr(model, train_loader, optimizer, criterion, device, dtype, min_lr=min_lr,
                        max_lr=max_lr, step_size=epochs_per_step * len(train_loader), mode=mode,
                        save_path=save_path)
    scheduler = CyclicLR(optimizer, base_lr=min_lr, max_lr=max_lr,
                             step_size=epochs_per_step * len(train_loader), mode=mode)
    
    best_test = 0

 
    if evaluate == 'true':
        loss, top1, top5 = test(model, val_loader, criterion, device, dtype)  # TODO
        return

    data = []

    csv_logger = CsvLogger(filepath=save_path, data=data)
    #csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    if input_size in claimed_acc_top1:
        if scaling in claimed_acc_top1[input_size]:
            claimed_acc1 = claimed_acc_top1[input_size][scaling]
            claimed_acc5 = claimed_acc_top5[input_size][scaling]
            csv_logger.write_text(
                'Claimed accuracies are: {:.2f}% top-1, {:.2f}% top-5'.format(claimed_acc1 * 100., claimed_acc5 * 100.))
            
            
    train_network(start_epoch, epochs, scheduler, model, train_loader, val_loader, optimizer, criterion,
                  device, dtype, batch_size, log_interval, csv_logger, './data', claimed_acc1, claimed_acc5,
                  best_test)
    
    return 1
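
Nearly every example opens with the same seeding block; a small helper capturing that recurring pattern (the helper name is ours, not from the examples):

import random

import torch

def seed_everything(seed=None):
    # Seed Python's and PyTorch's RNGs and return the seed actually used.
    if seed is None:
        seed = random.randint(1, 10000)
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    return seed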