Example #1
def main():

    # instantiate model and initialize weights
    model = AutoNet(input_nc=args.input_nc, ndf=6, nonlinear='relu')
    networks.print_network(model)

    if args.cuda:
        model.cuda()

    print('using pretrained model')
    checkpoint = torch.load(project_root + args.log_dir + '/checkpoint_60.pth')
    model.load_state_dict(checkpoint['state_dict'])
    args.lr = args.lr * 0.001
    itr_start = 1

    nature = test_nature(val_loader, model)
    print(nature)
    threshold = max(2, nature * 2)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = create_optimizer(model, args.lr)

    new_samples = []
    pn_num = []
    nature_error_itr_global = []
    for itr in np.arange(itr_start, 5):
        args.dataroot = dst_dir

        tmp = construct_negative_samples(model, new_samples, itr)
        pn_num.append(tmp)

        train_loader = myDataset.DataLoaderHalf(
            myDataset.MyDataset(
                args,
                transforms.Compose([
                    transforms.Resize((256, 256), Image.BICUBIC),
                    transforms.ToTensor(), normalize
                ])),
            batch_size=args.batch_size,
            shuffle=True,
            half_constraint=True,
            sampler_type='RandomBalancedSampler',
            **kwargs)
        print('The number of train data:{}'.format(len(train_loader.dataset)))
        args.epochs = 15  # after the new negative samples are constructed, the learning rate stays constant and each iteration trains for 15 epochs

        train_multi(train_loader, optimizer, model, criterion, val_loader, itr,
                    nature_error_itr_global)

    print(pn_num)
    model_selection(nature_error_itr_global, threshold)
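myDataset.DataLoaderHalf, myDataset.MyDataset, and the 'RandomBalancedSampler' referenced above belong to the project itself and are not shown on this page. As a rough illustration only, here is a minimal, hypothetical sketch of the kind of class-balanced sampler such a loader typically relies on, built on torch.utils.data.Sampler; the dataset.labels attribute and the one-positive/one-negative interleaving scheme are assumptions, not the project's actual implementation.

import numpy as np
from torch.utils.data import Sampler


class BalancedPairSampler(Sampler):
    """Yield indices so that every batch draws half its samples from each class.

    Hypothetical stand-in for the project's 'RandomBalancedSampler'; it assumes
    the wrapped dataset exposes per-sample integer labels via `dataset.labels`.
    """

    def __init__(self, dataset):
        labels = np.asarray(dataset.labels)
        self.pos = np.flatnonzero(labels == 1)
        self.neg = np.flatnonzero(labels == 0)
        # iterate over as many positive/negative pairs as the smaller class allows
        self.num_pairs = min(len(self.pos), len(self.neg))

    def __iter__(self):
        pos = np.random.permutation(self.pos)[:self.num_pairs]
        neg = np.random.permutation(self.neg)[:self.num_pairs]
        # interleave one positive and one negative index at a time
        order = np.empty(2 * self.num_pairs, dtype=np.int64)
        order[0::2] = pos
        order[1::2] = neg
        return iter(order.tolist())

    def __len__(self):
        return 2 * self.num_pairs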
Example #2
data_root = '/home/wzquan/publicData/NIvsCG/RRVData/RRVNature-Corona'
project_root = '/home/wzquan/Project/NIvsCG/RRVNature-project/RRVNature-Corona/SrcCode'

LOG_DIR = project_root + args.log_dir
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

args.dataroot = os.path.join(data_root, 'train')
train_loader = myDataset.DataLoaderHalf(
    myDataset.MyDataset(
        args,
        transforms.Compose([
            transforms.RandomCrop(233),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ])),
    batch_size=args.batch_size,
    shuffle=True,
    half_constraint=True,
    sampler_type='RandomBalancedSampler',
    **kwargs)
print('The number of train data:{}'.format(len(train_loader.dataset)))


Example #3
def main():

    # instantiate model and initialize weights
    model = ENet()
    networks.print_network(model)
    networks.init_weights(model, init_type='normal')
    model.init_convFilter(trainable=srm_trainable)

    if args.cuda:
        model.cuda()

    print('using pretrained model')
    checkpoint = torch.load(project_root + args.log_dir +
                            '/checkpoint_300.pth')
    model.load_state_dict(checkpoint['state_dict'])
    args.lr = args.lr * 0.001
    threshold = THRESHOLD_MAX

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    L1_criterion = nn.L1Loss(reduction='sum').cuda()

    if not srm_trainable:
        params = []
        for name, param in model.named_parameters():
            if name.find('convFilter1') == -1:
                params += [param]

        optimizer = create_optimizer(params, args.lr)
    else:
        optimizer = create_optimizer(model.parameters(), args.lr)

    nature_error_itr_global = []
    for itr in np.arange(1, 11):
        args.dataroot = dst_dir
        nature_error_itr_local = []

        # adding negative samples into the original training dataset
        construct_negative_samples(itr)

        train_loader = myDataset.DataLoaderHalf(
            myDataset.MyDataset(
                args,
                transforms.Compose([
                    transforms.RandomCrop(233),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(), normalize
                ])),
            batch_size=args.batch_size,
            shuffle=True,
            half_constraint=True,
            sampler_type='RandomBalancedSampler',
            **kwargs)
        print('The number of train data:{}'.format(len(train_loader.dataset)))
        args.epochs = 15

        train_multi(train_loader, optimizer, model, criterion, L1_criterion,
                    val_loader, itr, nature_error_itr_local,
                    nature_error_itr_global)

        # start from itr = 1
        if len(nature_error_itr_local) > 0:
            adv_model_num, adv_model_idx = adv_model_selection(
                nature_error_itr_local, threshold, itr)
            if adv_model_num < 1:
                break

    print(nature_error_itr_global)
    print(len(nature_error_itr_global) / (args.epochs - args.epochs // 2))
    final_model_selection(nature_error_itr_global, threshold)
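create_optimizer is defined elsewhere in the project; this example only shows that it receives a parameter iterable (either a list filtered to skip the frozen SRM filter convFilter1, or model.parameters()) together with a learning rate. A minimal sketch under those assumptions follows; the optimizer type and hyper-parameters are arbitrary choices, not the project's documented ones. Note that Example #1 passes the model object itself, so the real helper may unwrap model.parameters() internally.

import torch.optim as optim


def create_optimizer(parameters, lr):
    # Hypothetical stand-in for the project's helper: build an optimizer over
    # whatever parameter iterable the caller assembled (e.g. everything except
    # the fixed SRM filter). SGD with momentum is an assumption.
    return optim.SGD(parameters, lr=lr, momentum=0.9, weight_decay=1e-4)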
Example #4
if os.path.exists(dst_dir):
    rmtree(dst_dir)

copytree(src_dir, dst_dir)

img_trans = transforms.Compose([
    transforms.Resize((256, 256), Image.BICUBIC),
    transforms.ToTensor(), normalize
])

args.dataroot = src_dir  # use src_dir here just to extract the original info of the training data
image_loader = myDataset.DataLoaderHalf(
    myDataset.MyDataset(
        args,
        transforms.Compose([
            transforms.Resize((256, 256), Image.BICUBIC),
            transforms.ToTensor(), normalize
        ])),
    batch_size=args.batch_size,
    shuffle=False,
    half_constraint=False,
    drop_last=False,
    **kwargs)
print('The number of train data:{}'.format(len(image_loader.dataset)))
image_name = image_loader.dataset.images_txt
all_image_num = len(image_name)
print(all_image_num)

# store the name of colorized images
image_pn_add_flag = {}
for item in image_name[:all_image_num // 2]:
    image_pn_add_flag[item] = True
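Both main() functions above restore weights through checkpoint['state_dict'], so the checkpoints on disk are plain dictionaries with at least a 'state_dict' entry. A minimal sketch of the save side that would produce compatible checkpoint_<epoch>.pth files; the helper name and the extra 'epoch' field are assumptions.

import os

import torch


def save_checkpoint(model, epoch, log_dir):
    # Hypothetical counterpart of the torch.load(...)['state_dict'] calls above:
    # the file is a plain dict whose 'state_dict' entry holds the model weights.
    state = {'epoch': epoch, 'state_dict': model.state_dict()}
    torch.save(state, os.path.join(log_dir, 'checkpoint_{}.pth'.format(epoch)))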