Example no. 1
def main():

    # instantiate model and initialize weights
    model = AutoNet(input_nc=args.input_nc, ndf=6, nonlinear='relu')
    networks.print_network(model)

    if args.cuda:
        model.cuda()

    print('using pretrained model')
    checkpoint = torch.load(project_root + args.log_dir + '/checkpoint_60.pth')
    model.load_state_dict(checkpoint['state_dict'])
    args.lr = args.lr * 0.001
    itr_start = 1

    nature = test_nature(val_loader, model)
    print(nature)
    threshold = max(2, nature * 2)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = create_optimizer(model, args.lr)

    new_samples = []
    pn_num = []
    nature_error_itr_global = []
    for itr in np.arange(itr_start, 5):
        args.dataroot = dst_dir

        tmp = construct_negative_samples(model, new_samples, itr)
        pn_num.append(tmp)

        train_loader = myDataset.DataLoaderHalf(
            myDataset.MyDataset(
                args,
                transforms.Compose([
                    transforms.Resize((256, 256), Image.BICUBIC),
                    transforms.ToTensor(), normalize
                ])),
            batch_size=args.batch_size,
            shuffle=True,
            half_constraint=True,
            sampler_type='RandomBalancedSampler',
            **kwargs)
        print('The number of train data:{}'.format(len(train_loader.dataset)))
        args.epochs = 15  # after the new negative samples are constructed, the learning rate stays constant and each iteration trains for 15 epochs

        train_multi(train_loader, optimizer, model, criterion, val_loader, itr,
                    nature_error_itr_global)

    print(pn_num)
    model_selection(nature_error_itr_global, threshold)
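create_optimizer is a project-specific helper that these snippets never define. A minimal sketch of what it could look like, assuming a plain Adam optimizer; note that Example no. 3 below passes a parameter list rather than a model, so the real helper's signature may differ:

import torch.optim as optim

def create_optimizer(model, lr):
    # Hypothetical sketch: wrap all model parameters in an Adam optimizer.
    # The actual project code may use SGD, weight decay, or parameter groups.
    return optim.Adam(model.parameters(), lr=lr)
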
Example no. 2
kwargs = {'num_workers': 8, 'pin_memory': True} if args.cuda else {}

data_root = '/home/wzquan/publicData/NIvsCG/RRVData/RRVNature-Corona'
project_root = '/home/wzquan/Project/NIvsCG/RRVNature-project/RRVNature-Corona/SrcCode'

LOG_DIR = project_root + args.log_dir
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

args.dataroot = os.path.join(data_root, 'train')
train_loader = myDataset.DataLoaderHalf(myDataset.MyDataset(
    args,
    transforms.Compose([
        transforms.RandomCrop(233),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])),
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        half_constraint=True,
                                        sampler_type='RandomBalancedSampler',
                                        **kwargs)
print('The number of train data:{}'.format(len(train_loader.dataset)))


def main():
    # instantiate model and initialize weights
    model = ENet()
    networks.print_network(model)
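DataLoaderHalf and the 'RandomBalancedSampler' it selects come from this project's myDataset module and are not shown. Judging from half_constraint=True, the intent is to keep every batch balanced between the two classes (natural vs. computer-generated images). A hypothetical sketch of such a sampler on top of torch.utils.data.Sampler, purely to illustrate the idea:

import torch
from torch.utils.data import Sampler

class BalancedPairSampler(Sampler):
    """Hypothetical sketch: interleave shuffled positive and negative indices
    so that every even-sized batch contains half of each class."""

    def __init__(self, labels):
        self.pos = [i for i, y in enumerate(labels) if y == 1]
        self.neg = [i for i, y in enumerate(labels) if y == 0]

    def __iter__(self):
        pos = [self.pos[i] for i in torch.randperm(len(self.pos))]
        neg = [self.neg[i] for i in torch.randperm(len(self.neg))]
        for p, n in zip(pos, neg):
            yield p
            yield n

    def __len__(self):
        return 2 * min(len(self.pos), len(self.neg))
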
Example no. 3
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
if args.cuda:
    cudnn.benchmark = True
kwargs = {'num_workers': 8, 'pin_memory': True} if args.cuda else {}

data_root = '/home/wzquan/publicData/colorization/'
project_root = '/home/wzquan/Project/colorization'

normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

args.dataroot = os.path.join(data_root, 'test_data_all/test_images')
test_loader = torch.utils.data.DataLoader(myDataset.MyDataset(
    args,
    transforms.Compose([
        transforms.Resize((256, 256), Image.BICUBIC),
        transforms.ToTensor(), normalize
    ])),
                                          batch_size=args.test_batch_size,
                                          shuffle=False,
                                          **kwargs)
print('The number of val data:{}'.format(len(test_loader.dataset)))

args.dataroot = os.path.join(data_root, 'test_data_all/test_images_B')
test_loader_2 = torch.utils.data.DataLoader(myDataset.MyDataset(
    args,
    transforms.Compose([
        transforms.Resize((256, 256), Image.BICUBIC),
        transforms.ToTensor(), normalize
    ])),
                                            batch_size=args.test_batch_size,
                                            shuffle=False,
                                            **kwargs)
print('The number of val data:{}'.format(len(test_loader_2.dataset)))


def main():

    # instantiate model and initialize weights
    model = ENet()
    networks.print_network(model)
    networks.init_weights(model, init_type='normal')
    model.init_convFilter(trainable=srm_trainable)

    if args.cuda:
        model.cuda()

    print('using pretrained model')
    checkpoint = torch.load(project_root + args.log_dir +
                            '/checkpoint_300.pth')
    model.load_state_dict(checkpoint['state_dict'])
    args.lr = args.lr * 0.001
    threshold = THRESHOLD_MAX

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    L1_criterion = nn.L1Loss(reduction='sum').cuda()

    if not srm_trainable:
        params = []
        for name, param in model.named_parameters():
            if 'convFilter1' not in name:
                params += [param]

        optimizer = create_optimizer(params, args.lr)
    else:
        optimizer = create_optimizer(model.parameters(), args.lr)

    nature_error_itr_global = []
    for itr in np.arange(1, 11):
        args.dataroot = dst_dir
        nature_error_itr_local = []

        # adding negative samples into the original training dataset
        construct_negative_samples(itr)

        train_loader = myDataset.DataLoaderHalf(
            myDataset.MyDataset(
                args,
                transforms.Compose([
                    transforms.RandomCrop(233),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(), normalize
                ])),
            batch_size=args.batch_size,
            shuffle=True,
            half_constraint=True,
            sampler_type='RandomBalancedSampler',
            **kwargs)
        print('The number of train data:{}'.format(len(train_loader.dataset)))
        args.epochs = 15

        train_multi(train_loader, optimizer, model, criterion, L1_criterion,
                    val_loader, itr, nature_error_itr_local,
                    nature_error_itr_global)

        # start from itr = 1
        if len(nature_error_itr_local) > 0:
            adv_model_num, adv_model_idx = adv_model_selection(
                nature_error_itr_local, threshold, itr)
            if adv_model_num < 1:
                break

    print(nature_error_itr_global)
    print(len(nature_error_itr_global) / (args.epochs - args.epochs // 2))
    final_model_selection(nature_error_itr_global, threshold)
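adv_model_selection and final_model_selection are likewise project-specific and undefined here. From the call sites they appear to screen each iteration's checkpoints by their error rate on natural images against the threshold, stopping the outer loop once no checkpoint passes. A hypothetical sketch of that screening step (names and return values are assumptions, not the authors' code):

def adv_model_selection(nature_error_itr_local, threshold, itr):
    # Hypothetical: keep the checkpoints of this iteration whose
    # natural-image error stays below the threshold.
    kept = [i for i, err in enumerate(nature_error_itr_local) if err < threshold]
    print('iteration {}: kept {} of {} checkpoints'.format(
        itr, len(kept), len(nature_error_itr_local)))
    return len(kept), kept
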
pn_data_dir = os.path.join(data_root, 'unpairLinear')

# construct negative examples from CG images and add them into the CG set
img_des_dir = os.path.join(dst_dir, 'CGG')

image_name = os.listdir(img_des_dir)
all_image_num = len(image_name)
print(all_image_num)

args.dataroot = '/home/wzquan/publicData/NIvsCG/RRVData/natural_validation_dataset'
val_loader = torch.utils.data.DataLoader(
    myDataset.MyDataset(
        args,
        transforms.Compose([
            transforms.TenCrop(233),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop)
                 for crop in crops])),  # returns a 4D tensor
            transforms.Lambda(
                lambda crops: torch.stack([normalize(crop) for crop in crops]))
        ])),
    batch_size=args.test_batch_size,
    shuffle=False,
    **kwargs)
print('The number of val data:{}'.format(len(val_loader.dataset)))
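Because of the TenCrop transform, this loader yields 5D batches of shape (batch, 10, C, H, W). The usual way to evaluate them, following the torchvision TenCrop documentation, is to fold the crop dimension into the batch before the forward pass and average the outputs back over the ten crops; a sketch, assuming model returns class logits:

for data, target in val_loader:
    bs, ncrops, c, h, w = data.size()
    output = model(data.view(-1, c, h, w))        # fuse batch and crop dims
    output = output.view(bs, ncrops, -1).mean(1)  # average logits over crops
    pred = output.argmax(dim=1)
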


def main():

    # instantiate model and initialize weights
    model = ENet()
    networks.print_network(model)
Example no. 6
parser.add_argument(
    '--log-interval',
    type=int,
    default=10,
    metavar='N',  # log the training status once every this many batches
    help='how many batches to wait before logging training status')
args = parser.parse_args()  # required with argparse: parse the command line and bind the arguments; see the argparse documentation for details
args.cuda = not args.no_cuda and torch.cuda.is_available()  # decide whether to use the GPU

torch.manual_seed(args.seed)  # set a random seed; standard practice for reproducibility, not something PyTorch-specific
if args.cuda:
    torch.cuda.manual_seed(args.seed)  # set the random seed for the GPU as well

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(myDataset.MyDataset(train=True),
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           **kwargs)
test_loader = torch.utils.data.DataLoader(myDataset.MyDataset(train=False),
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          **kwargs)
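
args.log_interval from the parser above controls how often progress is printed. A minimal training loop in the style of the canonical PyTorch MNIST example, showing how the loader, the cuda flag, and log_interval typically fit together; model and optimizer are assumed to exist in the surrounding script:

import torch.nn.functional as F

def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{}]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                loss.item()))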


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()