示例#1
0
    def __init__(self, opt):
        """Set up workspace/sample/parameter directories and build the
        Cityscapes-sequence train/test dataloaders.

        Args:
            opt: options object; must provide ``batch_size`` and ``workers``
                (plus whatever ``get_training_set``/``get_test_set`` read).
        """
        self.opt = opt
        dataset = 'cityscapes_seq_full'
        self.workspace = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')

        self.jobname = dataset + '_gpu8_refine_genmask_linklink_256_1node'
        self.modeldir = self.jobname + 'model'
        self.sampledir = os.path.join(self.workspace, self.jobname)
        self.parameterdir = self.sampledir + '/params'
        self.useHallucination = False

        # exist_ok avoids the race-prone "check then create" pattern the
        # original used; after this call the directory is guaranteed.
        os.makedirs(self.parameterdir, exist_ok=True)

        # whether to start training from an existing snapshot
        self.load = False
        self.iter_to_load = 62000

        # Write parameters setting file. The directory exists at this point,
        # so the original's second os.path.exists() re-check was redundant.
        utils.save_parameters(self)

        # Cityscapes datasets and loaders. Training is intentionally
        # unshuffled here (sequence data); both loaders drop the last
        # incomplete batch.
        train_Dataset = get_training_set(opt)
        test_Dataset = get_test_set(opt)

        self.trainloader = DataLoader(train_Dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.workers,
                                      pin_memory=True, drop_last=True)
        self.testloader = DataLoader(test_Dataset, batch_size=2, shuffle=False, num_workers=opt.workers,
                                     pin_memory=True, drop_last=True)
示例#2
0
def get_loaders(opt):
    """Build and return the training and validation dataloaders.

    Both loaders share one spatial pipeline (scale -> resize -> center
    crop -> tensor -> normalize); they differ only in the temporal
    transform (random crop vs. loop padding) and in shuffling.
    """
    norm_method = Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    spatial_transform = Compose([
        Scale((opt.sample_size, opt.sample_size)),
        Resize(256),
        CenterCrop(224),
        ToTensor(), norm_method
    ])

    # Training: random 25-frame temporal crop, shuffled batches.
    training_data = get_training_set(opt, spatial_transform,
                                     TemporalRandomCrop(25), ClassLabel())
    train_loader = torch.utils.data.DataLoader(
        training_data,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True)

    # Validation: loop-padded 25-frame clips, deterministic order.
    validation_data = get_validation_set(opt, spatial_transform,
                                         LoopPadding(25), ClassLabel())
    val_loader = torch.utils.data.DataLoader(
        validation_data,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
        pin_memory=True)

    return train_loader, val_loader
def get_traininfo(opt, norm_method):
    """Create the training dataloader plus epoch- and batch-level loggers.

    Args:
        opt: options namespace (``train_crop``, ``scales``, ``sample_size``,
            ``norm_value``, ``sample_duration``, ``batch_size``,
            ``n_threads``, ``result_path``).
        norm_method: normalization transform appended to the spatial pipeline.

    Returns:
        Tuple ``(train_loader, train_logger, train_batch_logger)``.
    """
    assert opt.train_crop in ['random', 'corner', 'center']
    if opt.train_crop == 'random':
        crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
    elif opt.train_crop == 'corner':
        crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
    else:  # 'center' (guaranteed by the assert above)
        crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size,
                                           crop_positions=['c'])

    spatial_transform = Compose([
        RandomRotate(),
        RandomResize(),
        crop_method,
        ToTensor(opt.norm_value),
        norm_method,
    ])

    training_data = get_training_set(opt, spatial_transform,
                                     TemporalRandomCrop(opt.sample_duration),
                                     ClassLabel())
    train_loader = torch.utils.data.DataLoader(
        training_data,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.n_threads,
        pin_memory=True)

    # Epoch-level and per-batch loggers written under opt.result_path.
    train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                          ['epoch', 'loss', 'prec1', 'prec5', 'lr'])
    train_batch_logger = Logger(
        os.path.join(opt.result_path, 'train_batch.log'),
        ['epoch', 'batch', 'iter', 'loss', 'prec1', 'prec5', 'lr'])

    return train_loader, train_logger, train_batch_logger
示例#4
0
文件: solver.py 项目: icpm/pix2pix
 def build_dataset(self):
     """Create train/test DataLoaders for the configured dataset.

     Loads ``datasets/<self.dataset>`` and stores the loaders on the
     instance as ``training_data_loader`` / ``testing_data_loader``.
     """
     root_path = "datasets/"
     train_set = get_training_set(root_path + self.dataset)
     test_set = get_test_set(root_path + self.dataset)
     # Shuffle only the training data; keep test order deterministic.
     self.training_data_loader = DataLoader(dataset=train_set,
                                            batch_size=self.batch_size,
                                            num_workers=self.threads,
                                            shuffle=True)
     self.testing_data_loader = DataLoader(dataset=test_set,
                                           batch_size=self.batch_size,
                                           num_workers=self.threads,
                                           shuffle=False)
示例#5
0
def load_knifey():
    """Download (if necessary) and load the knifey dataset.

    Returns:
        Tuple ``(x_train, y_train_cls, y_train, x_test, y_test_cls,
        y_test, cls_names)`` with one-hot labels cast to float32 and
        class-index arrays cast to int32.
    """
    knifey.maybe_download_and_extract()
    ds = knifey.load()

    x_train, y_train_cls, y_train = ds.get_training_set()
    x_test, y_test_cls, y_test = ds.get_test_set()

    # Cast label arrays to the dtypes downstream consumers expect.
    return (x_train,
            y_train_cls.astype(np.int32),
            y_train.astype(np.float32),
            x_test,
            y_test_cls.astype(np.int32),
            y_test.astype(np.float32),
            ds.class_names)
示例#6
0
def get_loaders(opt):
    """Build train/validation dataloaders for 16-frame clips.

    Side effect: ``opt.mean`` is populated from the configured mean
    dataset before the normalization method is chosen.
    """
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)

    # Pick the normalization: none, mean-only, or mean+std.
    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)

    # Training: scaled frames, random 16-frame temporal crop, shuffled.
    train_spatial = Compose([
        Scale((opt.sample_size, opt.sample_size)),
        ToTensor(opt.norm_value),
        norm_method,
    ])
    training_data = get_training_set(opt, train_spatial,
                                     TemporalRandomCrop(16), ClassLabel())
    train_loader = torch.utils.data.DataLoader(
        training_data,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True)

    # Validation: same spatial pipeline, loop-padded clips, fixed order.
    val_spatial = Compose([
        Scale((opt.sample_size, opt.sample_size)),
        ToTensor(opt.norm_value),
        norm_method,
    ])
    validation_data = get_validation_set(opt, val_spatial,
                                         LoopPadding(16), ClassLabel())
    val_loader = torch.utils.data.DataLoader(
        validation_data,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
        pin_memory=True)

    return train_loader, val_loader
示例#7
0
    def __init__(self, in_dir, save_folder=None):
        """Load the cached dataset and build the TF-1 graph placeholders.

        Args:
            in_dir: directory containing the raw dataset images.
            save_folder: optional folder used later when saving the model.
        """
        # BUG FIX: the original wrote ``dataset = dataset.load_cached(...)``.
        # Assigning to ``dataset`` makes it a local variable for the whole
        # function, so the right-hand reference to the module-level
        # ``dataset`` raised UnboundLocalError. Use a distinct local name.
        data = dataset.load_cached(cache_path='gdrive/My Drive/Colab Notebooks/data/', in_dir=in_dir)
        # number of classes
        self.num_classes = data.num_classes

        # Train/test splits: image paths plus class ids and one-hot labels.
        image_paths_train, cls_train, self.labels_train = data.get_training_set()
        image_paths_test, self.cls_test, self.labels_test = data.get_test_set()

        ##############################IMAGE PARAMETERS#####################################
        self.img_size = 128
        self.num_channels = 3
        # batch size
        self.train_batch_size = 64
        self.test_batch_size = 64
        ###################################################################################
        # Graph placeholders (TF1-style): input features, true labels, and
        # dropout keep-probabilities.
        self.x = tf.placeholder(tf.float32, shape=[None, self.img_size, self.img_size, self.num_channels], name='x')
        self.x_image = tf.reshape(self.x, [-1, self.img_size, self.img_size, self.num_channels])
        self.y_true = tf.placeholder(tf.float32, shape=[None, self.num_classes], name='y_true')
        # argmax over the one-hot labels yields the integer class id.
        self.y_true_cls = tf.argmax(self.y_true, axis=1)

        self.keep_prob = tf.placeholder(tf.float32)
        self.keep_prob_2 = tf.placeholder(tf.float32)
        self.y_pred_cls = None

        # Materialize the image arrays for both splits.
        self.train_images = self.load_images(image_paths_train)
        self.test_images = self.load_images(image_paths_test)
        self.save_folder = save_folder
        self.optimizer, self.accuracy = self.define_model()
示例#8
0
        # NOTE(review): fragment — the enclosing function's signature is not
        # visible in this chunk; ``opt`` and ``norm_method`` come from the
        # unseen outer scope, and the trailing if-statement is truncated.
        # Choose the multi-scale crop strategy for training.
        assert opt.train_crop in ['random', 'corner', 'center']
        if opt.train_crop == 'random':
            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(
                opt.scales, opt.sample_size, crop_positions=['c'])
        # Spatial pipeline: crop -> random horizontal flip -> tensor -> norm.
        spatial_transform = Compose([
            crop_method,
            RandomHorizontalFlip(),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(
            training_data,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=opt.n_threads,
            pin_memory=True)
        # Epoch-level and per-batch training loggers under opt.result_path.
        train_logger = Logger(
            os.path.join(opt.result_path, 'train.log'),
            ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

        # Nesterov momentum requires zero dampening (the matching else
        # branch is cut off at the end of this fragment).
        if opt.nesterov:
            dampening = 0
示例#9
0
def main():
    """Entry point: build the encoder/classifier model, the train and
    validation dataloaders, then run the train/val epoch loop.

    Reads a module-level ``cfg`` object for all configuration.
    """
    opt = parse_opts()

    # Model name is "<encoder>-<classifier>".
    ecd_name, cls_name = opt.model_name.split('-')
    ecd_model = get_encoder_net(ecd_name)
    cls_model = get_end_net(cls_name)

    cfg.encoder_model = ecd_name
    cfg.classification_model = cls_name

    # Tensorboard naming: 'TEST' means "use the model name as the run name".
    if opt.debug:
        cfg.debug = opt.debug
    else:
        if opt.tensorboard == 'TEST':
            cfg.tensorboard = opt.model_name
        else:
            cfg.tensorboard = opt.tensorboard
            cfg.flag = opt.flag
    model = cls_model(cfg,
                      encoder=CNNencoder(
                          cfg,
                          ecd_model(pretrained=True, path=opt.encoder_model)))
    cfg.video_path = os.path.join(cfg.root_path, cfg.video_path)
    cfg.annotation_path = os.path.join(cfg.root_path, cfg.annotation_path)

    cfg.list_all_member()

    torch.manual_seed(cfg.manual_seed)
    print('##########################################')
    print('####### model 仅支持单GPU')
    print('##########################################')
    model = model.cuda()
    print(model)
    criterion = nn.CrossEntropyLoss()
    if cfg.cuda:
        criterion = criterion.cuda()

    norm_method = Normalize([0, 0, 0], [1, 1, 1])

    print('##########################################')
    print('####### train')
    print('##########################################')
    assert cfg.train_crop in ['random', 'corner', 'center']
    if cfg.train_crop == 'random':
        # BUG FIX: the original assigned the bare tuple
        # ``(cfg.scales, cfg.sample_size)`` instead of constructing the
        # crop transform (every sibling branch builds a transform object),
        # which would fail as soon as Compose tried to call it.
        crop_method = MultiScaleRandomCrop(cfg.scales, cfg.sample_size)
    elif cfg.train_crop == 'corner':
        crop_method = MultiScaleCornerCrop(cfg.scales, cfg.sample_size)
    elif cfg.train_crop == 'center':
        crop_method = MultiScaleCornerCrop(cfg.scales,
                                           cfg.sample_size,
                                           crop_positions=['c'])
    # Training pipeline: crop -> random flip -> tensor -> normalize.
    spatial_transform = Compose([
        crop_method,
        RandomHorizontalFlip(),
        ToTensor(cfg.norm_value), norm_method
    ])
    temporal_transform = TemporalRandomCrop(cfg.sample_duration)
    target_transform = ClassLabel()
    training_data = get_training_set(cfg, spatial_transform,
                                     temporal_transform, target_transform)
    train_loader = torch.utils.data.DataLoader(training_data,
                                               batch_size=cfg.batch_size,
                                               shuffle=True,
                                               num_workers=cfg.n_threads,
                                               drop_last=False,
                                               pin_memory=True)
    optimizer = model.get_optimizer(lr1=cfg.lr, lr2=cfg.lr2)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=cfg.lr_patience)
    print('##########################################')
    print('####### val')
    print('##########################################')
    # Validation pipeline: deterministic scale + center crop.
    spatial_transform = Compose([
        Scale(cfg.sample_size),
        CenterCrop(cfg.sample_size),
        ToTensor(cfg.norm_value), norm_method
    ])
    temporal_transform = LoopPadding(cfg.sample_duration)
    target_transform = ClassLabel()
    validation_data = get_validation_set(cfg, spatial_transform,
                                         temporal_transform, target_transform)
    val_loader = torch.utils.data.DataLoader(validation_data,
                                             batch_size=cfg.batch_size,
                                             shuffle=False,
                                             num_workers=cfg.n_threads,
                                             drop_last=False,
                                             pin_memory=True)
    print('##########################################')
    print('####### run')
    print('##########################################')
    # In debug mode skip all logging; otherwise log under a fresh run dir.
    if cfg.debug:
        logger = None
    else:
        path = get_log_dir(cfg.logdir, name=cfg.tensorboard, flag=cfg.flag)
        logger = Logger(logdir=path)
        cfg.save_config(path)

    # Train/validate, stepping the plateau scheduler on validation loss.
    for i in range(cfg.begin_epoch, cfg.n_epochs + 1):
        train_epoch(i, train_loader, model, criterion, optimizer, cfg, logger)
        validation_loss = val_epoch(i, val_loader, model, criterion, cfg,
                                    logger)

        scheduler.step(validation_loss)
示例#10
0
            # NOTE(review): garbled scrape fragment — this span splices two
            # different functions together. The call assigning
            # ``training_data_etmd`` below is cut off mid-argument-list and
            # jumps straight into an unrelated ``elif`` branch, so this span
            # is NOT valid Python as it stands; code is preserved verbatim.
            spatial_transform = Compose([
                Scale([opt.sample_size, opt.sample_size]),
                RandomHorizontalFlip(),
                ToTensor(opt.norm_value), norm_method
            ])
        else:
            spatial_transform = Compose([
                Scale([opt.sample_size, opt.sample_size]),
                ToTensor(opt.norm_value), norm_method
            ])
        temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = Label()

        # One training set per saliency dataset, built by mutating
        # opt.dataset before each get_training_set call.
        opt.dataset = 'diem'
        training_data_diem = get_training_set(opt, spatial_transform,
                                              temporal_transform,
                                              target_transform)
        opt.dataset = 'coutrot1'
        training_data_coutrot1 = get_training_set(opt, spatial_transform,
                                                  temporal_transform,
                                                  target_transform)
        opt.dataset = 'coutrot2'
        training_data_coutrot2 = get_training_set(opt, spatial_transform,
                                                  temporal_transform,
                                                  target_transform)
        opt.dataset = 'summe'
        training_data_summe = get_training_set(opt, spatial_transform,
                                               temporal_transform,
                                               target_transform)
        opt.dataset = 'etmd'
        training_data_etmd = get_training_set(opt, spatial_transform,
            elif opt.train_crop == 'center':
                crop_method = MultiScaleCornerCrop(opt.scales,
                                                   opt.sample_size,
                                                   crop_positions=['c'])
            spatial_transform = Compose([
                Scale((256, 256)), crop_method,
                RandomHorizontalFlip(),
                ToTensor(opt.norm_value), norm_method
            ])
            temporal_transform = TemporalRandomCrop(opt.sample_duration, 1)

        target_transform = ClassLabel()
        # Input modality is selected from the model-name suffix.
        if opt.model.endswith('flow'):
            training_data = get_training_set(opt,
                                             spatial_transform,
                                             temporal_transform,
                                             target_transform,
                                             modality='flow')
        elif opt.model.endswith('pose'):
            training_data = get_training_set(opt,
                                             spatial_transform,
                                             temporal_transform,
                                             target_transform,
                                             modality='pose')
        elif opt.model.endswith('depth'):
            training_data = get_training_set(opt,
                                             spatial_transform,
                                             temporal_transform,
                                             target_transform,
                                             modality='depth')
        elif opt.model.endswith('part'):
def main():
    """Train the ResNet-based hashing model and periodically evaluate
    retrieval mAP@100.

    Relies on module-level ``opt`` and ``encode_length`` (defined elsewhere
    in this file).
    """
    # Backbone with its classification head replaced by identity features.
    resnet_in = generate_model(opt)
    resnet_in.module.fc = Identity()
    model = ReNet34(resnet_in, encode_length=encode_length)

    # Normalization: none, mean-only, or mean+std.
    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)

    if not opt.no_train:
        assert opt.train_crop in ['random', 'corner', 'center']
        if opt.train_crop == 'random':
            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(opt.scales,
                                               opt.sample_size,
                                               crop_positions=['c'])

        ## train loader
        spatial_transform = Compose([
            crop_method,
            RandomHorizontalFlip(),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)

        ## test (query) loader: deterministic scale + corner crop
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)
        target_transform = ClassLabel()
        test_data = get_test_set(opt, spatial_transform, temporal_transform,
                                 target_transform)
        test_loader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=opt.batch_size,
                                                  shuffle=False,
                                                  num_workers=opt.n_threads,
                                                  pin_memory=True)

        ## database loader: same deterministic pipeline as the test loader
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)
        target_transform = ClassLabel()
        validation_data = get_validation_set(opt, spatial_transform,
                                             temporal_transform,
                                             target_transform)
        database_loader = torch.utils.data.DataLoader(
            validation_data,
            batch_size=opt.batch_size,
            shuffle=False,
            num_workers=opt.n_threads,
            pin_memory=True)

        # Nesterov momentum requires zero dampening.
        if opt.nesterov:
            dampening = 0
        else:
            dampening = opt.dampening

        optimizer = optim.SGD(model.parameters(),
                              lr=opt.learning_rate,
                              momentum=opt.momentum,
                              dampening=dampening,
                              weight_decay=opt.weight_decay,
                              nesterov=opt.nesterov)
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   'min',
                                                   patience=opt.lr_patience)

    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']

        opt.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if not opt.no_train:
            optimizer.load_state_dict(checkpoint['optimizer'])
            # Move optimizer state tensors back onto the GPU.
            for state in optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.cuda()

    print('run')
    # NOTE(review): if opt.no_train is set, train_loader/optimizer/scheduler
    # are undefined here and this loop would fail — confirm callers never
    # combine no_train with a non-empty epoch range.
    for epoch in range(opt.begin_epoch, opt.n_epochs + 1):
        model.cuda().train()
        epoch_loss = 0.0
        n_batches = 0
        for i, (images, labels) in enumerate(train_loader):

            images = Variable(images.cuda())
            labels = Variable(labels.cuda().long())

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            x, _, b = model(images)

            # Match pairwise cosine similarity of the binary codes (b) to
            # that of the continuous features (x) across the two half-batches.
            target_b = F.cosine_similarity(b[:int(labels.size(0) / 2)],
                                           b[int(labels.size(0) / 2):])
            target_x = F.cosine_similarity(x[:int(labels.size(0) / 2)],
                                           x[int(labels.size(0) / 2):])
            loss = F.mse_loss(target_b, target_x)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            n_batches += 1

        # BUG FIX: ReduceLROnPlateau.step() requires the monitored metric
        # and is meant to run once per epoch; the original called
        # scheduler.step() with no argument inside the batch loop, which
        # raises TypeError. Step once per epoch on the mean training loss.
        if n_batches:
            scheduler.step(epoch_loss / n_batches)

        # Test the Model every 10 epochs.
        if (epoch + 1) % 10 == 0:
            model.eval()
            retrievalB, retrievalL, queryB, queryL = compress(
                database_loader, test_loader, model)
            result_map = calculate_top_map(qB=queryB,
                                           rB=retrievalB,
                                           queryL=queryL,
                                           retrievalL=retrievalL,
                                           topk=100)
            print('--------mAP@100: {}--------'.format(result_map))
示例#13
0
                ])
        # NOTE(review): fragment — both ends of this span are cut off;
        # ``spatial_transforms`` and ``opt`` come from the unseen outer
        # scope, and the trailing else-branch is truncated.

        # Map each training clip's Location to its MeanID annotation value.
        annotateData = pd.read_csv(opt.annotation_file, sep=',', header=0)
        keys = annotateData[annotateData.Dataset == 'Train']['Location']
        values = annotateData[annotateData.Dataset == 'Train']['MeanID']

        annotationDictionary = dict(zip(keys, values))

        temporal_transform = TemporalCenterRandomCrop(opt.sample_duration)
        """if opt.temporal_crop == 'Random':
            temporal_transform = TemporalRandomCrop(opt.sample_duration)
        else:
            temporal_transform = TemporalCenterRandomCrop(opt.sample_duration)"""
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transforms,
                                         temporal_transform, target_transform,
                                         annotationDictionary)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)
        # Epoch-level and per-batch loggers (batch log also records 'means').
        train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                              ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr', 'means'])

        # Nesterov momentum requires zero dampening (else branch truncated).
        if opt.nesterov:
            dampening = 0
        else:
def create_dataloader(args):
    """Resolve configured paths on ``args``, then build and return the
    training and validation dataloaders.

    Side effects: rewrites the path fields on ``args`` to absolute/rooted
    forms and populates ``args.scales``, ``args.mean``, ``args.std``.
    """
    # Anchor all relative paths at args.root_path when one is given.
    if args.root_path != '':
        args.video_path = os.path.join(args.root_path, args.video_path)
        args.annotation_path = os.path.join(args.root_path,
                                            args.annotation_path)
        args.result_path = os.path.join(args.root_path, args.result_path)
        if args.resume_path:
            args.resume_path = os.path.join(args.root_path, args.resume_path)
        if args.pretrain_path:
            args.pretrain_path = os.path.abspath(args.pretrain_path)

    # Geometric scale pyramid: initial_scale shrunk n_scales - 1 times.
    args.scales = [args.initial_scale]
    for _ in range(1, args.n_scales):
        args.scales.append(args.scales[-1] * args.scale_step)

    args.mean = get_mean(args.norm_value, dataset=args.mean_dataset)
    args.std = get_std(args.norm_value)

    # Normalization: none, mean-only, or mean+std.
    if args.no_mean_norm and not args.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not args.std_norm:
        norm_method = Normalize(args.mean, [1, 1, 1])
    else:
        norm_method = Normalize(args.mean, args.std)

    assert args.train_crop in ['random', 'corner', 'center']
    if args.train_crop == 'random':
        crop_method = MultiScaleRandomCrop(args.scales, args.sample_size)
    elif args.train_crop == 'corner':
        crop_method = MultiScaleCornerCrop(args.scales, args.sample_size)
    else:  # 'center' (guaranteed by the assert above)
        crop_method = MultiScaleCornerCrop(args.scales, args.sample_size,
                                           crop_positions=['c'])

    # Training pipeline: multi-scale crop -> random flip -> tensor -> norm.
    train_spatial = Compose([
        crop_method,
        RandomHorizontalFlip(),
        ToTensor(args.norm_value),
        norm_method,
    ])
    training_data = get_training_set(args, train_spatial,
                                     TemporalRandomCrop(args.sample_duration),
                                     ClassLabel())
    train_loader = torch.utils.data.DataLoader(
        training_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.n_threads,
        pin_memory=True)

    # Validation pipeline: deterministic scale + corner crop, batch size 1.
    val_spatial = Compose([
        Scale(int(args.sample_size / args.scale_in_test)),
        CornerCrop(args.sample_size, args.crop_position_in_test),
        ToTensor(args.norm_value),
        norm_method,
    ])
    validation_data = get_validation_set(
        args, val_spatial,
        TemporalCenterCrop(args.sample_duration), ClassLabel())
    val_loader = torch.utils.data.DataLoader(
        validation_data,
        batch_size=1,
        shuffle=False,
        num_workers=args.n_threads,
        pin_memory=True)

    return train_loader, val_loader
示例#15
0
def main():
    """Train a P3D saliency segmentation network.

    Builds train/val dataloaders over a video saliency dataset, initialises a
    P3D segmentation model from pretrained P3D-199 classification weights
    (only the layers whose names match are copied), then runs the train/val
    loop with checkpointing and early stopping.
    """

    # parameters
    # TODO: should we adjust the learning rate during the training process?
    learn_rate = 1e-4
    num_epochs = 600  # maximum number of training epochs
    max_patience = 60  # early-stopping patience (epochs without a better val loss)
    result_rp = '../result/model/'
    exp_name = 'P3D_saliency'

    batch_size = 3
    n_threads = 16
    # Temporal duration of inputs (frames per clip)
    sample_duration = 16
    # Divisor used by ToTensor to scale raw pixel values
    norm_value = 255
    # Height and width of inputs
    sample_size = 224
    # Number of validation samples for each activity
    n_val_samples = 3
    video_path = '/data1/guoxi/p3d_floder/resized_dataset/dataset/'
    reference_path = '/data1/guoxi/p3d_floder/resized_dataset/reference_dataset/'
    # video_path = '/data1/guoxi/p3d_floder/resized_dataset_for_test/dataset/'
    # reference_path = '/data1/guoxi/p3d_floder/resized_dataset_for_test/reference_dataset/'
    mean = [0.485, 0.456, 0.406]  # ImageNet channel statistics
    std = [0.229, 0.224, 0.225]

    norm_method = Normalize(mean, std)

    # Joint transforms are applied to the frame and its reference (target)
    # together so the random parameters match between input and label.
    RandomCrop_transform = Compose([
        joint_transforms.JointRandomCrop(
            224
        ),  # crop at a random position ('JpegImageFile' object does not support indexing)
    ])

    RandomHorizontalFlip_transform = Compose([
        joint_transforms.JointRandomHorizontalFlip(),  # flip the given PIL image horizontally with p=0.5
    ])

    RandomErase_transform = Compose(
        [joint_transforms.RandomErase(probability=0.5, sh=0.4, r1=0.3)])

    # Per-frame spatial transform: PIL -> tensor scaled by norm_value, then normalize
    spatial_transform = Compose([ToTensor(norm_value=norm_value),
                                 norm_method])

    target_transform = Compose([ToTensor(norm_value=norm_value)])

    # NOTE(review): opt is a plain [video_path, sample_duration] list here;
    # dataset.get_training_set/get_validation_set must unpack it positionally.
    opt = [video_path, sample_duration]

    train_data = dataset.get_training_set(opt, reference_path,
                                          spatial_transform, target_transform,
                                          RandomCrop_transform,
                                          RandomHorizontalFlip_transform,
                                          RandomErase_transform)
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=batch_size,
        shuffle=True,
        num_workers=n_threads,
        pin_memory=True
    )  # num_workers: worker processes for loading; pin_memory speeds host->GPU copies
    # Validation uses no joint augmentation (the three None arguments).
    validation_data = dataset.get_validation_set(opt, reference_path,
                                                 spatial_transform,
                                                 target_transform, None, None,
                                                 None)
    val_loader = torch.utils.data.DataLoader(validation_data,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=n_threads,
                                             pin_memory=True)

    # Sanity-check one batch: shapes and value ranges of inputs/targets.
    inputs, targets = next(iter(train_loader))
    print('inputs.size(), inputs.min(), inputs.max()', inputs.size(),
          inputs.min(), inputs.max())
    print('targets.size(), targets.min(), targets.max():', targets.size(),
          targets.min(), targets.max())

    # every time we load weights, which may be slow
    model_cla = network_cla.P3D199(pretrained=True, num_classes=400)
    cla_dict = model_cla.state_dict()

    model = network_seg.P3D199()
    # model.apply(utils.weights_init)
    seg_dict = model.state_dict()

    # Copy over only the pretrained weights whose names exist in the
    # segmentation model; newly added layers keep their init.
    pretrained_dict = {k: v for k, v in cla_dict.items() if k in seg_dict}
    seg_dict.update(pretrained_dict)
    model.load_state_dict(seg_dict)
    model.cuda()

    model = nn.DataParallel(model)

    commen_layers = [
        'conv1_custom', 'bn1', 'relu', 'maxpool', 'maxpool_2', 'layer1',
        'layer2', 'layer3'
    ]
    # seperate layers, to set different lr
    param_exist = []
    param_add = []
    for k, (name, module) in enumerate(model.named_children()):
        # existing layers
        if name in commen_layers:
            # print('existing layer: ', name)
            for param in module.parameters():
                param_exist.append(param)
        # adding layers
        else:
            # print('adding layer: ', name)
            for param in module.parameters():
                param_add.append(param)
    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # Pretrained (existing) layers train with a 10x smaller learning rate.
    optimizer = optim.Adam([{
        'params': param_exist,
        'lr': learn_rate * 0.1
    }, {
        'params': param_add
    }])
    criterion = nn.BCELoss().cuda()

    exp_dir = result_rp + exp_name
    ##!!! existing directory will be removed
    if os.path.exists(exp_dir):
        shutil.rmtree(exp_dir)

    exp = experiment.Experiment(exp_name, result_rp)
    exp.init()

    for epoch in range(num_epochs):

        since = time.time()

        ### Train ###
        trn_loss = utils.train(model, train_loader, optimizer, criterion)
        print('Epoch {:d}: Train - Loss: {:.4f}'.format(epoch, trn_loss))
        time_elapsed = time.time() - since
        print('Train Time {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                  time_elapsed % 60))

        ### Test ###
        val_loss = utils.test(model, val_loader, criterion)
        print('Val - Loss: {:.4f}'.format(val_loss))
        time_elapsed = time.time() - since
        print('Total Time {:.0f}m {:.0f}s\n'.format(time_elapsed // 60,
                                                    time_elapsed % 60))

        ### Save Metrics ###
        exp.save_history('train', trn_loss)
        exp.save_history('val', val_loss)

        ### Checkpoint ###
        exp.save_weights(model, trn_loss, val_loss)
        exp.save_optimizer(optimizer, val_loss)

        ## Early Stopping ##
        if (epoch - exp.best_val_loss_epoch) > max_patience:
            # Fixed: the original mixed %-style placeholders (%d, malformed
            # %.3) with str.format, so the raw placeholders were printed and
            # the arguments were ignored.
            print("Early stopping at epoch {:d}: no val loss better than "
                  "{:.3f} found since epoch {:d}".format(
                      epoch, exp.best_val_loss, exp.best_val_loss_epoch))
            break

        exp.epoch += 1
示例#16
0
            #RandomHorizontalFlip(),
            # RandomRotate(),
            # RandomResize(),
            #crop_method,
            # MultiplyValues(),
            # Dropout(),
            # SaltImage(),
            # Gaussian_blur(),
            # SpatialElasticDisplacement(),
            ToTensor(opt.norm_value)
        ])
        transform_flow = Compose([ToTensor(opt.norm_value)])
        #temporal_transform = TemporalRandomCrop(opt.sample_duration, opt.downsample)
        #target_transform = ClassLabel()
        if opt.model == 'resnet' or opt.model == 'slowfastnet':
            training_data = get_training_set(opt, spatial_transform)
        if opt.model == 'flow':
            training_data = get_flow_training_set(opt, transform_flow)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)
        train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                              ['epoch', 'loss', 'prec1', 'prec5', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'), [
                'epoch', 'batch', 'iter', 'loss', 'prec1', 'prec5', 'lr',
                'correlation'
            ])
示例#17
0
    # Parse command-line options and record the architecture name.
    args = opt()  # read args
    args.arch = "ResNet-{}".format(args.model_depth)  # record which architecture is being run
    spatial_transform = Compose([
        ToTensor(),  # convert each frame (PIL Image) loaded per iteration to a Tensor
    ])
    temporal_transform = TemporalRandomCrop4flow()  # temporal preprocessing; none this time
    target_transform = ClassLabel()  # training target: a label, since this is 2-class classification
    accuracies = AverageMeter()  # accuracy per step and its running average
    # Dump the options to JSON so the run configuration is reproducible.
    with open(os.path.join("./result", 'opts.json'), 'w') as args_file:
        json.dump(vars(args), args_file)

    model = generate_model(args)  # load the model (and pretrained weights, if any)

    criterion = nn.CrossEntropyLoss()  # CrossEntropyLoss as the loss function
    criterion.cuda()  # move the criterion to CUDA
    training_data = get_training_set(args, spatial_transform,
                                     temporal_transform,
                                     target_transform)  # build the dataset fed to the DataLoader
    train_loader = torch.utils.data.DataLoader(training_data,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               pin_memory=True)
    train_logger = Logger("./result/train.log", ["epoch", "loss", "acc", "lr"])
    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate,
                           weight_decay=args.weight_decay)
    # Epochs are 1-indexed; train_epoch handles the per-epoch loop and logging.
    for epoch in range(1, args.n_epoch + 1):
        train_epoch(epoch, train_loader, model, criterion, optimizer, args,
                    train_logger)
    print("finish")
示例#18
0
# Parse command-line options for this GAN training script.
opt = parser.parse_args()

print(opt)

# Fail fast if the user asked for CUDA on a machine without a GPU.
if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

# Let cuDNN auto-tune convolution algorithms for fixed-size inputs.
cudnn.benchmark = True

# Seed CPU (and, when enabled, GPU) RNGs for reproducibility.
torch.manual_seed(opt.seed)
if opt.cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')

train_set = get_training_set()
test_set = get_test_set()

# Shuffle only the training split; evaluation order stays deterministic.
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

print('===> Building model')
# Generator maps input_nc -> output_nc channels. The discriminator's input
# width is input_nc + output_nc, i.e. it sees the input/output images
# concatenated channel-wise (conditional-GAN style) — verify against define_D.
netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])
netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])
示例#19
0
def main():
    """Train a CBAM ResidualNet classifier, with optional checkpoint resume.

    Builds augmented train / plain validation loaders, restores
    model/optimizer/scheduler state from ``opt.resume_path`` when given,
    then runs the training loop, logging to TensorBoard and snapshotting
    whenever the validation loss improves (checked every ``save_interval``
    epochs).
    """
    opt = parse_opts()
    print(opt)

    # Seed all RNGs for reproducibility.
    seed = 0
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device(f"cuda:{opt.gpu}" if use_cuda else "cpu")

    # Training augmentation: flip / colour jitter / noise on top of the
    # ImageNet-normalised 224x224 input.
    train_transform = transforms.Compose([
        #transforms.RandomCrop(32, padding=3),
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=[0.5, 1]),
        # transforms.RandomRotation(180),
        GaussianNoise(0.5),
        # transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    # Evaluation uses resize + normalise only (no augmentation).
    test_transform = transforms.Compose([
        #transforms.RandomCrop(32, padding=3),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # get_training_set/get_validation_set return collections of datasets
    # that are flattened with ConcatDataset.
    training_data = get_training_set(opt, train_transform)
    training_data = ConcatDataset(training_data)
    validation_data = get_validation_set(opt, test_transform)
    validation_data = ConcatDataset(validation_data)

    train_loader = torch.utils.data.DataLoader(training_data,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=0)
    val_loader = torch.utils.data.DataLoader(validation_data,
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=0)
    print(f'Number of training examples: {len(train_loader.dataset)}')
    print(f'Number of validation examples: {len(val_loader.dataset)}')

    # tensorboard
    summary_writer = tensorboardX.SummaryWriter(log_dir='tf_logs')
    # define model
    model = ResidualNet("ImageNet", opt.depth, opt.num_classes, "CBAM")

    # Load the checkpoint exactly once and reuse it below. (The original
    # called torch.load twice on the same file, reading it from disk twice.)
    checkpoint = None
    if opt.resume_path:
        checkpoint = torch.load(opt.resume_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        epoch = checkpoint['epoch']
        print("Model Restored from Epoch {}".format(epoch))
        opt.start_epoch = epoch + 1
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), weight_decay=opt.wt_decay)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=opt.lr_patience)
    if checkpoint is not None:
        # Optimizer/scheduler state can only be restored after both exist.
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

    # Best validation loss seen so far; snapshots are taken only on improvement.
    th = 100000
    # start training
    for epoch in range(opt.start_epoch, opt.epochs + 1):
        # train, test model
        train_loss, train_acc = train_epoch(model, train_loader, criterion,
                                            optimizer, device, opt)
        val_loss, val_acc = val_epoch(model, val_loader, criterion, device)
        scheduler.step(val_loss)

        lr = optimizer.param_groups[0]['lr']

        # saving weights to checkpoint
        if (epoch) % opt.save_interval == 0:
            # write summary
            summary_writer.add_scalar('losses/train_loss',
                                      train_loss,
                                      global_step=epoch)
            summary_writer.add_scalar('losses/val_loss',
                                      val_loss,
                                      global_step=epoch)
            summary_writer.add_scalar('acc/train_acc',
                                      train_acc,
                                      global_step=epoch)
            summary_writer.add_scalar('acc/val_acc',
                                      val_acc,
                                      global_step=epoch)
            summary_writer.add_scalar('lr_rate', lr, global_step=epoch)

            state = {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict()
            }
            if val_loss < th:
                # Robustness: make sure the snapshot directory exists before
                # saving (torch.save does not create parent directories).
                os.makedirs('./snapshots', exist_ok=True)
                torch.save(
                    state,
                    os.path.join('./snapshots', f'{opt.dataset}_model.pth'))
                print("Epoch {} model saved!\n".format(epoch))
                th = val_loss
示例#20
0
                                           crop_positions=['c'])
    elif opt.train_crop == 'driver focus':
        crop_method = DriverFocusCrop(opt.scales, opt.sample_size)
    train_spatial_transform = Compose([
        Scale(opt.sample_size),
        ToTensor(opt.norm_value)  #, norm_method
    ])
    train_temporal_transform = UniformIntervalCrop(opt.sample_duration,
                                                   opt.interval)
    train_target_transform = Compose([
        Scale(opt.sample_size),
        ToTensor(opt.norm_value)  #, norm_method
    ])
    train_horizontal_flip = RandomHorizontalFlip()
    training_data = get_training_set(opt, train_spatial_transform,
                                     train_horizontal_flip,
                                     train_temporal_transform,
                                     train_target_transform)
    train_loader = torch.utils.data.DataLoader(training_data,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.n_threads,
                                               pin_memory=True)
    train_logger = Logger(os.path.join(opt.result_path, 'convlstm-train.log'),
                          ['epoch', 'loss', 'lr'])
    train_batch_logger = Logger(
        os.path.join(opt.result_path, 'convlstm-train_batch.log'),
        ['epoch', 'batch', 'iter', 'loss', 'lr'])

    if opt.nesterov:
        dampening = 0
    else:
示例#21
0
def main_worker(gpu, ngpus_per_node, opt, test_results=None):
    """Per-process entry point for (optionally distributed) train/val/test.

    Args:
        gpu: GPU index assigned to this process (None for default device).
        ngpus_per_node: number of GPUs on this node; used to derive the
            global rank and to split batch size and worker threads.
        opt: parsed options namespace; mutated in place (gpu, rank,
            batch_size, n_threads, N_data, arch, begin_epoch).
        test_results: queue used to gather per-process test outputs when
            running multiprocessing-distributed testing.
    """
    opt.gpu = gpu

    # suppress printing if not master

    if opt.multiprocessing_distributed and opt.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if opt.gpu is not None:
        print("Use GPU: {} for training".format(opt.gpu))

    if opt.distributed:
        if opt.dist_url == "env://" and opt.rank == -1:
            opt.rank = int(os.environ["RANK"])
        if opt.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            opt.rank = opt.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=opt.dist_backend,
                                init_method=opt.dist_url,
                                world_size=opt.world_size,
                                rank=opt.rank)
        # Split the per-node batch and worker threads evenly across GPUs
        # (thread count is rounded up).
        opt.batch_size = int(opt.batch_size / ngpus_per_node)
        opt.n_threads = int(
            (opt.n_threads + ngpus_per_node - 1) / ngpus_per_node)

    # Only the first process on each node creates the result dir and dumps opts.
    if opt.rank % ngpus_per_node == 0:
        if not os.path.exists(opt.result_path):
            os.makedirs(opt.result_path)
        opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
        with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
            json.dump(vars(opt), opt_file)

    if not opt.no_train:
        training_data = get_training_set(opt)
        opt.N_data = len(training_data)

        if opt.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(
                training_data)
        else:
            train_sampler = None
        # shuffle and sampler are mutually exclusive; shuffle only when
        # no distributed sampler is in use.
        train_loader = torch.utils.data.DataLoader(
            training_data,
            batch_size=opt.batch_size,
            shuffle=(train_sampler is None),
            num_workers=opt.n_threads,
            pin_memory=True,
            drop_last=True,
            sampler=train_sampler)

        model, parameters = generate_model(opt)

        # NCECriterion is sized by the dataset length — presumably a
        # noise-contrastive loss for the pretraining phase; confirm in its
        # definition.
        if opt.phase == 'finetuning':
            criterion = nn.CrossEntropyLoss().cuda(opt.gpu)
        elif opt.phase == 'pretraining':
            criterion = NCECriterion(len(training_data)).cuda(opt.gpu)
        else:
            raise NotImplementedError('not implement {} phase'.format(
                opt.phase))

        train_logger = Logger(
            os.path.join(opt.result_path, 'train.log.rank{}'.format(opt.rank)),
            ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path,
                         'train_batch.log.{}'.format(opt.rank)),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

        optimizer = optim.SGD(parameters,
                              lr=opt.learning_rate,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)

        # LR plateau scheduling only applies when finetuning; pretraining
        # adjusts the LR manually each epoch (see adjuest_learning_rate below).
        if opt.phase == 'finetuning':
            scheduler = lr_scheduler.ReduceLROnPlateau(
                optimizer,
                'max',
                patience=opt.lr_patience,
                min_lr=1e-6,
                factor=opt.lr_factor)

    if not opt.no_val:
        validation_data = get_validation_set(opt)
        if opt.distributed:
            val_sampler = torch.utils.data.distributed.DistributedSampler(
                validation_data)
        else:
            val_sampler = None
        val_loader = torch.utils.data.DataLoader(validation_data,
                                                 batch_size=opt.batch_size,
                                                 shuffle=(val_sampler is None),
                                                 num_workers=opt.n_threads,
                                                 pin_memory=True,
                                                 drop_last=True,
                                                 sampler=val_sampler)
        # Finetuning logs top-1/top-5 accuracy; pretraining logs retrieval recall.
        val_logger = Logger(
            os.path.join(opt.result_path, 'val.log.rank{}'.format(opt.rank)),
            ['epoch', 'acc1', 'acc5'] if opt.phase == 'finetuning' else
            ['epoch', 'recall@1', 'recall@10'])

    if opt.test:
        model, parameters = generate_model(opt)

        test_data = get_test_set(opt)
        idx_to_labels = test_data.get_idx_to_label()
        if opt.distributed:
            test_sampler = torch.utils.data.distributed.DistributedSampler(
                test_data, shuffle=False)
        else:
            test_sampler = None
        test_loader = torch.utils.data.DataLoader(
            test_data,
            batch_size=opt.batch_size,
            shuffle=(test_sampler is None),
            num_workers=opt.n_threads,
            pin_memory=True,
            drop_last=False,
            sampler=test_sampler)

    # NOTE(review): if opt.no_train is set and opt.test is not, `model` is
    # never defined and this resume branch would raise NameError — confirm
    # the supported option combinations.
    if opt.resume_path:
        print('==>loading checkpoint {}'.format(opt.resume_path))
        if opt.gpu is None:
            checkpoint = torch.load(opt.resume_path)
        else:
            # Map model to be loaded to specified single gpu.
            loc = 'cuda:{}'.format(opt.gpu)
            checkpoint = torch.load(opt.resume_path, map_location=loc)

        opt.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if not opt.no_train:
            optimizer.load_state_dict(checkpoint['optimizer'])
    else:
        opt.begin_epoch = 1

    torch.backends.cudnn.benchmark = True
    # TensorBoard writing happens only on the first process per node.
    if opt.rank % ngpus_per_node == 0:
        summary_writer = SummaryWriter(log_dir=opt.result_path)
    else:
        summary_writer = None
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            if opt.distributed:
                # Re-seed the sampler so each epoch gets a different shard order.
                train_sampler.set_epoch(i)

            train_epoch(i, train_loader, model, criterion, optimizer, opt,
                        train_logger, train_batch_logger, summary_writer)

        if not opt.no_val:
            if opt.phase == 'finetuning':
                val_acc = val_finetune_epoch(i, val_loader, model, opt,
                                             val_logger, summary_writer)
            elif opt.phase == 'pretraining':
                val_acc = val_pretrain_epoch(i, val_loader, model, opt,
                                             val_logger, summary_writer)
        if not opt.no_train and not opt.no_val:
            if opt.phase == 'finetuning':
                scheduler.step(val_acc)
            elif opt.phase == 'pretraining':
                adjuest_learning_rate(optimizer, i, opt)

    if opt.test:
        test.test(test_loader, model, opt, idx_to_labels, test_results)
        # GPU 0 drains the shared queue: each worker pushes (id, result)
        # pairs and a final -1 sentinel when it is done.
        if opt.multiprocessing_distributed and opt.gpu == 0:
            result_json = {}
            finish_procs = 0
            while (finish_procs < ngpus_per_node):
                rst = test_results.get()
                if rst == -1:
                    finish_procs += 1
                else:
                    result_json[rst[0]] = rst[1]
            with open(
                    os.path.join(opt.result_path,
                                 '{}.json'.format(opt.test_subset)), 'w') as f:
                json.dump({'results': result_json}, f)
示例#22
0
def objective(trial):
    """Optuna objective: train/evaluate a 3D CNN and return the final val loss.

    When ``trial`` is given, weight decay and learning rate are sampled from
    it; otherwise the values already present in the parsed options are used.
    Runs the full train/val (and optional test) pipeline defined by the
    command-line options.
    """
    opt = parse_opts()

    if trial:
        opt.weight_decay = trial.suggest_uniform('weight_decay', 0.01, 0.1)
        # Fixed: the original wrote `1 - 5, 1 - 4` (which evaluates to the
        # arithmetic results -4 and -3) where the scientific-notation
        # literals 1e-5 and 1e-4 were clearly intended.
        opt.learning_rate = trial.suggest_uniform('learning_rate', 1e-5,
                                                  1e-4)

    # Resolve all paths relative to the optional root path.
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    # Build the multi-scale pyramid used by the multi-scale crop methods.
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
        json.dump(vars(opt), opt_file)

    torch.manual_seed(opt.manual_seed)

    model, parameters = generate_model(opt)
    print(model)
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()

    # Choose normalization: none, mean-only, or mean+std.
    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)

    # norm_method = Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

    if not opt.no_train:
        assert opt.train_crop in ['random', 'corner', 'center']
        if opt.train_crop == 'random':
            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(opt.scales,
                                               opt.sample_size,
                                               crop_positions=['c'])
        spatial_transform = Compose([
            crop_method,
            RandomHorizontalFlip(),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(
            training_data,
            batch_size=opt.batch_size,
            # sampler option is mutually exclusive with shuffle
            shuffle=False,
            sampler=ImbalancedDatasetSampler(training_data),
            num_workers=opt.n_threads,
            pin_memory=True)
        train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                              ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

        optimizer = optim.Adam(parameters,
                               lr=opt.learning_rate,
                               weight_decay=opt.weight_decay)
        # Halve the LR (factor = sqrt(0.1) squared over two steps) on plateau.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                               verbose=True,
                                                               factor=0.1**0.5)
    if not opt.no_val:
        # Validation uses deterministic center crop and loop padding.
        spatial_transform = Compose([
            Scale(opt.sample_size),
            CenterCrop(opt.sample_size),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)
        target_transform = ClassLabel()
        validation_data = get_validation_set(opt, spatial_transform,
                                             temporal_transform,
                                             target_transform)
        val_loader = torch.utils.data.DataLoader(
            validation_data,
            batch_size=opt.batch_size,
            shuffle=False,
            sampler=ImbalancedDatasetSampler(validation_data),
            num_workers=opt.n_threads,
            pin_memory=True)
        val_logger = Logger(os.path.join(opt.result_path, 'val.log'),
                            ['epoch', 'loss', 'acc'])

    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']

        opt.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if not opt.no_train:
            optimizer.load_state_dict(checkpoint['optimizer'])

    print('run')
    writer = SummaryWriter(
        comment=
        f"_wd{opt.weight_decay}_lr{opt.learning_rate}_ft_begin{opt.ft_begin_index}_pretrain{not opt.pretrain_path == ''}"
    )
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            epoch, losses_avg, accuracies_avg = train_epoch(
                i, train_loader, model, criterion, optimizer, opt,
                train_logger, train_batch_logger)
            writer.add_scalar('loss/train', losses_avg, epoch)
            writer.add_scalar('acc/train', accuracies_avg, epoch)

        if not opt.no_val:
            epoch, val_losses_avg, val_accuracies_avg = val_epoch(
                i, val_loader, model, criterion, opt, val_logger)
            writer.add_scalar('loss/val', val_losses_avg, epoch)
            writer.add_scalar('acc/val', val_accuracies_avg, epoch)

        if not opt.no_train and not opt.no_val:
            scheduler.step(val_losses_avg)
        print('=' * 100)

    if opt.test:
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = LoopPadding(opt.sample_duration)
        target_transform = VideoID()

        test_data = get_test_set(opt, spatial_transform, temporal_transform,
                                 target_transform)
        test_loader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=opt.batch_size,
                                                  shuffle=False,
                                                  num_workers=opt.n_threads,
                                                  pin_memory=True)
        test.test(test_loader, model, opt, test_data.class_names)

    writer.close()
    # NOTE(review): if opt.no_val is set, val_losses_avg is never assigned
    # and this return raises NameError — confirm the tool is always run
    # with validation enabled.
    return val_losses_avg
示例#23
0
     crop_method,
     #MultiplyValues(),
     #Dropout(),
     #SaltImage(),
     #Gaussian_blur(),
     #SpatialElasticDisplacement(),
     ToTensor(opt.norm_value),
     norm_method
 ])
 temporal_transform = TemporalRandomCrop(opt.sample_duration,
                                         opt.downsample)
 target_transform = ClassLabel()
 # opt.view = "front"
 #A for anomalies, N for normal, all for anomaly+normal
 training_data1 = get_training_set(opt, 'A', opt.view, opt.image_type,
                                   spatial_transform,
                                   temporal_transform, target_transform)
 training_data2 = get_training_set(opt, 'N', opt.view, opt.image_type,
                                   spatial_transform,
                                   temporal_transform, target_transform)
 # print (training_data1.__len__())
 train_loader1 = torch.utils.data.DataLoader(training_data1,
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=opt.n_threads,
                                             pin_memory=True,
                                             drop_last=True)
 train_loader2 = torch.utils.data.DataLoader(training_data2,
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=opt.n_threads,
示例#24
0
def main() -> None:
	"""Entry point: parse options, build data loaders and a ResNet-18
	classifier, optionally resume from a checkpoint, then run the
	epoch loop (the loop body is truncated in this snippet -- only the
	validation call is visible).
	"""
	opt = parse_opts()
	print(opt)

	# Fix all RNG seeds and force deterministic cuDNN for reproducibility.
	seed = 0
	random.seed(seed)
	np.random.seed(seed)
	torch.manual_seed(seed)
	torch.backends.cudnn.deterministic = True
	torch.backends.cudnn.benchmark = False

	# CUDA for PyTorch: pick the GPU selected via --gpu, else fall back to CPU.
	use_cuda = torch.cuda.is_available()
	device = torch.device(f"cuda:{opt.gpu}" if use_cuda else "cpu")

	# Training augmentation: resize + flip + small rotation, ImageNet stats.
	train_transform = transforms.Compose([
		#transforms.RandomCrop(32, padding=3),
		transforms.Resize((opt.img_H, opt.img_W)),
		transforms.RandomHorizontalFlip(),
		transforms.RandomRotation(10),
		transforms.ToTensor(),
		transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
			0.229, 0.224, 0.225])
	])
	# Evaluation transform: resize + normalize only (no augmentation).
	test_transform = transforms.Compose([
		#transforms.RandomCrop(32, padding=3),
		transforms.Resize((opt.img_H, opt.img_W)),
		transforms.ToTensor(),
		transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
			0.229, 0.224, 0.225])
	])

	training_data = get_training_set(opt, train_transform)
	validation_data = get_validation_set(opt, test_transform)
	
	# NOTE(review): the `validation_data` loaded above is immediately
	# discarded -- the split below overwrites it with 20% of the TRAINING
	# data (which therefore carries train_transform). Confirm this is
	# intentional.
	n_train_examples = int(len(training_data)*0.8)
	n_valid_examples = len(training_data) - n_train_examples
	# split data
	training_data, validation_data = torch.utils.data.random_split(training_data, [n_train_examples, n_valid_examples])

	train_loader = torch.utils.data.DataLoader(training_data,
											   batch_size=opt.batch_size,
											   shuffle=True,
											   num_workers=1)
	# NOTE(review): shuffle=True on the validation loader is unusual;
	# harmless for metrics but makes per-batch logs non-deterministic.
	val_loader = torch.utils.data.DataLoader(validation_data,
											 batch_size=opt.batch_size,
											 shuffle=True,
											 num_workers=1)
	print(f'Number of training examples: {len(train_loader.dataset)}')
	print(f'Number of validation examples: {len(val_loader.dataset)}')

	# tensorboard
	summary_writer = tensorboardX.SummaryWriter(log_dir='tf_logs')
	# define model
	model = resnet18(num_classes=opt.num_classes)

	# if torch.cuda.device_count() > 1:
	#   	print("Let's use", torch.cuda.device_count(), "GPUs!")
  	# 	model = nn.DataParallel(model)
	model = model.to(device)

	# Nesterov momentum requires zero dampening in torch.optim.SGD
	# (dead code below -- SGD is commented out, Adam is used instead).
	if opt.nesterov:
		dampening = 0
	else:
		dampening = opt.dampening
	#define optimizer and criterion
	# optimizer = optim.Adam(model.parameters())
	# optimizer = optim.SGD(
	# 		model.parameters(),
	# 		lr=opt.learning_rate,
	# 		momentum=opt.momentum,
	# 		dampening=dampening,
	# 		weight_decay=opt.weight_decay,
	# 		nesterov=opt.nesterov)
	# scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=opt.lr_patience)
	# criterion = nn.CrossEntropyLoss()
	# define optimizer and criterion
	optimizer = optim.Adam(model.parameters())
	# loss function
	# NOTE(review): BCEWithLogitsLoss implies multi-label / one-hot float
	# targets -- confirm val_epoch/train code supplies matching targets.
	criterion = BCEWithLogitsLoss()

	# resume model, optimizer if already exists
	if opt.resume_path:
		checkpoint = torch.load(opt.resume_path)
		model.load_state_dict(checkpoint['model_state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
		epoch = checkpoint['epoch']
		print("Model Restored from Epoch {}".format(epoch))
		start_epoch = epoch + 1
	else:
		start_epoch = 1

	# start training
	#th = 10000
	for epoch in range(start_epoch, opt.epochs+1):
		val_loss, val_mAP = val_epoch(model, val_loader, criterion, device, opt)
示例#25
0
    # Build the multi-scale pyramid for cropping: each successive scale is
    # the previous one multiplied by scale_step (opt.scales[0] is set
    # before this fragment).
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    # Architecture tag, e.g. "resnet-18".
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    print(opt)

    torch.manual_seed(opt.manual_seed)
    model, parameters = generate_model(opt)
    print(model)

    # Classification loss; moved to the GPU only when CUDA is enabled.
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()
    # NOTE(review): the pose regression loss is placed on the GPU
    # unconditionally -- this will fail when --no_cuda is set; consider
    # guarding it like `criterion` above.
    criterion_pose = nn.L1Loss().cuda()
    if not opt.no_train:

        training_data = get_training_set(opt)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)
        # Per-epoch and per-batch CSV-style loggers in the result directory.
        train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                              ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

        # Nesterov momentum requires zero dampening in torch.optim.SGD.
        if opt.nesterov:
            dampening = 0
        else:
            dampening = opt.dampening
# Fail fast if --cuda was requested but no GPU is actually available.
if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

# cudnn.benchmark lets cuDNN auto-tune and cache the fastest convolution
# algorithms for this hardware; recommended when the network structure
# (and input sizes) are fixed during training.
cudnn.benchmark = True
# Seed the RNGs (CPU, and GPU when enabled) for reproducibility.
torch.manual_seed(opt.seed)
if opt.cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
root_path = "dataset/"
# Helpers from data.py: pass the dataset folder and the translation
# direction on to the DatasetFromFolder class.
train_set = get_training_set(root_path + opt.dataset, opt.direction)
test_set = get_test_set(root_path + opt.dataset, opt.direction)
# Training loader shuffles; test loader is deterministic and may use a
# different batch size.
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.test_batch_size,
                                 shuffle=False)

device = torch.device("cuda:0" if opt.cuda else "cpu")
# Build the generator/discriminator models.
print('===> Building models')
net_g = define_G(opt.input_nc,
示例#27
0
File: main.py  Project: hangxu124/3DAE
            #RandomHorizontalFlip(),
            #RandomRotate(),
            #RandomResize(),
            #crop_method,
            #MultiplyValues(),
            #Dropout(),
            #SaltImage(),
            #Gaussian_blur(),
            #SpatialElasticDisplacement(),
            ToTensor(opt.norm_value)  #, norm_method
        ])
        # Randomly crop `sample_duration` consecutive frames from each
        # video, optionally skipping frames according to `downsample`.
        temporal_transform = TemporalRandomCrop(opt.sample_duration,
                                                opt.downsample)
        target_transform = ClassLabel()

        # 'A' / 'N' select two class subsets (anomalous / normal per the
        # sibling snippet's convention -- confirm in get_training_set),
        # both from the 'top' camera view with identical transforms.
        training_data1 = get_training_set(opt, 'A', 'top', spatial_transform,
                                          temporal_transform, target_transform)
        training_data2 = get_training_set(opt, 'N', 'top', spatial_transform,
                                          temporal_transform, target_transform)
        #print (training_data1.__len__())
        # One shuffled loader per subset; drop_last keeps batch sizes fixed.
        train_loader1 = torch.utils.data.DataLoader(training_data1,
                                                    batch_size=opt.batch_size,
                                                    shuffle=True,
                                                    num_workers=opt.n_threads,
                                                    pin_memory=True,
                                                    drop_last=True)
        train_loader2 = torch.utils.data.DataLoader(training_data2,
                                                    batch_size=opt.batch_size,
                                                    shuffle=True,
                                                    num_workers=opt.n_threads,
                                                    pin_memory=True,
                                                    drop_last=True)
示例#28
0
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(opt.scales,
                                               opt.sample_size,
                                               crop_positions=['c'])
        # Spatial pipeline: multi-scale crop, random horizontal flip,
        # tensor conversion (scaled by norm_value), then normalization.
        spatial_transform = Compose([
            crop_method,
            RandomHorizontalFlip(),
            ToTensor(opt.norm_value), norm_method
        ])
        # Random clip of `sample_duration` frames per video.
        temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        training_data = get_training_set(opt,
                                         spatial_transform,
                                         temporal_transform,
                                         target_transform,
                                         image_type=opt.image_type)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)
        # Per-epoch and per-batch CSV-style loggers in the result directory.
        train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                              ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

        # Nesterov momentum requires zero dampening in torch.optim.SGD.
        if opt.nesterov:
            dampening = 0
示例#29
0
        # Choose the training-time cropping strategy:
        #   'random' -> multi-scale crop at a random position
        #   'corner' -> multi-scale crop sampled from corners/center
        #   'center' -> corner-crop restricted to the center position only
        if opt.train_crop == 'random':
            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(opt.scales,
                                               opt.sample_size,
                                               crop_positions=['c'])
        # Spatial pipeline: crop, random flip, tensor conversion, normalize.
        spatial_transform = Compose([
            crop_method,
            RandomHorizontalFlip(),
            ToTensor(opt.norm_value), norm_method
        ])
        # Random clip of `sample_duration` frames per video.
        temporal_transform = TemporalRandomCrop(opt.sample_duration)
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)
        # Per-epoch and per-batch CSV-style loggers in the result directory.
        train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                              ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

        # Nesterov momentum requires zero dampening in torch.optim.SGD.
        if opt.nesterov:
            dampening = 0
        else:
            dampening = opt.dampening
示例#30
0
def main() -> None:
    """Entry point: configure paths and transforms, build a 3D-CNN
    classifier, train with optional k-fold cross-validation (rotating
    the train/val annotation files every few epochs), keep the best
    checkpoint by validation accuracy, and save loss/accuracy plots.
    """
    opt = parse_opts()
    # Path configurations
    opt.annotation_path = os.path.join(opt.annotation_directory,
                                       opt.annotation_path)
    save_result_dir_name = \
        os.path.join(opt.result_path,
                     get_prefix() + '_{}{}_{}_epochs'.format(opt.model, opt.model_depth, opt.n_epochs))
    # os.mkdir (not makedirs): the parent result directory must already exist.
    if not os.path.exists(save_result_dir_name):
        os.mkdir(save_result_dir_name)
    # NOTE(review): save_result_dir_name already starts with opt.result_path,
    # so this join prepends result_path a second time (e.g. "res/res/...").
    # Confirm intent -- `opt.result_path = save_result_dir_name` looks right.
    opt.result_path = os.path.join(opt.result_path, save_result_dir_name)

    # For data generator: build the multi-scale crop pyramid.
    # NOTE(review): the loop variable is named `epoch` but iterates over
    # scale indices (compare the sibling snippet which uses `i`).
    opt.scales = [opt.initial_scale]
    for epoch in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)

    # Model
    model, parameters = generate_model(opt)
    # print(model)

    # Loss function (moved to GPU only when CUDA is enabled)
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()

    # Normalizing: dataset mean/std, or identity normalization.
    if not opt.no_mean_norm:
        opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
        opt.std = get_std(opt.norm_value, dataset=opt.std_dataset)
        norm_method = Normalize(opt.mean, opt.std)
    else:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])

    # Persist the full option set alongside the results for reproducibility.
    print(opt)
    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
        json.dump(vars(opt), opt_file)

    # **************************** TRAINING CONFIGURATIONS ************************************
    assert opt.train_crop in ['corner', 'center']
    if opt.train_crop == 'corner':
        crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
    elif opt.train_crop == 'center':
        # Corner-crop restricted to the center position only.
        crop_method = MultiScaleCornerCrop(opt.scales,
                                           opt.sample_size,
                                           crop_positions=['c'])

    # Spatial transform (crop, tensor conversion, normalization)
    spatial_transform = Compose([
        crop_method,
        #RandomHorizontalFlip(),
        ToTensor(opt.norm_value),
        norm_method
    ])
    # Temporal transform: random clip of `sample_duration` frames
    temporal_transform = TemporalRandomCrop(opt.sample_duration)
    # Target transform: plain class labels
    target_transform = ClassLabel()

    # One training loader per annotation file when cross-validating,
    # otherwise a single loader from the configured annotation path.
    train_loader_list = []
    if not opt.no_cross_validation:
        annotation_list = os.listdir(opt.annotation_directory)
        for annotation in annotation_list:
            opt.annotation_path = os.path.join(opt.annotation_directory,
                                               annotation)
            training_data = get_training_set(opt, spatial_transform,
                                             temporal_transform,
                                             target_transform)
            train_loader = torch.utils.data.DataLoader(
                training_data,
                batch_size=opt.batch_size,
                shuffle=True,
                num_workers=opt.n_threads,
                pin_memory=True)
            train_loader_list.append(train_loader)
    else:
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(training_data,
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.n_threads,
                                                   pin_memory=True)
        train_loader_list.append(train_loader)

    # Per-epoch and per-batch CSV-style loggers.
    train_logger = Logger(os.path.join(opt.result_path, 'train.log'),
                          ['epoch', 'loss', 'acc', 'lr'])
    train_batch_logger = Logger(
        os.path.join(opt.result_path, 'train_batch.log'),
        ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

    optimizer = optim.SGD(parameters,
                          lr=opt.learning_rate,
                          momentum=opt.momentum,
                          dampening=opt.dampening,
                          weight_decay=opt.weight_decay)

    # Reduce LR when validation loss plateaus.
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               'min',
                                               patience=opt.lr_patience)

    # ***************************** VALIDATION CONFIGURATIONS *********************************
    # Deterministic validation pipeline: scale + center crop, no augmentation.
    spatial_transform = Compose([
        Scale(opt.sample_size),
        CenterCrop(opt.sample_size),
        ToTensor(opt.norm_value), norm_method
    ])
    temporal_transform = LoopPadding(opt.sample_duration)
    target_transform = ClassLabel()

    # Mirror the training setup: one validation loader per annotation file
    # when cross-validating, else a single loader.
    val_loader_list = []
    if not opt.no_cross_validation:
        annotation_list = os.listdir(opt.annotation_directory)
        for annotation in annotation_list:
            opt.annotation_path = os.path.join(opt.annotation_directory,
                                               annotation)
            validation_data = get_validation_set(opt, spatial_transform,
                                                 temporal_transform,
                                                 target_transform)
            val_loader = torch.utils.data.DataLoader(validation_data,
                                                     batch_size=opt.batch_size,
                                                     shuffle=False,
                                                     num_workers=opt.n_threads,
                                                     pin_memory=True)
            val_loader_list.append(val_loader)
    else:
        validation_data = get_validation_set(opt, spatial_transform,
                                             temporal_transform,
                                             target_transform)
        val_loader = torch.utils.data.DataLoader(validation_data,
                                                 batch_size=opt.batch_size,
                                                 shuffle=False,
                                                 num_workers=opt.n_threads,
                                                 pin_memory=True)
        val_loader_list.append(val_loader)

    val_logger = Logger(os.path.join(opt.result_path, 'val.log'),
                        ['epoch', 'loss', 'acc'])

    # **************************************** TRAINING ****************************************
    epoch_avg_time = AverageMeter()
    train_loss_list = []
    train_acc_list = []
    valid_acc_list = []
    best_accuracy = 0
    current_train_data = 0
    current_valid_data = 0
    # Switch folds every ceil(n_epochs / n_cross_validation_sets) epochs
    # (the "+ 0.5" before round implements ceiling division).
    opt.frequence_cross_validation = round(opt.n_epochs /
                                           opt.n_cross_validation_sets + 0.5)

    for epoch in range(opt.begin_epoch, opt.n_epochs + 1):
        epoch_start_time = time.time()
        print('Epoch #' + str(epoch))

        # optimizer = regulate_learning_rate(optimizer, epoch, opt.frequence_regulate_lr)

        # Rotate to the next training fold when the switch interval elapses.
        train_loader = train_loader_list[current_train_data]
        if not opt.no_cross_validation and epoch % opt.frequence_cross_validation == 0:
            print('\t##### Cross-validation: switch training data #####')
            current_train_data = (current_train_data +
                                  1) % len(train_loader_list)
            train_loader = train_loader_list[current_train_data]
        train_loss, train_acc = train_epoch(epoch, train_loader, model,
                                            criterion, optimizer, opt,
                                            train_logger, train_batch_logger)

        # Rotate the validation fold in lockstep with the training fold.
        val_loader = val_loader_list[current_valid_data]
        if not opt.no_cross_validation and epoch % opt.frequence_cross_validation == 0:
            print('\t##### Cross-validation: switch validation data #####')
            current_valid_data = (current_valid_data +
                                  1) % len(val_loader_list)
            val_loader = val_loader_list[current_valid_data]
        validation_acc = val_epoch(epoch, val_loader, model, criterion, opt,
                                   val_logger)

        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        valid_acc_list.append(validation_acc)

        # Save model with best accuracy
        if validation_acc > best_accuracy:
            best_accuracy = validation_acc
            save_file_path = os.path.join(opt.result_path, 'best_model.pth')
            states = {
                'epoch': epoch + 1,
                'arch': opt.arch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(states, save_file_path)

        # Rough ETA: average epoch wall time times the remaining epochs.
        epoch_end_time = time.time() - epoch_start_time
        epoch_avg_time.update(epoch_end_time)
        print('\tTime left: ' +
              str(round(epoch_avg_time.avg *
                        (opt.n_epochs - epoch) / 60, 1)) + ' minutes')

    # ******************************* SAVING RESULTS OF TRAINING ******************************
    save_pictures(np.linspace(1, opt.n_epochs, opt.n_epochs),
                  train_loss_list, 'red', 'Loss',
                  os.path.join(opt.result_path, 'train_loss.png'))
    save_pictures(np.linspace(1, opt.n_epochs, opt.n_epochs), train_acc_list,
                  'blue', 'Accuracy',
                  os.path.join(opt.result_path, 'train_accuracy.png'))
    save_pictures(np.linspace(1, opt.n_epochs, opt.n_epochs), valid_acc_list,
                  'blue', 'Accuracy',
                  os.path.join(opt.result_path, 'validation_accuracy.png'))