def train(model_label, nb_steps_toplayer, nb_steps_finetune, training_data_dir,
          validation_data_dir, img_width, img_height, batch, nb_classes,
          shuffle_flag, learning_rate, nb_training_samples,
          nb_validation_samples, directory_location):

    os.makedirs(directory_location, exist_ok=True)

    if nb_classes == 2:
        classmode, losstype = "binary", "binary_crossentropy"
    elif nb_classes > 2:
        classmode, losstype = "categorical", "categorical_crossentropy"

    train, validate = build_data(training_data_dir, validation_data_dir,
                                 img_width, img_height, batch, nb_classes,
                                 shuffle_flag, classmode)

    early_stop = EarlyStopping(monitor='val_loss', patience=3)

    tbCallBack = keras.callbacks.TensorBoard(log_dir='Graph_finetune',
                                             histogram_freq=0,
                                             write_graph=True,
                                             write_images=True)

    if model_label == "vgg16":
        vgg16(train, validate, losstype, nb_steps_toplayer, nb_steps_finetune,
              learning_rate, tbCallBack, early_stop, nb_classes,
              nb_training_samples, nb_validation_samples, directory_location)

    elif model_label == "vgg19":
        vgg19(train, validate, losstype, nb_steps_toplayer, nb_steps_finetune,
              learning_rate, tbCallBack, early_stop, nb_classes,
              nb_training_samples, nb_validation_samples, directory_location)
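A hypothetical invocation of the train() wrapper above; every path, count, and hyperparameter here is a placeholder for illustration, not a value from the original project:

# Placeholder arguments for illustration only.
train(model_label="vgg16", nb_steps_toplayer=10, nb_steps_finetune=30,
      training_data_dir="data/train", validation_data_dir="data/val",
      img_width=224, img_height=224, batch=32, nb_classes=2,
      shuffle_flag=True, learning_rate=1e-4, nb_training_samples=2000,
      nb_validation_samples=800, directory_location="runs/vgg16")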
Example #2
def model_config(net_type, num_classes, OOD_num_classes):
    if net_type == "resnet50":
        model = models.resnet50(num_c=num_classes,
                                num_cc=OOD_num_classes,
                                pretrained=True)
    elif net_type == "resnet34":
        model = models.resnet34(num_c=num_classes,
                                num_cc=OOD_num_classes,
                                pretrained=True)
    elif net_type == "vgg19":
        model = models.vgg19(num_c=num_classes,
                             num_cc=OOD_num_classes,
                             pretrained=True)
    elif net_type == "vgg16":
        model = models.vgg16(num_c=num_classes,
                             num_cc=OOD_num_classes,
                             pretrained=True)
    elif net_type == "vgg19_bn":
        model = models.vgg19_bn(num_c=num_classes,
                                num_cc=OOD_num_classes,
                                pretrained=True)
    elif net_type == "vgg16_bn":
        model = models.vgg16_bn(num_c=num_classes,
                                num_cc=OOD_num_classes,
                                pretrained=True)
    return model
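A minimal usage sketch of model_config() above, assuming the project-specific models package (with its num_c/num_cc keyword arguments) is importable; the class counts are placeholders:

# Hypothetical call; 120 in-distribution classes and 20 OOD classes are
# placeholder values, not the project's settings.
model = model_config(net_type="vgg16_bn", num_classes=120, OOD_num_classes=20)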
Example #3
def main(args):

    print('\nPreparing {} data'.format(args.dataset))
    if args.dataset == 'mnist':
        (X_train, y_train), (X_test, y_test), (X_valid, y_valid) = load_mnist()
    elif args.dataset == 'cifar10':
        (X_train, y_train), (X_test, y_test), (X_valid, y_valid) = load_cifar10()
    elif args.dataset == 'cifar100':
        (X_train, y_train), (X_test, y_test), (X_valid, y_valid) = load_cifar100()

    print('\nConstructing graph')
    if args.model == 'cnn_1':
        env = cnn_1(args)
    elif args.model == 'cnn_2':
        env = cnn_2(args)
    elif args.model == 'vgg16':
        env = vgg16(args)
    elif args.model == 'vgg19':
        env = vgg19(args)

    print('\nInitializing graph')
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    print('\nTraining')
    name = '{0}_{1}'.format(args.model, args.dataset)
    train(sess, env, X_train, y_train, X_valid, y_valid, batch_size=args.batch_size,
                                            epochs=args.epochs, name=name)

    print('\nEvaluating on clean data')
    evaluate(sess, env, X_test, y_test)
Example #4
def get_network(args, depth=10, width=10):
    """ return given network
    """
    if args.task == 'cifar10':
        nclass = 10
    elif args.task == 'cifar100':
        nclass = 100
    # Yang added non-BN VGG variants
    if args.net == 'vgg11':
        if args.batch_norm:
            net = vgg11_bn(num_classes=nclass)
        else:
            net = vgg11(num_classes=nclass)
    elif args.net == 'vgg13':
        if args.batch_norm:
            net = vgg13_bn(num_classes=nclass)
        else:
            net = vgg13(num_classes=nclass)
    elif args.net == 'vgg16':
        if args.batch_norm:
            net = vgg16_bn(num_classes=nclass)
        else:
            net = vgg16(num_classes=nclass)
    elif args.net == 'vgg19':
        if args.batch_norm:
            net = vgg19_bn(num_classes=nclass)
        else:
            net = vgg19(num_classes=nclass)

    elif args.net == 'resnet':
        net = resnet(num_classes=nclass, depth=depth, width=width)
    # elif args.net == 'resnet34':
    #     net = resnet34(num_classes=nclass)
    # elif args.net == 'resnet50':
    #     net = resnet50(num_classes=nclass)
    # elif args.net == 'resnet101':
    #     net = resnet101(num_classes=nclass)
    # elif args.net == 'resnet152':
    #     net = resnet152(num_classes=nclass)

    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if args.gpu:  #use_gpu
        net = net.cuda()

    return net
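A hypothetical call to get_network() above using an argparse-style namespace; the field names mirror the ones the function reads, and the values are placeholders:

from argparse import Namespace

# Placeholder arguments; set gpu=True only when CUDA is actually available.
args = Namespace(task='cifar10', net='vgg16', batch_norm=True, gpu=False)
net = get_network(args, depth=10, width=10)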
Example #5
def main(args):

    print('\nPreparing {} data'.format(args.dataset))
    if args.dataset == 'mnist':
        (X_train, y_train), (X_test, y_test), (X_valid, y_valid) = load_mnist()
    elif args.dataset == 'cifar10':
        (X_train, y_train), (X_test, y_test), (X_valid,
                                               y_valid) = load_cifar10()
    elif args.dataset == 'cifar100':
        (X_train, y_train), (X_test, y_test), (X_valid,
                                               y_valid) = load_cifar100()

    print('\nConstructing graph')
    if args.model == 'cnn_1':
        env = cnn_1(args)
    elif args.model == 'cnn_2':
        env = cnn_2(args)
    elif args.model == 'vgg16':
        env = vgg16(args)
    elif args.model == 'vgg19':
        env = vgg19(args)

    print('\nInitializing graph')
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    print('\nLoading saved model')
    name = '{0}_{1}'.format(args.model, args.dataset)
    env.saver.restore(sess, 'models/{0}/{1}'.format(name, name))

    print('\nEvaluating on clean data')
    evaluate(sess, env, X_test, y_test)

    print('\nExcluding misclassified samples')
    # mnist 1000 samples -> 0:1010
    # cifar10 1000 samples -> 0:
    (X_test, y_test) = exclude_miss(sess, env, X_test, y_test, 0, 12)
    evaluate(sess, env, X_test, y_test)

    print('\nGenerating adversarial data')
    X_adv = make_adv(args, sess, env, X_test, y_test)

    print('\nEvaluating on adversarial data')
    evaluate(sess, env, X_adv, y_test)

    print('\nResults')
Example #6
def main():
    # Keep track of elapsed time for Slurm.
    time_begin = time.time()
    # Handle arguments.
    args = proc_args()
    if args.subparser1 == 'train':
        # Name of the file storing the epoch and sample number if the program ends before completion due to Slurm's time limit.
        tempfile = 'tmp0'
        # Get the epoch and sample number or start from the beginning.
        e, b = read_temp_file(tempfile)
        # Load a saved CRN+VGG19 file to work on.
        training_model = load_model(args.load,
                                    custom_objects={
                                        'normalize_crn_output':
                                        models.normalize_crn_output
                                    })
        # Load the pretrained VGG19 to generate the labels.
        vgg = load_model(args.vgg)
        # Get the number of samples in the dataset, so it knows when the epoch will end.
        data_size = size_data(args.semantic)
        # Count the number of epochs so far.
        while e < args.epochs:
            # Count the number of samples processed so far.
            while b < data_size:
                batch_begin = time.time()
                # Slurm allocated 48 minutes. Graceful exit at 40 minutes for an 8 minute buffer. Each training batch has a long running time.
                if (time.time() - time_begin) >= (60 * 40):
                    os.remove(args.save)
                    os.rename(args.load, args.save)
                    # Save weights, architecture, training configuration, and optimization state.
                    training_model.save(args.load)
                    # Save where it left off.
                    write_temp_file(tempfile, e, b)
                    # Exit.
                    return
                # Load a batch of semantic layouts.
                data = load_data(args.semantic, b, args.batchsize)
                # Load a batch of ground truth images.
                raw_labels = load_images(args.truth, b, args.batchsize)
                # Use VGG19 to produce the labels from ground truth images.
                labels = vgg.predict(raw_labels)
                # Train a batch.
                training_model.train_on_batch(x=data, y=labels)
                print("batch time: %d" % (time.time() - batch_begin))
                # Increment the sample counter by the batch size.
                b += args.batchsize
            # When the current epoch is finished, set the sample counter to zero.
            b = 0
            # Increment the epoch counter.
            e += 1
        # When all the epochs finished running, save the model.
        training_model.save(args.save)
        # Remove the epoch/sample counter file since it completed the requested number of epochs.


#        if os.path.isfile(tempfile):
#            os.remove(tempfile)
    elif args.subparser1 == 'generate':
        # Load the CRN+VGG19 architecture and extract CRN from it.
        custom_object = {'normalize_crn_output': models.normalize_crn_output}
        crn_vgg = load_model(args.load, custom_objects=custom_object)
        testing_model = models.extract_crn(crn_vgg)
        # Synthesize and save all images.
        for i in range(size_data(args.semantic)):
            # Stop if Slurm time limit.
            if (time.time() - time_begin) >= (60 * 45):
                return
            data = load_data(args.semantic, i, 1)
            result = testing_model.predict(data)
            filename = "%s/%05d.png" % (args.synthesized, i)
            cv2.imwrite(filename, result[0])
    elif args.subparser1 == 'prepare':
        if args.subparser2 == 'crn':
            # This program needs an initial saved model to work with. Save an untrained CRN+VGG19 model.
            models.combine_crn_vgg19(models.crn256(),
                                     load_model(args.vgg)).save(args.save)
        elif args.subparser2 == 'vgg':
            # The prepcrn subcommand requires a pretrained VGG19 save file to work with.
            models.vgg19(256, 512).save(args.save)
    return
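The Slurm checkpointing above depends on read_temp_file() and write_temp_file(), which are not shown; a minimal sketch of what they might look like, assuming a simple two-integer text format (hypothetical, not the project's actual helpers):

def read_temp_file(path):
    # Return the (epoch, sample) counters saved by a previous run, or (0, 0)
    # when starting from scratch.
    try:
        with open(path) as f:
            e, b = (int(x) for x in f.read().split())
        return e, b
    except (IOError, ValueError):
        return 0, 0


def write_temp_file(path, e, b):
    # Persist the current epoch and sample counters for the next Slurm job.
    with open(path, 'w') as f:
        f.write('{} {}'.format(e, b))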
Example #7
    def setup(self):
        args = self.args
        sub_dir = '{}_input-{}_wot-{}_wtv-{}_reg-{}_nIter-{}_normCood-{}'.format(
            args.dataset, args.crop_size, args.wot, args.wtv, args.reg,
            args.num_of_iter_in_ot, args.norm_cood)

        self.save_dir = os.path.join(args.out_path, 'ckpts', sub_dir)
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

        time_str = datetime.strftime(datetime.now(), '%m%d-%H%M%S')
        self.logger = log_utils.get_logger(
            os.path.join(self.save_dir, 'train-{:s}.log'.format(time_str)))
        log_utils.print_config(vars(args), self.logger)

        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            assert self.device_count == 1
            self.logger.info('Using {} gpus'.format(self.device_count))
        else:
            raise Exception("Gpu is not available")

        dataset_name = args.dataset.lower()
        if dataset_name == 'qnrf':
            from datasets.crowd import Crowd_qnrf as Crowd
        elif dataset_name == 'nwpu':
            from datasets.crowd import Crowd_nwpu as Crowd
        elif dataset_name == 'sha':
            from datasets.crowd import Crowd_sh as Crowd
        elif dataset_name == 'shb':
            from datasets.crowd import Crowd_sh as Crowd
        else:
            raise NotImplementedError

        downsample_ratio = 8
        self.datasets = {
            'train':
            Crowd(os.path.join(args.data_path,
                               DATASET_PATHS[dataset_name]["train_path"]),
                  crop_size=args.crop_size,
                  downsample_ratio=downsample_ratio,
                  method='train'),
            'val':
            Crowd(os.path.join(args.data_path,
                               DATASET_PATHS[dataset_name]["val_path"]),
                  crop_size=args.crop_size,
                  downsample_ratio=downsample_ratio,
                  method='val')
        }

        self.dataloaders = {
            x: DataLoader(self.datasets[x],
                          collate_fn=(train_collate
                                      if x == 'train' else default_collate),
                          batch_size=(args.batch_size if x == 'train' else 1),
                          shuffle=(True if x == 'train' else False),
                          num_workers=args.num_workers * self.device_count,
                          pin_memory=(True if x == 'train' else False))
            for x in ['train', 'val']
        }
        self.model = vgg19()
        self.model.to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=args.lr,
                                    weight_decay=args.weight_decay)

        self.start_epoch = 0
        if args.resume:
            self.logger.info('loading pretrained model from ' + args.resume)
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(
                    checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                self.model.load_state_dict(torch.load(args.resume,
                                                      self.device))
        else:
            self.logger.info('random initialization')

        self.ot_loss = OT_Loss(args.crop_size, downsample_ratio,
                               args.norm_cood, self.device,
                               args.num_of_iter_in_ot, args.reg)
        self.tv_loss = nn.L1Loss(reduction='none').to(self.device)
        self.mse = nn.MSELoss().to(self.device)
        self.mae = nn.L1Loss().to(self.device)
        self.save_list = Save_Handle(max_num=1)
        self.best_mae = np.inf
        self.best_mse = np.inf
        self.best_count = 0
Example #8
                               device,
                               aleatoric=args.aleatoric,
                               max_epoch=args.epochs)
        gt_trainer.train()

    elif args.method == "bayes":
        sigma = 10
        use_background = args.use_bg
        background_ratio = 1
        crop_size = 256

        if args.model == "csrnet":
            bayes_net = CSRNet().to(device)
            downsample_ratio = 1
        elif args.model == "vgg19_extended":
            bayes_net = vgg19().to(device)
            downsample_ratio = 8
        else:
            print('model should be csrnet or vgg19_extended')

        post_prob = PostProb(sigma, crop_size, downsample_ratio,
                             background_ratio, use_background, device)
        loss = BayesLoss(use_background, device)
        optimizer = torch.optim.Adam(bayes_net.parameters(), lr=args.lr)
        bayes_trainer = BayesTrainer(loading_data_Bayes,
                                     bayes_net,
                                     loss,
                                     optimizer,
                                     device,
                                     post_prob,
                                     aleatoric=args.aleatoric,
Example #9
def main():
    global best_prec1, args, use_gpu
    args = parser.parse_args()
    use_gpu = torch.cuda.is_available()

    # load data
    traindir = './data/new_train'
    valdir = './data/new_test'
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalizer
        ])),
                                               batch_size=32,
                                               shuffle=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(), normalizer
        ])),
                                             batch_size=16,
                                             shuffle=False)

    # create model
    if args.arch == 'vgg19':
        vgg19 = models.vgg19(pretrained=True)
        model = Vgg19Ft(vgg19, args.num_classes)
        ignored_params = list(
            map(id,
                list(model.classifier.children())[6].parameters()))
        base_params = filter(lambda x: id(x) not in ignored_params,
                             model.parameters())

        optimizer = optim.SGD(
            [{
                'params': base_params
            }, {
                'params': list(model.classifier.children())[6].parameters(),
                'lr': args.lr
            }],
            args.lr * 0.1,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
    else:
        if args.arch == 'resnet152':
            model = models.resnet152(pretrained=True)
            model.fc = nn.Linear(model.fc.in_features, args.num_classes)
            ignored_params = list(map(id, model.fc.parameters()))
            base_params = filter(lambda x: id(x) not in ignored_params,
                                 model.parameters())
            optimizer = optim.SGD([{
                'params': base_params
            }, {
                'params': model.fc.parameters(),
                'lr': args.lr
            }],
                                  args.lr * 0.1,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
        else:
            print('please choose a reasonable model architecture')
            return

    criterion = nn.CrossEntropyLoss()

    if use_gpu:
        model.cuda()
        criterion.cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("Loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("Loaded checkpoint '{}' (epoch {})".format(
                args.resume, args.start_epoch))
        else:
            print("no checkpoint found at '{}'".format(args.resume))

    for epoch in range(args.start_epoch + 1, args.epochs + 1):
        train_model(model, train_loader, criterion, optimizer,
                    exp_lr_scheduler, epoch)
        prec1 = test_model(model, val_loader, criterion)
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        if epoch % args.save_freq == 0:
            save_checkpoint(
                {
                    'epoch': epoch,
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_prec1': best_prec1
                }, epoch, False)
        if is_best:
            save_checkpoint(
                {
                    'epoch': epoch,
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_prec1': best_prec1
                }, epoch, is_best)
            print('best prec1: {}'.format(best_prec1))
Example #10
    def val_epoch(self):
        args = self.args
        counter_dir = os.path.join(
            args.data_dir, 'test_data',
            'base_dir_metric_{}'.format(args.counter_type))
        if not os.path.exists(counter_dir):
            os.makedirs(counter_dir)
        epoch_start = time.time()
        self.model.eval()  # Set model to evaluate mode
        epoch_res = []
        for inputs, count, name in self.dataloaders['val']:
            inputs = inputs.to(self.device)
            assert inputs.size(
                0) == 1, 'the batch size should be 1 in validation mode'
            with torch.set_grad_enabled(False):
                outputs, _ = self.model(inputs)
                res = count[0].item() - torch.sum(outputs).item()
                epoch_res.append(res)

        epoch_res = np.array(epoch_res)
        mse = np.sqrt(np.mean(np.square(epoch_res)))
        mae = np.mean(np.abs(epoch_res))
        self.logger.info(
            'Epoch {} Val, MSE: {:.2f} MAE: {:.2f}, Cost {:.1f} sec'.format(
                self.epoch, mse, mae,
                time.time() - epoch_start))

        model_state_dic = self.model.state_dict()
        if (2.0 * mse + mae) < (2.0 * self.best_mse + self.best_mae):
            self.best_mse = mse
            self.best_mae = mae
            self.logger.info(
                "save best mse {:.2f} mae {:.2f} model epoch {}".format(
                    self.best_mse, self.best_mae, self.epoch))
            torch.save(
                model_state_dic,
                os.path.join(self.save_dir,
                             'best_model_{}.pth'.format(self.best_count)))
            self.best_count += 1
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
            device = torch.device('cuda')
            part_B_train = os.path.join(args.data_dir, 'train_data', 'images')
            part_B_train = part_B_train.replace(
                '{}/train_data'.format(args.counter_type), 'train_data')
            part_B_test = os.path.join(args.data_dir, 'test_data', 'images')
            part_B_test = part_B_test.replace(
                '{}/test_data'.format(args.counter_type), 'test_data')
            model_path = os.path.join(
                self.save_dir, 'best_model_{}.pth'.format(self.best_count - 1))
            model = vgg19()
            model.to(device)
            model.load_state_dict(torch.load(model_path, device))
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])
            path_sets_B = [part_B_test]
            img_paths_B = []
            for path in path_sets_B:
                for img_path in glob.glob(os.path.join(path, '*.jpg')):
                    img_paths_B.append(img_path)
            number = 0
            image_errs_temp = []
            for img_path in tqdm(img_paths_B):
                #for k in xrange(len(img_paths_B)):
                for i in range(0, 3):
                    for j in range(0, 3):
                        image_path = img_path.replace(
                            'test_data',
                            '{}/test_data'.format(args.counter_type)).replace(
                                '.jpg', '_{}_{}.jpg'.format(i, j))
                        name = os.path.basename(image_path).split('.')[0]
                        mat_path = image_path.replace('.jpg', '.mat').replace(
                            'images',
                            'ground-truth').replace(name, 'GT_{}'.format(name))
                        mat = io.loadmat(mat_path)
                        #          dataloader = torch.utils.data.DataLoader('sha', 1, shuffle=False,num_workers=1, pin_memory=True)
                        image_errs = []
                        img = transform(
                            Image.open(image_path).convert('RGB')).cuda()
                        inputs = img.unsqueeze(0)

                        #assert inputs.size(0) == 1, 'the batch size should equal to 1'
                        with torch.set_grad_enabled(False):
                            outputs, _ = model(inputs)
                        img_err = abs(mat["image_info"][0, 0][0, 0][1] -
                                      torch.sum(outputs).item())
                        img_err = np.squeeze(img_err)
                        print(image_path, img_err)
                        image_errs_temp.append(img_err)

                image_errs = np.reshape(image_errs_temp, (3, 3))

                with open(
                        img_path.replace(
                            'test_data/images',
                            '{}/test_data/base_dir_metric_{}'.format(
                                args.counter_type,
                                args.counter_type)).replace('.jpg', '.npy'),
                        'wb') as f:
                    np.save(f, image_errs)
                image_errs_temp.clear()
Example #11
def main():
    output_dir = "./save_fig"

    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Hyper-parameters
    eps = 1e-8

    ### data config
    test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                            num_class=args.num_classes,
                                            mode="test")
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=2)

    ### novelty data
    out_test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                                num_class=args.num_classes,
                                                mode="OOD")
    out_test_loader = torch.utils.data.DataLoader(out_test_dataset,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=2)

    ##### model, optimizer config
    if args.net_type == "resnet50":
        model = models.resnet50(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "resnet34":
        model = models.resnet34(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)
    elif args.net_type == "vgg19":
        model = models.vgg19(num_c=args.num_classes,
                             num_cc=args.OOD_num_classes,
                             pretrained=True)
    elif args.net_type == "vgg16":
        model = models.vgg16(num_c=args.num_classes,
                             num_cc=args.OOD_num_classes,
                             pretrained=True)
    elif args.net_type == "vgg19_bn":
        model = models.vgg19_bn(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)
    elif args.net_type == "vgg16_bn":
        model = models.vgg16_bn(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)

    print("load checkpoint_last")
    checkpoint = torch.load(args.model_path)

    ##### load model
    model.load_state_dict(checkpoint["model"])
    start_epoch = checkpoint["epoch"]
    optimizer = optim.SGD(model.parameters(), lr=checkpoint["init_lr"])

    #### create folder
    Path(output_dir).mkdir(exist_ok=True, parents=True)

    model = model.to(device).eval()
    # Start grad-CAM
    bp = BackPropagation(model=model)
    inv_normalize = transforms.Normalize(
        mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
        std=[1 / 0.229, 1 / 0.224, 1 / 0.225])
    target_layer = "layer4"

    stime = time.time()

    gcam = GradCAM(model=model)

    grad_cam = GradCAMmodule(target_layer, output_dir)
    grad_cam.model_config(model)
    for j, test_data in enumerate(test_loader):
        #### initialized
        org_image = test_data['input'].to(device)
        target_class = test_data['label'].to(device)

        target_class = int(target_class.argmax().cpu().detach())
        result = model(org_image).argmax()
        print("number: {} pred: {} target: {}".format(j, result, target_class))
        result = int(result.cpu().detach())
        grad_cam.saveGradCAM(org_image, result, j)
Example #12
    if model == 'res18conv':
        # ResNet18 Conv
        model, criterion, optimizer, scheduler = models.resNet18_conv()
        model_conv = fit(model, criterion, optimizer, scheduler, num_epochs=30)
        predict(model_conv)

    if model == 'res152conv':
        # ResNet152 Conv
        model, criterion, optimizer, scheduler = models.resNet152_conv()
        model_conv = fit(model, criterion, optimizer, scheduler, num_epochs=30)
        predict(model_conv)

    if model == 'dense161':
        # DenseNet161
        model, criterion, optimizer, scheduler = models.denseNet161_ft()
        model_ft = fit(model, criterion, optimizer, scheduler, num_epochs=30)
        predict(model_ft)

    if model == 'vgg19':
        # VGG 19-layer model
        model, criterion, optimizer, scheduler = models.vgg19()
        model_ft = fit(model, criterion, optimizer, scheduler, num_epochs=30)
        predict(model_ft)

    if model == 'alex':
        # AlexNet
        model, criterion, optimizer, scheduler = models.alexNet()
        model_ft = fit(model, criterion, optimizer, scheduler, num_epochs=30)
        predict(model_ft)
Example #13
def style_transfer(args):
    """
    Style transfer algorithm.
    """

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # We used a 19-layer VGG network as content and style extractor
    model = vgg19(device)

    # Content image resized to keep aspect ratio
    content_image = utils.img_to_tensor(Image.open(args.content_image_pth),
                                        device, args.image_height,
                                        args.image_width)

    # To extract image information on comparable scales, we always resized the style image to the same size as
    # the content image before computing its feature representations
    style_image = utils.img_to_tensor(Image.open(args.style_image_pth), device,
                                      args.image_height, args.image_width)

    # Generated image should be same shape as content image
    generated_image = torch.randn(content_image.data.size(),
                                  device=device,
                                  requires_grad=True)

    # The content image is passed through the network and the content representation in one layer is stored.
    # The style image is passed through the network and its style representation on all layers included are computed
    # and stored.
    content_image_content = model.get_content(content_image,
                                              args.content_layer_name)
    style_image_style = model.get_style(style_image, args.style_layer_names)

    # Extracting layer output dimensions for the style image. These dimensions are needed in regularizing the style loss
    # function
    Ns, Ms = model.get_Ns_and_Ms(style_image, args.style_layer_names)

    loss_fn = make_loss(args.content_weight, args.style_weight,
                        content_image_content, style_image_style,
                        get_content_loss, get_style_loss, Ns, Ms)

    optimizer = optim.LBFGS([generated_image])
    losses = []

    for cnt in range(args.iter_count):

        def closure():
            generated_image.data.clamp_(0, 1)
            optimizer.zero_grad()

            generated_image_content = model.get_content(
                generated_image, args.content_layer_name, detach=False)
            generated_image_style = model.get_style(generated_image,
                                                    args.style_layer_names,
                                                    detach=False)

            loss = loss_fn(generated_image_content, generated_image_style)

            loss.backward()
            return loss

        print('#### Iteration: {}'.format(cnt))
        loss = optimizer.step(closure)
        losses.append(loss)

    generated_image.data.clamp_(0, 1)

    # Storing the generated image
    timestr = time.strftime("%Y%m%d-%H%M%S")
    output_image_path = os.path.join('output', timestr + '.jpg')
    output_log_path = os.path.join('output', timestr + '.log')

    utils.tensor_to_img(generated_image).save(output_image_path)

    # Storing settings used to generate the image
    with open(output_log_path, 'w') as f:
        json.dump(vars(args), f, indent=4)
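A hypothetical argument set for style_transfer() above; the layer names follow common VGG19 naming conventions and, like the weights and iteration count, are placeholders rather than the project's defaults:

from argparse import Namespace

args = Namespace(content_image_pth='content.jpg', style_image_pth='style.jpg',
                 image_height=512, image_width=512,
                 content_layer_name='conv4_2',
                 style_layer_names=['conv1_1', 'conv2_1', 'conv3_1',
                                    'conv4_1', 'conv5_1'],
                 content_weight=1.0, style_weight=1e6, iter_count=50)
style_transfer(args)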
Example #14
import argparse
import torch
import glob
import os
import scipy.io as io
import numpy as np
import datasets.crowd as crowd
from torchvision import transforms
from models import vgg19

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device('cuda')
root = '/content/content/content/VisDrone2020-CC'
part_B_train = os.path.join(root, 'train_data', 'images')
part_B_test = os.path.join(root, 'test_data', 'downsampled-padded-images')
model_path = '/content/DMcouting_coarse_best_model_0.pth'
model = vgg19()
model.to(device)
model.load_state_dict(torch.load(model_path, device))
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
path_sets_B = [part_B_test]
img_paths_B = []
for path in path_sets_B:
    for img_path in glob.glob(os.path.join(path, '*.jpg')):
        img_paths_B.append(img_path)
number = 0
image_errs_temp = []
os.mkdir(
Example #15
import planner as pln
import hardware as hw
import dataset
import models

import torch.nn
import torch

import time

simd_cfg_path = '../../hwcfg/simd.json'
hw_spec = hw.HardwareSpec(simd_cfg_path)

data = dataset.imagenet()
vgg19 = models.vgg19()

pnn = pln.Planner()

start_time = time.time()

for name, module in vgg19.named_modules():
    if isinstance(module, torch.nn.Sequential):
        continue
    pnn.set_data(data=data, module=module, hw_spec=hw_spec, layer_name=name)
    data = pnn.run('../../build')

elapsed_time = time.time() - start_time
print('[Front-end] Elapsed time: ' +
      time.strftime('%H hours %M min %S sec', time.gmtime(elapsed_time)))
Example #16
def main():
    start_epoch = 0

    pretrained_model = os.path.join("./pre_trained", args.dataset,
                                    args.net_type + ".pth.tar")
    save_model = "./save_model_dis/pre_training"
    tensorboard_dir = "./tensorboard/OOD_dis/pre_training" + args.dataset
    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Hyper-parameters
    eps = 1e-8

    ### data config
    train_dataset = load_data.Dog_metric_dataloader(image_dir=image_dir,
                                                    num_class=args.num_classes,
                                                    mode="train",
                                                    soft_label=args.soft_label)
    if args.custom_sampler:
        MySampler = load_data.customSampler(train_dataset, args.batch_size,
                                            args.num_instances)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_sampler=MySampler,
                                                   num_workers=2)
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=2)

    test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                            num_class=args.num_classes,
                                            mode="test")
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=8,
                                              shuffle=False,
                                              num_workers=2)

    out_test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                                num_class=args.num_classes,
                                                mode="OOD")
    out_test_loader = torch.utils.data.DataLoader(out_test_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  num_workers=2)

    if args.transfer:
        ### perfectly OOD data
        OOD_dataset = load_data.Dog_dataloader(image_dir=OOD_dir,
                                               num_class=args.OOD_num_classes,
                                               mode="OOD")
        OOD_loader = torch.utils.data.DataLoader(OOD_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=2)

    ##### model, optimizer config
    if args.net_type == "resnet50":
        model = models.resnet50(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "resnet34":
        model = models.resnet34(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg19":
        model = models.vgg19(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg16":
        model = models.vgg16(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg19_bn":
        model = models.vgg19_bn(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg16_bn":
        model = models.vgg16_bn(num_c=args.num_classes, pretrained=True)

    if args.transfer:
        extra_fc = nn.Linear(2048, args.num_classes + args.OOD_num_classes)

    if args.load == True:
        print("loading model")
        checkpoint = torch.load(pretrained_model)

        ##### load model
        model.load_state_dict(checkpoint["model"])

    batch_num = len(
        train_loader) / args.batch_size if args.custom_sampler else len(
            train_loader)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.init_lr,
                          momentum=0.9,
                          nesterov=args.nesterov)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.num_epochs * batch_num)

    #### loss config
    criterion = nn.BCEWithLogitsLoss()

    #### create folder
    Path(os.path.join(save_model, env, args.net_type)).mkdir(exist_ok=True,
                                                             parents=True)

    if args.board_clear == True:
        files = glob.glob(tensorboard_dir + "/*")
        for f in files:
            shutil.rmtree(f)
    i = 0
    while True:
        if Path(os.path.join(tensorboard_dir, str(i))).exists() == True:
            i += 1
        else:
            Path(os.path.join(tensorboard_dir, str(i))).mkdir(exist_ok=True,
                                                              parents=True)
            break
    summary = SummaryWriter(os.path.join(tensorboard_dir, str(i)))

    # Start training
    j = 0
    best_score = 0
    score = 0
    membership_loss = torch.tensor(0)
    transfer_loss = torch.tensor(0)
    for epoch in range(start_epoch, args.num_epochs):
        running_loss = 0
        running_membership_loss = 0
        running_transfer_loss = 0
        running_class_loss = 0
        train_acc = 0
        test_acc = 0
        stime = time.time()

        # for i, (train_data, OOD_data) in enumerate(zip(train_loader, OOD_loader)):
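        # NOTE: the args.transfer branch below reads OOD_data, which is only
        # bound when the zip(train_loader, OOD_loader) loop above is used
        # instead of this one.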
        for i, train_data in enumerate(train_loader):
            #### initialized
            org_image = train_data['input'] + 0.01 * torch.randn_like(
                train_data['input'])
            org_image = org_image.to(device)
            gt = train_data['label'].type(torch.FloatTensor).to(device)

            model = model.to(device).train()
            optimizer.zero_grad()

            #### forward path
            out1, out2 = model.pendis_forward(org_image)

            if args.membership:
                membership_loss = (
                    Membership_loss(out2, gt, args.num_classes) +
                    Membership_loss(out1, gt, args.num_classes))
                running_membership_loss += membership_loss.item()

            if args.transfer:
                extra_fc = extra_fc.to(device).train()

                OOD_image = (
                    OOD_data['input'] +
                    0.01 * torch.randn_like(OOD_data['input'])).to(device)
                OOD_gt = torch.cat(
                    (torch.zeros(args.batch_size, args.num_classes),
                     OOD_data['label'].type(torch.FloatTensor)),
                    dim=1).to(device)

                #### forward path
                _, feature = model.gen_forward(OOD_image)
                OOD_output = extra_fc(feature)
                transfer_loss = criterion(OOD_output, OOD_gt)
                running_transfer_loss += transfer_loss.item()

            #### calc loss
            class1_loss = criterion(out1, gt)
            class2_loss = criterion(out2, gt)
            class_loss = (class1_loss + class2_loss)

            total_loss = class_loss + membership_loss * 0.3 + transfer_loss

            #### calc accuracy
            train_acc += sum(
                torch.argmax(out1, dim=1) == torch.argmax(
                    gt, dim=1)).cpu().detach().item()
            train_acc += sum(
                torch.argmax(out2, dim=1) == torch.argmax(
                    gt, dim=1)).cpu().detach().item()

            total_loss.backward()
            optimizer.step()
            scheduler.step()

            running_class_loss += class_loss.item()
            running_loss += total_loss.item()

        with torch.no_grad():
            for i, test_data in enumerate(test_loader):
                org_image = test_data['input'].to(device)
                model = model.to(device).eval()
                gt = test_data['label'].type(torch.FloatTensor).to(device)

                #### forward path
                out1, out2 = model.pendis_forward(org_image)
                score_1 = nn.functional.softmax(out1, dim=1)
                score_2 = nn.functional.softmax(out2, dim=1)
                dist = torch.sum(torch.abs(score_1 - score_2), dim=1).reshape(
                    (org_image.shape[0], -1))
                if i == 0:
                    dists = dist
                    labels = torch.zeros((org_image.shape[0], ))
                else:
                    dists = torch.cat((dists, dist), dim=0)
                    labels = torch.cat(
                        (labels, torch.zeros((org_image.shape[0]))), dim=0)

                test_acc += sum(
                    torch.argmax(torch.sigmoid(out1), dim=1) == torch.argmax(
                        gt, dim=1)).cpu().detach().item()
                test_acc += sum(
                    torch.argmax(torch.sigmoid(out2), dim=1) == torch.argmax(
                        gt, dim=1)).cpu().detach().item()

            for i, out_org_data in enumerate(out_test_loader):
                out_org_image = out_org_data['input'].to(device)

                out1, out2 = model.pendis_forward(out_org_image)
                score_1 = nn.functional.softmax(out1, dim=1)
                score_2 = nn.functional.softmax(out2, dim=1)
                dist = torch.sum(torch.abs(score_1 - score_2), dim=1).reshape(
                    (out_org_image.shape[0], -1))

                dists = torch.cat((dists, dist), dim=0)
                labels = torch.cat((labels, torch.ones(
                    (out_org_image.shape[0]))),
                                   dim=0)

        roc = evaluate(labels.cpu(), dists.cpu(), metric='roc')
        print('Epoch{} AUROC: {:.3f}, test accuracy : {:.4f}'.format(
            epoch, roc, test_acc / test_dataset.num_image / 2))

        print(
            'Epoch [{}/{}], Step {}, total_loss = {:.4f}, class = {:.4f}, membership = {:.4f}, transfer = {:.4f}, exe time: {:.2f}, lr: {:.4f}*e-4'
            .format(epoch, args.num_epochs, i + 1, running_loss / batch_num,
                    running_class_loss / batch_num,
                    running_membership_loss / batch_num,
                    running_transfer_loss / batch_num,
                    time.time() - stime,
                    scheduler.get_last_lr()[0] * 10**4))
        print('exe time: {:.2f}, lr: {:.4f}*e-4'.format(
            time.time() - stime,
            scheduler.get_last_lr()[0] * 10**4))

        print("train accuracy total : {:.4f}".format(
            train_acc / train_dataset.num_image / 2))
        print("test accuracy total : {:.4f}".format(
            test_acc / test_dataset.num_image / 2))

        summary.add_scalar('loss/total_loss', running_loss / batch_num, epoch)
        summary.add_scalar('loss/class_loss', running_class_loss / batch_num,
                           epoch)
        summary.add_scalar('loss/membership_loss',
                           running_membership_loss / batch_num, epoch)
        summary.add_scalar('acc/train_acc',
                           train_acc / train_dataset.num_image / 2, epoch)
        summary.add_scalar('acc/test_acc',
                           test_acc / test_dataset.num_image / 2, epoch)
        summary.add_scalar("learning_rate/lr",
                           scheduler.get_last_lr()[0], epoch)
        time.sleep(0.001)
        torch.save(
            {
                'model': model.state_dict(),
                'epoch': epoch,
                'init_lr': scheduler.get_last_lr()[0]
            },
            os.path.join(save_model, env, args.net_type,
                         'checkpoint_last_pre.pth.tar'))
Example #17
parser.add_argument('--val_path', type=str, default='')
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--bs256_lr', type=float, default=0.01)
parser.add_argument('--multi_step',
                    type=str,
                    default='[30, 60, 90]',
                    help='if set to be [], using cosine learning rate decay')
parser.add_argument('--saved_model', type=str, default='ckpt.t7')
args = parser.parse_args()
args.multi_step = eval(args.multi_step)

##############################################
############### define network ###############
##############################################
if args.model_name == 'vgg19_noBN':
    net = models.vgg19()
elif args.model_name == 'vgg19':
    net = torchvision.models.vgg19()
elif args.model_name == 'vgg19_bn':
    net = torchvision.models.vgg19_bn()

net.features = torch.nn.DataParallel(net.features)
net.cuda()

##############################################
################# dataloader #################
##############################################
train_loader, val_loader = utils.folder_loader(args.train_path, args.val_path,
                                               args.batch_size)

##############################################