def __init__(self, opt, dis):
        super(DiscriminatorLoss, self).__init__()

        self.dis = dis
        self.gpu_id = opt.gpu_ids[0]

        # Adversarial criteria for the predictions
        self.crits = []

        # Targets for criteria
        self.labels_real = []
        self.labels_fake = []

        # Iterate over discriminators to initialize criteria
        for loss_type, output_shapes in zip(dis.adv_loss_types, dis.shapes):

            if loss_type == 'gan' or loss_type == 'lsgan':

                # Set criteria
                if loss_type == 'gan':
                    self.crits += [nn.BCEWithLogitsLoss()]
                elif loss_type == 'lsgan':
                    self.crits += [nn.MSELoss()]

                # Set labels
                labels_real = []
                labels_fake = []

                for shape in output_shapes:

                    # plain tensors work as targets (torch.autograd.Variable
                    # has been a no-op wrapper since PyTorch 0.4)
                    labels_real += [torch.ones(shape).cuda(self.gpu_id)]
                    labels_fake += [torch.zeros(shape).cuda(self.gpu_id)]

                self.labels_real += [labels_real]
                self.labels_fake += [labels_fake]

            elif loss_type == 'wgan':

                self.crits += [None]

                self.labels_real += [None]
                self.labels_fake += [None]

        # Initialize criterion for aux loss
        self.crit_aux = utils.get_criterion(opt.aux_loss_type)
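    # Hedged usage sketch (assumed names, not shown in this snippet): a
    # matching forward pass would zip per-discriminator predictions with the
    # stored criteria, falling back to the raw-score mean for 'wgan' entries
    # whose criterion is None, e.g.:
    #
    #     def __call__(self, preds, real=True):
    #         labels = self.labels_real if real else self.labels_fake
    #         loss = 0
    #         for crit, pred, label in zip(self.crits, preds, labels):
    #             if crit is None:  # 'wgan': use the mean critic score
    #                 loss = loss + sum(-p.mean() if real else p.mean()
    #                                   for p in pred)
    #             else:
    #                 loss = loss + sum(crit(p, l)
    #                                   for p, l in zip(pred, label))
    #         return loss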
Example 2
    def __init__(self, opt):
        super(Model, self).__init__()

        self.gpu_id = opt.gpu_ids[0]
        self.weights_path = os.path.join(opt.experiment_path, 'checkpoints')

        # Generators
        self.gen_A = Generator(opt, 'A', opt.gen_type_name_A)
        self.gen_B = Generator(opt, 'B', opt.gen_type_name_B)

        # Discriminators
        self.dis_A = DiscriminatorWrapper(opt, 'A')
        self.dis_B = DiscriminatorWrapper(opt, 'B')

        # Load weights
        utils.load_checkpoint(self, opt.which_epoch, opt.pretrained_gen_path)

        # Print architectures and parameter counts
        for name, module in [('Gen A to B', self.gen_B),
                             ('Gen B to A', self.gen_A),
                             ('Dis A', self.dis_A),
                             ('Dis B', self.dis_B)]:
            print('\n%s\n' % name)
            num_params = sum(p.numel() for p in module.parameters())
            print(module)
            print('Number of parameters: %d' % num_params)

        self.gen_params = chain(self.gen_A.parameters(),
                                self.gen_B.parameters())

        self.dis_params = chain(self.dis_A.parameters(),
                                self.dis_B.parameters())

        # Losses
        self.crit_dis_A = DiscriminatorLoss(opt, self.dis_A)
        self.crit_dis_B = DiscriminatorLoss(opt, self.dis_B)

        # If an encoder is required, load the weights
        if (opt.mse_loss_type_A == 'perceptual'
                or opt.mse_loss_type_B == 'perceptual'
                or (hasattr(self, 'dis_A') and self.dis_A.use_encoder)
                or (hasattr(self, 'dis_B') and self.dis_B.use_encoder)):

            # Load encoder
            if opt.enc_type[:5] == 'vgg19':
                layers = '1,6,11,20,29'
            else:
                # fail early instead of hitting a NameError on `layers` below
                raise ValueError('Unsupported encoder type: %s' % opt.enc_type)

            self.enc = FeatureExtractor(input_range='tanh',
                                        net_type=opt.enc_type,
                                        layers=layers).eval()

            print('')
            print(self.enc)
            print('')

        else:

            self.enc = None

        self.crit_mse_A = utils.get_criterion(opt.mse_loss_type_A,
                                              opt.mse_loss_weight_A, self.enc)
        self.crit_mse_B = utils.get_criterion(opt.mse_loss_type_B,
                                              opt.mse_loss_weight_B, self.enc)

        # In case the domains have different sizes, these rescalers are needed
        # for the MSE loss (assumes img_size_B is a multiple of img_size_A)
        scale_factor = opt.img_size_B // opt.img_size_A

        self.down = nn.AvgPool2d(scale_factor)
        self.up = nn.Upsample(scale_factor=scale_factor,
                              mode='bilinear',
                              align_corners=False)

        # Load onto gpus
        self.gen_A = nn.DataParallel(self.gen_A.cuda(self.gpu_id), opt.gpu_ids)
        self.gen_B = nn.DataParallel(self.gen_B.cuda(self.gpu_id), opt.gpu_ids)
        self.dis_A = nn.DataParallel(self.dis_A.cuda(self.gpu_id), opt.gpu_ids)
        self.dis_B = nn.DataParallel(self.dis_B.cuda(self.gpu_id), opt.gpu_ids)
        if self.enc is not None:
            self.enc = nn.DataParallel(self.enc.cuda(self.gpu_id), opt.gpu_ids)
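        # Hedged usage sketch (assumed, not from the source): `gen_params` and
        # `dis_params` are typically handed to two separate optimizers, e.g.:
        #
        #     opt_gen = torch.optim.Adam(model.gen_params, lr=2e-4,
        #                                betas=(0.5, 0.999))
        #     opt_dis = torch.optim.Adam(model.dis_params, lr=2e-4,
        #                                betas=(0.5, 0.999))
        #
        # Note that `chain(...)` yields a one-shot iterator, so each chain can
        # be consumed by an optimizer only once; rebuild it before reuse.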
Example 3
def main():
    # Argparse custom actions
    class SetModes(argparse.Action):
        """Set the modes of operations."""
        def __call__(self, parser, args, values, option_string=None):
            for value in values:
                setattr(args, value, True)

    # yapf: disable
    parser = argparse.ArgumentParser(description='Fake News Classifier')
    # Initialization
    parser.add_argument('--init', action='store_true', default=False,
                        help='perform initialization')
    # Modes
    parser.add_argument('-m', '--mode', action=SetModes, nargs='+', choices=['train', 'test', 'demo', 'plot'],
                        help='specify the mode of operation: train, test, demo, plot')
    parser.add_argument('--train', action='store_true', default=False,
                        help='train the model')
    parser.add_argument('--test', action='store_true', default=False,
                        help='test the model (must either train or load a model)')
    parser.add_argument('--demo', action='store_true', default=False,
                        help='demo the model on linewise samples from a file (must either train or load a model)')
    parser.add_argument('--plot', action='store_true', default=False,
                        help='plot training data (must either train or have existing training data)')
    # Options
    parser.add_argument('-b', '--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('-c', '--config', type=str,
                        help='path to configuration json file (overrides args)')
    parser.add_argument('--data-loader', type=str, default='BatchLoader',
                        help='data loader to use (default: "BatchLoader")')
    parser.add_argument('--dataset', type=str, default='FakeRealNews',
                        help='dataset to use (default: "FakeRealNews")')
    parser.add_argument('-e', '--epochs', type=int, default=10,
                        help='number of epochs to train (default: 10)')
    parser.add_argument('-f', '--file', type=str,
                        help='specify a file for another argument')
    parser.add_argument('--lr', '--learning-rate', dest='learning_rate', type=float, default=1e-4,
                        help='learning rate (default: 1e-4)')
    parser.add_argument('-l', '--load', type=int, metavar='EPOCH',
                        help='load a model and its training data')
    parser.add_argument('--loss', type=str, default='BCEWithLogitsLoss',
                        help='loss function (default: "BCEWithLogitsLoss")')
    parser.add_argument('--model', type=str, default='FakeNewsNet',
                        help='model architecture to use (default: "FakeNewsNet")')
    parser.add_argument('-s', '--sample-size', type=int, metavar='N',
                        help='limit sample size for training')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed (default: 0)')
    parser.add_argument('--save', action='store_true', default=True,
                        help='save model checkpoints and training data (default: True)')
    parser.add_argument('--no-save', dest='save', action='store_false')
    args = parser.parse_args()
    # yapf: enable

    # Print help if no args
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit()

    # Configure logger
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('matplotlib').setLevel(logging.WARNING)

    # Load configuration file if specified
    if args.config is not None:
        utils.load_config(args)

    # Exit if no mode is specified
    if not args.init and not args.train and not args.test and not args.demo and not args.plot:
        logging.error(
            'No mode specified. Please specify with `--init` or `--mode {train,test,demo,plot}`.'
        )
        exit(1)
    # Exit on `--load` if run directory not found
    if ((args.load is not None or (args.plot and not args.train))
            and not os.path.isdir(utils.get_path(args))):
        logging.error(
            'Could not find directory for current configuration {}'.format(
                utils.get_path(args)))
        exit(1)
    # Exit on `test` or `demo` without `train` or `--load EPOCH`
    if (args.test or args.demo) and not (args.train or args.load is not None):
        logging.error(
            'Cannot run `test` or `demo` without a model. Try again with either `train` or `--load EPOCH`.'
        )
        exit(1)
    # Exit on `demo` without a string file
    if args.demo and not args.file:
        logging.error(
            'Cannot run `demo` without a file. Try again with `--file FILE`.')
        exit(1)

    # Setup run directory (the original `not (...)` made this branch
    # unreachable, since a missing mode already exits above)
    if args.save and not args.init and (args.train or args.test
                                        or args.demo or args.plot):
        utils.save_config(args)
        path = utils.get_path(args) + '/output.log'
        os.makedirs(os.path.dirname(path), exist_ok=True)
        logging.getLogger().addHandler(logging.FileHandler(path))

    # Set random seeds
    random.seed(args.seed)
    torch.manual_seed(args.seed)
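    # Note: for fully reproducible GPU runs, seeding CUDA as well would be
    # needed, e.g. `torch.cuda.manual_seed_all(args.seed)` plus deterministic
    # cuDNN settings (`torch.backends.cudnn.deterministic = True`).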

    # Variable declarations
    training_data = None

    # Load GloVe vocabulary
    if args.init or args.train or args.test or args.demo:
        glove = torchtext.vocab.GloVe(name='6B', dim=50)

    # Perform initialization
    if args.init or args.train or args.test:
        # Determine which dataset to use
        dataset = utils.get_dataset(args)
        # Preload the dataset
        dataset.load()
        # Get preprocessed samples
        samples = preprocessing.get_samples(dataset, glove, args.init)
        random.shuffle(samples)

    # DataLoader setup for `train`, `test`
    if args.train or args.test:
        # Select data loader to use
        DataLoader = utils.get_data_loader(args)

        # Split samples
        split_ratio = [.6, .2, .2]
        trainset, validset, testset = list(
            DataLoader.splits(samples, split_ratio))
        if args.sample_size is not None:  # limit samples used in training
            trainset = trainset[:args.sample_size]
            validset = validset[:int(args.sample_size * split_ratio[1] /
                                     split_ratio[0])]

        # Get data loaders
        train_loader, valid_loader, test_loader = [
            DataLoader(split, batch_size=args.batch_size)
            for split in [trainset, validset, testset]
        ]

    # Load samples for demo
    if args.demo:
        if os.path.isfile(args.file):
            # Read samples from the input file
            with open(args.file, 'r') as f:
                samples = [line for line in f if line.strip()]
            data = pd.DataFrame({
                'text': samples,
                'label': [0.5] * len(samples)
            })
            # Preprocess samples
            preprocessing.clean(data)
            samples = preprocessing.encode(data, glove)
            samples = [(torch.tensor(text).long(), label)
                       for text, label in samples]

            # Select data loader to use
            DataLoader = utils.get_data_loader(args)

            # Get data loader
            data_loader = DataLoader(samples, batch_size=1, shuffle=False)
        else:
            logging.error('Could not find file for demo at {}'.format(
                args.file))
            exit(1)

    # Model setup for `train`, `test`, `demo`
    if args.train or args.test or args.demo:
        # Create the model
        model = utils.get_model(glove, args)

        # Load a model
        if args.load is not None:
            utils.load_model(args.load, model, args)

    # Run `train`
    if args.train:
        training_data = training.train(model, train_loader, valid_loader, args)

    # Run `test`
    if args.test:
        if args.train or args.load is not None:
            criterion = utils.get_criterion(args.loss)
            acc, loss = training.evaluate(model, test_loader, criterion)
            logging.info('Testing accuracy: {:.4%}, loss: {:.6f}'.format(
                acc, loss))
        else:
            logging.error('No model loaded for testing')
            exit(1)

    # Run `demo`
    if args.demo:
        if args.train or args.load is not None:
            model.eval()  # set model to evaluate mode
            logging.info('-- Results --')
            for i, (text, _) in enumerate(data_loader):
                preview = data['text'][i][:32] + '...'
                out = model(text).flatten()
                prob = torch.sigmoid(out)  # apply sigmoid to get probability
                pred = (prob > 0.5).long()  # predict `true` if prob > 0.5
                label = ['fake', 'true'][pred.item()]
                label = '{}{}{}'.format(
                    '\033[92m' if pred.item() else '\033[93m', label,
                    '\033[0m')
                confidence = (prob if pred.item() else 1 - prob).item()
                logging.info(
                    'Report {}: {} with {:.2%} confidence - "{}"'.format(
                        i, label, confidence, preview))
        else:
            logging.error('No model loaded for demo')
            exit(1)

    # Run `plot`
    if args.plot:
        if training_data is None:
            training_data = utils.load_training_data(args, allow_missing=False)
        if args.load is not None and not args.train:
            for k, v in training_data.items():
                training_data[k] = v[:args.load + 1]

        logging.info('Plotting training data')
        training.plot(training_data)
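    # Example invocations (hedged; assuming this lives in a `main.py`):
    #
    #   python main.py --init                        # preprocess the dataset once
    #   python main.py -m train test -e 20 -b 32     # train for 20 epochs, then test
    #   python main.py --demo -f samples.txt -l 19   # demo a model loaded from epoch 19
    #   python main.py --plot -l 19                  # plot saved training data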
Example 4
def main():
    # parse command line argument and generate config dictionary
    config = parse_args()
    logger.info(json.dumps(config, indent=2))

    run_config = config['run_config']
    optim_config = config['optim_config']

    # Code for saving in the correct place
    all_arguments = {}
    for key in config.keys():
        all_arguments.update(config[key])

    run_config['save_name'] = run_config['save_name'].format(**all_arguments)
    print('Saving in ' + run_config['save_name'])
    # End code for saving in the right place

    if run_config['test_config']:
        sys.exit(0)

    # TensorBoard SummaryWriter
    if run_config['tensorboard']:
        writer = SummaryWriter(run_config['outdir'])
    else:
        writer = None

    # create output directory
    outdir = pathlib.Path(run_config['outdir'])
    outdir.mkdir(exist_ok=True, parents=True)

    # save config as json file in output directory
    outpath = outdir / 'config.json'
    with open(outpath, 'w') as fout:
        json.dump(config, fout, indent=2)

    # load data loaders
    train_loader, test_loader = get_loader(config['data_config'])

    # set random seed (this was moved after the data loading because the data
    # loader might have a random seed)
    seed = run_config['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    epoch_seeds = np.random.randint(np.iinfo(np.int32).max // 2,
                                    size=optim_config['epochs'])

    # load model
    logger.info('Loading model...')
    model = utils.load_model(config['model_config'])
    n_params = sum(param.numel() for param in model.parameters())
    logger.info('n_params: {}'.format(n_params))

    if run_config['count_params']:
        # this option means just count the number of parameters, then move on
        sys.exit(0)

    if run_config['fp16'] and not run_config['use_amp']:
        model.half()
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.float()

    device = torch.device(run_config['device'])
    if device.type == 'cuda' and torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    model.to(device)
    logger.info('Done')

    train_criterion, test_criterion = utils.get_criterion(
        config['data_config'])

    # create optimizer (assumes batch-norm parameters have 'bn' in their names)
    if optim_config['no_weight_decay_on_bn']:
        params = [
            {
                'params': [
                    param for name, param in model.named_parameters()
                    if 'bn' not in name
                ]
            },
            {
                'params': [
                    param for name, param in model.named_parameters()
                    if 'bn' in name
                ],
                'weight_decay': 0,
            },
        ]
    else:
        params = model.parameters()
    optim_config['steps_per_epoch'] = len(train_loader)
    optimizer, scheduler = utils.create_optimizer(params, optim_config)

    # for mixed-precision
    amp_handle = apex.amp.init(
        enabled=run_config['use_amp']) if is_apex_available else None

    # run test before start training
    if run_config['test_first']:
        test(0, model, test_criterion, test_loader, run_config, writer)

    state = {
        'config': config,
        'state_dict': None,
        'optimizer': None,
        'epoch': 0,
        'accuracy': 0,
        'best_accuracy': 0,
        'best_epoch': 0,
    }
    epoch_logs = []
    for epoch, seed in zip(range(1, optim_config['epochs'] + 1), epoch_seeds):
        np.random.seed(seed)
        # train
        train_log = train(epoch, model, optimizer, scheduler, train_criterion,
                          train_loader, config, writer, amp_handle)

        epoch_log = train_log.copy()
        epoch_logs.append(epoch_log)
        utils.save_epoch_logs(epoch_logs, outdir)
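    # Hedged sketch (assumed; not in the original snippet): `state` is built
    # above but never populated here. A typical pattern would update and save
    # it after each epoch, e.g.:
    #
    #     state.update(epoch=epoch,
    #                  state_dict=model.state_dict(),
    #                  optimizer=optimizer.state_dict())
    #     torch.save(state, outdir / 'model_state.pth')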
    """
    Upload to bucket code
    """

    from google.cloud import storage
    import os

    client = storage.Client()
    bucket = client.get_bucket('ramasesh-bucket-1')
    filenames = os.listdir(outdir)

    for filename in filenames:
        print('Processing file: ' + filename)

        blob = bucket.blob(run_config['save_name'] + filename)
        blob.upload_from_filename(str(outdir) + '/' + filename)
    """
Example 5
    def __init__(self, opt):
        super(Model, self).__init__()

        self.gpu_id = opt.gpu_ids[0]
        self.weights_path = os.path.join(opt.experiment_path, 'checkpoints')

        # Generator
        self.gen_B = Generator(opt, 'B', opt.gen_type_name_B)
        
        self.noise_size = (opt.batch_size, self.gen_B.noise_channels)

        # Discriminator
        if opt.dis_type_names_B:
            self.dis_B = DiscriminatorWrapper(opt, 'B')

        # Load weights
        utils.load_checkpoint(self, opt.which_epoch, opt.pretrained_gen_path)

        # Print architectures
        print('\nGen A to B\n')
        num_params = 0
        for p in self.gen_B.parameters():
            num_params += p.numel()
        print(self.gen_B)
        print('Number of parameters: %d' % num_params)

        # Normalization statistics (`input_path` is assumed to be defined in
        # the enclosing module scope)
        self.X_min = torch.from_numpy(np.load(os.path.join(input_path, "data_min.npy")))
        self.X_min = self.X_min.cuda()

        self.X_max = torch.from_numpy(np.load(os.path.join(input_path, "data_max.npy")))
        self.X_max = self.X_max.cuda()
        
        self.X_mean = torch.from_numpy(np.load(os.path.join(input_path, "data_mean.npy")))
        self.X_mean = self.X_mean.cuda()

        self.X_std = torch.from_numpy(np.load(os.path.join(input_path, "data_std.npy")))
        self.X_std = self.X_std.cuda()
        
        # note: the original cross-wired these (y_std loaded target_mean.npy
        # and vice versa); variables now match their filenames
        self.y_mean = torch.from_numpy(np.load(os.path.join(input_path, "target_mean.npy")))
        self.y_mean = self.y_mean.cuda()

        self.y_std = torch.from_numpy(np.load(os.path.join(input_path, "target_std.npy")))
        self.y_std = self.y_std.cuda()

        self.gen_params = self.gen_B.parameters()

        # Discriminator
        if opt.dis_type_names_B:

            print('\nDis B\n')
            num_params = 0
            for p in self.dis_B.parameters():
                num_params += p.numel()
            print(self.dis_B)
            print('Number of parameters: %d' % num_params)

            self.dis_params = self.dis_B.parameters()

            # Losses
            self.crit_dis_B = DiscriminatorLoss(opt, self.dis_B)

        # If an encoder is required, load the weights
        if hasattr(self, 'dis_B') and self.dis_B.use_encoder:

            # Load encoder
            if opt.enc_type[:5] == 'vgg19':
                layers = '1,6,11,20,29'
            else:
                # fail early instead of hitting a NameError on `layers` below
                raise ValueError('Unsupported encoder type: %s' % opt.enc_type)

            self.enc = FeatureExtractor(
                input_range='tanh',
                net_type=opt.enc_type,
                layers=layers).eval()

            print('')
            print(self.enc)
            print('')

        else:

            self.enc = None

        # Pretrained aux classifier/regressor
        if opt.pretrained_aux_path:

            self.aux = torch.load(opt.pretrained_aux_path)

            self.crit_aux_B = utils.get_criterion(
                opt.aux_loss_type, 
                opt.gen_aux_loss_weight,
                self.enc)

            print('')
            print(self.aux)
            print('')

        # Identity upsampling (scale_factor=1), presumably kept so this
        # single-domain model matches the two-domain interface
        self.up = nn.Upsample(
            scale_factor=1,
            mode='bilinear',
            align_corners=False)

        # Load onto gpus
        self.gen_B = nn.DataParallel(self.gen_B.cuda(self.gpu_id), opt.gpu_ids)
        if opt.dis_type_names_B:
            self.dis_B = nn.DataParallel(self.dis_B.cuda(self.gpu_id), opt.gpu_ids)
        if hasattr(self, 'aux'):
            self.aux = nn.DataParallel(self.aux.cuda(self.gpu_id), opt.gpu_ids)
        if self.enc is not None: 
            self.enc = nn.DataParallel(self.enc.cuda(self.gpu_id), opt.gpu_ids)
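        # Hedged usage sketch (assumed names, not from the source): the stored
        # statistics and `noise_size` suggest helpers along these lines:
        #
        #     def sample_noise(self):
        #         return torch.randn(self.noise_size).cuda(self.gpu_id)
        #
        #     def normalize_input(self, x):
        #         return (x - self.X_mean) / self.X_std
        #
        #     def denormalize_target(self, y):
        #         return y * self.y_std + self.y_mean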