Example #1
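This `_setup` method (in the style of a Ray Tune Trainable) seeds PyTorch, builds the model, fetches the train/valid/test loaders from `dataset_utils.get_dataset`, and configures an Adam or SGD optimizer with weight decay disabled for structured parameters, plus a multi-step or fixed-period LR scheduler.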
    def _setup(self, config):
        self.config = config

        device = config['device']
        self.device = device
        torch.manual_seed(config['seed'])
        if self.device == 'cuda':
            torch.cuda.manual_seed(config['seed'])

        # model
        self.model = model_utils.get_model(config['model']).to(device)
        self.model_args = config['model']
        # count parameters
        self.nparameters = sum(param.nelement() for param in self.model.parameters())
        print("Parameter count: ", self.nparameters)

        # dataset
        self.train_loader, self.valid_loader, self.test_loader = dataset_utils.get_dataset(config['dataset'])

        structured_params = filter(lambda p: getattr(p, '_is_structured', False), self.model.parameters())
        unstructured_params = filter(lambda p: not getattr(p, '_is_structured', False), self.model.parameters())
        if config['optimizer'] == 'Adam':
            self.optimizer = optim.Adam([{'params': structured_params, 'weight_decay': 0.0},
                                         {'params': unstructured_params}],
                                        lr=config['lr'], weight_decay=config['weight_decay'])
        else:
            self.optimizer = optim.SGD([{'params': structured_params, 'weight_decay': 0.0},
                                        {'params': unstructured_params}],
                                       lr=config['lr'], momentum=0.9, weight_decay=config['weight_decay'])
        # scheduler
        if config['lr_decay']['milestones'] is not None:
            self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=config['lr_decay']['milestones'], gamma=config['lr_decay']['factor'])
        else:
            self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=config['lr_decay']['period'], gamma=config['lr_decay']['factor'])
        self.switch_ams = config['switch_ams']
Example #2
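Trains a VAE with PyTorch Lightning: `get_dataset` supplies the train/test loaders and the image shape, the model is built from a Hydra config, and the encoder (`q_net`) and decoder (`p_net`) weights are saved to the working directory after training.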
def train_vae(cfg):
    logger.info(f"Run config:\n{cfg.pretty()}")
    out_dir = os.getcwd()
    logger.info('Working directory {}'.format(out_dir))

    # To ensure reproducibility
    pl.seed_everything(123)

    # Dataset
    train_loader, test_loader, image_shape = get_dataset(
        cfg.dataset, cfg.batch_size, cfg.num_workers)

    # Model definition
    vae_model = VAE(cfg,
                    train_loader=train_loader,
                    val_loader=test_loader,
                    img_shape=image_shape)

    # Train
    trainer = pl.Trainer(early_stop_callback=False,
                         checkpoint_callback=False,
                         max_nb_epochs=cfg.num_epochs,
                         fast_dev_run=cfg.fast_dev_run,
                         gpus=[0] if torch.cuda.is_available() else 0)
    trainer.fit(vae_model)
    logger.info('Finished. Save to: {}'.format(os.getcwd()))

    # Save models
    save_file = osp.join(os.getcwd(), 'vae_{}_encoder.pth'.format(cfg.dataset))
    torch.save(vae_model.q_net.state_dict(), save_file)
    logger.info('Saving model: {}'.format(save_file))
    save_file = osp.join(os.getcwd(), 'vae_{}_decoder.pth'.format(cfg.dataset))
    torch.save(vae_model.p_net.state_dict(), save_file)
    logger.info('Saving model: {}'.format(save_file))
Example #3
def main(args):
    args.device = "cuda" if torch.cuda.is_available() else "cpu"
    train_dl, val_dl, test_dl = get_dataset(args.batch_size, args.dataset)
    model = construct_model(args.model_type)
    train(model, train_dl, val_dl, test_dl, args)
Example #4
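Defines a categorical cross-entropy loss on Keras backend ops, then builds a character-level vocabulary: `get_dataset` returns the train and validation sentence pairs, from which token frequencies and the `token2id`/`id2token` mappings are derived.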
class CrossEntropy(Loss):
    def compute_loss(self, inputs):
        y_true, y_pred = inputs
        loss = K.categorical_crossentropy(y_true, K.softmax(y_pred))
        return K.mean(loss)

if __name__ == '__main__':
    num_classes = 6676
    vocab_size = 6676
    max_length = 7
    hidden_dim = 128
    train_batch_size = 128
    val_batch_size = 500

    (X_train, Y_train), (X_val, Y_val) = get_dataset()
    # Character-frequency table; 'unk' counts non-Chinese characters, 'sep' counts separators
    tf = {}
    tf['unk'] = 0
    tf['sep'] = 0
    for s in X_train + Y_train + X_val + Y_val:
        for c in s:
            if is_chinese(c):
                tf[c] = tf.get(c, 0) + 1
            elif is_chinese_sep(c):
                tf['sep'] = tf.get('sep', 0) + 1
            else:
                tf['unk'] = tf.get('unk', 0) + 1
    # Keep tokens seen at least twice, then build the token <-> id mappings (id 0 is reserved for 'non')
    tf = {i: j for i, j in tf.items() if j >= 2}
    token2id = {key: i + 1 for i, key in enumerate(tf.keys())}
    token2id['non'] = 0
    id2token = ['non'] + list(tf.keys())
Example #5
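A TF-slim multi-clone training script: `dataset_utils.get_dataset` loads the 'train' split, and `clone_fn` wires the network, the softmax cross-entropy loss, and a streaming accuracy metric.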
import tensorflow as tf

import arg_parsing
import dataset_utils
import junk_net
from deployment import model_deploy  # assumed import path for the TF-slim deployment helpers
from junk_preprocessing import get_preprocess_image

slim = tf.contrib.slim
args = arg_parsing.parse_args(training=True)

tf.logging.set_verbosity(tf.logging.INFO)
deploy_config = model_deploy.DeploymentConfig(num_clones=args.num_gpus)
split_name = 'train'

with tf.Graph().as_default() as g:
    with tf.device(deploy_config.variables_device()):
        global_step = slim.create_global_step()

    dataset = dataset_utils.get_dataset(split_name, args.data_dir)
    params = dataset_utils.read_meta_params(split_name, args.data_dir)

    num_classes = dataset.num_classes
    network_fn = junk_net.inference

    def clone_fn(batch_queue):
        images, labels = batch_queue.dequeue()
        logits, end_points = network_fn(images, num_classes)
        slim.losses.softmax_cross_entropy(logits, labels)
        predictions = tf.argmax(logits, 1)
        labels = tf.argmax(labels, 1)
        accuracy, update_op = slim.metrics.streaming_accuracy(
            predictions,
            labels,
            metrics_collections=['accuracy'])
Example #6
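Builds train and validation datasets from a file-level split, prints a sample entry, and constructs the model with a non-trainable batch-norm momentum variable.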
LEARNING_RATE = ARGS.learning_rate
#LR_DECAY_STEPS = 10000
#LR_DECAY_RATE = 0.7
INIT_TIMESTAMP = get_timestamp()
LOG_DIR = 'logs'

# Create datasets (.map() after .batch() since the mapping function is lightweight)
print('Creating train and val datasets...')
TRAIN_FILES, VAL_FILES = train_val_split()
#TEST_FILES = glob('ModelNet40/*/test/*.npy')   # only used to get length for comparison
print('Number of training samples:', len(TRAIN_FILES))
print('Number of validation samples:', len(VAL_FILES))
#print('Number of testing samples:', len(TEST_FILES))

print(TRAIN_FILES.iloc[5])
train_ds = get_dataset(TRAIN_FILES, BATCH_SIZE)
val_ds = get_dataset(VAL_FILES, BATCH_SIZE)
print('Datasets ready!')


# Create model
def get_bn_momentum(step):
    #return min(0.99, 0.5 + 0.0002*step)
    return 0.99


print('Creating model...')
bn_momentum = tf.Variable(get_bn_momentum(0), trainable=False)
model = get_model(bn_momentum=bn_momentum)
print('Model ready!')
model.summary()
Example #7
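Builds the test dataset from the held-out split, defines the particle-class index lookup, and instantiates a categorical-accuracy metric for evaluation.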
PARSER.add_argument('--init_weight',
                    type=str,
                    default=None,
                    help='Path to trained weights')
ARGS = PARSER.parse_args()

# Create test set
BATCH_SIZE = ARGS.batch_size
print('Creating test dataset...')
_, TEST_FILES = train_val_split()
#TEST_FILES = glob('ModelNet40/*/test/*.npy')   # only used to get length for comparison
print('Number of test samples:', len(TEST_FILES))
#print('Number of testing samples:', len(TEST_FILES))

print(TEST_FILES.iloc[10])
test_ds = get_dataset(TEST_FILES, BATCH_SIZE)
print('Dataset ready!')

# Class look-ups
idx_lookup = {
    'proton': 0,
    'pi-': 1,
    'e-': 2,
    # 'k+'
    'mu-': 3,
    'pion0': 4
}

# Instantiate metrics
accuracy = tf.keras.metrics.CategoricalAccuracy()
Example #8
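Loads PPCIFAR10 four times with different transforms (original, permute, normalize, and the training default), re-seeding before each call so the loaders see the same sample order, and defines an `imshow` helper that saves images to disk.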
    original_dataset = {
        'name': 'PPCIFAR10',
        'batch': n_images,
        'transform': 'original'
    }
    permuted_dataset = {
        'name': 'PPCIFAR10',
        'batch': n_images,
        'transform': 'permute'
    }
    normalize_dataset = {
        'name': 'PPCIFAR10',
        'batch': n_images,
        'transform': 'normalize'
    }
    training_dataset = {'name': 'PPCIFAR10', 'batch': n_images}
    torch.manual_seed(0)
    orig_train_loader, orig_test_loader = dataset_utils.get_dataset(
        original_dataset)
    torch.manual_seed(0)
    perm_train_loader, perm_test_loader = dataset_utils.get_dataset(
        permuted_dataset)
    torch.manual_seed(0)
    norm_train_loader, norm_test_loader = dataset_utils.get_dataset(
        normalize_dataset)
    torch.manual_seed(0)
    train_train_loader, train_test_loader = dataset_utils.get_dataset(
        training_dataset)

    def imshow(img, name):
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        # plt.show()
        plt.savefig(name, bbox_inches='tight')
Example #9
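The same visualization setup with the alternative transforms commented out; only the default training loaders are created before plotting.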
perm_path = 'T5.2'
method = 'butterfly'

if __name__ == '__main__':
    # original_dataset = {'name': 'PPCIFAR10', 'batch': 8, 'transform': 'original'}
    # permuted_dataset = {'name': 'PPCIFAR10', 'batch': 8, 'transform': 'permute'}
    # normalize_dataset = {'name': 'PPCIFAR10', 'batch': 8, 'transform': 'normalize'}
    training_dataset = {'name': 'PPCIFAR10', 'batch': 128}
    # torch.manual_seed(0)
    # orig_train_loader, orig_test_loader = dataset_utils.get_dataset(original_dataset)
    # torch.manual_seed(0)
    # perm_train_loader, perm_test_loader = dataset_utils.get_dataset(permuted_dataset)
    # torch.manual_seed(0)
    # norm_train_loader, norm_test_loader = dataset_utils.get_dataset(normalize_dataset)
    torch.manual_seed(0)
    train_train_loader, train_test_loader = dataset_utils.get_dataset(
        training_dataset)

    def imshow(img, name):
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        # plt.show()
        plt.savefig(name, bbox_inches='tight')

    # get some random training images
    # torch.manual_seed(0)
    # dataiter = iter(orig_train_loader)
    # orig_images, labels = dataiter.next()
    # torch.manual_seed(0)
    # dataiter = iter(perm_train_loader)
    # perm_images, _ = dataiter.next()
    # torch.manual_seed(0)