Example #1
 def __init__(self, config):
     # Configurations
     self.config = config
     self.tables_config = self.config.get("tables", dict())
     # Sleeper configurations
     self.sleeper_base_url = self.config.get("sleeper_base_url")
     # ESPN configurations
     self.espn_start_year = self.config.get("espn_start_year", dict())
     self.espn_end_year = self.config.get("espn_end_year", dict())
     # Command line arguments
     self.args = parse_args(self.config["args"])
     self.league_name = self.args["league_name"]
     self.sleeper_season_id = self.args["sleeper_season_id"]
     self.gcs_bucket = self.args["gcs_bucket"]
     self.gbq_project = self.args["gbq_project"]
     self.gbq_dataset = self.args["gbq_dataset"]
     self.espn_league_id = self.args.get("espn_league_id")
     self.espn_s2 = self.args.get("espn_s2")
     self.espn_swid = self.args.get("espn_swid")
     self.logger = get_logger("Fantasy Football Stats",
                              level=self.args["log_level"])
     # Create GCP clients
     self.gcs_client = storage.Client()
     self.gbq_client = bigquery.Client()
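
For reference, the constructor above reads everything from one config dict plus command-line arguments forwarded to parse_args. The sketch below is purely illustrative: the key names come from the code, while every value (and the exact format expected under "args") is an assumption.

# Hypothetical config for the constructor above; values are illustrative only.
config = {
    "tables": {},                                   # per-table settings
    "sleeper_base_url": "https://api.sleeper.app/v1",
    "espn_start_year": 2018,
    "espn_end_year": 2023,
    "args": [                                       # forwarded to parse_args
        "--league_name", "my-league",
        "--sleeper_season_id", "123456789",
        "--gcs_bucket", "my-bucket",
        "--gbq_project", "my-project",
        "--gbq_dataset", "my_dataset",
        "--log_level", "INFO",
    ],
}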
Example #2
def main():
    args = parser.parse_args()
    args, logging, writer = utils.parse_args(args)

    logging.info('# Start Re-training #')

    criterion = LOSS_FACTORY[args.task](args, args.loss_scaling)

    if args.model_type == "stochastic":
        model_temp = STOCHASTIC_FACTORY[args.model]
    else:
        raise NotImplementedError("Other models have not been implemented!")
    model = model_temp(args.input_size, args.output_size, args.layers,
                       args.activation, args, True)

    logging.info('## Model created: ##')
    logging.info(model.__repr__())

    logging.info("### Param size = %f MB, Total number of params = %d ###" %
                 utils.count_parameters_in_MB(model, args))

    logging.info('### Loading model to parallel GPUs ###')

    utils.profile(model, args, logging)
    model = utils.model_to_gpus(model, args)

    logging.info('### Preparing schedulers and optimizers ###')
    optimizer = SGLD(model.parameters(),
                     args.learning_rate,
                     norm_sigma=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.epochs)

    logging.info('## Downloading and preparing data ##')
    train_loader, valid_loader = get_train_loaders(args)

    logging.info('## Beginning Training ##')

    train = Trainer(model, criterion, optimizer, scheduler, args)

    best_error, train_time, val_time = train.train_loop(
        train_loader, valid_loader, logging, writer)

    logging.info(
        '## Finished training, the best observed validation error: {}, total training time: {}, total validation time: {} ##'
        .format(best_error, timedelta(seconds=train_time),
                timedelta(seconds=val_time)))

    logging.info('## Beginning Plotting ##')
    del model
    with torch.no_grad():
        args.samples = 100
        args.model_path = args.save
        model = model_temp(args.input_size, args.output_size, args.layers,
                           args.activation, args, False)
        model = utils.model_to_gpus(model, args)
        model.eval()
        plot_regression_uncertainty(model, PLT, train_loader, args)
        logging.info('# Finished #')
Example #3
def test_parse_args():
    args = parse_args(sys.argv[1:])
    assert args.mg is not None
    assert args.mins is not None
    assert args.test is not None
    with pytest.raises(AttributeError):
        assert args.bongo is None
Example #4
def main():
    args = utils.parse_args()
    log_file = Path("{}.log".format(datetime.now().strftime('%Y%m%d_%H%M%S')))
    utils.setup_logger(log_path=Path.cwd() / args.log_dir / log_file,
                       log_level=args.log_level)

    ood_data_experiment()
Example #5
 def test_parse_args_with_default_dry_run(self):
     user_input = parse_args([
         self.directory_parameter_key, self.non_default_input_directory,
         self.prefix_parameter_key, self.prefix_name
     ])
     self.validate_parsed_input_content(
         user_input, self.non_default_full_input_directory,
         self.prefix_name, False)
Example #6
 def test_parse_args_with_default_directory(self):
     user_input = parse_args([
         self.prefix_parameter_key, self.prefix_name,
         self.dry_run_parameter_key
     ])
     self.validate_parsed_input_content(user_input,
                                        self.expected_default_directory,
                                        self.prefix_name, True)
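
Examples #5 and #6 exercise a parser with an optional input directory that falls back to a default, a prefix, and a dry-run switch that defaults to False. The sketch below is consistent with that behaviour; the flag strings and defaults are assumptions, not the implementation under test.

import argparse

def parse_args(argv):
    # Hypothetical parser matching the tests above; flag names are assumed.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory', default='input',
                        help='input directory (a default is used when omitted)')
    parser.add_argument('-p', '--prefix', required=True,
                        help='prefix applied to the files')
    parser.add_argument('--dry-run', dest='dry_run', action='store_true',
                        help='report the changes without applying them')
    return parser.parse_args(argv)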
Example #7
def main():
    args = utils.parse_args()
    log_file = Path("{}.log".format(datetime.now().strftime('%Y%m%d_%H%M%S')))
    utils.setup_logger(log_path=Path.cwd() / args.log_dir / log_file,
                       log_level=args.log_level)
    LOGGER.info("Args: {}".format(args))

    train_distilled_network_dirichlet()
    predictions_corrupted_data_dirichlet()
Example #8
def main():
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpuid
    trDL, teDL = load_data(args, stop=True, one_hot=True)
    net = load_model(args.dataset,
                     args.arch,
                     width=args.width,
                     depth=args.depth)
    ct = torch.nn.MSELoss()
    print('# of parameters', num_parameters(net))

    res = scan(net,
               ct,
               trDL,
               teDL,
               args.model,
               verbose=True,
               niters=50,
               nonuniformity=args.nonuniformity)
    with open(args.save_res, 'wb') as f:
        pickle.dump(res, f)
Example #9
import matplotlib.pyplot as plt
import os
import csv
import numpy as np
from src.utils import moving_average, parse_args
from src.trainer import run_maml

# main function
if __name__ == '__main__':

    # parse arguments
    args = parse_args()

    # set up args
    if args.meta_train_k_shot == -1:
        args.meta_train_k_shot = args.k_shot
    if args.meta_train_batch_size == -1:
        args.meta_train_batch_size = args.meta_batch_size

    # number of iterations to plot
    n_iterations = 3000

    # Plot the graphs
    exp_string = ('cls_' + str(args.n_way) +
                  '.mbs_' + str(args.meta_train_batch_size) +
                  '.k_shot_' + str(args.meta_train_k_shot) +
                  '.inner_numstep_' + str(args.num_inner_updates) +
                  '.meta_lr_' + str(args.meta_lr) +
                  '.inner_updatelr_' + str(args.inner_update_lr) +
                  '.learn_inner_update_lr_' + str(args.learn_inner_update_lr) +
                  '.dataset_' + str(args.dataset) +
                  '.mutual_exclusive_' + str(args.mutual_exclusive) +
                  '.metareg_' + str(args.metareg) +
                  '.lambda_' + str(args.metareg_lambda) +
                  '.tau_' + str(args.metareg_eta))
    csv_file = '{}/{}.csv'.format(args.logdir, exp_string)

    legends = [
Example #10
def main():

    args = parse_args()
    config = get_config(args.config)

    o_img_paths = sorted(glob(os.path.join(config['paths']['data'], 'Original/*')))
    f_img_paths = sorted(glob(os.path.join(config['paths']['data'], 'Filtered/*')))

    img_paths = {'original': o_img_paths,
                 'filtered': f_img_paths
                 }

    device = config['inference_params']['device']
    if not torch.cuda.is_available():
        device = 'cpu'
    print(f'Current device is {device}')

    model = pydoc.locate(config['inference_params']['model'])().to(device)
    model = nn.DataParallel(model)
    model_dumps = torch.load(config['paths']['weights'], map_location=device)
    model.load_state_dict(model_dumps['model_state_dict'])
    crop_size = config['inference_params']['crop_size']


    model.eval()
    torch.set_grad_enabled(False)

    if crop_size:
        transforms = Compose([ToFloat(), RandomCrop(crop_size), ToTensor()])
    else:
        transforms = Compose([ToFloat(), ToTensor()])

    dataset = Dataset(paths=img_paths,
                      transform=transforms
                     )

    dataloader = DataLoader(dataset,
                            batch_size=config['data_params']['batch_size'],
                            num_workers=config['data_params']['num_workers'],
                            shuffle=False
                            )
    psnr_scores_model = []
    psnr_scores_ref = []

    tic = time.time()
    for sample in dataloader:
        X = sample['X'].to(device)
        Y = sample['Y'].to(device)
        name = sample['name']

        Y_pred = model(X)

        X = X.detach().cpu().numpy()
        Y = Y.detach().cpu().numpy()
        Y_pred = Y_pred.detach().cpu().numpy()

        psnr_scores_model.append(PSNR(Y, Y_pred))
        psnr_scores_ref.append(PSNR(Y, X))

        # save predicted images
        for idx in range(X.shape[0]):
            cv2.imwrite(os.path.join(config['paths']['results'], name[idx]),
                        Y_pred[idx, 0, :, :] * 255)

    toc = time.time()

    # save PSNR scores to logs
    model_name = config['inference_params']['model_name']
    with open(config['paths']['log'], 'a') as f:
        f.write(f'Model {model_name}\n')
        f.write('='*20 + '\n')
        f.write(f'PSNR (mean ± std) of refs and predicted images: {np.mean(psnr_scores_model)} ± {np.std(psnr_scores_model)}\n')
        f.write(f'PSNR (mean ± std) of refs and noisy images: {np.mean(psnr_scores_ref)} ± {np.std(psnr_scores_ref)}\n')
        f.write(f'Elapsed time: {toc - tic:.2f} s\n')
        f.write('='*50 + '\n'*2)
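
The PSNR helper used above is not shown. A standard peak signal-to-noise ratio in NumPy, assuming images scaled to [0, 1] (consistent with the * 255 applied on write), could look like the sketch below; it is an assumption, not the project's actual implementation.

import numpy as np

def PSNR(y_true, y_pred, data_range=1.0):
    # Standard PSNR definition over the mean squared error.
    mse = np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10(data_range ** 2 / mse)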
Example #11
def main():
    args = parse_args()
    configs = get_config(args.config)
    paths = get_config(args.paths)

    print(f'Configs\n{configs}\n')
    print(f'Paths\n{paths}\n')

    ####### DATA ######
    train_loader, val_loader = make_ct_datasets(configs, paths)

    ####### MODEL ######
    model = pydoc.locate(configs['train_params']['model'])()
    model_name = configs['train_params']['model_name']

    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    model.to(device)
    print(f'Current device: {device}')

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
        print(f'Number of CUDA devices: {torch.cuda.device_count()}')

    try:
        pretrained = configs['train_params']['pretrained']
        if pretrained:
            model_dumps = torch.load(configs['train_params']['path_weights'],
                                     map_location=device)
            model.load_state_dict(model_dumps['model_state_dict'])
            print(
                f'Weights loaded from model {configs["train_params"]["path_weights"]}'
            )
    except KeyError:
        print("A parameter wasn't found in the config file")

    ####### OPTIMIZER ######
    optimizer_name = configs['train_params']['optimizer']
    optimizer = pydoc.locate('torch.optim.' + optimizer_name)(
        model.parameters(), **configs['train_params']['optimizer_params'])
    ####### SCHEDULER ######
    scheduler_name = configs['train_params']['scheduler']
    scheduler = pydoc.locate('torch.optim.lr_scheduler.' + scheduler_name)(
        optimizer, **configs['train_params']['scheduler_params'])
    ####### CRITERION ######
    loss = pydoc.locate(configs['train_params']['loss'])()

    ####### TRAINING ######
    max_epoch = int(configs['train_params']['max_epoch'])

    train(model,
          optimizer,
          loss,
          train_loader,
          max_epoch,
          device,
          val_loader,
          scheduler=scheduler,
          weights_path=paths['dumps']['weights'],
          model_name=model_name)
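
Every component here is resolved from the config by name: the model and loss via pydoc.locate, the optimizer and scheduler by dotted paths under torch.optim and torch.optim.lr_scheduler. A hypothetical configs['train_params'] block showing the keys the code reads (all values are assumptions):

# Illustrative train_params section; only the key names are taken from the code.
configs = {
    'train_params': {
        'model': 'src.models.UNet',          # resolved via pydoc.locate
        'model_name': 'unet_baseline',
        'pretrained': False,
        'path_weights': 'dumps/weights/unet_baseline.pth',
        'optimizer': 'Adam',                 # torch.optim.Adam
        'optimizer_params': {'lr': 1e-3},
        'scheduler': 'StepLR',               # torch.optim.lr_scheduler.StepLR
        'scheduler_params': {'step_size': 10, 'gamma': 0.5},
        'loss': 'torch.nn.MSELoss',
        'max_epoch': 50,
    }
}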
Example #12
def train_distilled_network_dirichlet(
        model_dir="models/distilled_model_cifar10_dirichlet"):
    """Distill ensemble with distribution distillation (Dirichlet) """

    args = utils.parse_args()

    log_file = Path("{}.log".format(datetime.now().strftime('%Y%m%d_%H%M%S')))
    utils.setup_logger(log_path=Path.cwd() / args.log_dir / log_file,
                       log_level=args.log_level)

    data_ind = np.load(
        "src/experiments/cifar10/training_files/training_data_indices.npy")
    num_train_points = 40000
    train_ind = data_ind[:num_train_points]
    valid_ind = data_ind[num_train_points:]

    train_data = cifar10_ensemble_pred.Cifar10Data(ind=train_ind,
                                                   augmentation=True)
    valid_data = cifar10_ensemble_pred.Cifar10Data(ind=valid_ind)

    train_loader = torch.utils.data.DataLoader(train_data.set,
                                               batch_size=100,
                                               shuffle=True,
                                               num_workers=0)

    valid_loader = torch.utils.data.DataLoader(valid_data.set,
                                               batch_size=100,
                                               shuffle=True,
                                               num_workers=0)

    test_data = cifar10_ensemble_pred.Cifar10Data(train=False)

    test_loader = torch.utils.data.DataLoader(test_data.set,
                                              batch_size=64,
                                              shuffle=True,
                                              num_workers=0)

    ensemble_size = 10

    # The ensemble predictions are assumed to have been saved to file (see
    # ensemble_predictions.py). ensemble_indices.npy holds the ensemble-member
    # order: ind[:ensemble_size] are the indices of the first ensemble,
    # ind[ensemble_size:2 * ensemble_size] those of the second, and so on.
    ind = np.load(
        "src/experiments/cifar10/training_files/ensemble_indices.npy"
    )[(args.rep - 1) * ensemble_size:args.rep * ensemble_size]
    ensemble = ensemble_wrapper.EnsembleWrapper(output_size=10, indices=ind)

    device = utils.torch_settings(args.seed, args.gpu)
    distilled_model = cifar_resnet_dirichlet.CifarResnetDirichlet(
        ensemble,
        resnet_utils.BasicBlock, [3, 2, 2, 2],
        device=device,
        learning_rate=args.lr)

    loss_metric = metrics.Metric(name="Mean loss",
                                 function=distilled_model.calculate_loss)
    distilled_model.add_metric(loss_metric)

    distilled_model.train(train_loader,
                          num_epochs=args.num_epochs,
                          validation_loader=valid_loader)

    distilled_model.eval_mode()
    predicted_distribution = []
    all_labels = []

    for batch in test_loader:
        inputs, labels = batch
        inputs, labels = inputs[0].to(distilled_model.device), labels.to(
            distilled_model.device)

        predicted_distribution.append(
            distilled_model.predict(inputs).to(distilled_model.device))
        all_labels.append(labels.long())

    test_acc = metrics.accuracy(torch.cat(predicted_distribution),
                                torch.cat(all_labels))
    LOGGER.info("Test accuracy is {}".format(test_acc))

    torch.save(distilled_model.state_dict(), model_dir)
Example #13
        return ResidualFusionGenerator(num_filters=64,
                                       num_blocks=3,
                                       ch=self.ch,
                                       name='residual_fusion_generator',
                                       trainable=trainable)
    
    def build_model(self, model, input_shape, dummy_check=True):
        b,h,w,c = input_shape
        model.build(input_shape=(b, h, w, c+c))
        if dummy_check:
            x = utils.get_dummy_tensor(batch_size=b, height=h, width=w, channels=c+c)
            y = model(x, training=self.training)

if __name__ == '__main__':
    print('TF version: {}'.format(tf.__version__))
    args = utils.parse_args()
    print(args)
    engine_name = os.path.basename(__file__)

    print('Engine {} has started.'.format(engine_name))
    
    print('Engine is initializing...')
    model = Engine(args)
    if model.stage == 'train':
        print('Engine is training...')
        model.train()
    if model.stage == 'apply':
        print('Engine is being applied...')
        model.apply()
    if model.stage == 'to_proto':
        print('Engine is converting weights to PB...')
Example #14
def main():
    change_files_name(parse_args(argv[1:]))
Example #15
def test_parse_args_with_200_360():
    args = parse_args(['200', '360'])
    assert args.mins == 360
Example #16
def test_parse_args_with_200():
    args = parse_args(['200'])
    assert args.mg == 200
Example #17
def test_parse_args_with_t():
    args = parse_args(['-t'])
    assert args.test
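
Taken together, Examples #3 and #15-17 imply a parser with two optional positional integers (mg, then mins) and a -t/--test flag; unknown attributes such as bongo then raise AttributeError on the returned argparse.Namespace. A minimal sketch consistent with those tests; the defaults are assumptions.

import argparse

def parse_args(argv):
    # Hypothetical parser satisfying the tests above; defaults are assumed.
    parser = argparse.ArgumentParser()
    parser.add_argument('mg', nargs='?', type=int, default=100)
    parser.add_argument('mins', nargs='?', type=int, default=60)
    parser.add_argument('-t', '--test', action='store_true')
    return parser.parse_args(argv)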
Example #18
def predictions_dirichlet(
    model_dir="../models/distilled_model_cifar10_dirichlet",
    file_dir="../../dataloaders/data/distilled_model_predictions_dirichlet.h5"
):
    """Make and save predictions on train and test data with distilled model at model_dir"""

    args = utils.parse_args()

    train_data = cifar10_ensemble_pred.Cifar10Data()
    test_data = cifar10_ensemble_pred.Cifar10Data(train=False)

    ensemble = ensemble_wrapper.EnsembleWrapper(output_size=10)

    distilled_model = cifar_resnet_dirichlet.CifarResnetDirichlet(
        ensemble, resnet_utils.BasicBlock, [3, 2, 2, 2], learning_rate=args.lr)

    distilled_model.load_state_dict(
        torch.load(model_dir, map_location=torch.device('cpu')))
    distilled_model.eval_mode()

    data_list = [test_data, train_data]
    labels = ["test", "train"]

    hf = h5py.File(file_dir, 'w')

    for data_set, label in zip(data_list, labels):

        data, pred_samples, alpha, teacher_predictions, targets = \
            [], [], [], [], []

        data_loader = torch.utils.data.DataLoader(data_set.set,
                                                  batch_size=32,
                                                  shuffle=False,
                                                  num_workers=0)

        for batch in data_loader:
            inputs, labels = batch
            img = inputs[0].to(distilled_model.device)
            data.append(img.data.numpy())
            targets.append(labels.data.numpy())
            teacher_predictions.append(inputs[1].data.numpy())

            a, probs = distilled_model.predict(img, return_params=True)
            alpha.append(a.data.numpy())
            pred_samples.append(probs.data.numpy())

        data = np.concatenate(data, axis=0)
        pred_samples = np.concatenate(pred_samples, axis=0)
        teacher_predictions = np.concatenate(teacher_predictions, axis=0)
        targets = np.concatenate(targets, axis=0)
        alpha = np.concatenate(alpha, axis=0)

        preds = np.argmax(np.mean(pred_samples, axis=1), axis=-1)

        # Check accuracy
        acc = np.mean(preds == targets)
        LOGGER.info("Accuracy on {} data set is: {}".format(label, acc))

        # Check accuracy relative teacher
        teacher_preds = np.argmax(np.mean(teacher_predictions, axis=1),
                                  axis=-1)
        rel_acc = np.mean(preds == teacher_preds)
        LOGGER.info("Accuracy on {} data set relative teacher is: {}".format(
            label, rel_acc))

        grp = hf.create_group(label)
        grp.create_dataset("data", data=data)
        grp.create_dataset("predictions", data=pred_samples)
        grp.create_dataset("teacher-predictions", data=teacher_predictions)
        grp.create_dataset("targets", data=targets)
        grp.create_dataset("alpha", data=alpha)

    hf.close()

    return pred_samples
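
The file written above contains one group per split ("test" and "train"), each holding data, predictions, teacher-predictions, targets and alpha datasets. A short read-back sketch using the default file_dir path:

import h5py

with h5py.File(
        "../../dataloaders/data/distilled_model_predictions_dirichlet.h5",
        "r") as hf:
    test_predictions = hf["test"]["predictions"][()]
    test_targets = hf["test"]["targets"][()]
    print(test_predictions.shape, test_targets.shape)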
Example #19
def predictions_corrupted_data_dirichlet(
    model_dir="models/distilled_model_cifar10_dirichlet",
    file_dir="../../dataloaders/data/distilled_model_predictions_corrupted_data_dirichlet.h5"
):
    """Make and save predictions on corrupted data with distilled model at model_dir"""

    args = utils.parse_args()

    # Load model
    ensemble = ensemble_wrapper.EnsembleWrapper(output_size=10)

    distilled_model = cifar_resnet_dirichlet.CifarResnetDirichlet(
        ensemble, resnet_utils.BasicBlock, [3, 2, 2, 2], learning_rate=args.lr)

    distilled_model.load_state_dict(
        torch.load(model_dir,
                   map_location=torch.device(distilled_model.device)))

    distilled_model.eval_mode()

    corruption_list = [
        "test", "brightness", "contrast", "defocus_blur", "elastic_transform",
        "fog", "frost", "gaussian_blur", "gaussian_noise", "glass_blur",
        "impulse_noise", "motion_blur", "pixelate", "saturate", "shot_noise",
        "snow", "spatter", "speckle_noise", "zoom_blur"
    ]

    hf = h5py.File(file_dir, 'w')

    for i, corruption in enumerate(corruption_list):
        corr_grp = hf.create_group(corruption)

        if corruption == "test":
            intensity_list = [0]
        else:
            intensity_list = [1, 2, 3, 4, 5]

        for intensity in intensity_list:
            # Load the data
            data_set = cifar10_corrupted.Cifar10DataCorrupted(
                corruption=corruption, intensity=intensity, data_dir="../../")
            dataloader = torch.utils.data.DataLoader(data_set.set,
                                                     batch_size=100,
                                                     shuffle=False,
                                                     num_workers=0)

            # data = []
            predictions, targets, alpha = [], [], []

            for j, batch in enumerate(dataloader):
                inputs, labels = batch
                targets.append(labels.data.numpy())
                # data.append(inputs.data.numpy())

                inputs, labels = inputs.to(distilled_model.device), labels.to(
                    distilled_model.device)

                a, preds = distilled_model.predict(inputs, return_params=True)
                alpha.append(a.to(torch.device("cpu")).data.numpy())
                predictions.append(preds.to(torch.device("cpu")).data.numpy())

            sub_grp = corr_grp.create_group("intensity_" + str(intensity))

            # data = np.concatenate(data, axis=0)
            # sub_grp.create_dataset("data", data=data)

            predictions = np.concatenate(predictions, axis=0)
            sub_grp.create_dataset("predictions", data=predictions)

            targets = np.concatenate(targets, axis=0)
            sub_grp.create_dataset("targets", data=targets)

            preds = np.argmax(np.mean(predictions, axis=1), axis=-1)

            acc = np.mean(preds == targets)
            LOGGER.info(
                "Accuracy on {} data set with intensity {} is {}".format(
                    corruption, intensity, acc))

            alpha = np.concatenate(alpha, axis=0)
            sub_grp.create_dataset("alpha", data=alpha)

    hf.close()
Example #20
def train():
    """Train function."""
    args = parse_args()

    args.outputs_dir = params['save_model_path']

    if args.group_size > 1:
        init()
        context.set_auto_parallel_context(device_num=get_group_size(), parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True)
        args.outputs_dir = os.path.join(args.outputs_dir, "ckpt_{}/".format(str(get_rank())))
        args.rank = get_rank()
    else:
        args.outputs_dir = os.path.join(args.outputs_dir, "ckpt_0/")
        args.rank = 0

    # without loss_scale
    if args.group_size > 1:
        args.loss_scale = params['loss_scale'] / 2
        args.lr_steps = list(map(int, params["lr_steps_NP"].split(',')))
    else:
        args.loss_scale = params['loss_scale']
        args.lr_steps = list(map(int, params["lr_steps"].split(',')))

    # create network
    print('start create network')
    criterion = openpose_loss()
    criterion.add_flags_recursive(fp32=True)
    network = OpenPoseNet(vggpath=params['vgg_path'])
    # network.add_flags_recursive(fp32=True)

    if params["load_pretrain"]:
        print("load pretrain model:", params["pretrained_model_path"])
        load_model(network, params["pretrained_model_path"])
    train_net = BuildTrainNetwork(network, criterion)

    # create dataset
    if os.path.exists(args.jsonpath_train) and os.path.exists(args.imgpath_train) \
            and os.path.exists(args.maskpath_train):
        print('start create dataset')
    else:
        print('Error: wrong data path')


    num_worker = 20 if args.group_size > 1 else 48
    de_dataset_train = create_dataset(args.jsonpath_train, args.imgpath_train, args.maskpath_train,
                                      batch_size=params['batch_size'],
                                      rank=args.rank,
                                      group_size=args.group_size,
                                      num_worker=num_worker,
                                      multiprocessing=True,
                                      shuffle=True,
                                      repeat_num=1)
    steps_per_epoch = de_dataset_train.get_dataset_size()
    print("steps_per_epoch: ", steps_per_epoch)

    # lr scheduler
    lr_stage, lr_base, lr_vgg = get_lr(params['lr'] * args.group_size,
                                       params['lr_gamma'],
                                       steps_per_epoch,
                                       params["max_epoch_train"],
                                       args.lr_steps,
                                       args.group_size)
    vgg19_base_params = list(filter(lambda x: 'base.vgg_base' in x.name, train_net.trainable_params()))
    base_params = list(filter(lambda x: 'base.conv' in x.name, train_net.trainable_params()))
    stages_params = list(filter(lambda x: 'base' not in x.name, train_net.trainable_params()))

    group_params = [{'params': vgg19_base_params, 'lr': lr_vgg},
                    {'params': base_params, 'lr': lr_base},
                    {'params': stages_params, 'lr': lr_stage}]

    opt = Adam(group_params, loss_scale=args.loss_scale)

    train_net.set_train(True)
    loss_scale_manager = FixedLossScaleManager(args.loss_scale, drop_overflow_update=False)

    model = Model(train_net, optimizer=opt, loss_scale_manager=loss_scale_manager)

    params['ckpt_interval'] = max(steps_per_epoch, params['ckpt_interval'])
    config_ck = CheckpointConfig(save_checkpoint_steps=params['ckpt_interval'],
                                 keep_checkpoint_max=params["keep_checkpoint_max"])
    ckpoint_cb = ModelCheckpoint(prefix='{}'.format(args.rank), directory=args.outputs_dir, config=config_ck)
    time_cb = TimeMonitor(data_size=de_dataset_train.get_dataset_size())
    callback_list = [MyLossMonitor(), time_cb, ckpoint_cb]
    print("============== Starting Training ==============")
    model.train(params["max_epoch_train"], de_dataset_train, callbacks=callback_list,
                dataset_sink_mode=False)
Example #21
from src.utils import parse_args, visualize_embeddings
from src.config import ModelType, MODEL_DICT, LOSS_DICT

def get_model(model_args, train_args, input_shape, device):
    model_args = vars(model_args)
    model_args['loss_type'] = LOSS_DICT[model_args['loss_type']]
    if MODEL_DICT[train_args.model] == ModelType.Triplet:
        model = TripletNet(input_shape=input_shape,
                           lr=train_args.lr,
                           device=device,
                           **model_args)
    else:
        raise NotImplementedError(
            'Unsupported model type: {}'.format(train_args.model))
    return model

if __name__ == "__main__":

    model_args, train_args, data_args = parse_args()

    # data loaders
    train_dataset = MNIST(os.getcwd(), 
                          train=True, 
                          download=True, 
                          transform=Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]))
    train_loader = DataLoader(train_dataset, batch_size=train_args.batch_size, shuffle=True)
    test_dataset = MNIST(os.getcwd(), 
                          train=False, 
                          download=True, 
                          transform=Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]))
    test_loader = DataLoader(test_dataset, batch_size=train_args.batch_size, shuffle=False)

    # Initialize a trainer
    trainer = pl.Trainer(gpus=1, max_epochs=train_args.epochs, progress_bar_refresh_rate=20)
Example #22
"""
Owen Brooks
run.py

This module provides the command-line entry point for the ftp_client module
"""
import json
import sys
import logging

from src.utils import parse_args
from src.ftp_server import Server

log_file, port = parse_args(sys.argv[1:])  # exclude filename

logging.basicConfig(filename=log_file,
                    format="%(asctime)s - %(message)s",
                    level=logging.INFO)
logger = logging.getLogger()

with open('credentials.json', 'r') as f:
    creds = json.load(f)

ftp_client = Server(creds, logger)
exit_msg = ftp_client.run(port)
print(exit_msg)
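
Here parse_args is expected to return a (log_file, port) pair built from the command-line arguments. A minimal hypothetical implementation, with assumed flag names and defaults (the real src.utils.parse_args may differ):

import argparse

def parse_args(argv):
    # Hypothetical helper returning (log_file, port); not the project's code.
    parser = argparse.ArgumentParser(description='Run the FTP server.')
    parser.add_argument('--log-file', default='ftp_server.log',
                        help='path of the log file')
    parser.add_argument('--port', type=int, default=2121,
                        help='port to listen on')
    args = parser.parse_args(argv)
    return args.log_file, args.port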