def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("--num_epochs", default=40, type=int)
    parser.add_argument("--top_words", default=35000, type=int)
    parser.add_argument("--max_sequence_length", default=500, type=int)
    parser.add_argument("--batch_size", default=256, type=int)
    parser.add_argument("--polyaxon_env", default=0, type=int)

    arguments = parser.parse_args().__dict__
    num_epochs = arguments.pop("num_epochs")
    top_words = arguments.pop("top_words")
    max_sequence_length = arguments.pop("max_sequence_length")
    batch_size = arguments.pop("batch_size")
    polyaxon_env = arguments.pop("polyaxon_env")

    if polyaxon_env:
        experiment = Experiment()
        data_path = get_data_paths()["data-local"]
    else:
        data_path = "/data"

    np.random.seed(7)
    bbc_data_dir = data_path + "/bbc-topic-classification/bbc_data/"
    glove_embedding_dir = (data_path +
                           "/bbc-topic-classification/glove.6B.300d.txt")

    data = load_dataset(bbc_data_dir)
    glove_embeddings = load_glove_embeddings(glove_embedding_dir)

    preprocessing_pipeline = create_preprocessing_pipeline(
        top_words, max_sequence_length)

    train, test = train_test_split(data, test_size=0.25)
    X_train = preprocessing_pipeline.fit_transform(train.text)
    y_train = train["class"].values

    embedding_matrix = create_embedding_matrix(glove_embeddings,
                                               preprocessing_pipeline)
    model = create_model(embedding_matrix)
    model.fit(X_train,
              y_train,
              epochs=num_epochs,
              batch_size=batch_size,
              shuffle=True)

    model.save("model.h5")
    joblib.dump(preprocessing_pipeline, "preprocessing_pipeline.pkl")

    X_test = preprocessing_pipeline.transform(test.text)
    y_test = test["class"].values
    metrics = model.evaluate(X_test, y_test)

    if polyaxon_env:
        experiment.outputs_store.upload_file("model.h5")
        experiment.outputs_store.upload_file("preprocessing_pipeline.pkl")
        experiment.log_metrics(loss=metrics[0], accuracy=metrics[1])
    else:
        print("loss: {}, accuracy: {}".format(metrics[0], metrics[1]))
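A minimal sketch of the imports the script above assumes (the tracking import matches the other examples on this page; load_dataset, load_glove_embeddings, create_preprocessing_pipeline, create_embedding_matrix and create_model are project-local helpers not shown here):

import argparse

import joblib
import numpy as np
from sklearn.model_selection import train_test_split

# Polyaxon v0.x tracking client, as used throughout these examples
from polyaxon_client.tracking import Experiment, get_data_paths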
Example #2
def main(argv=sys.argv[1:]):

    # Polyaxon experiment
    experiment = Experiment()

    argv.extend(['-f', get_outputs_path()])

    cartpole_client.main(argv)

    experiment.log_metrics(score=cartpole_client.RESULTS[0]['score'])
Example #3
def train_polyaxon(args):
    # Start polyaxon experiment
    experiment = Experiment()

    # Start training
    cv_roc_auc, test_roc_auc, test_logloss = train(args)

    # Save artifacts
    experiment.outputs_store.upload_file(os.path.join(args.model_dir, "model.pkl"))
    experiment.log_metrics(
        test_roc_auc=test_roc_auc, test_logloss=test_logloss, cv_roc_auc=cv_roc_auc
    )
Example #4
class Params:
    """
    Description
    ----
    This enables the code to use Polyaxon.
    """
    # This is to load the params from a file
    input_thread = threading.Thread(target=get_file_inputs, args=(), daemon=True)
    input_thread.start()
    print("Fetching inputs", end=" ... -> ")
    time.sleep(10)
    print("done.")


    temporal_context = 0
    last_interval = None

    # polyaxon params
    experiment = Experiment()

    plx = pp.get_parameters()
    param_utils.set_params(plx)
    param_utils.check_params(plx)

    # output paths
    file_path_mdl = define_prepare_mdl_path(plx)
    logdir_tb = define_prepare_tb_path()
Example #5
def run_experiment(data_path, glove_path):
    log_level = get_log_level()
    if not log_level:
        log_level = logging.INFO

    logger.info("Starting experiment")

    experiment = Experiment()
    logging.basicConfig(level=log_level)

    logging.info("Loading data")
    dpl = Datapipeline(data_path=data_path)
    dpl.transform()
    train, val = dpl.split_data()
    logging.info("Data loaded")
    model = twitter_model(glove_path=glove_path)
    model.build_model(train.values)
    model.get_train_data(train.values)
    output_model = model.train()

    filepath = os.path.join(get_outputs_path(), "trump_bot.h5")

    # metrics = model.train(params)
    #
    # experiment.log_metrics(**metrics)
    # save model
    output_model.save(filepath)

    logger.info("Experiment completed")
Example #6
    def __init__(self):
        try:
            from polyaxon_client.tracking import Experiment
        except ImportError:
            raise RuntimeError("This contrib module requires polyaxon-client to be installed. "
                               "Please install it with command: \n pip install polyaxon-client")

        self.experiment = Experiment()
Example #7
    def __init__(self, *args: Any, **kwargs: Any):
        try:
            from polyaxon.tracking import Run

            self.experiment = Run(*args, **kwargs)

        except ImportError:
            try:
                from polyaxon_client.tracking import Experiment

                self.experiment = Experiment(*args, **kwargs)
            except ImportError:
                raise RuntimeError(
                    "This contrib module requires polyaxon to be installed.\n"
                    "For Polyaxon v1.x please install it with command: \n pip install polyaxon\n"
                    "For Polyaxon v0.x please install it with command: \n pip install polyaxon-client"
                )
Example #8
def run_experiment(params):
    try:
        log_level = get_log_level()
        if not log_level:
            log_level = logging.INFO

        logger.info("Starting experiment")

        experiment = Experiment()
        logging.basicConfig(level=log_level)
        
        metrics = model.train(params)
        
        experiment.log_metrics(**metrics)

        logger.info("Experiment completed")
    except Exception as e:
        logger.error(f"Experiment failed: {str(e)}")
Example #9
    def __init__(self,
                 learn,
                 experiment=None,
                 monitor='val_loss',
                 mode='auto'):
        super(PolyaxonFastai, self).__init__(learn, monitor=monitor, mode=mode)
        self.experiment = experiment
        if settings.IS_MANAGED:
            self.experiment = self.experiment or Experiment()
Example #10
    def create_experiment(self,
                          name=None,
                          framework=None,
                          tags=None,
                          description=None,
                          config=None):
        experiment = Experiment(project=self.project,
                                group_id=self.group_id,
                                client=self.client,
                                track_logs=self.track_logs,
                                track_code=self.track_code,
                                track_env=self.track_env,
                                outputs_store=self.outputs_store)
        experiment.create(name=name,
                          framework=framework,
                          tags=tags,
                          description=description,
                          config=config,
                          base_outputs_path=self.base_outputs_path)
        return experiment
Example #11
    def __init__(self,
                 tensors,
                 experiment=None,
                 every_n_iter=None,
                 every_n_secs=None):
        super(PolyaxonLoggingTensorHook,
              self).__init__(tensors=tensors,
                             every_n_iter=every_n_iter,
                             every_n_secs=every_n_secs)
        self.experiment = experiment
        if settings.IS_MANAGED:
            self.experiment = self.experiment or Experiment()
Example #12
def main(args):
    """ Runs dataLayer processing scripts to turn raw dataLayer from (../raw) into
        cleaned dataLayer ready to be analyzed (saved in ../processed).
    """
    ## Talk to Rune about how dataLayer is handled.
    config = TrainingConfig()
    config = update_config(args, config)
    ## For polyaxon

    if config.run_polyaxon:
        input_root_path = Path(get_data_paths()['data'])  #'data'
        output_root_path = Path(get_outputs_path())
        inpainting_data_path = input_root_path / 'inpainting'
        os.environ['TORCH_HOME'] = str(input_root_path / 'pytorch_cache')
        config.data_path = inpainting_data_path
        config.output_path = output_root_path
        config.polyaxon_experiment = Experiment()
        # note: a leading '/' on the right operand would make pathlib discard input_root_path
        pathToData = str(input_root_path / 'workspace/data_landset8/testImages')
    else:
        pathToData = Path(r"C:\Users\Morten From\PycharmProjects\testDAta")

    logger = logging.getLogger(__name__)
    logger.info('making final dataLayer set from raw dataLayer')
    logger.info(pathToData)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    B_size = 1
    beta_test_path_list = glob(str(pathToData) + "/*/")
    ImageDict = get_dataset(beta_test_path_list, batch_size=B_size)
    train = ImageDict['train_dataloader']
    test = ImageDict['test_dataloader']

    genPath = r'C:\Users\Morten From\PycharmProjects\Speciale\Master_Satelite_Image_Inpainting\models\New_400.pth'
    outputPathImages = Path(
        r'C:\Users\Morten From\PycharmProjects\Speciale\Master_Satelite_Image_Inpainting\images'
    )
    testGen = UnetGenerator(3, 3, 8)
    testGen.load_state_dict(torch.load(genPath))
    testGen = testGen.to(device)

    testGen.eval()
    iteration = 0
    for real, SAR in tqdm(train, position=0, leave=True, disable=True):
        batchOfImages = real.to(device)
        batchOfImagesSAR = SAR.to(device)
        outputs = testGen(batchOfImagesSAR)
        modelHelper.save_tensor_batchSAR(
            batchOfImages, batchOfImagesSAR, outputs, B_size,
            Path.joinpath(outputPathImages, 'iter' + str(iteration)))
        iteration += 1
Example #13
def _plx_log_params(params_dict):
    from polyaxon_client.tracking import Experiment

    plx_exp = Experiment()
    plx_exp.log_params(
        **{"pytorch version": torch.__version__, "ignite version": ignite.__version__,}
    )
    plx_exp.log_params(**params_dict)
Example #14
class Params:
    """
    Description
    ----
    This enables the code to use Winslow. Most of this is copied from Params (for Polyaxon).
    """
    # This is to load the params from a file
    input_thread = threading.Thread(target=get_file_inputs,
                                    args=(),
                                    daemon=True)
    input_thread.start()
    print("Fetching inputs", end=" ... -> ")
    time.sleep(10)
    print("done.")

    temporal_context = 0
    last_interval = None

    # polyaxon params
    experiment = Experiment()

    plx = pp.get_parameters()
    param_utils.set_params(plx)
    param_utils.check_params(plx)

    # if the environment is within winslow
    if 'WINSLOW_PIPELINE_NAME' in os.environ:
        # output paths
        log_dir_mdl = "/workspace/mdl_chkpts/"
        if not os.path.exists(log_dir_mdl):
            os.mkdir(log_dir_mdl)
            print("Directory ", log_dir_mdl, " Created ")
        else:
            print("Directory ", log_dir_mdl, " already exists")
        file_path_mdl = "/workspace/mdl_chkpts/" + plx.get(
            'mdl_architecture') + '_' + plx.get('eng_kind') + ".hdf5"

        logdir_tb = "/workspace/tf_logs/scalars" + datetime.now().strftime(
            "%Y%m%d-%H%M%S")

        file_path_raw_mdl = "/workspace/mdl_chkpts/" + plx.get(
            'mdl_architecture') + '_' + 'untrained' + ".hdf5"

    else:
        # output paths
        file_path_mdl, file_path_raw_mdl = define_prepare_mdl_path(plx)
        logdir_tb = define_prepare_tb_path()
Example #15
    def _run(self, config):

        logger = config.get_logger('train')

        # setup data_loader instances
        data_loader = config.init_obj('data_loader', module_data)
        valid_data_loader = data_loader.split_validation()

        # build model architecture, then print to console
        model = config.init_obj('arch', module_arch)
        logger.info(model)

        # get function handles of loss and metrics
        criterion = getattr(module_loss, config['loss'])
        metrics = [getattr(module_metric, met) for met in config['metrics']]

        # build optimizer and learning rate scheduler; delete every line containing lr_scheduler to disable the scheduler
        trainable_params = filter(lambda p: p.requires_grad,
                                  model.parameters())
        optimizer = config.init_obj('optimizer', torch.optim, trainable_params)

        lr_scheduler = config.init_obj('lr_scheduler',
                                       torch.optim.lr_scheduler, optimizer)

        experiment = Experiment()
        experiment.set_name(config['name'])

        description = [
            config['trainer']['epochs'], config['arch']['type'],
            config['optimizer']['type'], config['optimizer']['args']['lr'],
            config['loss']
        ]

        description = "Epochs {0}: Arch: {1} Optimizer: {2} lr: {3} Loss: {4}".format(
            *description)
        experiment.set_description(description)

        if 'type' in config['trainer'].keys():
            trainer_name = config['trainer']['type']
        else:
            trainer_name = "Trainer"

        trainer = getattr(trainers_module, trainer_name)
        trainer = trainer(model,
                          criterion,
                          metrics,
                          optimizer,
                          config=config,
                          data_loader=data_loader,
                          valid_data_loader=valid_data_loader,
                          lr_scheduler=lr_scheduler,
                          experiment=experiment)
        trainer.train()
Example #16
def run_experiment(data_path, model_name, params):
    try:
        log_level = get_log_level()
        if not log_level:
            log_level = logging.INFO

        logger.info("Starting experiment")

        experiment = Experiment()
        logging.basicConfig(level=log_level)

        # initiate model class
        model = Model(model_name)
        logger.info(f'{model_name} ok')

        # get data
        refs = model.get_data(data_path, **params)
        logger.info('data ok')

        # train model
        model.model.train()
        logger.info('model trained')

        # get pred
        preds = refs.apply(lambda x: model.model.predict(x))
        logger.info('preds ok')

        # eval
        precision, recall, f1 = model.model.eval(preds, refs)
        logger.info('eval ok')

        print(f'Precision: {precision}')
        print(f'Recall: {recall}')
        print(f'F1: {f1}')

        experiment.log_metrics(precision=precision, recall=recall, f1=f1)

        logger.info("Experiment completed")
    except Exception as e:
        logger.error(f"Experiment failed: {str(e)}")
Example #17
def run(config, logger=None, local_rank=0, **kwargs):

    assert torch.cuda.is_available(), torch.cuda.is_available()
    assert (torch.backends.cudnn.enabled
            ), "Nvidia/Amp requires cudnn backend to be enabled."

    dist.init_process_group("nccl", init_method="env://")

    # As we passed config with option --manual_config_load
    assert hasattr(config, "setup"), (
        "We need to manually setup the configuration, please set --manual_config_load "
        "to py_config_runner")

    config = config.setup()

    assert_config(config, TRAINVAL_CONFIG)
    # The following attributes are automatically added by py_config_runner
    assert hasattr(config, "config_filepath") and isinstance(
        config.config_filepath, Path)
    assert hasattr(config, "script_filepath") and isinstance(
        config.script_filepath, Path)

    config.output_path = Path(get_outputs_path())

    if dist.get_rank() == 0:
        plx_exp = Experiment()
        plx_exp.log_params(
            **{
                "pytorch version": torch.__version__,
                "ignite version": ignite.__version__,
            })
        plx_exp.log_params(**get_params(config, TRAINVAL_CONFIG))

    try:
        training(
            config,
            local_rank=local_rank,
            with_mlflow_logging=False,
            with_plx_logging=True,
        )
    except KeyboardInterrupt:
        logger.info("Caught KeyboardInterrupt -> exit")
    except Exception as e:  # noqa
        logger.exception("")
        dist.destroy_process_group()
        raise e

    dist.destroy_process_group()
Example #18
        default=-3
    )
    parser.add_argument(
        '--batch_size',
        type=int,
        default=100
    )
    parser.add_argument(
        '--epochs',
        type=int,
        default=1
    )
    args = parser.parse_args()

    # Polyaxon
    experiment = Experiment('mnist')
    experiment.create(framework='tensorflow', tags=['examples'])
    experiment.log_params(
        conv1_size=args.conv1_size,
        conv1_out=args.conv1_out,
        conv1_activation=args.conv1_activation,
        pool1_size=args.pool1_size,
        conv2_size=args.conv2_size,
        conv2_out=args.conv2_out,
        conv2_activation=args.conv2_activation,
        pool2_size=args.pool2_size,
        fc1_activation=args.fc1_activation,
        fc1_size=args.fc1_size,
        optimizer=args.optimizer,
        log_learning_rate=args.log_learning_rate,
        batch_size=args.batch_size,
Example #19
def _plx_log_artifact(fp):
    from polyaxon_client.tracking import Experiment

    plx_exp = Experiment()
    plx_exp.log_artifact(fp)
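A hedged sketch of how this helper and _plx_log_params from Example #13 would be called together; the parameter values and the checkpoint path are hypothetical:

_plx_log_params({"batch_size": 64, "lr": 1e-3})  # hypothetical hyperparameters
_plx_log_artifact("checkpoints/model.pth")       # hypothetical path; the file must exist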
Example #20
# Polyaxon
from polyaxon_client.tracking import Experiment

from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score
from sklearn import datasets

from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn import metrics
import os

import numpy as np
import pandas as pd

# Polyaxon
experiment = Experiment()

dataset = datasets.load_boston()
# x training features: ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
# 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
x = dataset.data

target = dataset.target
# Reshape the label to shape (?, 1) so it can be used by the dataset split below
y = np.reshape(target, (len(target), 1))

# Split the dataset into test and training sets at a 1:3 ratio
x_train, x_verify, y_train, y_verify = train_test_split(x, y, random_state=1)
'''
x_train shape: (379, 13)
y_train shape: (379, 1)
Example #21
def main():
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for training (default: 1000)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=15, metavar='N',
                        help='number of epochs to train (default: 15)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=42, metavar='S',
                        help='random seed (default: 42)')
    args = parser.parse_args()

    experiment = Experiment()
    logger = logging.getLogger('main')
    logger.setLevel(get_log_level())

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    logger.info('%s', device)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('.', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('.', train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    model_path = os.path.join(get_outputs_path(), 'model.p')
    state_path = os.path.join(get_outputs_path(), 'state.json')

    start = 1

    if os.path.isfile(model_path):
        model.load_state_dict(torch.load(model_path))
        logger.info('%s', 'Model Loaded')
    if os.path.isfile(state_path):
        with open(state_path, 'r') as f:
            data = json.load(f)
            start = data['epoch']
        logger.info('%s', 'State Loaded')

    optimizer = optim.SGD(model.parameters(), lr=args.lr)

    with SummaryWriter(log_dir=get_outputs_path()) as writer:
        for epoch in range(start, args.epochs + 1):
            train(epoch, writer, experiment, args, model, device, train_loader, optimizer)
            test(epoch, writer, experiment, args, model, device, test_loader)
            torch.save(model.state_dict(), model_path)
            with open(state_path, 'w') as f:
                data = {
                    'epoch' : epoch
                }
                json.dump(data, f)
Example #22
y = iris.target

h = .02  # step size in the mesh

# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    y_model = clf.predict(X)
    model_accuracy = accuracy_score(y, y_model)

    experiment = Experiment()
    experiment.create()
    experiment.log_metrics(model_accuracy=model_accuracy)
    experiment.log_params(weights=weights, n_neighbors=n_neighbors)

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
Example #23
def main(config):
    logging.basicConfig(level=logging.INFO)
    logging.info("STARTING PROGRAM")

    if config.TRAIN.POLYAXON:
        from polyaxon_client.tracking import Experiment, get_data_paths, get_outputs_path
        data_dir = get_data_paths()
        config.DATASET.OUTPUT_PATH = get_outputs_path()
        config.DATASET.PATH = os.path.join(data_dir['data1'],
                                           config.DATASET.PATH_NAS)
        model_path = os.path.join(data_dir['data1'],
                                  config.MODEL.PRETRAINED_NAS)

        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        logger.addHandler(
            logging.FileHandler(
                os.path.join(config.DATASET.OUTPUT_PATH,
                             'Heatmaps_from_human_joints.log')))

        # Polyaxon
        experiment = Experiment()

    else:
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
        logger.addHandler(
            logging.FileHandler(
                os.path.join(config.DATASET.OUTPUT_PATH,
                             'Heatmaps_Resnet101.log')))
        model_path = config.MODEL.PRETRAINED

    trainloader, valloader = utils.load_split_train_val(
        config.DATASET.PATH, "train", "validation", config)

    print('batch size', config.TRAIN.BATCH_SIZE)
    print('dataset', config.DATASET.PATH_NAS)
    print("weights", config.TRAIN.UPDATE_WEIGHTS)
    print("Model: ", model_path)
    print("LR: ", config.TRAIN.LR)
    model = utils.model_pose_resnet.get_pose_net(model_path, is_train=True)

    model.eval()

    for name, parameter in model.named_parameters():
        parameter.requires_grad = config.TRAIN.UPDATE_WEIGHTS
        if "deconv" in name or "final" in name:
            parameter.requires_grad = True

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    optimizer = optim.Adam(model.parameters(), lr=config.TRAIN.LR)
    model.to(device)

    # Decay LR by a factor of 0.1 every 3 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.01)

    writer = SummaryWriter(config.DATASET.OUTPUT_PATH)
    best_acc = 0

    for epoch in range(config.TRAIN.END_EPOCH):
        criterion = nn.MSELoss()
        logger.info('Epoch {}/{}'.format(epoch, config.TRAIN.END_EPOCH - 1))
        logger.info('-' * 10)
        acc = utils.AverageMeter()
        batch_loss = utils.AverageMeter()

        for i, (inputs, labels) in enumerate(trainloader):

            inputs, labels = inputs.to(device), labels.to(device)

            # print(summary(model, tuple(inputs.size())[1:]))
            logps = model(inputs)

            loss = criterion(logps, labels.float())
            batch_loss.update(loss.item(), inputs.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            _, avg_acc, cnt, pred, target, dists = utils.accuracy(
                logps.detach().cpu().numpy(),
                labels.detach().cpu().numpy(),
                thr=config.TRAIN.THRESHOLD)
            print("Current batch accuracy: ", avg_acc)
            acc.update(avg_acc, cnt)
            print("Batch {} train accuracy: {}, loss: {}".format(
                i, acc.avg, batch_loss.avg))
        writer.add_scalar('Loss/train', float(batch_loss.avg), epoch)

        val_acc = run_val(model, valloader, device, criterion, writer, epoch,
                          config)

        logger.info(
            'Train Loss: {:.4f} Train Acc: {:.4f} Val Acc: {:.4f}'.format(
                batch_loss.avg, acc.avg, val_acc))

        if val_acc > best_acc:
            best_acc = val_acc
            logging.info("best val at epoch: " + str(epoch))
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': batch_loss.avg,
                }, os.path.join(config.DATASET.OUTPUT_PATH, "best_model.pt"))

        if epoch % 250 == 0:
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': batch_loss.avg,
                },
                os.path.join(config.DATASET.OUTPUT_PATH,
                             "model" + str(epoch) + ".pt"))

    logger.info('Best val Acc: {:4f}'.format(best_acc))
Example #24
    parser.add_argument('--skip_top',
                        type=int,
                        default=30,
                        help='Top occurring words to skip')
    parser.add_argument('--maxlen', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_nodes', type=int, default=8)
    parser.add_argument('--optimizer', type=str, default='adam')
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--dropout', type=float, default=0.8)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--seed', type=int, default=234)
    args = parser.parse_args()

    # Polyaxon
    experiment = Experiment('bidirectional-lstm')
    experiment.create(framework='keras', tags=['examples'])
    experiment.log_params(max_features=args.max_features,
                          skip_top=args.skip_top,
                          maxlen=args.maxlen,
                          batch_size=args.batch_size,
                          num_nodes=args.num_nodes,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          dropout=args.dropout,
                          epochs=args.epochs,
                          seed=args.seed)

    logger.info('Loading data...')
    (x_train, y_train), (x_test,
                         y_test) = imdb.load_data(num_words=args.max_features,
Example #25
def testClassifier(classifier, scaled_test_x, test_y, test_ids):
    test_y_pred = classifier.predict_classes(scaled_test_x)
    prediction = dict(zip(test_ids, test_y_pred.flatten()))
    reality = dict(zip(test_ids, test_y))
    return prediction, reality


# Run it now
experiment = Experiment()

# 0. Read Args
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('--cluster', default='no cluster given', type=str)

    parser.add_argument('--batch_size', default=128, type=int)

    parser.add_argument('--learning_rate', default=0.02, type=float)

    parser.add_argument('--dropout', default=0.2, type=float)

    parser.add_argument('--num_epochs', default=10, type=int)
Example #26
                        type=int,
                        default=2000,
                        help='The maximum number of features.')
    parser.add_argument('--max_df',
                        type=float,
                        default=1.0,
                        help='the maximum document frequency.')
    parser.add_argument(
        '--C',
        type=float,
        default=1.0,
        help='Inverse of regularization strength of LogisticRegression')
    args = parser.parse_args()

    # Polyaxon
    experiment = Experiment(project='newsgroup')
    experiment.create()
    experiment.log_params(ngram_range=(args.ngram, args.ngram),
                          max_features=args.max_features,
                          max_df=args.max_df,
                          C=args.C)

    # Train and eval the model with given parameters.
    # Polyaxon
    metrics = train_and_eval(ngram_range=(args.ngram, args.ngram),
                             max_features=args.max_features,
                             max_df=args.max_df,
                             C=args.C)

    # Logging metrics
    print("Testing metrics: {}".format(metrics))
Example #27
    # ==================================================================================================#
    # PATHS SETUP                                                                                       #
    # ==================================================================================================#
    run_name = args.run_name

    if cluster:
        data_paths = get_data_paths()
        patient_path = data_paths[
            'data1'] + "/HHase_Robotic_RL/NAS_Sacrum_Scans/Patient_files/"
        patient_data_path = data_paths[
            'data1'] + "/HHase_Robotic_RL/NAS_Sacrum_Scans/"
        load_model_path = data_paths[
            'data1'] + "/HHase_Robotic_RL/Models/model_best.pth"
        output_path = get_outputs_path()
        tensorboard_path = get_outputs_path()
        experiment = Experiment()
    else:
        patient_path = "./../Data/Patient_files/"
        patient_data_path = "./../Data/"
        output_path = './'
        tensorboard_path = './runs/'
        load_model_path = "./../Data/pretrained_model/model_best.pth"
        model_save_path = output_path + "/models/{}.pt".format(run_name)

    #load_model_path = output_path + "/models/{}.pt".format(run_name)
    now = datetime.now()
    tensorboard_name = 'Nov' + now.strftime(
        "%d_%H-%M-%S") + '_Rachet-' + run_name

    # ==================================================================================================#
    # PARAMETER INITIALIZATION                                                                          #
Example #28
    Fr = FR / Z

    P = Ca / (Ca + Fa)
    R = Ca / (Ca + Fr)
    SA = Ca + Cr
    F = (2 * P * R) / (P + R)

    RCa = Ca / (Fr + Ca)
    RFa = Fa / (Cr + Fa)

    D = IncorrectRejectionRate / CorrectRejectionRate
    Da = RCa / RFa
    Df = math.sqrt(Da * D)
    return Df

experiment = Experiment()

# 0. Read Args
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    
    parser.add_argument(
        '--batch_size',
        default=128,
        type=int)

    parser.add_argument(
        '--learning_rate',
        default=0.001,
        type=float)
    
Example #29
    def __init__(self):
        self.experiment = Experiment()
Example #30
def polyaxon_checkpoint_fn(lightning_module):
    from polyaxon_client.tracking import Experiment
    exp = Experiment()
    exp.outputs_store.upload_dir(lightning_module.config.save_path)
    exp.outputs_store.upload_dir('lightning_logs')
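
Taken together, the examples above share one tracking pattern: create an Experiment, log parameters, log metrics, and upload artifacts. A minimal sketch, assuming polyaxon_client v0.x and a hypothetical train() that returns a loss/accuracy pair:

from polyaxon_client.tracking import Experiment

experiment = Experiment()
experiment.log_params(batch_size=128, learning_rate=0.01)  # hypothetical hyperparameters
loss, accuracy = train()  # hypothetical training routine
experiment.log_metrics(loss=loss, accuracy=accuracy)
experiment.outputs_store.upload_file("model.h5")  # upload an artifact, as in Example #1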