Example #1
def main() -> None:
    args = get_args()
    config = get_bunch_config_from_json(args.config)

    comet_experiment = Experiment(
        api_key=config.comet_api_key,
        project_name=config.comet_project_name,
        workspace=config.comet_workspace,
        disabled=not config.use_comet_experiments,
    )
    comet_experiment.set_name(config.experiment_name)
    comet_experiment.log_parameters(config)

    test_tweets = load_test_tweets(config.test_data_path)

    client = LanguageServiceClient()
    result = []
    predictions = np.zeros(len(test_tweets), dtype=np.int32)

    for i, tweet in enumerate(test_tweets):
        start_iter_timestamp = time.time()
        document = types.Document(
            type=enums.Document.Type.PLAIN_TEXT, content=tweet, language="en"
        )

        response = client.analyze_sentiment(document=document)
        response_dict = MessageToDict(response)
        result.append(response_dict)

        prediction_present = bool(response_dict.get("documentSentiment"))
        if prediction_present:
            # -1, 1 predictions
            predictions[i] = 2 * (response.document_sentiment.score > 0) - 1

        print("iteration", i, "took:", time.time() - start_iter_timestamp, "seconds")

    comet_experiment.log_asset_data(result, name="google_nlp_api_response.json")

    ids = np.arange(1, len(test_tweets) + 1).astype(np.int32)
    predictions_table = np.column_stack((ids, predictions))

    if comet_experiment.disabled:
        save_path = build_save_path(config)
        os.makedirs(save_path)

        formatted_predictions_table = pd.DataFrame(
            predictions_table, columns=["Id", "Prediction"], dtype=np.int32,
        )
        formatted_predictions_table.to_csv(
            os.path.join(save_path, "google_nlp_api_predictions.csv"), index=False
        )
    else:
        comet_experiment.log_table(
            filename="google_nlp_api_predictions.csv",
            tabular_data=predictions_table,
            headers=["Id", "Prediction"],
        )

    percentage_predicted = np.sum(predictions != 0) / predictions.shape[0]
    comet_experiment.log_metric(name="percentage predicted", value=percentage_predicted)
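
A note on the pattern above: passing disabled=not config.use_comet_experiments turns every Experiment call into a no-op, and the code then branches on comet_experiment.disabled to fall back to local CSV output. A minimal sketch of that toggle, assuming a plain dict in place of the Bunch config (field names are illustrative):

from comet_ml import Experiment

config = {"use_comet_experiments": False, "comet_api_key": "<your_key>"}

experiment = Experiment(
    api_key=config["comet_api_key"],
    disabled=not config["use_comet_experiments"],  # every log_* call becomes a no-op
)

if experiment.disabled:
    print("Comet disabled; writing predictions locally instead")
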
Example #2
class CometTracker:
    def __init__(self, comet_params, run_params=None, prev_exp_id=None):
        if prev_exp_id:  # previous experiment
            api_key = comet_params['api_key']
            # remove api_key so the remaining items can be passed as **comet_params
            del comet_params['api_key']
            self.experiment = ExistingExperiment(
                api_key=api_key,
                previous_experiment=prev_exp_id,
                **comet_params)
            print(
                f'In CometTracker: ExistingExperiment initialized with id: {prev_exp_id}'
            )

        else:  # new experiment
            self.experiment = Experiment(**comet_params)
            self.experiment.log_parameters(run_params)

    def track_metric(self, metric, value, step):
        self.experiment.log_metric(metric, value, step)

    def add_tags(self, tags):
        self.experiment.add_tags(tags)
        print(f'In [add_tags]: Added these tags to the new experiment: {tags}')

    def set_name(self, name):
        self.experiment.set_name(name)
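
Hypothetical usage of the CometTracker above (keys and experiment id are placeholders). Note that the resume branch mutates comet_params with del, so pass a copy if you intend to reuse the dict:

comet_params = {"api_key": "<your_key>", "project_name": "<your_project>"}

tracker = CometTracker(dict(comet_params), run_params={"lr": 1e-3})
tracker.set_name("fresh-run")
tracker.track_metric("loss", 0.42, step=1)

# resume a previous run by its experiment id
resumed = CometTracker(dict(comet_params), prev_exp_id="<previous_experiment_id>")
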
Example #3
def train_autofeature_model(data_path,
                            embedding_dimension,
                            batch_size):
    training_data = load_data(data_path)
    X = training_data.drop(labels=['SK_ID_CURR'], axis=1)

    experiment = Experiment(
        api_key=API_KEY, project_name="home-credit")
    experiment.set_name(
        'home-credit-autofeature-selection')

    model = build_model(X.shape[1], int(embedding_dimension))
    model.summary()
    model.compile(optimizer='adam', loss='mean_squared_logarithmic_error')

    model.fit(X, X,
              epochs=5,
              batch_size=int(batch_size))

    experiment.log_multiple_params(
        {"embedding_dimension": embedding_dimension,
         "batch_size": batch_size})

    model.save(
        'home-credit-encoder-{}-{}.hdf5'.format(
            embedding_dimension, batch_size))
Example #4
def get_experiment_objects():
    args = CLIParser().parse_args()
    comet = Experiment(api_key="<your_key>",
                       project_name="<your_project>",
                       workspace="<your_workspace>",
                       log_code=False,
                       auto_param_logging=False,
                       auto_metric_logging=False,
                       disabled=args.no_comet,
                       display_summary=False)

    if not args.continue_train:
        # else args.save is the directory from which assets are loaded to
        # continue training
        args.save = "{}-{}".format(args.save, time.strftime("%Y%m%d-%H%M%S"))
        create_exp_dir(args)

    comet.set_name(args.save.split('/')[-1])
    comet.log_parameters(vars(args))
    copy_assets(args, comet)

    last_state = None
    if args.continue_train:
        last_state = torch.load(os.path.join(args.save, 'state.pt'))
        continue_random(last_state, args.cuda)
    else:
        init_random(args.seed, args.cuda)

    return args, comet, last_state
Example #5
def train(path):
    name = os.path.splitext(os.path.basename(path))[0]
    print('Processing: ', name)
    features = pd.read_csv(path, index_col=None)
    selected_features_names = [name for name, desc in selected_features]
    features = features[selected_features_names]
    split_idx = 1200
    features = features.drop(['sound.files'], axis=1)
    noise_only_df, df = features.iloc[:split_idx], features.iloc[split_idx:]
    y = df.pop('petrel')
    X = df.values
    y_noise = noise_only_df.pop('petrel')
    X_noise = noise_only_df.values
    X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)
    hyperparams = {
        'n_estimators': [100, 300, 500, 1000],
        'learning_rate': [0.1],
        'gamma': [0.0, 0.5],
        'max_depth': [2, 3, 4],
        'min_child_weight': [1, 2],
        'subsample': [1.0, 0.8],
        'reg_alpha': [0.0, 0.1],
        'reg_lambda': [1, 2, 3]
    }
    #
    # hyperparams = {
    #     'n_estimators': [100],
    #     'learning_rate': [0.1],
    #     'gamma': [0.0],
    #     'max_depth': [2],
    #     'min_child_weight': [1],
    #     'subsample': [1.0],
    #     'reg_alpha': [0.0],
    #     'reg_lambda': [1]
    # }

    clf = model_selection.GridSearchCV(estimator=xg.XGBClassifier(objective='binary:logistic', n_jobs=-1),
                                       param_grid=hyperparams,
                                       cv=4)
    fit_params = clf.fit(X_train, y_train)
    estimator = fit_params.best_estimator_
    joblib.dump(estimator, name + '_model.pkl')

    test_pred = estimator.predict(X_test)
    metrics = calculate_metrics(test_pred, y_test)

    noise_pred = estimator.predict(X_noise)
    noise_detection_accuracy = accuracy_score(y_noise, noise_pred)

    experiment = Experiment(api_key="4PdGdUZmGf6P8QsMa5F2zB4Ui",
                            project_name="storm petrels",
                            workspace="tracewsl")
    experiment.set_name(name)
    experiment.log_parameter('name', name)
    experiment.log_multiple_params(fit_params.best_params_)
    experiment.log_multiple_metrics(metrics)
    experiment.log_metric('Noise detection accuracy', noise_detection_accuracy)
    experiment.log_figure('Confusion matrix', get_confusion_matrix_figure(test_pred, y_test))
    experiment.log_figure('Feature importance', get_feature_importance_figure(estimator, list(df.columns.values)))
Example #6
def main():
    precision = torch.float

    # Parameter and Object declarations
    env_params = {
        "data path": "C:/Users/aaa2cn/Documents/nao_data/",
        "ip": "localhost",
        "port": 52232,
        "score type": "score"  # Aggregate error in pose
    }
    env = env_factory.make_env("nao", "pose assumption", env_params)

    # Make a pool object
    model_params = {
        "precision": precision,
        "weight initialization scheme": "Sparse",
        "grad": False
    }
    model = model_factory.make_model("NAO FC model", model_params)

    # Make an algorithm object
    alg_params = {
        "target": env.target,
        "minimization mode": env.minimize,
        "minimum entropy": 0.1,
        "tolerance": 0.1,
        "max steps": 64,
        "memory size": 10
    }
    alg = algorithm_factory.make_alg("local search", model, alg_params)

    experiment = Experiment(api_key="5xNPTUDWzZVquzn8R9oEFkUaa",
                            project_name="nao",
                            workspace="aromorin")
    experiment.set_name("Pose Assumption virtual")
    hyper_params = {
        "Algorithm": "LS",
        "Parameterization": 35000,
        "Decay Factor": 0.01,
        "Directions": 10,
        "Search Radius": 0.1
    }
    experiment.log_parameters(hyper_params)

    slv_params = {"environment": env, "algorithm": alg, "logger": experiment}
    slv = solver_factory.make_slv("robot", slv_params)
    slv.solve(iterations=5000)

    slv.save_elite_weights(path='', name='pose_assump_virtual')

    # Recreate the target pose
    alg.eval()
    pred = alg.model(env.observation)
    angles = [p.item() for p in pred]
    print("These are the angles: ")
    print(angles)
    env.set_joints(angles)
    env.say("Is this the pose you set for me?")
    env.rest()
Example #7
def experiment_flag(parser_com_dis, parser_name):
    if not bool(parser_com_dis):
        experiment = Experiment(api_key="PUT YOUR COMET API KEY",
                                project_name="spoof19", workspace="WORKSPACE",
                                disabled=bool(parser_com_dis))
        experiment.set_name(parser_name)
        return experiment
    return []
Example #8
def generate_categories():
    # capture the config path from the run arguments then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except ValueError:
        print("Missing or invalid arguments")
        exit(0)

    print("Logging experiment name: {name}".format(
        name=config.experiment.experiment_name))
    experiment = Experiment(api_key=config.experiment.api_key,
                            project_name=config.experiment.project_name,
                            workspace=config.experiment.workspace)
    experiment.set_name(config.experiment.experiment_name)

    print('Creating the data loader...')
    data_loader = DataLoader(config.defects_summarizer.paths)
    train_data, test_data = data_loader.get_data()

    print('Creating the Preprocessor...')
    preprocessor = CorexPreprocessor(train_data, config)
    preprocessor.prepare_data()

    print('Loading and evaluating the Model...')
    model = CorexModel(config.defects_summarizer, preprocessor, seed=False)
    trainer = CorexTrainer(model, preprocessor.get_data())
    trainer.train()
    trainer.generate_topics()
    top_docs_df = trainer.get_top_documents(
        config.defects_summarizer.evaluate.extract_topics,
        preprocessor.get_raw_corpus(),
        config.defects_summarizer.evaluate.extraction_quantile,
        labels=True)
    top_docs_df.to_csv(config.defects_summarizer.paths.save_data_path)

    print('Saving the trained topic model...')
    model.save()

    print('Preprocessing the summarizer...')
    summary_preprocessor = TextRankPreprocessor(
        top_docs_df, n_docs=config.defects_summarizer.evaluate.n_docs)
    summary_preprocessor.prepare_data()

    print('Loading and evaluating the summarizer...')
    summary_model = TextRankModel(config)
    summary_trainer = TextRankTrainer(summary_model, summary_preprocessor)
    avg_prec, avg_recall, avg_f1 = summary_trainer.train_and_evaluate(
        test_data)

    # Log the rest of the experiment
    metrics = {"precision": avg_prec, "recall": avg_recall, "f1": avg_f1}
    experiment.log_metrics(metrics)

    experiment.log_model(
        name=config.experiment.model_name,
        file_or_folder=config.labels_generator.paths.save_model_path)
Example #9
def main() -> None:
    args = get_args()
    config = get_bunch_config_from_json(args.config)

    comet_experiment = Experiment(
        api_key=config.comet_api_key,
        project_name=config.comet_project_name,
        workspace=config.comet_workspace,
        disabled=not config.use_comet_experiments,
    )
    comet_experiment.set_name(config.experiment_name)
    comet_experiment.log_parameters(config)

    if config.model == "randomforest":
        classifier = GloveEmbeddingsClassifier(
            RandomForestClassifier(random_state=config.random_seed))
    elif config.model == "logregression":
        classifier = GloveEmbeddingsClassifier(
            LogisticRegression(solver="saga", random_state=config.random_seed))
    elif config.model == "decisiontree":
        classifier = GloveEmbeddingsClassifier(
            DecisionTreeClassifier(random_state=config.random_seed))
    else:
        raise ValueError("chosen model not available")

    training_features, training_labels = classifier.generate_training_data(
        config)
    best_model, best_model_score, best_model_params = classifier.run_grid_search(
        config.random_seed, config.model_parameters, training_features,
        training_labels)

    comet_experiment.log_metric("mean accuracy", best_model_score)
    comet_experiment.log_parameters(best_model_params)

    test_data_features = classifier.generate_test_data_features(config)
    ids = np.arange(1, test_data_features.shape[0] + 1)
    predictions = best_model.predict(test_data_features)
    predictions_table = np.stack([ids, predictions], axis=-1).astype(int)

    if comet_experiment.disabled:
        save_path = build_save_path(config)
        os.makedirs(save_path)

        formatted_predictions_table = pd.DataFrame(
            predictions_table,
            columns=["Id", "Prediction"],
            dtype=np.int32,
        )
        formatted_predictions_table.to_csv(os.path.join(
            save_path, "test_predictions.csv"),
                                           index=False)
    else:
        comet_experiment.log_table(
            filename="test_predictions.csv",
            tabular_data=predictions_table,
            headers=["Id", "Prediction"],
        )
Example #10
def main(args):
    tensorboard_writer = None
    comet_experiment = None

    if (not args.no_log):
        # Create necessary directories
        if (not os.path.isdir(args.log_dir)):
            os.mkdir(args.log_dir)

        # Create log_dir for run
        run_log_dir = os.path.join(args.log_dir, args.exp_name)
        if (os.path.isdir(run_log_dir)):
            cur_count = len(glob.glob(run_log_dir + "_*"))
            run_log_dir = run_log_dir + "_" + str(cur_count)
        os.mkdir(run_log_dir)

        # Create tensorboard writer if requested

        if (args.tensorboard):
            tensorboard_dir = os.path.join(run_log_dir, "tensorboard")
            tensorboard_writer = SummaryWriter(log_dir=tensorboard_dir)

    # Create comet experiment if requested
    if (args.comet_config is not None):
        with open(args.comet_config, 'r') as f:
            comet_dict = json.load(f)
            comet_experiment = Experiment(
                api_key=comet_dict["api_key"],
                project_name=comet_dict["project_name"],
                workspace=comet_dict["workspace"],
            )
            comet_experiment.set_name(args.exp_name)

            # Get hash for latest git commit for logging
            last_commit_hash = subprocess.check_output(
                ['git', 'rev-parse', 'HEAD']).decode("utf-8").rstrip()
            comet_experiment.log_parameter("git_commit_id", last_commit_hash)

    # Instantiate dataset
    dynamics_data = Maze2DDataset()

    dataloader = DataLoader(dynamics_data, batch_size=128, shuffle=True)

    agent = Morel(4,
                  2,
                  tensorboard_writer=tensorboard_writer,
                  comet_experiment=comet_experiment)

    agent.train(dataloader, dynamics_data)

    if (not args.no_log):
        agent.save(os.path.join(run_log_dir, "models"))
        if comet_experiment is not None:
            upload_assets(comet_experiment, run_log_dir)

    agent.eval(dynamics_data.env)
Example #11
def log_file_to_comet_output(project_name, exp_name, log_file_path,
                             refresh_rate):
    experiment = Experiment(api_key=os.environ["COMET_API_KEY"],
                            project_name=project_name)
    experiment.set_name(exp_name)

    with open(log_file_path, "r") as f:
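        # The first read() returns the whole file; later reads return only newly
        # appended content, so this streams the log tail into Comet's
        # auto-captured stdout (auto_output_logging is on by default).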
        while True:
            print(f.read(), end='')
            time.sleep(refresh_rate)
Example #12
def setup_comet_ml_experiment(api_key, project_name, experiment_name,
                              parameters, tags):
    """ Function for setting up comet ml experiment """
    experiment = Experiment(api_key=api_key,
                            project_name=project_name,
                            auto_metric_logging=False)
    experiment.set_name(experiment_name)
    experiment.log_parameters(parameters)
    experiment.add_tags(tags)
    return experiment
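
A hypothetical call to the helper above (all values are placeholders):

experiment = setup_comet_ml_experiment(
    api_key="<your_key>",
    project_name="<your_project>",
    experiment_name="baseline-run",
    parameters={"lr": 1e-3, "epochs": 10},
    tags=["baseline", "v1"],
)
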
Example #13
def setup_comet_ml(dpmodel):
    if dpmodel.config["train"]["comet_ml"]["track"]:
        experiment = Experiment(
            api_key=dpmodel.config["train"]["comet_ml"]["api_key"],
            project_name=dpmodel.config["train"]["comet_ml"]["project_name"])
        if "experiment_name" in dpmodel.config["train"]["comet_ml"].keys():
            experiment.set_name(
                dpmodel.config["train"]["comet_ml"]["experiment_name"])
    else:
        experiment = None
    return experiment
Example #14
def create_experiment(config: dict) -> Experiment:
    experiment = None
    key = config.get('api_key', None)
    name = config.get('project_name', None)
    workspace = config.get('workspace', None)
    if None not in (key, name, workspace):
        experiment = Experiment(
            api_key=key,
            project_name=name,
            workspace=workspace)
        experiment.set_name(f'{datetime.datetime.now().time()}')
    return experiment
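
A hypothetical call site for create_experiment above; because it returns None when any credential is missing, callers must guard their logging:

config = {"api_key": "<your_key>",
          "project_name": "<your_project>",
          "workspace": "<your_workspace>"}
experiment = create_experiment(config)
if experiment is not None:
    experiment.log_metric("loss", 0.1)
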
Example #15
def main(cmd=None, stdout=True):
    """Run finetuning experiment for fixed seed."""

    # Initialize system
    args = get_args(cmd)
    assert torch.cuda.is_available()
    torch.cuda.set_device(args.device)

    # Initialize logging
    model_id = ("Finetune {}, seed size {}, epochs {}, labels {}, "
                "batch size {}, lr {}").format(args.model, args.seed_size,
                                               args.epochs, args.label_budget,
                                               args.batch_size, args.lr)
    logging.basicConfig(filename="{}/{}.txt".format(args.dout, model_id),
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO)
    if stdout:
        logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logger = Experiment(comet_ml_key, project_name="ActiveDialogue")
    logger.set_name(model_id)
    logger.log_parameters(vars(args))

    # Select model and environment
    if args.model == "glad":
        model_arch = GLAD
    elif args.model == "gce":
        model_arch = GCE
    else:
        raise ValueError("Unsupported model: {}".format(args.model))
    env = DSTEnv(load_dataset, model_arch, args)

    # Load seed if need-be
    if not env.load('seed'):
        raise ValueError("No loaded seed.")

    # Initialize evaluation
    best_metrics = env.metrics(True)
    for k, v in best_metrics.items():
        logger.log_metric(k, v, step=0)
    logging.info("Initial metrics: %s", best_metrics)

    # Finetune
    env.label_all()
    for epoch in range(1, args.epochs + 1):
        logging.info('Starting fit epoch %d.', epoch)
        env.fit()
        metrics = env.metrics(True)
        logging.info("Epoch metrics: %s", metrics)
        for k, v in metrics.items():
            logger.log_metric(k, v, step=epoch)
        if best_metrics is None or metrics[args.stop] > best_metrics[
                args.stop]:
            logging.info("Saving best!")
            best_metrics = metrics
Example #16
def start_comet(args):
    exp = None
    if args.comet is not None and len(args.comet) > 0:
        workspace, project, apikey = args.comet.split("/")
        exp = Experiment(api_key=apikey,
                         project_name=project,
                         workspace=workspace)
        exp.set_name("td3")
        if len(args.comet_tags) > 0:
            comet_tags = args.comet_tags.split(",")
            for tag in comet_tags:
                exp.add_tag(tag)
    return exp
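
Illustrative argparse wiring for start_comet above; the flag names mirror the attributes the function reads (args.comet packs "workspace/project/apikey", args.comet_tags is comma-separated):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--comet", default="", help="workspace/project/apikey")
parser.add_argument("--comet-tags", dest="comet_tags", default="")
args = parser.parse_args(["--comet", "my-workspace/my-project/<api_key>",
                          "--comet-tags", "td3,baseline"])
exp = start_comet(args)
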
Example #17
def opus_wrapper(**kwargs):
    from comet_ml import Experiment
    import torch
    import torch.nn.functional as F
    import os
    import os.path as osp
    import argparse
    from data import load_dataset
    from torch_geometric.datasets import Planetoid,PPI,TUDataset
    import torch_geometric.transforms as T
    from torch_geometric.nn import GATConv, GCNConv, GAE, VGAE
    from torch_geometric.data import DataLoader
    from maml import meta_gradient_step
    from models import Encoder, MetaEncoder, GraphSignature, MetaMLPEncoder, MetaSignatureEncoder, MetaGatedSignatureEncoder
    from utils import global_test, test, seed_everything
    from collections import OrderedDict
    from torchviz import make_dot
    import wandb
    import ipdb
    os.environ['WANDB_API_KEY'] = "7110d81f721ee9a7da84c67bcb319fc902f7a180"
    parser = argparse.ArgumentParser()
    my_args = parser.parse_args([])
    my_args.__dict__.update(kwargs)
    my_args.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Checking CUDA")
    print(my_args.dev)

    if my_args.dataset=='PPI':
        project_name = 'meta-graph-ppi'
    elif my_args.dataset=='REDDIT-MULTI-12K':
        project_name = "meta-graph-reddit"
    elif my_args.dataset=='FIRSTMM_DB':
        project_name = "meta-graph-firstmmdb"
    elif my_args.dataset=='DD':
        project_name = "meta-graph-dd"
    elif my_args.dataset=='AMINER':
        project_name = "meta-graph-aminer"
    else:
        project_name='meta-graph'

    if my_args.comet:
        experiment = Experiment(api_key=my_args.comet_apikey,
                                project_name=project_name,
                                workspace=my_args.comet_username)
        experiment.set_name(my_args.namestr)
        my_args.experiment = experiment

    if my_args.wandb:
        wandb.init(project=project_name, name=my_args.namestr)
    print(my_args)
    return main(my_args)
Example #18
    def __train_config(self):
        name = f"{self.config.SERVER_OPT}: {self.config.SERVER_LEARNING_RATE} - {self.config.CLIENT_OPT_STRATEGY} - {self.config.CLIENT_OPT}: {self.config.CLIENT_LEARNING_RATE}"
        logging.info(name)
        experiment = Experiment(
            workspace=self.workspace, project_name=self.project_name
        )
        experiment.set_name(name)
        learner = self.Learner(experiment, self.config, self.config_technical)
        try:
            learner.train()
        except ToLargeLearningRateExcpetion:
            pass  # TODO

        self.__refresh_df()
Example #19
def do_training(config: TorchFederatedLearnerCIFAR100Config):
    config_technical = TorchFederatedLearnerTechnicalConfig(
        STORE_OPT_ON_DISK=False,
        STORE_MODEL_IN_RAM=False,
    )

    name = f"{config.SERVER_OPT}: {config.SERVER_LEARNING_RATE} - {config.CLIENT_OPT_STRATEGY} - {config.CLIENT_OPT}: {config.CLIENT_LEARNING_RATE}"
    logging.info(name)
    experiment = Experiment(workspace="federated-learning",
                            project_name=project_name)
    experiment.set_name(name)
    learner = TorchFederatedLearnerCIFAR100(experiment, config,
                                            config_technical)
    learner.train()
Example #20
def generate_topics():
    # capture the config path from the run arguments then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except ValueError:
        print("Missing or invalid arguments")
        exit(0)

    print("Logging experiment name: {name}".format(
        name=config.experiment.experiment_name))
    experiment = Experiment(api_key=config.experiment.api_key,
                            project_name=config.experiment.project_name,
                            workspace=config.experiment.workspace)
    experiment.set_name(config.experiment.experiment_name)
    params = config.labels_generator.model
    experiment.log_parameters(params)

    print('Creating the data loader...')
    data_loader = DataLoader(config.labels_generator.paths)
    data = data_loader.get_data()

    print('Creating the Preprocessor...')
    preprocessor = CorexPreprocessor(data, config)
    preprocessor.prepare_data()

    print('Creating and training the Model...')
    model = CorexModel(config, preprocessor)
    trainer = CorexTrainer(model, preprocessor.get_data())
    trainer.train()

    print('Evaluating the model...')
    coherence_lst, avg_coherence = trainer.evaluate(preprocessor.get_data(),
                                                    preprocessor.get_corpus())
    trainer.generate_topics()
    print("Coherence score: {score_lst} \nAvg coherence score: {avg_score}".
          format(score_lst=coherence_lst, avg_score=avg_coherence))

    print('Saving the trained model...')
    model.save()

    # Log the rest of the experiment
    metrics = {"coherence": avg_coherence}
    experiment.log_metrics(metrics)

    experiment.log_model(
        name=config.experiment.model_name,
        file_or_folder=config.labels_generator.paths.save_model_path)
Example #21
def make_experiment(env_file, name=None, tags=None):

    # Get environment values
    load_dotenv(env_file)
    COMETML_KEY = os.environ.get("COMETML_KEY")
    COMETML_PROJECT = os.environ.get("COMETML_PROJECT")

    # Start and configure experiment
    experiment = Experiment(COMETML_KEY, COMETML_PROJECT)

    if name is not None:
        experiment.set_name(name)
    if tags is not None:
        experiment.add_tags(tags)

    return experiment
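
The helper above reads its credentials from a dotenv file. An illustrative .env (values are placeholders) and call:

# contents of .env:
#   COMETML_KEY=<your_api_key>
#   COMETML_PROJECT=<your_project>
experiment = make_experiment(".env", name="baseline", tags=["v1"])
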
Example #22
class CometMlAdapter(BaseAdapter):
    def __init__(self, api_key, project_name, experiment_name):
        self.experiment = Experiment(api_key=api_key,
                                     project_name=project_name)
        self.experiment.set_name(experiment_name)

    def log_parameters(self, hyper_params):
        self.experiment.log_parameters(hyper_params)

    def set_model_graph(self, graph):
        self.experiment.set_model_graph(graph)

    def log_metric(self, name, metric, step):
        self.experiment.log_metric(name, metric, step=step)

    def register(self, name):
        pass
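
Hypothetical usage of the adapter above; it narrows Comet's surface to the few calls a trainer needs, keeping the tracking backend swappable behind BaseAdapter:

adapter = CometMlAdapter("<api_key>", "<project>", "run-001")
adapter.log_parameters({"lr": 3e-4, "batch_size": 64})
adapter.log_metric("train_loss", 0.93, step=1)
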
Example #23
def training_loop(x,
                  y,
                  cfg,
                  exp_name="exp_1",
                  device=torch.device('cuda'),
                  starting_epoch=1):
    model = init_model(cfg, device)
    datasets = init_datasets(x, y, cfg)
    dataloaders = init_loaders(datasets, cfg)
    optim = init_optimizers(cfg, model)
    n_epochs = cfg.TRAIN.MAX_EPOCHS
    experiment = Experiment(project_name='mbm-pos-metrics',
                            api_key='w4JbvdIWlas52xdwict9MwmyH')
    experiment.set_name(exp_name)
    hyper = {
        'train_batch_size': cfg.TRAIN.BATCH_SIZE,
        'age_lambda': cfg.MODEL.AGE_LAMBDA,
        'n_epochs': cfg.TRAIN.MAX_EPOCHS,
        'learning_rate': cfg.TRAIN.LEARNING_RATE,
        'weight_decay': cfg.MODEL.WEIGHT_DECAY
    }

    experiment.log_parameters(hyper)
    print("Starting training loop!")
    for epoch in range(starting_epoch, n_epochs + 1):
        for phase in ['train', 'val', 'test']:
            print("Epoch {}/{} - {}".format(epoch, n_epochs, phase.upper()))
            forward_model(model,
                          dataloaders[phase],
                          optim,
                          cfg,
                          device,
                          experiment,
                          phase,
                          current_epoch=epoch)
        # checkpoint model
        check = {
            'model': model.state_dict(),
            'optim': optim['optimizer'].state_dict()
        }
        torch.save(
            check,
            "../../storage/mbm/checkpoints/{}_{}.pth".format(exp_name, epoch))
    experiment.end()
    return experiment
Example #24
def setup_exp(args):
    exp_name = ("nb_" if args.test_notebook else "") + "_".join(
        [args.task, args.mode, args.embedder_name,
         get_hyp_str(args)])
    exp_kwargs = dict(project_name="self-supervised-survey",
                      workspace="eracah")
    if args.comet_mode == "online":
        from comet_ml import Experiment
        exp_kwargs.update(api_key="kH9YI2iv3Ks9Hva5tyPW9FAbx")
    elif args.comet_mode == "offline":
        from comet_ml.offline import OfflineExperiment as Experiment
        offline_directory = Path(".logs")
        exp_kwargs.update(offline_directory=str(offline_directory))

    experiment = Experiment(**exp_kwargs)
    experiment.set_name(exp_name)
    experiment.log_parameters(args.__dict__)
    return experiment, experiment.id
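
A minimal sketch of the offline branch above: OfflineExperiment writes a .zip archive into offline_directory, which can be uploaded to Comet later (for example with the comet upload CLI):

from comet_ml.offline import OfflineExperiment

experiment = OfflineExperiment(project_name="self-supervised-survey",
                               offline_directory=".logs")
experiment.log_parameter("demo", True)
experiment.end()  # finalizes the archive under .logs/
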
Example #25
class CometTracker:
    def __init__(self, comet_params, experiment_name=None, run_params=None):
        self.experiment = Experiment(**comet_params)

        if run_params is not None:
            self.experiment.log_parameters(run_params)

        if experiment_name is not None:
            self.experiment.set_name(experiment_name)

    def track_metric(self, metric, value, step=None):
        self.experiment.log_metric(metric, value, step)

    def add_tags(self, tags):
        self.experiment.add_tags(tags)
        print(f'In [add_tags]: Added these tags to the new experiment: {tags}')

    def set_name(self, name):
        self.experiment.set_name(name)
Example #26
    def get_comet_logger(self):
        if not self.paras.load:
            comet_exp = Experiment(project_name=COMET_PROJECT_NAME,
                                         workspace=COMET_WORKSPACE,
                                         auto_output_logging=None,
                                         auto_metric_logging=None,
                                         display_summary=False,
                                         )
            if self.paras.transfer:
                comet_exp.set_name(self.exp_name)
                comet_exp.add_tag(Path(self.ckpdir).parent.name)
                comet_exp.add_tag('transfer')
                comet_exp.add_tag(self.config['data']['corpus']['metas'][0])
            if self.paras.test:
                comet_exp.set_name(Path(self.paras.outdir).name)
                comet_exp.add_tag(Path(self.paras.config).parents[2].name)
                comet_exp.add_tag('test')
                comet_exp.add_tag(Path(self.paras.config).parent.stem)
                #comet_exp.add_tag(Path(self.paras.outdir).name)
            else:
                comet_exp.add_tag('train')

            for name, param in self.config.items():
                if isinstance(param, dict):
                    comet_exp.log_parameters(param, prefix=name)
                else:
                    comet_exp.log_parameter(name, param)
            comet_exp.log_other('seed', self.paras.seed)

            with open(Path(self.logdir, 'exp_key'), 'w') as f:
                print(comet_exp.get_key(), file=f)
        else:
            with open(Path(self.logdir, 'exp_key'), 'r') as f:
                exp_key = f.read().strip()
                comet_exp = ExistingExperiment(previous_experiment=exp_key,
                                               project_name=COMET_PROJECT_NAME,
                                               workspace=COMET_WORKSPACE,
                                               auto_output_logging=None,
                                               auto_metric_logging=None,
                                               display_summary=False)
        return comet_exp
Example #27
def setup_comet(args, init_distributed):
    if init_distributed and args.distributed_rank > 0:  # only the rank 0 process handles comet
        args.comet = False
    if args.comet:      # This will only be true if the user set the --comet flag, and the rank is 0
        print('Activating comet')
        experiment = Experiment(api_key=api_key,
                                project_name=args.comet_project, workspace=workspace,
                                auto_param_logging=False, auto_metric_logging=False,
                                parse_args=True, auto_output_logging=True, log_env_gpu=False
                                )
        # experiment.disable_mp() # Turn off monkey patching
        experiment.log_parameters(vars(args))
        # experiment.add_tag(args.comet_tag)
        experiment.set_name(args.comet_tag)
        experiment.add_tag(args.comet_real_tag)

        print("* Finished comet setup... ")
        return experiment
    else:
        return None
Example #28
def main():
    """Entry point."""
    random_seed = 69420
    print(random_seed, "random_seed")
    seed_libraries(random_seed)
    flags = _parse_args(sys.argv[1:])
    experiment = Comet_Experiment(
        api_key=flags.comet_api_key,
        project_name="ideal-pancake",
        workspace="s0lvang",
    )
    experiment.set_name(flags.experiment_name)
    # Set up config and select datasets
    globals.init(
        experiment=experiment,
        _flags=flags,
    )
    # Trigger the experiment (note: rebinds experiment from the Comet object to the manager)
    experiment = ExperimentManager(flags.datasets)
    experiment.run_experiments()
Example #29
def setup_comet_ml():
    """Initialise Experiment object."""
    experiment = Experiment(
        api_key=config.COMET_API_KEY,
        disabled=not config.COMET_MONITOR,
        log_code=False,
        project_name=config.COMET_PROJECT_NAME,
        workspace=config.COMET_WORKSPACE,
    )

    experiment.set_name(config.EXPERIMENT_NAME)
    experiment.log_others({
        "conditioning": config.EXPERIMENT_Z,
        "dataset": config.EXPERIMENT_DATASET,
    })
    experiment.log_parameters({
        "batch_size": config.EXPERIMENT_BATCH_SIZE,
        "epochs": config.EXPERIMENT_EPOCHS,
    })

    return experiment
Example #30
def trainLoop(num_epochs, batch_size, gen_lr, disc_lr, gen_train_freq, disc_train_freq, logger):
    # Create run directory for current training run
    os.chdir(os.path.expanduser('~') + '/psig-gan/runs/')
    run_name = ("disc_lr" + str(disc_lr) + " " + "gen_lr" + str(gen_lr) + " "
                + "gen_train_freq" + str(gen_train_freq) + " "
                + "disc_train_freq" + str(disc_train_freq) + " "
                + "num_epochs" + str(num_epochs))
    run_dir = str(datetime.datetime.now()).replace(' ', '') + "--" + run_name
    os.mkdir(run_dir)

    # Instantiate GAN
    gan = DCGAN(latent_shape=100, output_image_shape=256, num_gen_images=batch_size,
                gen_filter_size=5, discrim_filter_size=5, gen_num_channels=128,
                discrim_num_channels=64)

    # Load real grassweeds image data
    data_path = '/home/data/dcgan-data/'
    data_batch = util.createDataBatch(data_path, batch_size)

    # Define Comet-ML API key here for error logging
    comet_api_key = 'Gdy4QDrOmu0P01XuBI33rPuIS'
    # Define Comet project name (note: this rebinds the logger argument)
    logger = Experiment(comet_api_key, project_name="psig-gan")

    # Define experiment name
    logger.set_name(run_name)

    # Execute training loop
    train(gan, data_batch, num_epochs, run_dir, gen_lr, disc_lr, gen_train_freq, disc_train_freq, logger)
Example #31
def run_main_loop(args, train_estimator, predict_estimator):
	total_steps = 0
	train_steps = math.ceil(args.train_examples / args._batch_size)
	eval_steps  = math.ceil(args.eval_examples  / args._batch_size)

	if args.use_comet:
		experiment = Experiment(api_key=comet_ml_api_key, project_name=comet_ml_project, workspace=comet_ml_workspace)
		experiment.log_parameters(vars(args))
		experiment.add_tags(args.tag)
		experiment.set_name(model_name(args))
	else:
		experiment = None

	prefetch_inception_model()

	with tf.gfile.Open(os.path.join(suffixed_folder(args, args.result_dir), "eval.txt"), "a") as eval_file:
		for epoch in range(0, args.epochs, args.predict_every):

			logger.info(f"Training epoch {epoch}")
			train_estimator.train(input_fn=train_input_fn, steps=train_steps * args.predict_every)
			total_steps += train_steps * args.predict_every

			if args.use_comet:
				experiment.set_step(epoch)

			# logger.info(f"Evaluate {epoch}")
			# evaluation = predict_estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
			# logger.info(evaluation)
			# save_evaluation(args, eval_file, evaluation, epoch, total_steps)
			
			# if args.use_comet:
			# 	experiment.log_metrics(evaluation)
			
			logger.info(f"Generate predictions {epoch}")
			predictions = predict_estimator.predict(input_fn=predict_input_fn)
			
			logger.info(f"Save predictions")
			save_predictions(args, suffixed_folder(args, args.result_dir), eval_file, predictions, epoch, total_steps, experiment)

	logger.info(f"Completed {args.epochs} epochs")