コード例 #1
0
def run_experiments(datasets, parameters):
    """Run the configured model experiments while logging to Neptune.

    Builds the experiment tag list from the run_* flags, opens a Neptune
    experiment, attaches the model source files as artifacts, and always
    closes the Neptune session — even on Ctrl-C.
    """
    # Tag the experiment with every model family that will be trained.
    tags = parameters['tags']
    model_flags = (('run_mlp', 'MLP'),
                   ('run_fcn', 'FCN'),
                   ('run_resnet', 'ResNet'))
    for flag, tag in model_flags:
        if parameters[flag]:
            tags.append(tag)

    # Open the Neptune experiment and attach the model sources.
    neptune.init(project_qualified_name=parameters['neptune_project'])
    neptune.create_experiment(upload_source_files=[],
                              params=parameters,
                              tags=tags)
    for source_file in ("ConvNet.py", "MultiLayerPerceptron.py", "ResNet.py"):
        neptune.log_artifact(source_file)

    try:
        run_train_models(datasets, parameters)
    except KeyboardInterrupt:
        # A manual interrupt is not an error; just fall through to cleanup.
        pass
    finally:
        neptune.stop()
コード例 #2
0
def main(env_name, n_epochs, eval_frequency, actor_net_dim, critic_net_dim,
         dsicriminator_net_dim, lr, gamma, tau, grad_clip, batch_size,
         entropy_weight, min_buffer_size, clip, ppo_updates, expert,
         activation, value_coef, betas, max_steps, tag, record):
    """Train a GAIL agent on `env_name`, optionally logging the best epoch.

    NOTE(review): `dsicriminator_net_dim` keeps its original (misspelled)
    name because it is part of the public signature.
    """
    # Fresh random seed per run; importing pybulletgym registers its envs.
    seed = np.random.randint(0, 1000000)
    import pybulletgym
    discriminator_updates = 1

    # initiate_run may replace the expert / activation values passed in.
    expert, activation = initiate_run(
        env_name, actor_net_dim, critic_net_dim, dsicriminator_net_dim, lr,
        gamma, tau, grad_clip, batch_size, entropy_weight, min_buffer_size,
        clip, ppo_updates, discriminator_updates, expert, activation,
        value_coef, betas, max_steps, seed, tag, record)

    # Assemble the training components.
    env = Env(env_name)
    action_space = env.env.action_space
    actor = Actor(env, actor_net_dim, activation, action_space.high,
                  action_space.low)
    critic = Critic(env, critic_net_dim, activation)
    discriminator = Discriminator(env, dsicriminator_net_dim, lr, batch_size,
                                  activation, betas)
    agent = Agent(gamma, clip, actor, critic, lr, batch_size, grad_clip,
                  entropy_weight, value_coef, betas)
    memory = PPOMemory(gamma, tau)

    gail = GAIL(env, actor, critic, discriminator, agent, memory,
                min_buffer_size, eval_frequency, ppo_updates,
                discriminator_updates, expert, seed)

    epoch_to_best = gail.update(n_epochs, max_steps, record)
    if record:
        neptune.log_metric('best_epoch', epoch_to_best)
        neptune.stop()
コード例 #3
0
   def train_network_on_many_sets(self, train_dir=None, validation_file=None, epochs=None, batch_size=None,
                                  test_games=1):
       """Train ``self.network`` across many pickled data files, one file per epoch.

       The validation set is loaded once; each epoch picks the next training
       file (wrapping around with modulo), vectorizes it, and fits for a
       single Keras epoch under a Neptune experiment.

       NOTE(review): ``test_games`` is accepted but never used here — confirm
       whether callers rely on it.
       """
       assert self.network is not None, 'You must create network before training'

       # The validation split is fixed: load and vectorize it once up front.
       with open(validation_file, 'rb') as f:
           X_val, Y_val = pickle.load(f)
       X_val = self.vectorizer.many_states_to_input(X_val)
       Y_val = self.data_transformer.transform_array(Y_val)

       self.neptune_monitor.reset_epoch_counter()
       file1, file2 = self.gather_data_info(train_dir, validation_file)
       self.start_neptune_experiment(experiment_name=self.network_name,
                                     description='Training avg_pool arch network',
                                     neptune_monitor=self.neptune_monitor)
       self.neptune_monitor.log_histograms(file1, file2)

       training_files = os.listdir(train_dir)
       for epoch in range(epochs):
           print(f'\n Epoch {epoch}: \n')
           # Wrap around when there are more epochs than data files.
           current_file = training_files[epoch % len(training_files)]
           X, Y = load_data_for_model(os.path.join(train_dir, current_file))
           X = self.vectorizer.many_states_to_input(X)
           Y = self.data_transformer.transform_array(Y)
           self.network.fit(x=X, y=Y, epochs=1, batch_size=batch_size,
                            validation_data=(X_val, Y_val),
                            callbacks=[self.neptune_monitor])
           # Drop the big arrays before loading the next file.
           del X, Y

       neptune.stop()
def main(args):
    """Run paragraph-selector training and/or evaluation.

    When ``args.logger`` is set, the run is mirrored to Neptune and the
    session is closed at the end.
    """
    init_logger()
    set_seed(args)

    logging_enabled = args.logger
    if logging_enabled:
        # Mirror this run to Neptune, named after task and key hyper-params.
        neptune.init("wjdghks950/NumericHGN")
        experiment_name = "({}) NumHGN_{}_{}_{}".format(
            args.task, args.train_batch_size, args.max_seq_len,
            args.train_file)
        neptune.create_experiment(name=experiment_name)
        neptune.append_tag("BertForSequenceClassification", "finetuning",
                           "num_augmented_HGN")

    tokenizer = load_tokenizer(args)
    # The dev set is always needed; the train set only when training.
    train_dataset = dev_dataset = test_dataset = None
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, mode="train")
    dev_dataset = load_and_cache_examples(args, tokenizer, mode="dev")
    # test_dataset = load_and_cache_examples(args, tokenizer, mode="test")

    trainer = ParaSelectorTrainer(args, train_dataset, dev_dataset)

    if args.do_train:
        trainer.train()
        trainer.save_model()

    if args.do_eval:
        trainer.load_model()
        trainer.evaluate("dev")

    if logging_enabled:
        neptune.stop()
コード例 #5
0
def main(argv):
    """Parse gin config, set up Neptune (online or offline), and run training.

    Bug fix: ``exp`` was only assigned in the online branch, so running
    without NEPTUNE_API_TOKEN raised ``NameError`` at ``exp.id``. The
    offline branch now creates an (offline) experiment too. The unused
    ``op_config_str`` local was removed.
    """
    gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param, skip_unknown=True)

    use_neptune = "NEPTUNE_API_TOKEN" in os.environ
    if use_neptune:
        params = utils.get_gin_params_as_dict(gin.config._CONFIG)
        neptune.init(project_qualified_name="melindafkiss/sandbox")
        exp = neptune.create_experiment(params=params, name="exp")
        # ONLY WORKS FOR ONE GIN-CONFIG FILE
        with open(FLAGS.gin_file[0]) as ginf:
            param = ginf.readline()
            while param:
                param = param.replace('.','-').replace('=','-').replace(' ','').replace('\'','').replace('\n','').replace('@','')
                #neptune.append_tag(param)
                param = ginf.readline()
        #for tag in opts['tags'].split(','):
        #  neptune.append_tag(tag)
    else:
        neptune.init('shared/onboarding', api_token='ANONYMOUS', backend=neptune.OfflineBackend())
        # Create an offline experiment so exp.id is defined below as well.
        exp = neptune.create_experiment(name="exp")

    er = ExperimentRunner(prefix=exp.id)
    er.train()

    # Record the operative (actually used) gin parameters as properties.
    params = utils.get_gin_params_as_dict(gin.config._OPERATIVE_CONFIG)
    for k, v in params.items():
        neptune.set_property(k, v)
    neptune.stop()
    print('fin')
コード例 #6
0
    def train_model(self,
                    data_file_name=None,
                    data_frame=None,
                    epochs=50,
                    output_weights_file_name=None,
                    experiment_name='Training'):
        """Fit the network on (state, value) pairs from a pickle file or DataFrame.

        If ``data_frame`` is None the data is read from ``data_file_name``.
        Returns the Keras fit history; optionally saves weights afterwards.
        """
        # Training params.
        test_size = 0.05
        batch_size = None

        assert self.network is not None, 'You must create network before training'
        self.set_corrent_session()

        X, Y = [], []

        if data_frame is None:
            # Pickled-DataFrame path: rows hold an observation plus its value.
            assert data_file_name is not None
            for _, row in pd.read_pickle(data_file_name).iterrows():
                X.append(self.vectorize_state(row['observation'].observation_dict))
                Y.append(row['value'])
        else:
            # In-memory DataFrame: first column is the state vector, last the target.
            for _, row in data_frame.iterrows():
                X.append(row[0])
                Y.append(row[-1])

        X = np.array(X)
        Y = np.array(Y)

        # Record run metadata before starting the experiment.
        self.params['Epochs'] = epochs
        self.params['Data set size'] = X.shape[0]
        self.params['Test set size'] = int(test_size * X.shape[0])
        self.params['batch_size'] = batch_size

        self.start_neptune_experiment(experiment_name=experiment_name,
                                      description='Training dense network',
                                      neptune_monitor=NeptuneMonitor())

        fit_history = self.network.fit(X,
                                       Y,
                                       batch_size=batch_size,
                                       epochs=epochs,
                                       verbose=1,
                                       validation_split=test_size,
                                       callbacks=[self.neptune_monitor])
        neptune.stop()

        if output_weights_file_name is not None:
            self.network.save_weights(output_weights_file_name)
        return fit_history
コード例 #7
0
def run_dqn(experiment_name):
    """Train a DQN on Winter_is_coming, optionally logging/uploading to Neptune."""
    current_dir = pathlib.Path().absolute()
    directories = Save_paths(data_dir=f'{current_dir}/data', experiment_name=experiment_name)

    game = Winter_is_coming(setup=PARAMS['setup'])
    environment = wrappers.SinglePrecisionWrapper(game)
    spec = specs.make_environment_spec(environment)

    def _build_network(env_spec) -> snt.Module:
        # Flatten observations into a small MLP producing per-action values.
        net = snt.Sequential([
            snt.Flatten(),
            snt.nets.MLP([50, 50, env_spec.actions.num_values]),
        ])
        tf2_utils.create_variables(net, [env_spec.observations])
        return net

    network = _build_network(spec)

    # Choose loggers: Neptune when enabled, terminal otherwise.
    if neptune_enabled:
        agent_logger = NeptuneLogger(label='DQN agent', time_delta=0.1)
        loop_logger = NeptuneLogger(label='Environment loop', time_delta=0.1)
        PARAMS['network'] = f'{network}'
        neptune.init('cvasquez/sandbox')
        neptune.create_experiment(name=experiment_name, params=PARAMS)
    else:
        agent_logger = loggers.TerminalLogger('DQN agent', time_delta=1.)
        loop_logger = loggers.TerminalLogger('Environment loop', time_delta=1.)

    # Build the agent.
    agent = DQN(
        environment_spec=spec,
        network=network,
        params=PARAMS,
        checkpoint=True,
        paths=directories,
        logger=agent_logger
    )

    # Run the environment loop; success means no exceptions were raised.
    loop = acme.EnvironmentLoop(environment, agent, logger=loop_logger)
    loop.run(num_episodes=PARAMS['num_episodes'])

    last_checkpoint_path = agent.save()

    # Push every file from the final checkpoint to Neptune.
    if neptune_upload_checkpoint and last_checkpoint_path:
        for fname in os.listdir(last_checkpoint_path):
            neptune.log_artifact(os.path.join(last_checkpoint_path, fname))

    if neptune_enabled:
        neptune.stop()

    do_example_run(game, agent)
コード例 #8
0
    def run_full_training(self, n_iterations, opponent):
        """Alternate training episodes with periodic test matches.

        Runs ``n_iterations`` episodes, testing against ``opponent`` on every
        even iteration, then closes Neptune on the main process.
        """
        for game_idx in range(n_iterations):
            if main_process:
                print('Game number = {}'.format(game_idx))
            self.run_one_episode(epochs=2)
            # Evaluate on every second game.
            if game_idx % 2 == 0:
                self.run_test(opponent=opponent, x_coord=game_idx)

        if USE_NEPTUNE and main_process:
            neptune.stop()
コード例 #9
0
def train(cfg, network):
    """Train ``network`` per ``cfg`` with periodic checkpointing and evaluation.

    Returns the trained network. With ``cfg.neptune`` set, metrics are
    mirrored to Neptune and the session is closed at the end.
    """
    # NOTE(review): non-City datasets switch to file_system tensor sharing —
    # presumably to avoid fd limits with many workers; confirm.
    if not cfg.train.dataset.startswith('City'):
        torch.multiprocessing.set_sharing_strategy('file_system')

    trainer = make_trainer(cfg, network)
    optimizer = make_optimizer(cfg, network)
    scheduler = make_lr_scheduler(cfg, optimizer)
    recorder = make_recorder(cfg)

    # Coco datasets use a dedicated validation path and need no evaluator.
    is_coco = 'Coco' in cfg.train.dataset
    if not is_coco:
        evaluator = make_evaluator(cfg)

    begin_epoch = load_model(network, optimizer, scheduler, recorder,
                             cfg.model_dir, resume=cfg.resume)
    # set_lr_scheduler(cfg, scheduler)

    train_loader = make_data_loader(cfg, is_train=True)
    val_loader = make_data_loader(cfg, is_train=False)
    # train_loader = make_data_loader(cfg, is_train=True, max_iter=100)

    global_steps = None
    if cfg.neptune:
        global_steps = {
            'train_global_steps': 0,
            'valid_global_steps': 0,
        }
        neptune.init('hccccccccc/clean-pvnet')
        neptune.create_experiment(cfg.model_dir.split('/')[-1])
        neptune.append_tag('pose')

    for epoch in range(begin_epoch, cfg.train.epoch):
        recorder.epoch = epoch
        trainer.train(epoch, train_loader, optimizer, recorder, global_steps)
        scheduler.step()

        # Periodic checkpointing.
        if (epoch + 1) % cfg.save_ep == 0:
            save_model(network, optimizer, scheduler, recorder, epoch,
                       cfg.model_dir)

        # Periodic validation.
        if (epoch + 1) % cfg.eval_ep == 0:
            if is_coco:
                trainer.val_coco(val_loader, global_steps)
            else:
                trainer.val(epoch, val_loader, evaluator, recorder)

    if cfg.neptune:
        neptune.stop()

    return network
コード例 #10
0
def main(tasks, features):
    """Run the bias benchmark for every (task, feature) pair, logged to Neptune."""
    neptune.set_project('sfczekalski/BiasSF')
    neptune.init('sfczekalski/BiasSF')
    neptune.create_experiment(name='Bias summary, raw')

    # Fraction grid handed to every bias_benchmark call.
    fraction_range = [0.0, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.7, 1.0]
    for current_task in tasks:
        print(f'{current_task}'.upper())
        for current_feature in features:
            print(f'{current_feature}'.upper())
            bias_benchmark(current_task, current_feature, fraction_range,
                           plot=False)

    neptune.stop()
コード例 #11
0
File: code.py  Project: wDaniec/pretrainingpp
def run_train(opt):
    """Train a network on the configured dataset, mirroring to Neptune if requested."""
    print(opt)

    if opt.sendNeptune:
        # Track this session under the configured Neptune project.
        neptune.init('andrzejzdobywca/pretrainingpp')
        neptune.create_experiment(name=opt.sessionName,
                                  params=vars(opt),
                                  tags=[opt.main_tag, opt.tag, opt.tag2])

    dataset = Dataset(opt)
    net = Network(opt)
    utils.train_model(opt, net, dataset)

    if opt.sendNeptune:
        neptune.stop()
コード例 #12
0
ファイル: base_trainer.py プロジェクト: feipan664/CCM-1
 def train(self):
     """Main training loop with lazy Neptune initialization.

     When enabled, Neptune is initialized on the very first iteration;
     loss printing, checkpointing and validation then run at their
     configured frequencies, and the Neptune session is closed at the end.
     """
     for i_iter in range(self.config.num_steps):
         # NOTE(review): the return value of self.iter() is never used here.
         losses = self.iter(i_iter)
         # Lazily create the Neptune experiment on the first iteration only.
         if i_iter == 0 and self.config.neptune:
             neptune.init(project_qualified_name='solacex/segmentation-DA')
             neptune.create_experiment(params=self.config,
                                       name=self.config['note'])
         if i_iter % self.config.print_freq == 0:
             self.print_loss(i_iter)
         # Skip the redundant checkpoint/validation at iteration 0.
         if i_iter % self.config.save_freq == 0 and i_iter != 0:
             self.save_model(i_iter)
         if self.config.val and i_iter % self.config.val_freq == 0 and i_iter != 0:
             self.validate()
     # NOTE(review): stop() runs even when Neptune was never initialized —
     # confirm this is safe with the neptune client version in use.
     neptune.stop()
コード例 #13
0
def obj2(args):
    """Hyperopt objective: train a ResNet with the given config, return 1-accuracy."""
    nf, act_fn, scale_by_channel, scale_by_sample, scale_type = args
    scale_range = (-1, 1)
    bs = 32

    # Build the scaled databunch from the pre-split train/val series.
    item_lists = ItemLists(Path("data"), TSList(x_train), TSList(x_val))
    data = (item_lists
            .label_from_lists(y_train, y_val)
            .databunch(bs=bs, val_bs=bs * 2)
            .scale(scale_type=scale_type,
                   scale_by_channel=scale_by_channel,
                   scale_by_sample=scale_by_sample,
                   scale_range=scale_range))

    model = ResNet(data.features, data.c, act_fn=act_fn, nf=nf)

    # Log this trial to Neptune.
    neptune.init(project_qualified_name=
                 'andrijdavid/ClinicalBrainComputerInterfacesChallenge2020')
    trial_params = {
        'nf': nf,
        'act_fn': act_fn,
        'scale_by_channel': scale_by_channel,
        'scale_by_sample': scale_by_sample,
        'scale_type': scale_type,
        'bs': bs,
        'model': 'resnet',
        'epoch': 100,
    }
    neptune.create_experiment(name=f'ResNet Hyperparamter Search',
                              description="Optimizing accuracy",
                              params=trial_params,
                              tags=['hyperopt'])

    # Random human name to identify the saved weights of this trial.
    name = names.get_first_name()
    #     kappa = KappaScore()
    loss_func = LabelSmoothingCrossEntropy()
    learn = Learner(data,
                    model,
                    metrics=[accuracy],
                    loss_func=loss_func,
                    opt_func=Ranger)
    with progress_disabled_ctx(learn) as learn:
        learn.fit_one_cycle(100, callbacks=[NeptuneMonitor()])
    learn.save(f"{name}")
    val = learn.validate()
    learn.destroy()
    data = None  # release the databunch before uploading artifacts
    neptune.log_artifact(f'data/models/{name}.pth')
    neptune.stop()
    return {
        'loss': 1 - (val[1].item()),
        'status': STATUS_OK,
        'kappa': val[-1].item(),
    }
コード例 #14
0
    def train_and_evaluate_with_multiple_seeds(self,
                                               no_times,
                                               seeds_from_config=False,
                                               eval_on_train=True):
        """Train ``no_times`` models, evaluate each, and aggregate the scores.

        Per-seed results are stored under ``seed_<i>`` keys; mean accuracy
        and loss per split go under ``Agg``. The combined dict is written to
        ``final_scores.json`` and returned.
        """
        accuracy_agg, model_pathes = self.train_with_multiple_seeds(
            no_times, seeds_from_config=seeds_from_config)

        dict_of_seed_results = {}
        # One accuracy/loss accumulator per data split.
        aggregation_dict = {split: {"Accuracy": [], "Loss": []}
                            for split in ("TRAIN", "DEV", "TEST")}

        for index, path in enumerate(model_pathes):
            per_seed = self.evaluate_from_path(
                path, evaluate_on_train=eval_on_train)
            dict_of_seed_results[f"seed_{index}"] = per_seed
            aggregation_dict = update_dict_of_agg(aggregation_dict,
                                                  per_seed,
                                                  eval_on_train=eval_on_train)

        # Collapse the per-seed score lists into their means.
        split_list = ["DEV", "TEST"]
        if eval_on_train:
            split_list.append("TRAIN")
        for split in split_list:
            for key in ("Accuracy", "Loss"):
                aggregation_dict[split][key] = np.mean(
                    aggregation_dict[split][key])

        dict_of_seed_results["Agg"] = aggregation_dict

        save_json(
            os.path.join(self.configs["checkpointing_path"],
                         "final_scores.json"), dict_of_seed_results)
        if self.configs["use_neptune"]:
            neptune.stop()
        return dict_of_seed_results
コード例 #15
0
def sev_excpetion_hook(exctype, value, tb):
    """Global exception hook: log the traceback, close Neptune, and exit.

    (Original docstring was Hungarian: "exception handler, for logging
    runtime errors".)
    """
    rsna_logger.info("================= ERROR =================")

    # Log the formatted traceback line by line, stripping newlines.
    for line in format_exception(exctype, value, tb):
        rsna_logger.info("{}".format(line.replace('\n', '')))

    rsna_logger.info("")
    rsna_logger.info("----- exiting -----")

    # Close the Neptune run (with the traceback) unless running in dev mode.
    if nml and not dev:
        neptune.stop(tb)

    exit(1)
コード例 #16
0
def objfcn(args):
    """Hyperopt objective: train an FCN with the given dilations, return 1-accuracy."""
    d1, d2, act_fn, scale_by_channel, scale_by_sample, scale_type, randaugment = args
    scale_range = (-1, 1)
    bs = 128

    # Build the scaled databunch from the pre-split train/val series.
    item_lists = ItemLists(Path("data"), TSList(x_train), TSList(x_val))
    data = (item_lists
            .label_from_lists(y_train, y_val)
            .databunch(bs=bs, val_bs=bs * 2)
            .scale(scale_type=scale_type,
                   scale_by_channel=scale_by_channel,
                   scale_by_sample=scale_by_sample,
                   scale_range=scale_range))

    model = FCN(data.features, data.c, act_fn=act_fn, dilations=[d1, d2])

    # Log this trial to Neptune.
    neptune.init(project_qualified_name=
                 'andrijdavid/ClinicalBrainComputerInterfacesChallenge2020')
    trial_params = {
        'pool': 'AdaptiveAvgPool1d',
        'dilation1': d1,
        'dilation2': d2,
        'act_fn': act_fn,
        'scale_by_channel': scale_by_channel,
        'scale_by_sample': scale_by_sample,
        'scale_type': scale_type,
        'randaugment': randaugment,
        'bs': bs,
        'model': 'fcn',
        'epoch': 100,
    }
    neptune.create_experiment(
        name=f'FCN Hyperparamter Search',
        description="Optimizing accuracy by searching proper dilation",
        params=trial_params,
        tags=['hyperopt'])

    kappa = KappaScore()
    learn = Learner(data, model, metrics=[accuracy, kappa])
    if randaugment:
        learn = learn.randaugment()
    learn.fit_one_cycle(100, callbacks=[NeptuneMonitor()])
    val = learn.validate()
    learn.destroy()
    data = None  # release databunch memory
    neptune.stop()
    return {
        'loss': 1 - (val[1].item()),
        'status': STATUS_OK,
        'kappa': val[-1].item(),
    }
コード例 #17
0
File: run.py  Project: tokuma09/MLproject_template
def main(config: DictConfig) -> None:
    """End-to-end run: load data, train with CV, log scores to Neptune, save outputs."""
    # Cloud project used for data access.
    os.environ['GOOGLE_CLOUD_PROJECT'] = config['project_id']
    params = dict(config['model']['parameters'])

    # Resolve paths and the training module relative to the pre-hydra cwd.
    base_dir = os.path.dirname(hydra.utils.get_original_cwd())
    module = importlib.import_module(config['model']['file'])

    X_train_all, y_train_all, X_test = load_data(config, base_dir)

    # Start experiment tracking.
    neptune.init(api_token=API_TOKEN,
                 project_qualified_name='tokuma09/Example')
    neptune.create_experiment(params=params,
                              name='sklearn-quick',
                              tags=[config['model']['name']])

    print('***** Train model *****')
    y_test_preds, oof_preds, models, scores = train(X_train_all, y_train_all,
                                                    X_test, module, config)

    # Log the mean CV score plus each fold's score.
    print('***** log CV score *****')
    neptune.log_metric('CV score', np.mean(scores))
    for fold_idx in range(NUM_FOLDS):
        neptune.log_metric('fold score', scores[fold_idx])

    # Persist artifacts: models, OOF predictions, submission file.
    save_models(models, config, base_dir)
    save_oof(oof_preds, config, base_dir)
    prepare_submission(y_test_preds, config, base_dir)

    neptune.stop()
コード例 #18
0
def covid_with_neptune(params_):
    """Run united_training under a Neptune experiment and return its accuracy."""
    neptune.init('eyalasulin/covid',
                 api_token='token_hidden_before_submitting')
    # ## configuration
    neptune.create_experiment(name='contrastive_covid', params=params_)

    loss, accuracy, history, auc = united_training.main(params_)

    # Only log metrics when training actually produced a history object.
    if history is not None:
        neptune.log_metric('loss', loss)
        neptune.log_metric('accuracy', accuracy)
        for key, values in history.history.items():
            for item in values:
                neptune.log_metric(f'h_{key}', item)

    neptune.stop()
    return accuracy
コード例 #19
0
    def completed_event(self, stop_time, result):
        """Log run results to Neptune and close the experiment.

        Numeric results are logged as metrics; any other result object is
        pickled and uploaded as an artifact.

        Fix: the original ``elif isinstance(r, object)`` made the final
        ``warnings.warn`` branch unreachable (everything is an ``object``
        in Python), so that dead branch was removed.
        """
        if result:
            # Normalize a single result to a tuple so one code path handles both.
            if not isinstance(result, tuple):
                result = (result,)

            for i, r in enumerate(result):
                if isinstance(r, (float, int)):
                    # NOTE: bools are ints in Python, so True/False log as 1.0/0.0.
                    neptune.log_metric("result_{}".format(i), float(r))
                else:
                    pickle_and_send_artifact(r, "result_{}.pkl".format(i))

        neptune.stop()
コード例 #20
0
ファイル: run_fit_param.py プロジェクト: IlyaGusev/purano
 def calc_accuracy(params):
     """Objective for clustering-parameter search; lower return is better.

     Runs one clustering pass under a fresh Neptune experiment, logs
     accuracy and per-class F1, and returns negative accuracy (for a
     minimizer).

     NOTE(review): relies on enclosing-scope names (clusterer, config_copy,
     markup, url2record, neptune_tags) — confirm it stays nested inside the
     function that defines them.
     """
     neptune.create_experiment(name="clustering",
                               params=params,
                               upload_source_files=['configs/*.jsonnet'],
                               tags=neptune_tags)
     # Start from a pristine config, then swap in this trial's clustering params.
     clusterer.config = copy.deepcopy(config_copy)
     clusterer.config["clustering"] = params
     clusterer.config["distances"]["cache_distances"] = True
     clusterer.calc_distances()
     clusterer.cluster()
     labels = clusterer.get_labels()
     # Reset state so the next trial starts clean.
     clusterer.reset_clusters()
     metrics, _ = calc_metrics(markup, url2record, labels)
     accuracy = metrics["accuracy"]
     f1_score_0 = metrics["0"]["f1-score"]
     f1_score_1 = metrics["1"]["f1-score"]
     neptune.log_metric("accuracy", accuracy)
     neptune.log_metric("f1_score_0", f1_score_0)
     neptune.log_metric("f1_score_1", f1_score_1)
     neptune.stop()
     # Minimization convention: return the negated accuracy.
     return -accuracy
コード例 #21
0
def main():
    """Fit a Markov generator on quantized traffic and evaluate its samples."""
    args = _parse_args()

    train_df, test_df = load_train_test_dataset(args.dataset)
    quantizer = GaussianQuantizer.from_pretrained(args.quantizer_path)
    train_states, test_states = quantize_datatset(quantizer, train_df, test_df)

    # Fit on the quantized training states, sample a same-length sequence.
    model = MarkovSequenceGenerator()
    model.fit(train_states)
    sampled = model.sample(len(test_states))

    # Sequence-level stats plus traffic-level evaluation of restored features.
    seq_metrics = calc_stats(test_states, sampled)
    gen_df = restore_features(quantizer, sampled)
    eval_metrics = evaluate_traffic(gen_df, test_df)

    if args.log_neptune:
        neptune.init(
            settings.NEPTUNE_PROJECT,
            settings.NEPTUNE_API_TOKEN,
        )
        neptune.create_experiment(name='markov_model', params=vars(args))
        for metric_name, metric_value in dict(**seq_metrics, **eval_metrics).items():
            neptune.log_metric(metric_name, metric_value)
        neptune.stop()
コード例 #22
0
def tune(classifer,
         params: dict,
         alg: str,
         tags=None,
         preprocessors=None,
         test_size=0.2,
         random_state=42):
    """Run an Optuna hyper-parameter search for ``classifer``, logged to Neptune.

    :param classifer: sklearn regressor
    :param params: dict params for regressor for tuning
    :param alg: algorithm identifier forwarded to the Objective
    :param tags: optional tags for neptune exps, by default module name
    :param preprocessors: optional preprocessors
    :param test_size: size for test datamodules
    :param random_state: random seed for split
    """
    model_name = classifer.__name__
    # Always tag the experiment with the model's class name.
    if tags is None:
        tags = []
    tags.append(model_name)  # NOTE: mutates a caller-supplied list, as before

    neptune.init(project_qualified_name='jiashuxu/folklore',
                 api_token=NEPTUNE_API)
    neptune.create_experiment(name=model_name, tags=tags)

    neptune_callback = opt_utils.NeptuneCallback(log_study=True,
                                                 log_charts=True)

    # Minimize the objective over 50 trials, streaming results to Neptune.
    study = optuna.create_study(direction="minimize")
    objective = Objective(
        classifer, params, alg,
        *get_data(filter_no=10, preprocess=["standard_scaler", "pca"]))
    study.optimize(objective, n_trials=50, callbacks=[neptune_callback])

    opt_utils.log_study_info(study)
    print(f"best merror score: {study.best_value} with {study.best_params}")

    neptune.stop()
コード例 #23
0
ファイル: source_only_trainer.py プロジェクト: tor4z/CCM
    def train(self):
        """Source-only training loop over the source dataset.

        Builds the SGD optimizer (multi-GPU aware), iterates once over the
        source loader adjusting the LR each step, prints losses and runs
        validation at the configured frequencies, and mirrors the run to
        Neptune when enabled.

        Cleanup: the unused ``losses``/``miou`` locals were dropped and the
        hand-maintained ``cu_iter`` counter replaced with ``enumerate(..., 1)``.
        """
        if self.config.neptune:
            neptune.init(project_qualified_name='solacex/segmentation-DA')
            neptune.create_experiment(params=self.config,
                                      name=self.config['note'])

        # Multi-GPU models expose their parameters behind .module.
        model_for_optim = (self.model.module
                           if self.config.multigpu else self.model)
        self.optim = optim.SGD(
            model_for_optim.optim_parameters(self.config.learning_rate),
            lr=self.config.learning_rate,
            momentum=self.config.momentum,
            weight_decay=self.config.weight_decay)

        self.loader, _ = dataset.init_source_dataset(
            self.config)  #, source_list=self.config.src_list)

        # One pass over the source data; cu_iter counts from 1 as before.
        for cu_iter, batch in enumerate(self.loader, 1):
            adjust_learning_rate(self.optim, cu_iter, self.config)
            self.optim.zero_grad()
            self.losses = edict({})
            self.iter(batch)
            self.optim.step()

            if cu_iter % self.config.print_freq == 0:
                self.print_loss(cu_iter)
            if self.config.val and cu_iter % self.config.val_freq == 0:
                self.validate()
                # Validation may switch modes; resume training mode.
                self.model = self.model.train()

        if self.config.neptune:
            neptune.stop()
def main():
    """Fit an HMM traffic generator, plot its BIC curves, and evaluate samples."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        help='path to preprocessed .csv dataset',
                        required=True)
    parser.add_argument('--log_neptune',
                        dest='log_neptune',
                        action='store_true',
                        default=False)
    args = parser.parse_args()

    ds_path = pathlib.Path(args.dataset)
    save_dir = BASE_DIR / 'obj' / ('hmm_' + ds_path.stem)

    train_df, test_df = load_train_test_dataset(ds_path)

    # Fit, searching component counts 1..40 in steps of 2; keep BICs to plot.
    generator, bic_dict = HMMGenerator().fit(*select_features(train_df),
                                             min_comp=1,
                                             max_comp=40,
                                             step_comp=2,
                                             return_bic_dict=True)
    generator.save_pretrained(save_dir)

    # BIC curves for both directions (labels intentionally kept in Russian).
    plot_bics(bic_dict['from'], 'От источника')
    plot_bics(bic_dict['to'], 'К источнику')
    plt.tight_layout()
    plt.savefig(save_dir / 'BICs.png', dpi=300)

    # Sample packets shaped like the test set and score them.
    gen_df = generator.sample_packets_like(test_df)
    eval_metrics = evaluate_traffic(gen_df, test_df)

    if args.log_neptune:
        neptune.init(
            NEPTUNE_PROJECT,
            NEPTUNE_API_TOKEN,
        )
        neptune.create_experiment(name='hmm_model', params=vars(args))
        for metric_name, metric_value in eval_metrics.items():
            neptune.log_metric(metric_name, metric_value)
        neptune.stop()
コード例 #25
0
    def run_training(self, n_iterations, opponent):
        """Q-learning training loop with experience replay and periodic testing.

        Fix: the experiment name used ``'Q learning alpha = '.format(...)``,
        which has no placeholder, so alpha was silently dropped — the format
        string now includes ``{}``.
        """
        if USE_NEPTUNE:
            neptune.create_experiment('Q learning alpha = {}'.format(self.alpha))

        experience_replay_buffer = None
        for i in range(n_iterations):
            collected_data, there_was_no_action = self.run_one_game_and_collect_data(debug_info=True)
            if not there_was_no_action:
                # Train on the fresh game and accumulate it into the buffer.
                self.agent.model.train_model(data_frame=collected_data, epochs=1)
                if experience_replay_buffer is None:
                    experience_replay_buffer = collected_data
                else:
                    experience_replay_buffer = experience_replay_buffer.append(collected_data)

            print('Game number = {}'.format(i))
            # Every 20 games, retrain on the whole replay buffer.
            if i % 20 == 0 and i > 0:
                self.agent.model.train_model(data_frame=experience_replay_buffer, epochs=2)

            # Every 100 games, reset the buffer to bound its size.
            if i % 100 == 0 and i > 0:
                experience_replay_buffer = None
                print('Clearing buffer')

            # Every 10 games, run evaluation duels and log the results.
            if i % 10 == 0:
                if USE_NEPTUNE:
                    neptune.send_metric('epsilon', x=self.agent.epsilon)
                results = self.arena.run_many_duels('deterministic', [self.agent, opponent], number_of_games=50)
                print(results)
                if USE_NEPTUNE:
                    for pair in results.data.keys():
                        neptune.send_metric(pair[0] + '_wins', x=i, y=results.data[pair].wins)
                        neptune.send_metric(pair[0] + '_reward', x=i, y=results.data[pair].reward)
                        neptune.send_metric(pair[0] + '_victory_points', x=i, y=results.data[pair].victory_points)

        if USE_NEPTUNE:
            neptune.stop()
コード例 #26
0
def main(argv):
    """Entry point: parse the gin config, set up Neptune logging, run the experiment."""
    gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param, skip_unknown=True)
    config_text = gin.config_str()
    print("Gin parameter bindings:\n{}".format(config_text))

    # A real Neptune experiment is created only when an API token is present;
    # otherwise everything goes to the anonymous offline backend.
    experiment_id = ''
    if "NEPTUNE_API_TOKEN" in os.environ:
        neptune.init(project_qualified_name='bbeatrix/curl')
        # Experiment name = gin file name without its ".gin" extension.
        experiment_name = FLAGS.gin_file[0].split('/')[-1][:-4]
        experiment = neptune.create_experiment(params=gin_config_to_dict(config_text),
                                               name=experiment_name,
                                               upload_source_files=['./*.py'])
        experiment_id = experiment.id
    else:
        neptune.init('shared/onboarding', 'ANONYMOUS', backend=neptune.OfflineBackend())

    neptune.log_text('gin_config', config_text)
    # NOTE(review): unpacking assumes exactly one gin file was passed; with
    # several, log_artifact would receive too many positional arguments.
    neptune.log_artifact(*FLAGS.gin_file, 'gin_config_{}.gin'.format(experiment_id))

    ExperimentManager(prefix=experiment_id).run_experiment()

    neptune.stop()
    print("Fin")
コード例 #27
0
    'r2_test_sklearn', 'charts_sklearn'
}
# Verify that the experiment exposes exactly the expected sklearn log channels.
from_exp_logs = set(exp.get_logs().keys())
assert correct_logs_set == from_exp_logs, '{} - incorrect logs'.format(exp)

# check sklearn parameters
# Every parameter of the fitted regressor must have been logged as a property.
assert set(exp.get_properties().keys()) == set(
    rfr.get_params().keys()), '{} parameters do not match'.format(exp)

# check neptune parameters
# The experiment parameters must mirror the locally defined `parameters` dict.
assert set(exp.get_parameters().keys()) == set(
    parameters.keys()), '{} parameters do not match'.format(exp)

## Step 5: Stop Neptune experiment after logging summary

neptune.stop()

## Explore results

# Scikit-learn classification

## Step 1: Create and fit gradient boosting classifier

# Hyper-parameters for the gradient boosting classifier built below.
parameters = {
    'n_estimators': 120,
    'learning_rate': 0.12,
    'min_samples_split': 3,
    'min_samples_leaf': 2
}

from sklearn.datasets import load_digits
コード例 #28
0
ファイル: ccm_trainer.py プロジェクト: feipan664/CCM-1
    def train(self):
        """Run the CCM self-training loop over ``config.round`` rounds.

        Each round: compute class-balanced pseudo-label thresholds, save
        pseudo labels, select source data by semantic layout matching and
        pixel-wise similarity matching, then fine-tune the model on the
        concatenated source/target dataset. The learning rate is decayed by
        sqrt(2) after every round. Metrics go to Neptune when enabled.
        """
        if self.config.neptune:
            neptune.init(project_qualified_name="solacex/segmentation-DA")
            neptune.create_experiment(params=self.config,
                                      name=self.config["note"])
        if self.config.resume:
            self.resume()
        else:
            self.round_start = 0

        for r in range(self.round_start, self.config.round):
            # Re-seed every round so data selection is reproducible.
            torch.manual_seed(1234)
            torch.cuda.manual_seed(1234)
            np.random.seed(1234)
            random.seed(1234)
            self.model = self.model.train()

            # Full source (GTA5) and target (Cityscapes) image lists.
            self.source_all = get_list(self.config.gta5.data_list)
            self.target_all = get_list(
                self.config.cityscapes.data_list)  #[:100]

            # Class-balanced confidence thresholds for pseudo-labelling,
            # then dump this round's pseudo labels to plabel_path.
            self.cb_thres = self.gene_thres(self.config.cb_prop)
            self.save_pred(r)
            self.plabel_path = osp.join(self.config.plabel, self.config.note,
                                        str(r))

            # Semantic Layout Matching
            self.source_selected = self.semantic_layout_matching(
                r, self.config.src_count)
            # Pixel-wise similarity matching
            self.source_pixel_selection(r)

            # Fresh SGD optimizer each round, using the (decayed) learning rate.
            self.optim = torch.optim.SGD(
                self.model.optim_parameters(self.config.learning_rate),
                lr=self.config.learning_rate,
                momentum=self.config.momentum,
                weight_decay=self.config.weight_decay,
            )

            self.loader, _ = dataset.init_concat_dataset(
                self.config,
                plabel_path=self.plabel_path,
                selected=self.source_selected,
                source_plabel_path=self.source_plabel_path,
                target_selected=self.target_all)

            # NOTE(review): hard-coded here, overriding any configured value.
            self.config.num_steps = 5000

            for epoch in range(self.config.epochs):
                for i_iter, batch in tqdm(enumerate(self.loader)):
                    # Global step across epochs, used for LR scheduling.
                    cu_step = epoch * len(self.loader) + i_iter
                    self.model = self.model.train()
                    self.losses = edict({})
                    self.optim.zero_grad()
                    adjust_learning_rate(self.optim, cu_step, self.config)
                    self.iter(batch)

                    self.optim.step()
                    if i_iter % self.config.print_freq == 0:
                        self.print_loss(i_iter)
                    if i_iter % self.config.val_freq == 0 and i_iter != 0:
                        miou = self.validate()
                # End-of-epoch validation.
                miou = self.validate()
            # Decay the learning rate by sqrt(2) after each round.
            self.config.learning_rate = self.config.learning_rate / (
                math.sqrt(2))
        if self.config.neptune:
            neptune.stop()
コード例 #29
0
def do_main():
    """Train an EfficientDet-D5 wheat detector for one cross-validation fold.

    Builds train/validation splits from the module-level fold columns, mixes
    in up to 100 negative (background-only) images, optionally resumes from
    'pretrained.pth', and logs the whole run to Neptune.
    """
    neptune.init('ods/wheat')
    # Create experiment with defined parameters
    neptune.create_experiment(name=model_name,
                              params=PARAMS,
                              tags=[experiment_name, experiment_tag],
                              upload_source_files=[os.path.basename(__file__)])

    neptune.append_tags(f'fold_{fold}')

    # Use the GPU selected via the module-level `gpu_number` when available.
    device = torch.device(f'cuda:{gpu_number}') if torch.cuda.is_available(
    ) else torch.device('cpu')
    print(device)

    print(len(train_boxes_df))
    print(len(train_images_df))

    # Leave only > 0
    print('Leave only train images with boxes (validation)')
    # Boolean mask: images that have at least one annotated box.
    with_boxes_filter = train_images_df[image_id_column].isin(
        train_boxes_df[image_id_column].unique())

    # Background-only (negative) images are mixed into the training set.
    negative_images = enumerate_images(DIR_NEGATIVE)
    negative_images = [(negative_prefix + filename[:-4])
                       for filename in negative_images]
    negative_images.sort()
    # take first 100 now...
    negative_images = negative_images[:100]
    """
    spike_images = enumerate_images(DIR_SPIKE)
    spike_images = [(spike_dataset_prefix + filename[:-4]) for filename in spike_images]
    spike_images.sort()
    assert len(spike_images) > 0
    """

    # EfficientDet-D5 loaded from a local checkpoint, re-headed for 1 class.
    config = get_efficientdet_config('tf_efficientdet_d5')
    net = EfficientDet(config, pretrained_backbone=False)
    load_weights(net,
                 '../timm-efficientdet-pytorch/efficientdet_d5-ef44aea8.pth')

    config.num_classes = 1
    config.image_size = our_image_size
    net.class_net = HeadNet(config,
                            num_outputs=config.num_classes,
                            norm_kwargs=dict(eps=.001, momentum=.01))

    # Train/eval wrappers share the same underlying network weights.
    model_train = DetBenchTrain(net, config)
    model_eval = DetBenchEval(net, config)

    manager = ModelManager(model_train, model_eval, device)

    pretrained_weights_file = 'pretrained.pth'

    # Validation: current fold, box-annotated images only.
    # Training: every other fold (box filter not applied).
    images_val = train_images_df.loc[(train_images_df[fold_column] == fold)
                                     & with_boxes_filter,
                                     image_id_column].values
    images_train = train_images_df.loc[(train_images_df[fold_column] != fold),
                                       image_id_column].values

    #images_train = list(images_train) + list(negative_images) + list(spike_images)
    images_train = list(images_train) + list(negative_images)
    print(len(images_train), len(images_val))

    train_dataset = WheatDataset(images_train,
                                 DIR_TRAIN,
                                 train_boxes_df,
                                 transforms=get_train_transform(),
                                 is_test=False)
    valid_dataset = WheatDataset(images_val,
                                 DIR_TRAIN,
                                 train_boxes_df,
                                 transforms=get_valid_transform(),
                                 is_test=True)

    train_data_loader = DataLoader(train_dataset,
                                   batch_size=train_batch_size,
                                   shuffle=True,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn,
                                   drop_last=True)

    valid_data_loader = DataLoader(valid_dataset,
                                   batch_size=inf_batch_size,
                                   shuffle=False,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    weights_file = f'{experiment_name}.pth'
    if os.path.exists(pretrained_weights_file):
        # continue training
        # NOTE(review): this overwrites the COCO checkpoint loaded above.
        print('Continue training, loading weights: ' + pretrained_weights_file)
        load_weights(net, pretrained_weights_file)

    manager.run_train(train_data_loader,
                      valid_data_loader,
                      n_epoches=n_epochs,
                      weights_file=weights_file,
                      factor=factor,
                      start_lr=start_lr,
                      min_lr=min_lr,
                      lr_patience=lr_patience,
                      overall_patience=overall_patience,
                      loss_delta=loss_delta)

    # add tags
    neptune.log_text('save checkpoints as', weights_file[:-4])
    neptune.stop()
コード例 #30
0
ファイル: fubar_allfreeze.py プロジェクト: kjczarne/fubar
# Persist the all-frozen model's weights under a patterned unique file name.
w_filename = filepattern('weights_allfreeze_', '.h5')
model.save_weights(w_filename)
# ---------------------------------------------------------------------------------------------------------------------

# -------------------------------------------------------------
# VISUALIZE BASE ARCHITECTURE TO DECIDE WHICH LAYERS TO FREEZE |
# -------------------------------------------------------------
# PUT BREAKPOINT HERE!!!!!!!!!!!!!!!
# Prints the base model's layer listing for manual inspection when choosing
# which layers to freeze in a later run.
print(list(show_architecture(base)))
# INSERT DEBUGGER BREAKPOINT DIRECTLY ON THE NEXT COMMAND TO VIEW THE ARCHITECTURE AT RUNTIME
# ---------------------------------------------------------------------------------------------------------------------

# ------------------------
# STOP NEPTUNE EXPERIMENT |
# ------------------------
# Finalizes the Neptune run so metrics/artifacts are flushed.
npt.stop()

# ======================================================================================================================
# ======================================================================================================================

# # --------------
# # FREEZE LAYERS |
# # --------------
# # for now I just pass a slice of layers used in Keras documentation
# frosty(model.layers[:249], frost=True)
# frosty(model.layers[249:], frost=False)
#
# # -----------------------------
# # OPTIONAL: INITIALIZE NEPTUNE |
# # -----------------------------
# npt.init(api_token=npt_token,