Example #1
def _new_exp(name, supress_output):
    if os.path.exists(API_KEY_JSON_PATH):
        exp = Experiment(name,
                         capture_io=False,
                         api_key_getter=_api_key_getter)
    else:
        exp = Experiment(name, capture_io=False)

    # SUPER-hacky, but it works (needed to suppress hyperdash output)
    if supress_output:
        exp._hd.out_buf.write = lambda _: _

    return exp
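
The snippet above assumes an _api_key_getter callable; hyperdash calls it and uses the returned string as the API key. A minimal sketch of such a helper (hypothetical, assuming API_KEY_JSON_PATH points at a JSON file of the form {"api_key": "<YOUR_API_KEY>"}):

import json

def _api_key_getter():
    # Hypothetical helper: read the key from the JSON file checked above.
    with open(API_KEY_JSON_PATH, "r") as f:
        return json.load(f)["api_key"]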
Example #2
def train_multidec(args):
    print("Training multidec")
    device = torch.device(args.gpu)
    print("Loading dataset...")
    full_dataset = load_multi_csv_data(args, CONFIG)
    print("Loading dataset completed")
    # full_loader = DataLoader(full_dataset, batch_size=args.batch_size, shuffle=False)

    image_encoder = MDEC_encoder(input_dim=args.input_dim, z_dim=args.latent_dim, n_clusters=args.n_clusters,
                                 encodeLayer=[500, 500, 2000], activation="relu", dropout=0)
    image_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "image_sdae_" + str(args.latent_dim)) + ".pt")
    text_encoder = MDEC_encoder(input_dim=args.input_dim, z_dim=args.latent_dim, n_clusters=args.n_clusters,
                                encodeLayer=[500, 500, 2000], activation="relu", dropout=0)
    text_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "text_sdae_" + str(args.latent_dim)) + ".pt")
    mdec = MultiDEC(device=device, image_encoder=image_encoder, text_encoder=text_encoder, n_clusters=args.n_clusters)
    exp = Experiment("MDEC " + str(args.latent_dim) + '_' + str(args.n_clusters), capture_io=True)
    print(mdec)

    for arg, value in vars(args).items():
        exp.param(arg, value)
    try:
        mdec.fit(full_dataset, lr=args.lr, batch_size=args.batch_size, num_epochs=args.epochs,
                 save_path=CONFIG.CHECKPOINT_PATH)
        print("Finish!!!")

    finally:
        exp.end()
Example #3
def pretrain_ddec(args):
    print("Pretraining...")

    print("Loading dataset...")
    with open(os.path.join(args.text_embedding_dir, 'word_embedding.p'), "rb") as f:
        embedding_model = cPickle.load(f)
    with open(os.path.join(args.text_embedding_dir, 'word_idx.json'), "r", encoding='utf-8') as f:
        word_idx = json.load(f)
    train_dataset, test_dataset = load_pretrain_data(args.image_dir, word_idx[1], args, CONFIG)
    print("Loading dataset completed")

    dualnet = DualNet(pretrained_embedding=embedding_model, text_features=args.text_features, z_dim=args.z_dim, n_classes=args.n_classes)
    if args.resume:
        print("loading model...")
        dualnet.load_model("/4TBSSD/CHECKPOINT/pretrain_" + str(args.z_dim) + "_0.pt")
    exp = Experiment("Dualnet_pretrain_" + str(args.z_dim), capture_io=True)
    print(dualnet)

    for arg, value in vars(args).items():
        exp.param(arg, value)
    try:
        dualnet.fit(train_dataset, test_dataset, args=args,
                    save_path="/4TBSSD/CHECKPOINT/pretrain_" + str(args.z_dim) + "_0.pt")
        print("Finish!!!")

    finally:
        exp.end()
Example #4
    def __init__(self, name='deathbot', load_weights=False, training=False, batch_size=100, lr=1e-3, location=None):
        self.session = tf.Session()
        self.name = name

        if training: 
            from hyperdash import Experiment
            self.exp = Experiment(name)

        if name in MODELS:
            self.model = MODELS[name]() if not training else MODELS[name](self.exp)
        adam = Adam(lr=lr)
        nadam = Nadam(lr=lr)
        #rms = RMSprop(lr=lr)
        #sgd = SGD(lr=lr)
        self.optimizer = adam if name == "evo" else nadam
        loss = ["binary_crossentropy", "categorical_crossentropy", "poisson"]
        self.model.compile(optimizer=self.optimizer, loss=loss[1],
                           metrics=["acc"])

        self.callbacks = []
        if training:
            self.exp.param("lr", lr)
            reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                          patience=4, min_lr=1e-4, verbose=1)
            tb = TensorBoard('./models/logs/{}'.format(name), write_graph=True)
            cp = ModelCheckpoint(filepath='./models/weights-{}.hdf5'.format(name), monitor='val_acc', verbose=1,
                                 save_best_only=True)
            hd = Hyperdash(self.exp, self.model)
            es = EarlyStopping('val_acc', patience=5, verbose=1)
            self.callbacks = [cp, tb, hd, reduce_lr, es]

        if load_weights:
            #print(os.listdir(os.getcwd()))
            self.model.load_weights('./final/weights-{}.hdf5'.format(name))
            if training: print('Weights Loaded...')
Example #5
def prepare_experiment(exp_name, model_dir='models'):
    exp = Experiment(exp_name)
    model_dir = os.path.join(model_dir, exp_name, get_jstdate_string())
    checkpoint_filename = os.path.join(model_dir, 'checkpoint_{epoch:02d}-{val_loss:.2f}.hdf5')
    make_dirs(model_dir)
    hd_callback = Hyperdash(['val_acc', 'val_loss'], exp)
    checkpoint = keras.callbacks.ModelCheckpoint(
        checkpoint_filename, monitor='val_loss', verbose=1,
        save_best_only=True, save_weights_only=False, mode='auto', period=1)
    return exp, hd_callback, checkpoint
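
A usage sketch for prepare_experiment (a sketch, assuming a compiled Keras model and arrays x_train, y_train, x_val, y_val; the experiment name 'mnist_cnn' is illustrative): the Hyperdash and checkpoint callbacks are passed to fit, and the experiment is closed afterwards.

exp, hd_callback, checkpoint = prepare_experiment('mnist_cnn')
try:
    model.fit(x_train, y_train,
              validation_data=(x_val, y_val),
              epochs=10,
              batch_size=128,
              callbacks=[hd_callback, checkpoint])
finally:
    exp.end()  # mark the Hyperdash experiment finished even if training fails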
Example #6
def train_reconstruction_all(args):
    device = torch.device(args.gpu)

    df_input_data = pd.read_csv(os.path.join(
        CONFIG.CSV_PATH, args.prefix + "_" + args.target_csv),
                                index_col=0,
                                encoding='utf-8-sig')
    exp = Experiment(args.target_modal + " SDAE " + str(args.latent_dim),
                     capture_io=True)
    try:
        for arg, value in vars(args).items():
            exp.param(arg, value)
        print("Loading dataset...")

        train_dataset, val_dataset = load_autoencoder_data(
            df_input_data, CONFIG)
        print("Loading dataset completed")
        train_loader, val_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=args.shuffle), \
                                   DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

        sdae = StackedDAE(input_dim=args.input_dim,
                          z_dim=args.latent_dim,
                          binary=False,
                          encodeLayer=[500, 500, 2000],
                          decodeLayer=[2000, 500, 500],
                          activation="relu",
                          dropout=args.dropout,
                          device=device)
        if args.resume:
            print("resume from checkpoint")
            sdae.load_model(
                os.path.join(
                    CONFIG.CHECKPOINT_PATH, args.prefix + "_" +
                    args.target_modal + "_" + args.target_dataset + "_sdae_" +
                    str(args.latent_dim) + "_all.pt"))
        else:
            sdae.pretrain(train_loader,
                          val_loader,
                          lr=args.lr,
                          batch_size=args.batch_size,
                          num_epochs=args.pretrain_epochs,
                          corrupt=0.2,
                          loss_type="mse")
        sdae.fit(train_loader,
                 val_loader,
                 lr=args.lr,
                 num_epochs=args.epochs,
                 corrupt=0.2,
                 loss_type="mse",
                 save_path=os.path.join(
                     CONFIG.CHECKPOINT_PATH, args.prefix + "_" +
                     args.target_modal + "_" + args.target_dataset + "_sdae_" +
                     str(args.latent_dim) + "_all.pt"))
    finally:
        exp.end()
Example #7
def train_bayes(params):
    """
    Wrapper around the train function, serving as the objective function for
    Gaussian-process optimization with the scikit-optimize routine gp_minimize.

    Arguments:
    ----------
        params: list
        Values sampled by gp_minimize, one entry per search-space dimension
        (here: log10 of the learning rate and log2 of the mini-batch size).

    Returns:
    --------
        best_error: the lowest validation error returned by train, used by
        gp_minimize as the objective value.

    """
    # Create Hyperdash hd_experiment
    hd_exp = Experiment(project_name)

    # Translate params into format understood by train function
    # n_layer = 4
    # layer_sizes = hd_exp.param('layer_sizes', (2**np.array(params[:n_layer])).tolist())
    # learning_rate = hd_exp.param('learning rate', 10**params[n_layer])
    # mini_batch_size = hd_exp.param('mini batch size', int(2**params[n_layer + 1]))
    # pkeep = hd_exp.param('dropout prob', 1)
    # hyper_params = [layer_sizes, learning_rate, mini_batch_size, pkeep]
    # hyper_param_str = make_hyper_param_str(hyper_params)

    layer_sizes = [4096] * 4
    learning_rate = hd_exp.param('learning rate', 10**params[0])
    mini_batch_size = hd_exp.param('mini batch size', int(2**params[1]))
    pkeep = hd_exp.param('dropout prob', 1)
    hyper_params = [layer_sizes, learning_rate, mini_batch_size, pkeep]
    hyper_param_str = make_hyper_param_str(hyper_params)

    # Call train function
    tic = time.time()
    logger.info('Start training for ' + hyper_param_str)
    log_df, best_error = train(train_tuple, validation_tuple, hyper_params,
                               nb_epochs, random_seed, hd_exp, project_dir)
    elapsed_time = time.time() - tic
    logger.info('Finished training in {} s.'.format(elapsed_time))

    # Writing Pandas log file to csv file on disk.
    logger.info('Writing pandas DF log to disk.')
    log_df.to_csv(project_dir + '/' + hyper_param_str + '/data_df.csv')

    # Finish Hyperdash Experiment
    hd_exp.end()

    return best_error
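
Since the docstring presents train_bayes as the objective for scikit-optimize's gp_minimize, a hedged sketch of that call follows (the dimensions and bounds are illustrative assumptions, chosen to match the two values actually read from params: a log10 learning rate and a log2 mini-batch size):

from skopt import gp_minimize
from skopt.space import Real

# Illustrative search space; the bounds are assumptions, not values from the original code.
search_space = [
    Real(-5.0, -1.0, name='log10_learning_rate'),
    Real(4.0, 9.0, name='log2_mini_batch_size'),
]
result = gp_minimize(train_bayes, search_space, n_calls=20, random_state=random_seed)
print('Best error: {}'.format(result.fun))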
Example #8
    def test_experiment_keras_callback(self):
        with patch("sys.stdout", new=StringIO()) as faked_out:
            exp = Experiment("MNIST")
            keras_cb = exp.callbacks.keras
            keras_cb.on_epoch_end(0, {"val_acc": 1, "val_loss": 2})
            # Sleep 1 second due to client sampling
            time.sleep(1)
            keras_cb.on_epoch_end(1, {"val_acc": 3, "val_loss": 4})
            exp.end()

        # Test metrics match what is expected
        metrics_messages = []
        for msg in server_sdk_messages:
            payload = msg["payload"]
            if "name" in payload:
                metrics_messages.append(payload)
        expect_metrics = [
            {
                "is_internal": False,
                "name": "val_acc",
                "value": 1
            },
            {
                "is_internal": False,
                "name": "val_loss",
                "value": 2
            },
            {
                "is_internal": False,
                "name": "val_acc",
                "value": 3
            },
            {
                "is_internal": False,
                "name": "val_loss",
                "value": 4
            },
        ]
        assert len(expect_metrics) == len(metrics_messages)
        for i, message in enumerate(metrics_messages):
            assert message["is_internal"] == expect_metrics[i]["is_internal"]
            assert message["name"] == expect_metrics[i]["name"]
            assert message["value"] == expect_metrics[i]["value"]

        captured_out = faked_out.getvalue()
        assert "error" not in captured_out
Example #9
def main():
    """Start training."""
    exp = Experiment("diffrend test")

    # Parse args
    opt = Parameters().parse()

    for key, val in opt.__dict__.items():
        exp.param(key, val)

    # Create dataset loader
    dataset_load = Dataset_load(opt)

    # Create GAN
    gan = GAN(opt, dataset_load, exp)

    # Train gan
    gan.train()
Example #10
    def __init__(self, output_dir, data_loader, n_words, ixtoword):
        if cfg.TRAIN.FLAG:
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)

        # torch.cuda.set_device(cfg.GPU_ID)
        cudnn.benchmark = True

        self.batch_size = cfg.TRAIN.BATCH_SIZE
        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL

        self.n_words = n_words
        self.ixtoword = ixtoword
        self.data_loader = data_loader
        self.num_batches = len(self.data_loader)

        self.start_epoch = 0
        self.exp = Experiment("t2s", capture_io=False, api_key_getter=get_api_key_from_env)
Example #11
File: trainer.py Project: mzntaka0/fire
 def __init__(self, **kwargs):
     self.data_augmentation = kwargs['data_augmentation']
     self.epoch = kwargs['epoch']
     self.gpu = (kwargs['gpu'] >= 0)
     self.opt = kwargs['opt']
     self.seed = kwargs['seed']
     self.train = kwargs['train']
     self.val = kwargs['val']
     self.batchsize = kwargs['batchsize']
     self.out = kwargs['out']
     self.resume = kwargs['resume']
     self.resume_model = kwargs['resume_model']
     self.resume_opt = kwargs['resume_opt']
     self.hyperdash = kwargs['hyperdash']
     if self.hyperdash:
         self.experiment = Experiment(self.hyperdash)
         for key, val in kwargs.items():
             self.experiment.param(key, val)
     # validate arguments.
     self._validate_arguments()
     self.lowest_loss = 0
     self.device = torch.device('cuda' if kwargs['gpu'] >= 0 else 'cpu')
     #self.experiment.log_multiple_params(kwargs)
     self.dataloader = torch.utils.data.DataLoader
Example #12
 def on_train_begin(self, logs=None):
     self.exp = Experiment("Deep Weather")
Example #13
def train_reconstruction(train_loader, test_loader, encoder, decoder, args):
    exp = Experiment("Reconstruction Training")
    try:
        lr = args.lr
        encoder_opt = torch.optim.Adam(encoder.parameters(), lr=lr)
        decoder_opt = torch.optim.Adam(decoder.parameters(), lr=lr)

        encoder.train()
        decoder.train()
        steps = 0
        for epoch in range(1, args.epochs+1):
            print("=======Epoch========")
            print(epoch)
            for batch in train_loader:
                feature = Variable(batch)
                if args.use_cuda:
                    encoder.cuda()
                    decoder.cuda()
                    feature = feature.cuda()

                encoder_opt.zero_grad()
                decoder_opt.zero_grad()

                h = encoder(feature)
                prob = decoder(h)
                reconstruction_loss = compute_cross_entropy(prob, feature)
                reconstruction_loss.backward()
                encoder_opt.step()
                decoder_opt.step()

                steps += 1
                print("Epoch: {}".format(epoch))
                print("Steps: {}".format(steps))
                print("Loss: {}".format(reconstruction_loss.data[0] / args.sentence_len))
                exp.metric("Loss", reconstruction_loss.data[0] / args.sentence_len)
                # check reconstructed sentence
                if steps % args.log_interval == 0:
                    print("Test!!")
                    input_data = feature[0]
                    single_data = prob[0]
                    _, predict_index = torch.max(single_data, 1)
                    input_sentence = util.transform_id2word(input_data.data, train_loader.dataset.index2word, lang="en")
                    predict_sentence = util.transform_id2word(predict_index.data, train_loader.dataset.index2word, lang="en")
                    print("Input Sentence:")
                    print(input_sentence)
                    print("Output Sentence:")
                    print(predict_sentence)

            if steps % args.test_interval == 0:
                eval_reconstruction(encoder, decoder, test_loader, args)


            if epoch % args.lr_decay_interval == 0:
                # decrease learning rate
                lr = lr / 5
                encoder_opt = torch.optim.Adam(encoder.parameters(), lr=lr)
                decoder_opt = torch.optim.Adam(decoder.parameters(), lr=lr)
                encoder.train()
                decoder.train()

            if epoch % args.save_interval == 0:
                util.save_models(encoder, args.save_dir, "encoder", steps)
                util.save_models(decoder, args.save_dir, "decoder", steps)

        # finalization
        # save vocabulary
        with open("word2index", "wb") as w2i, open("index2word", "wb") as i2w:
            pickle.dump(train_loader.dataset.word2index, w2i)
            pickle.dump(train_loader.dataset.index2word, i2w)

        # save models
        util.save_models(encoder, args.save_dir, "encoder", "final")
        util.save_models(decoder, args.save_dir, "decoder", "final")

        print("Finish!!!")
    finally:
        exp.end()
Example #14
def train_reconstruction(args):
    device = torch.device(args.gpu)
    print("Loading embedding model...")
    with open(
            os.path.join(CONFIG.DATASET_PATH, args.target_dataset,
                         'word_embedding.p'), "rb") as f:
        embedding_model = cPickle.load(f)
    with open(os.path.join(CONFIG.DATASET_PATH, args.target_dataset,
                           'word_idx.json'),
              "r",
              encoding='utf-8') as f:
        word_idx = json.load(f)
    print("Loading embedding model completed")
    print("Loading dataset...")
    train_dataset, val_dataset = load_text_data(args,
                                                CONFIG,
                                                word2idx=word_idx[1])
    print("Loading dataset completed")
    train_loader, val_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=args.shuffle),\
             DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

    # t1 = max_sentence_len + 2 * (args.filter_shape - 1)
    t1 = CONFIG.MAX_SENTENCE_LEN
    t2 = int(math.floor(
        (t1 - args.filter_shape) / 2) + 1)  # "2" means stride size
    t3 = int(math.floor((t2 - args.filter_shape) / 2) + 1)
    args.t3 = t3
    embedding = nn.Embedding.from_pretrained(
        torch.FloatTensor(embedding_model))
    text_encoder = text_model.ConvolutionEncoder(embedding, t3,
                                                 args.filter_size,
                                                 args.filter_shape,
                                                 args.latent_size)
    text_decoder = text_model.DeconvolutionDecoder(embedding, args.tau, t3,
                                                   args.filter_size,
                                                   args.filter_shape,
                                                   args.latent_size, device)
    if args.resume:
        print("Restart from checkpoint")
        checkpoint = torch.load(os.path.join(CONFIG.CHECKPOINT_PATH,
                                             args.resume),
                                map_location=lambda storage, loc: storage)
        start_epoch = checkpoint['epoch']
        text_encoder.load_state_dict(checkpoint['text_encoder'])
        text_decoder.load_state_dict(checkpoint['text_decoder'])
    else:
        print("Start from initial")
        start_epoch = 0

    text_autoencoder = text_model.TextAutoencoder(text_encoder, text_decoder)
    criterion = nn.NLLLoss().to(device)
    text_autoencoder.to(device)

    optimizer = AdamW(text_autoencoder.parameters(),
                      lr=1.,
                      weight_decay=args.weight_decay,
                      amsgrad=True)
    step_size = args.half_cycle_interval * len(train_loader)
    clr = cyclical_lr(step_size,
                      min_lr=args.lr,
                      max_lr=args.lr * args.lr_factor)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, [clr])
    if args.resume:
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
    exp = Experiment("Text autoencoder " + str(args.latent_size),
                     capture_io=False)

    for arg, value in vars(args).items():
        exp.param(arg, value)
    try:
        text_autoencoder.train()

        for epoch in range(start_epoch, args.epochs):
            print("Epoch: {}".format(epoch))
            for steps, batch in enumerate(train_loader):
                torch.cuda.empty_cache()
                feature = Variable(batch).to(device)
                optimizer.zero_grad()
                prob = text_autoencoder(feature)
                loss = criterion(prob.transpose(1, 2), feature)
                loss.backward()
                optimizer.step()
                scheduler.step()

                if (steps * args.batch_size) % args.log_interval == 0:
                    input_data = feature[0]
                    single_data = prob[0]
                    _, predict_index = torch.max(single_data, 1)
                    input_sentence = util.transform_idx2word(
                        input_data.detach().cpu().numpy(),
                        idx2word=word_idx[0])
                    predict_sentence = util.transform_idx2word(
                        predict_index.detach().cpu().numpy(),
                        idx2word=word_idx[0])
                    print("Epoch: {} at {} lr: {}".format(
                        epoch, str(datetime.datetime.now()),
                        str(scheduler.get_lr())))
                    print("Steps: {}".format(steps))
                    print("Loss: {}".format(loss.detach().item()))
                    print("Input Sentence:")
                    print(input_sentence)
                    print("Output Sentence:")
                    print(predict_sentence)
                    del input_data, single_data, _, predict_index
                del feature, prob, loss

            exp.log("\nEpoch: {} at {} lr: {}".format(
                epoch, str(datetime.datetime.now()), str(scheduler.get_lr())))
            _avg_loss, _rouge_1, _rouge_2 = eval_reconstruction_with_rouge(
                text_autoencoder, word_idx[0], criterion, val_loader, device)
            exp.log("\nEvaluation - loss: {}  Rouge1: {} Rouge2: {}".format(
                _avg_loss, _rouge_1, _rouge_2))

            util.save_models(
                {
                    'epoch': epoch + 1,
                    'text_encoder': text_encoder.state_dict(),
                    'text_decoder': text_decoder.state_dict(),
                    'avg_loss': _avg_loss,
                    'Rouge1:': _rouge_1,
                    'Rouge2': _rouge_2,
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict()
                }, CONFIG.CHECKPOINT_PATH,
                "text_autoencoder_" + str(args.latent_size))

        print("Finish!!!")

    finally:
        exp.end()
Example #15
    def __init__(self):
        warnings.filterwarnings('ignore')
        self.start_time = time()

        self.args = get_args()
        if self.args.checkpoint_dir_name:
            dir_name = self.args.checkpoint_dir_name
        else:
            dir_name = datetime.datetime.now().strftime('%y%m%d%H%M%S')
        self.path_to_dir = Path(__file__).resolve().parents[1]
        self.path_to_dir = os.path.join(self.path_to_dir, *['log', dir_name])
        os.makedirs(self.path_to_dir, exist_ok=True)

        # tensorboard
        path_to_tensorboard = os.path.join(self.path_to_dir, 'tensorboard')
        os.makedirs(path_to_tensorboard, exist_ok=True)
        self.writer = SummaryWriter(path_to_tensorboard)

        # model saving
        os.makedirs(os.path.join(self.path_to_dir, 'model'), exist_ok=True)
        path_to_model = os.path.join(self.path_to_dir, *['model', 'model.tar'])

        # csv
        os.makedirs(os.path.join(self.path_to_dir, 'csv'), exist_ok=True)
        self.path_to_results_csv = os.path.join(self.path_to_dir,
                                                *['csv', 'results.csv'])
        path_to_args_csv = os.path.join(self.path_to_dir, *['csv', 'args.csv'])
        if not self.args.checkpoint_dir_name:
            with open(path_to_args_csv, 'a') as f:
                args_dict = vars(self.args)
                param_writer = csv.DictWriter(f, list(args_dict.keys()))
                param_writer.writeheader()
                param_writer.writerow(args_dict)

        # logging by hyperdash
        if not self.args.no_hyperdash:
            from hyperdash import Experiment
            self.exp = Experiment('Generation task on ' + self.args.dataset +
                                  ' dataset with GAN')
            for key in vars(self.args).keys():
                exec("self.args.%s = self.exp.param('%s', self.args.%s)" %
                     (key, key, key))
        else:
            self.exp = None

        self.dataloader = get_dataloader(self.args.dataset,
                                         self.args.image_size,
                                         self.args.batch_size)
        sample_data = self.dataloader.__iter__().__next__()[0]
        image_channels = sample_data.shape[1]

        z = torch.randn(self.args.batch_size, self.args.z_dim)
        self.sample_z = z.view(z.size(0), z.size(1), 1, 1)

        self.Generator = Generator(self.args.z_dim, image_channels,
                                   self.args.image_size)
        self.Generator_optimizer = optim.Adam(self.Generator.parameters(),
                                              lr=self.args.lr_Generator,
                                              betas=(self.args.beta1,
                                                     self.args.beta2))
        self.writer.add_graph(self.Generator, self.sample_z)
        self.Generator.to(self.args.device)

        self.Discriminator = Discriminator(image_channels,
                                           self.args.image_size)
        self.Discriminator_optimizer = optim.Adam(
            self.Discriminator.parameters(),
            lr=self.args.lr_Discriminator,
            betas=(self.args.beta1, self.args.beta2))
        self.writer.add_graph(self.Discriminator, sample_data)
        self.Discriminator.to(self.args.device)

        self.BCELoss = nn.BCELoss()

        self.sample_z = self.sample_z.to(self.args.device)
Example #16
        checkpoint = torch.load(MODEL_PATH_BEST)
        net.load_state_dict(checkpoint['state_dict'])
        return "TRAINING AVG LOSS: {}\n" \
               "TRAINING AVG DIFF: {}".format(
            checkpoint["epoch_avg_loss"], checkpoint["epoch_avg_diff"])
    else:
        if optional:
            pass  # model loading was optional, so nothing to do
        else:
            # no saved model could be found
            raise Exception("model couldn't be found:", MODEL_PATH_BEST)


loss_function = nn.MSELoss()
if hyperdash_support:
    exp = Experiment("simple lstm - fl4")
    exp.param("layers", LSTM_LAYERS)
    exp.param("nodes", HIDDEN_NODES)

if TRAIN:
    optimizer = optim.Adam(net.parameters())
    if CONTINUE:
        old_model_string = loadModel(optional=True)
        print(old_model_string)
else:
    old_model_string = loadModel(optional=False)

loss_history = [9999999]  # very high loss because loss can't be empty for min()
# h0 = Variable(torch.randn(, 3, 20))
# c0 = Variable(torch.randn(2, 3, 20))
Example #17
File: Model.py Project: gundamMC/animius
 def init_hyperdash(self, name):
     if name is not None:
         from hyperdash import Experiment
         self.hyperdash = Experiment(name)
Example #18
NUM_STEPS = 100  # episode steps
NUM_EPISODES = 1000
HIDDEN_SIZE = 128
CHKP_FREQ = 100  # model saving freq
WIDTH = 64
HEIGHT = 64
REWARD_BUF = 1000

npa = np.array

from hyperdash import Experiment

exp_name = "3DR-18-constant-shape-conv"
exp_dir = exp_name + "-" + strftime("%Y%m%d%H%M%S")

exp = Experiment(exp_name)


class Policy(nn.Module):
    def __init__(self, hidden_size, num_inputs, num_outputs):
        super(Policy, self).__init__()

        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(3, 32, (3, 3), (1, 1), (1, 1))
        self.conv2 = nn.Conv2d(32, 64, (3, 3), (2, 2), (1, 1))
        self.linear3 = nn.Linear(64 * 32 * 32, num_outputs)
        self.linear3_ = nn.Linear(64 * 32 * 32, num_outputs)

    def forward(self, inputs):
        x = self.relu(self.conv1(inputs))
        x = self.relu(self.conv2(x))
Example #19
# digits.py
from sklearn import svm, datasets
from hyperdash import Experiment

# Preprocess data
digits = datasets.load_digits()  # note: load_digits takes n_class (1-10), not a sample count
test_cases = 50
X_train, y_train = digits.data[:-test_cases], digits.target[:-test_cases]
X_test, y_test = digits.data[-test_cases:], digits.target[-test_cases:]

# Create an experiment with a model name, then autostart
exp = Experiment("Digits Classifier")
# Record the value of hyperparameter gamma for this experiment
gamma = exp.param("gamma", 0.1)
# Param can record any basic type (Number, Boolean, String)

classifier = svm.SVC(gamma=gamma)
classifier.fit(X_train, y_train)

# Record a numerical performance metric
exp.metric("accuracy", classifier.score(X_test, y_test))

# Cleanup and mark that the experiment successfully completed
exp.end()
Example #20
def run_pusher3dof(args, sim=True, vanilla=False):
    try:
        from hyperdash import Experiment

        hyperdash_support = True
    except ImportError:
        hyperdash_support = False

    env = NormalizedEnv(gym.make(args.env))

    torques = [1.0] * 3  # if real
    colored = False

    if sim:
        torques = [args.t0, args.t1, args.t2]
        colored = True

    if not vanilla:
        env.env._init(
            torques=torques,
            colored=colored
        )

    if args.seed > 0:
        np.random.seed(args.seed)
        env.seed(args.seed)

    nb_states = env.observation_space.shape[0]
    nb_actions = env.action_space.shape[0]

    agent = DDPG(nb_states, nb_actions, args)
    evaluate = Evaluator(
        args.validate_episodes,
        args.validate_steps,
        args.output,
        max_episode_length=args.max_episode_length
    )

    exp = None

    if args.mode == 'train':
        if hyperdash_support:
            prefix = "real"
            if sim: prefix = "sim"

            exp = Experiment("s2r-pusher3dof-ddpg-{}".format(prefix))
            import socket

            exp.param("host", socket.gethostname())
            exp.param("type", prefix)  # sim or real
            exp.param("vanilla", vanilla)  # vanilla or not
            exp.param("torques", torques)
            exp.param("folder", args.output)

            for arg in ["env", "max_episode_length", "train_iter", "seed", "resume"]:
                arg_val = getattr(args, arg)
                exp.param(arg, arg_val)

        train(args, args.train_iter, agent, env, evaluate,
              args.validate_steps, args.output,
              max_episode_length=args.max_episode_length, debug=args.debug, exp=exp)

        # when done (exp is None if hyperdash isn't installed)
        if exp is not None:
            exp.end()

    elif args.mode == 'test':
        test(args.validate_episodes, agent, env, evaluate, args.resume,
             visualize=args.vis, debug=args.debug, load_best=args.best)

    else:
        raise RuntimeError('undefined mode {}'.format(args.mode))
Example #21
        plt.ylabel('accuracy')
        plt.plot(self.train_acc, label='train')
        plt.plot(self.val_acc, label='validation')
        plt.legend()
        plt.savefig('history.png')
        plt.pause(0.1)


# cb_my = LossHistory()

# ref: https://qiita.com/shoji9x9/items/896204303a7a56321d4c
from keras.callbacks import Callback
from hyperdash import Experiment


class Hyperdash(Callback):
    def __init__(self, entries, exp):
        super(Hyperdash, self).__init__()
        self.entries = entries
        self.exp = exp

    def on_epoch_end(self, epoch, logs=None):
        for entry in self.entries:
            log = logs.get(entry)
            if log is not None:
                self.exp.metric(entry, log)


exp = Experiment("unet3-1")
hd_callback = Hyperdash(["val_loss", "loss", "val_accuracy", "accuracy"], exp)
Example #22
def demo(args=None):
    from_file = get_api_key_from_file()
    from_env = get_api_key_from_env()
    api_key = from_env or from_file

    if not api_key:
        print("""
            `hyperdash demo` requires a Hyperdash API key. Try setting your API key in the
            HYPERDASH_API_KEY environment variable, or in a hyperdash.json file in the local
            directory or your user's home directory with the following format:

            {
                "api_key": "<YOUR_API_KEY>"
            }
        """)
        return

    print("""
Running the following program:

    from hyperdash import Experiment
    exp = Experiment("Dogs vs. Cats")

    # Parameters
    estimators = exp.param("Estimators", 500)
    epochs = exp.param("Epochs", 5)
    batch = exp.param("Batch Size", 64)

    for epoch in xrange(1, epochs + 1):
        accuracy = 1. - 1./epoch
        loss = float(epochs - epoch)/epochs
        print("Training model (epoch {})".format(epoch))
        time.sleep(1)

        # Metrics
        exp.metric("Accuracy", accuracy)
        exp.metric("Loss", loss)

    exp.end()
    """)
    from hyperdash import Experiment
    exp = Experiment("Dogs vs. Cats")

    # Parameters
    estimators = exp.param("Estimators", 500)
    epochs = exp.param("Epochs", 5)
    batch = exp.param("Batch Size", 64)

    for epoch in xrange(epochs):
        print("Training model (epoch {})".format(epoch))

        accuracy = 1. - 1. / (epoch + 1)
        loss = float(epochs - epoch) / (epochs + 1)

        # Metrics
        exp.metric("Accuracy", accuracy)
        exp.metric("Loss", loss)

        time.sleep(1)

    exp.end()
Example #23
dataset = Dataset(args)
loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True )

def safety_makedirs(path):
    if not os.path.isdir(path):
        os.makedirs(path)

safety_makedirs("./images")
safety_makedirs("./models")

ca.train()
encoder.train()
decoder.train()

exp = Experiment("t2s_vae", capture_io=False, api_key_getter=get_api_key_from_env)

for i in range(args.epochs):
    print(f"\nEpoch: {i+1:d}")
    for idx,  shape in enumerate(loader):
        start_t = time.time()

        inputs = shape.to(device)
        inputs = inputs.view(-1, 4, args.cube_len, args.cube_len, args.cube_len)


        opt_enc.zero_grad()
        opt_ca.zero_grad()
        opt_dec.zero_grad()

Example #24
def train_multidec(args):
    print("Training weight calc")
    device = torch.device(args.gpu)
    df_image_data = pd.read_csv(os.path.join(
        CONFIG.CSV_PATH, args.prefix_csv + "_pca_normalized_image_encoded_" +
        args.target_dataset + ".csv"),
                                index_col=0,
                                encoding='utf-8-sig')
    df_text_data = pd.read_csv(os.path.join(
        CONFIG.CSV_PATH,
        args.prefix_csv + "_text_doc2vec_" + args.target_dataset + ".csv"),
                               index_col=0,
                               encoding='utf-8-sig')

    df_label = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.label_csv),
                           index_col=0,
                           encoding='utf-8-sig')
    label_array = np.array(df_label['category'])
    n_clusters = np.max(label_array) + 1
    #n_clusters = args.n_clusters

    exp = Experiment(args.prefix_csv + "_ODEC", capture_io=True)

    for arg, value in vars(args).items():
        exp.param(arg, value)
    try:
        acc_list = []
        nmi_list = []
        f_1_list = []
        for fold_idx in range(args.start_fold, args.fold):
            print("Current fold: ", fold_idx)
            df_train = pd.read_csv(os.path.join(
                CONFIG.CSV_PATH, "train_" + str(fold_idx) + "_" +
                args.target_dataset + "_label.csv"),
                                   index_col=0,
                                   encoding='utf-8-sig')
            if args.sampled_n is not None:
                df_train = df_train.sample(n=args.sampled_n, random_state=42)
            df_test = pd.read_csv(os.path.join(
                CONFIG.CSV_PATH, "test_" + str(fold_idx) + "_" +
                args.target_dataset + "_label.csv"),
                                  index_col=0,
                                  encoding='utf-8-sig')
            print("Loading dataset...")
            full_dataset, train_dataset, val_dataset = load_semi_supervised_csv_data(
                df_image_data, df_text_data, df_train, df_test, CONFIG)
            print("\nLoading dataset completed")

            image_encoder = MDEC_encoder(input_dim=args.input_dim,
                                         z_dim=args.latent_dim,
                                         n_clusters=n_clusters,
                                         encodeLayer=[500, 500, 2000],
                                         activation="relu",
                                         dropout=0)
            image_encoder.load_model(
                os.path.join(
                    CONFIG.CHECKPOINT_PATH, args.prefix_model + "_image"
                    "_" + args.target_dataset + "_sdae_" +
                    str(args.latent_dim) + '_' + str(fold_idx)) + ".pt")
            # image_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "sampled_plus_labeled_scaled_image_sdae_" + str(fold_idx)) + ".pt")
            text_encoder = MDEC_encoder(input_dim=args.input_dim,
                                        z_dim=args.latent_dim,
                                        n_clusters=n_clusters,
                                        encodeLayer=[500, 500, 2000],
                                        activation="relu",
                                        dropout=0)
            text_encoder.load_model(
                os.path.join(
                    CONFIG.CHECKPOINT_PATH, args.prefix_model + "_text"
                    "_" + args.target_dataset + "_sdae_" +
                    str(args.latent_dim) + '_' + str(fold_idx)) + ".pt")
            # text_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "sampled_plus_labeled_scaled_text_sdae_" + str(fold_idx)) + ".pt")
            mdec = MultiDEC(device=device,
                            image_encoder=image_encoder,
                            text_encoder=text_encoder,
                            ours=args.ours,
                            use_prior=args.use_prior,
                            fl=args.fl,
                            n_clusters=n_clusters)

            mdec.load_model(
                os.path.join(
                    CONFIG.CHECKPOINT_PATH, args.prefix_csv + "_odec_" +
                    str(args.latent_dim) + '_' + str(fold_idx)) + ".pt")
            mdec.to(device)
            mdec.eval()
            wcalc = WeightCalc(device=device,
                               ours=args.ours,
                               use_prior=args.use_prior,
                               input_dim=args.input_dim,
                               n_clusters=n_clusters)
            wcalc.fit_predict(
                mdec,
                full_dataset,
                train_dataset,
                val_dataset,
                args,
                CONFIG,
                lr=args.lr,
                batch_size=args.batch_size,
                num_epochs=args.epochs,
                save_path=os.path.join(
                    CONFIG.CHECKPOINT_PATH, args.prefix_csv + "_wcalc_" +
                    str(args.latent_dim) + '_' + str(fold_idx)) + ".pt",
                tol=args.tol,
                kappa=args.kappa)
            acc_list.append(wcalc.acc)
            nmi_list.append(wcalc.nmi)
            f_1_list.append(wcalc.f_1)
        print("#Average acc: %.4f, Average nmi: %.4f, Average f_1: %.4f" %
              (np.mean(acc_list), np.mean(nmi_list), np.mean(f_1_list)))

    finally:
        exp.end()
Example #25
import os
import json
import botometer
import pandas
import tweepy
import time
from subprocess import call
from tqdm import tqdm
from hyperdash import Experiment
import subprocess
import pickle
import copy
import sys
import rotem_helpers

exp = Experiment("bot_meter")

version = 3

users_follow = {}

# rapidapi_key = "OyTByfzOA2mshbg7TNfI9gxuqleyp1Ne1RXjsni94N9JOht0ZB" # now it's called rapidapi key
rapidapi_key = '8a8d64e8b5msh34c092335ddc0b0p125cb8jsne6b7f9d11cea'
twitter_app_auth = {
    'consumer_key': 'XZqb7nIARNbh3x4KxaInQ',
    'consumer_secret': 'MHYtjLH6CqekMxR8sQtH6trnEXfdNCMvd75Dv5akWo',
    'access_token': '245305900-NTgpfmVo4XK39SCwhBZ10SuWEnj1MRu0ymv2h6CJ',
    'access_token_secret': 'XYyP5fG4tQL3chz6p7x71pjTi883CJA59g72Bran1bC2P',
  }
bom = botometer.Botometer(wait_on_ratelimit=True,
                          rapidapi_key=rapidapi_key,
Example #26
import numpy as np
from hyperdash import Experiment
from agent.trainer import Trainer
from agent.util import EpsilonExponentialDecay
from marlenv.goldmine.relative import GoldmineRV
from marlenv.util import GoldmineRecorder
from agent.deepq.simple_dqn import SimpleDQN

name = 'gv_n4'
exp = Experiment(name)

agent_num = 6
task_num = 4
view_range = 3
env = GoldmineRV(agent_num, task_num, view_range)
env.seed(0)
obs_num = 2
observation_space = env.observation_space[0:2] + (env.observation_space[2] *
                                                  obs_num, )


def preprocess(obs):
    n = len(obs)
    pr_obs = np.empty((n, ) + observation_space)
    for i, o in enumerate(obs):
        pr_obs[i] = np.dstack(o)
    return pr_obs


params = {
    'name':
Example #27
    def gen_estimator(period=None):
        resnet_size = int(flags_obj.resnet_size)
        data_format = flags_obj.data_format
        batch_size = flags_obj.batch_size
        resnet_version = int(flags_obj.resnet_version)
        loss_scale = flags_core.get_loss_scale(flags_obj)
        dtype_tf = flags_core.get_tf_dtype(flags_obj)
        num_epochs_per_decay = flags_obj.num_epochs_per_decay
        learning_rate_decay_factor = flags_obj.learning_rate_decay_factor
        end_learning_rate = flags_obj.end_learning_rate
        learning_rate_decay_type = flags_obj.learning_rate_decay_type
        weight_decay = flags_obj.weight_decay
        zero_gamma = flags_obj.zero_gamma
        lr_warmup_epochs = flags_obj.lr_warmup_epochs
        base_learning_rate = flags_obj.base_learning_rate
        use_resnet_d = flags_obj.use_resnet_d
        use_dropblock = flags_obj.use_dropblock
        dropblock_kp = [float(be) for be in flags_obj.dropblock_kp]
        label_smoothing = flags_obj.label_smoothing
        momentum = flags_obj.momentum
        bn_momentum = flags_obj.bn_momentum
        train_epochs = flags_obj.train_epochs
        piecewise_lr_boundary_epochs = [
            int(be) for be in flags_obj.piecewise_lr_boundary_epochs
        ]
        piecewise_lr_decay_rates = [
            float(dr) for dr in flags_obj.piecewise_lr_decay_rates
        ]
        use_ranking_loss = flags_obj.use_ranking_loss
        use_se_block = flags_obj.use_se_block
        use_sk_block = flags_obj.use_sk_block
        mixup_type = flags_obj.mixup_type
        dataset_name = flags_obj.dataset_name
        kd_temp = flags_obj.kd_temp
        no_downsample = flags_obj.no_downsample
        anti_alias_filter_size = flags_obj.anti_alias_filter_size
        anti_alias_type = flags_obj.anti_alias_type
        cls_loss_type = flags_obj.cls_loss_type
        logit_type = flags_obj.logit_type
        embedding_size = flags_obj.embedding_size
        pool_type = flags_obj.pool_type
        arc_s = flags_obj.arc_s
        arc_m = flags_obj.arc_m
        bl_alpha = flags_obj.bl_alpha
        bl_beta = flags_obj.bl_beta
        exp = None

        if install_hyperdash and flags_obj.use_hyperdash:
            exp = Experiment(flags_obj.model_dir.split("/")[-1])
            resnet_size = exp.param("resnet_size", int(flags_obj.resnet_size))
            batch_size = exp.param("batch_size", flags_obj.batch_size)
            exp.param("dtype", flags_obj.dtype)
            learning_rate_decay_type = exp.param(
                "learning_rate_decay_type", flags_obj.learning_rate_decay_type)
            weight_decay = exp.param("weight_decay", flags_obj.weight_decay)
            zero_gamma = exp.param("zero_gamma", flags_obj.zero_gamma)
            lr_warmup_epochs = exp.param("lr_warmup_epochs",
                                         flags_obj.lr_warmup_epochs)
            base_learning_rate = exp.param("base_learning_rate",
                                           flags_obj.base_learning_rate)
            use_dropblock = exp.param("use_dropblock", flags_obj.use_dropblock)
            dropblock_kp = exp.param(
                "dropblock_kp", [float(be) for be in flags_obj.dropblock_kp])
            piecewise_lr_boundary_epochs = exp.param(
                "piecewise_lr_boundary_epochs",
                [int(be) for be in flags_obj.piecewise_lr_boundary_epochs])
            piecewise_lr_decay_rates = exp.param(
                "piecewise_lr_decay_rates",
                [float(dr) for dr in flags_obj.piecewise_lr_decay_rates])
            mixup_type = exp.param("mixup_type", flags_obj.mixup_type)
            dataset_name = exp.param("dataset_name", flags_obj.dataset_name)
            exp.param("autoaugment_type", flags_obj.autoaugment_type)

        classifier = tf.estimator.Estimator(
            model_fn=model_function,
            model_dir=flags_obj.model_dir,
            config=run_config,
            params={
                'resnet_size': resnet_size,
                'data_format': data_format,
                'batch_size': batch_size,
                'resnet_version': resnet_version,
                'loss_scale': loss_scale,
                'dtype': dtype_tf,
                'num_epochs_per_decay': num_epochs_per_decay,
                'learning_rate_decay_factor': learning_rate_decay_factor,
                'end_learning_rate': end_learning_rate,
                'learning_rate_decay_type': learning_rate_decay_type,
                'weight_decay': weight_decay,
                'zero_gamma': zero_gamma,
                'lr_warmup_epochs': lr_warmup_epochs,
                'base_learning_rate': base_learning_rate,
                'use_resnet_d': use_resnet_d,
                'use_dropblock': use_dropblock,
                'dropblock_kp': dropblock_kp,
                'label_smoothing': label_smoothing,
                'momentum': momentum,
                'bn_momentum': bn_momentum,
                'embedding_size': embedding_size,
                'train_epochs': train_epochs,
                'piecewise_lr_boundary_epochs': piecewise_lr_boundary_epochs,
                'piecewise_lr_decay_rates': piecewise_lr_decay_rates,
                'with_drawing_bbox': flags_obj.with_drawing_bbox,
                'use_ranking_loss': use_ranking_loss,
                'use_se_block': use_se_block,
                'use_sk_block': use_sk_block,
                'mixup_type': mixup_type,
                'kd_temp': kd_temp,
                'no_downsample': no_downsample,
                'dataset_name': dataset_name,
                'anti_alias_filter_size': anti_alias_filter_size,
                'anti_alias_type': anti_alias_type,
                'cls_loss_type': cls_loss_type,
                'logit_type': logit_type,
                'arc_s': arc_s,
                'arc_m': arc_m,
                'pool_type': pool_type,
                'bl_alpha': bl_alpha,
                'bl_beta': bl_beta,
                'train_steps': total_train_steps,
            })
        return classifier, exp
Example #28
model.add(Flatten())


model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer = 'Nadam',
              metrics=['accuracy'])
'''
#start
exp = Experiment("Experiment 1")

input_img = Input(shape=(250, 250, 3))
#input_img2 = Input(shape=(250, 250, 3))

tower_1 = Conv2D(42, (3, 3), activation='relu', padding='same')(input_img)
#tower_1 = BatchNormalization()(tower_1)
tower_1 = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(tower_1)
#tower_1 = BatchNormalization()(tower_1)
#tower_1 = Dropout(.2)(tower_1)

tower_1 = Conv2D(74, (3, 3), activation='relu', padding='same')(tower_1)
#tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)
tower_1 = BatchNormalization()(tower_1)
tower_1 = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(tower_1)
#tower_1 = BatchNormalization()(tower_1)
Example #29
    return x


def to_var(x, volatile=False):
    x = Variable(torch.from_numpy(x), volatile=volatile)
    if torch.cuda.is_available():
        x = x.cuda()
    return x


def save_model(state):
    torch.save({"state_dict": state}, MODEL_PATH)


loss_function = torch.nn.MSELoss()
exp = Experiment("[sim2real] lstm-realv6")
# exp.param("exp", EXPERIMENT)
# exp.param("layers", LSTM_LAYERS)
# exp.param("nodes", HIDDEN_NODES)
optimizer = torch.optim.Adam(net.parameters())

robot = SingleRobot(debug=False)

for epoch in range(EPOCHS):
    for epi in range(len(ds.current_real)):

        net.zero_hidden()  # !important
        net.hidden[0].detach_()  # !important
        net.hidden[1].detach_()  # !important
        net.zero_grad()
        optimizer.zero_grad()
Example #30
    concat_dataset=ConcatDataset(*datasets)
    val_dataset=TSPDataset(train_params.val_size,train_params.nof_points)

    train_dataloader=DataLoader(concat_dataset,batch_size=train_params.batch_size,num_workers=10)
    val_dataloader=DataLoader(val_dataset,batch_size=train_params.batch_size,num_workers=10)

    model_path="./model/"
    model_params.name="model_5-20.pt"

    model = PointerNet(model_params.embedding_size,
                    model_params.hiddens,
                    model_params.nof_lstms,
                    model_params.bidir,
                    model_params.dropout)
    if train_params.hyperdash:
        exp = Experiment("PtrNet-TSP 5-20")
    else:
        exp = None
    if model_params.gpu:
        device=torch.device('cuda')
    else:
        device=torch.device('cpu')
    write_model_params(model_path,model_params,train_params)
    model.to(device)
    CCE = torch.nn.CrossEntropyLoss()
    optimizer=optim.SGD(filter(lambda p: p.requires_grad,model.parameters()),lr=model_params.lr)
    best_valid_loss=float('inf')
    for epoch in range(train_params.nof_epoch):
        train_iterator = train_dataloader
        val_iterator = val_dataloader
        st_time=time.time()