Code example #1
from sys import argv

from keras.callbacks import ModelCheckpoint, EarlyStopping

# DataGenerator, DataManager, load_partition and cnn0 are project-local
# helpers assumed to be importable from the surrounding code base.

def main():
    params = {'batch_size': 64}
    modelname = argv[1]
    # Datasets
    partition = load_partition()
    print(len(partition['train']))
    print(len(partition['validation']))
    training_generator = DataGenerator(partition['train'], **params)
    validation_generator = DataGenerator(partition['validation'], **params)

    dm = DataManager()
    dm.load_tokenizer('/mnt/data/b04901058/recsys/token0_Xfull.pk')
    word_index, embedding_matrix = dm.embedding_matrix()
    cnn_model = cnn0(word_index, embedding_matrix)
    cnn_model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

    checkpoint = [
        ModelCheckpoint(
            modelname,  # model filename
            monitor='val_loss',  # quantity to monitor
            verbose=0,  # verbosity - 0 or 1
            save_best_only=True,  # only save when the monitored quantity improves
            mode='auto'),  # min/max direction inferred from the monitored quantity
        EarlyStopping(monitor='val_loss', patience=3, verbose=0)
    ]
    cnn_model.fit_generator(generator=training_generator,
                            validation_data=validation_generator,
                            callbacks=checkpoint,
                            verbose=1,
                            use_multiprocessing=True,
                            epochs=12,
                            workers=3)
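
Every example on this page feeds Keras through a project-specific DataGenerator rather than in-memory arrays. None of the projects' implementations are reproduced here; as a point of reference, a minimal sketch of such a class built on keras.utils.Sequence might look like the following (the _load helper and its field names are hypothetical):

import numpy as np
from keras.utils import Sequence

class DataGenerator(Sequence):
    """Minimal batch-generator sketch; for illustration only."""

    def __init__(self, list_IDs, batch_size=64, shuffle=True):
        self.list_IDs = list_IDs      # identifiers of the samples to serve
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of full batches per epoch
        return len(self.list_IDs) // self.batch_size

    def __getitem__(self, index):
        # Materialize one batch of data from its sample IDs
        sel = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        batch_ids = [self.list_IDs[k] for k in sel]
        return self._load(batch_ids)

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def _load(self, batch_ids):
        # Hypothetical loader: replace with real feature/label lookup
        X = np.zeros((len(batch_ids), 1))
        y = np.zeros((len(batch_ids), 1))
        return X, y

Subclassing Sequence is what makes the use_multiprocessing=True, workers=N arguments seen in examples #1 and #5 safe: Keras can hand different batch indices to different worker processes without duplicating state.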
Code example #2
File: NGCF.py  Project: GuoTong96/LocalGCN
    def train_model(self):
        self.logger.info(self.evaluator.metrics_info())
        for epoch in range(1, self.num_epochs + 1):
            # Generate training instances
            user_input, item_input_pos, item_input_neg = DataGenerator._get_pairwise_all_data(
                self.dataset)
            data_iter = DataIterator(user_input,
                                     item_input_pos,
                                     item_input_neg,
                                     batch_size=self.batch_size,
                                     shuffle=True)

            total_loss = 0.0
            training_start_time = time()
            num_training_instances = len(user_input)
            for bat_users, bat_items_pos, bat_items_neg in data_iter:
                feed_dict = {
                    self.user_input: bat_users,
                    self.pos_items: bat_items_pos,
                    self.neg_items: bat_items_neg
                }
                loss, _ = self.sess.run((self.loss, self.optimizer),
                                        feed_dict=feed_dict)
                total_loss += loss

            # self.logger.info("[iter %d : loss : %f, time: %f]" %
            #                  (epoch, total_loss / num_training_instances,
            #                   time() - training_start_time))
            if epoch % self.verbose == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
Code example #3
File: LightGCN_kernel.py  Project: GuoTong96/LocalGCN
    def train_model(self):
        self.logger.info(self.evaluator.metrics_info())
        # self.w = np.ones((self.n_layers + 1, self.n_layers + 1))
        for epoch in range(1, self.num_epochs + 1):
            # Generate training instances
            user_input, item_input_pos, item_input_neg = DataGenerator._get_pairwise_all_data(
                self.dataset)
            data_iter = DataIterator(user_input,
                                     item_input_pos,
                                     item_input_neg,
                                     batch_size=self.batch_size,
                                     shuffle=True)

            total_loss = 0.0
            training_start_time = time()
            for bat_users, bat_items_pos, bat_items_neg in data_iter:
                feed_dict = {
                    self.user_input: bat_users,
                    self.item_input: bat_items_pos,
                    self.item_input_neg: bat_items_neg
                }
                loss, _ = self.sess.run((self.loss, self.optimizer),
                                        feed_dict=feed_dict)
                total_loss += loss
            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, total_loss / len(user_input),
                              time() - training_start_time))
            # if epoch < 200:
            #     self.w = anchor_pre
            if epoch % 20 == 0:
                self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
Code example #4
    def train_model(self):
        self.logger.info(self.evaluator.metrics_info())
        best = 0.0
        n = 0  # evaluations since the last improvement
        bestresult = None
        for epoch in range(1, self.num_epochs + 1):
            # Generate training instances
            user_input, item_input_pos, item_input_neg = DataGenerator._get_pairwise_all_data(
                self.dataset)
            data_iter = DataIterator(user_input,
                                     item_input_pos,
                                     item_input_neg,
                                     batch_size=self.batch_size,
                                     shuffle=True)

            total_loss = 0.0
            training_start_time = time()

            for bat_users, bat_items_pos, bat_items_neg in data_iter:
                feed_dict = {
                    self.user_input: bat_users,
                    self.item_input: bat_items_pos,
                    self.item_input_neg: bat_items_neg
                }
                loss, _ = self.sess.run((self.loss, self.optimizer),
                                        feed_dict=feed_dict)
                total_loss += loss
            self.logger.info("[iter %d : loss : %f, time: %f]" %
                             (epoch, total_loss / len(user_input),
                              time() - training_start_time))
            if epoch % 20 == 0:
                result = self.evaluate()
                self.logger.info("epoch %d:\t%s" % (epoch, result))
                pre = float(result.split('\t')[1])
                if best < pre:
                    best = pre
                    bestresult = self.sess.run(self.ego_embedding)
                    n = 0
                else:
                    n += 1
                    if n >= 20:
                        np.save('tool/%s_weight.npy' % self.data_name,
                                bestresult)
                        break
Code example #5
def training_rnn(train, test):
    config = Config(sampling_rate=800, audio_duration=10, n_folds=n_folds, learning_rate=0.001)
    if DEBUG:
        config = Config(sampling_rate=100, audio_duration=1, n_folds=n_folds, max_epochs=1)
        
    PREDICTION_FOLDER = "predictions_rnn"
    if not os.path.exists(PREDICTION_FOLDER):
        os.mkdir(PREDICTION_FOLDER)
    if os.path.exists('logs/' + PREDICTION_FOLDER):
        shutil.rmtree('logs/' + PREDICTION_FOLDER)
    
    # StratifiedKFold's current API takes n_splits and exposes .split()
    skf = StratifiedKFold(n_splits=config.n_folds)

    for i, (train_split, val_split) in enumerate(skf.split(train, train.label_idx)):
        train_set = train.iloc[train_split]
        val_set = train.iloc[val_split]
        checkpoint = ModelCheckpoint('../model/bestrnn_%d.h5'%i, monitor='val_loss', verbose=1, save_best_only=True)
        early = EarlyStopping(monitor="val_loss", mode="min", patience=5)
        tb = TensorBoard(log_dir='./logs/' + PREDICTION_FOLDER + '/fold_%d'%i, write_graph=True)
    
        callbacks_list = [checkpoint, early, tb]
        print("Fold: ", i)
        print("#"*50)
        model = get_rnn_model(config)
    
        train_generator = DataGenerator(config, '../data/audio_train/', train_set.index, 
                                        train_set.label_idx, batch_size=64,
                                        preprocessing_fn=audio_norm)
        val_generator = DataGenerator(config, '../data/audio_train/', val_set.index, 
                                      val_set.label_idx, batch_size=64,
                                      preprocessing_fn=audio_norm)
    
        history = model.fit_generator(train_generator, callbacks=callbacks_list, validation_data=val_generator,
                                      epochs=config.max_epochs, use_multiprocessing=True, workers=6, max_queue_size=20)
    
        model.load_weights('../model/bestrnn_%d.h5'%i)
    
        # Save train predictions
        train_generator = DataGenerator(config, '../data/audio_train/', train.index, batch_size=128,
                                        preprocessing_fn=audio_norm)
        predictions = model.predict_generator(train_generator, use_multiprocessing=True, 
                                              workers=6, max_queue_size=20, verbose=1)
        np.save(PREDICTION_FOLDER + "/train_predictions_%d.npy"%i, predictions)
    
        # Save test predictions
        test_generator = DataGenerator(config, '../data/audio_test/', test.index, batch_size=128,
                                        preprocessing_fn=audio_norm)
        predictions = model.predict_generator(test_generator, use_multiprocessing=True, 
                                              workers=6, max_queue_size=20, verbose=1)
        np.save(PREDICTION_FOLDER + "/test_predictions_%d.npy"%i, predictions)
    
        # Make a submission file
        top_3 = np.array(LABELS)[np.argsort(-predictions, axis=1)[:, :3]]
        predicted_labels = [' '.join(list(x)) for x in top_3]
        test['label'] = predicted_labels
        test[['label']].to_csv(PREDICTION_FOLDER + "/predictions_%d.csv"%i)
    
    
    pred_list = []
    for i in range(n_folds):
        pred_list.append(np.load(PREDICTION_FOLDER + "/test_predictions_%d.npy"%i))
    prediction = np.ones_like(pred_list[0])
    for pred in pred_list:
        prediction = prediction*pred
    prediction = prediction**(1./len(pred_list))
    # Make a submission file
    top_3 = np.array(LABELS)[np.argsort(-prediction, axis=1)[:, :3]]
    predicted_labels = [' '.join(list(x)) for x in top_3]
    test = pd.read_csv('../data/sample_submission.csv')
    test['label'] = predicted_labels
    test[['fname', 'label']].to_csv("../result/rnn_ensembled_submission.csv", index=False)
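
The ensembling step at the end of training_rnn takes the element-wise geometric mean of the per-fold prediction matrices (product of the folds, then the len(pred_list)-th root). An equivalent formulation that avoids underflow when many small probabilities are multiplied is to average in log space:

import numpy as np

def geometric_mean(pred_list, eps=1e-12):
    # exp(mean(log(p))) == (p1 * p2 * ... * pn) ** (1/n), but numerically safer
    stacked = np.stack(pred_list)  # shape: (n_folds, n_samples, n_classes)
    return np.exp(np.log(stacked + eps).mean(axis=0))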
Code example #6
def main(args):
    with open(args.config, 'r') as f:
        config = json.load(f)

    if config['optimizer'] == 'SGD':
        optimizer = SGD(lr=config['learning_rate'],
                        decay=config['learning_rate'] / config['epochs'],
                        momentum=config['momentum'])
    else:
        raise Exception('Unsupported optimizer: {}.'.format(
            config['optimizer']))

    model_name = str.lower(config['model'])
    if model_name == 'lstm':
        model = LSTM(config['input_length'], 2)
    elif model_name == 'conv1d':
        model = Conv1D(config['input_length'], 2)
    elif model_name == 'conv2d':
        model = Conv2D(config['input_length'], 2)
    else:
        raise Exception('Unsupported model: {}.'.format(config['model']))

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    wav_paths = glob('{}/**'.format(args.data_dir), recursive=True)
    wav_paths = [x for x in wav_paths if '.wav' in x]
    classes = sorted(os.listdir(args.data_dir))
    le = LabelEncoder()
    le.fit(classes)
    labels = [get_class(x, args.data_dir) for x in wav_paths]
    labels = le.transform(labels)

    print('CLASSES: ', list(le.classes_))
    print(le.transform(list(le.classes_)))

    wav_train, wav_val, label_train, label_val = train_test_split(
        wav_paths,
        labels,
        test_size=config['validation_split'],
        random_state=0)
    tg = DataGenerator(wav_train,
                       label_train,
                       config['input_length'],
                       len(set(label_train)),
                       batch_size=config['batch_size'])
    vg = DataGenerator(wav_val,
                       label_val,
                       config['input_length'],
                       len(set(label_val)),
                       batch_size=config['batch_size'])

    output_sub_dir = os.path.join(args.output_dir, model_name,
                                  datetime.now().strftime('%Y%m%d_%H%M%S'))
    os.makedirs(output_sub_dir)

    callbacks = [
        EarlyStopping(monitor='val_loss',
                      patience=config['patience'],
                      restore_best_weights=True,
                      verbose=1),
        ModelCheckpoint(filepath=os.path.join(
            output_sub_dir, 'model.{epoch:02d}-{val_loss:.4f}.h5'),
                        monitor='val_loss',
                        save_best_only=True,
                        verbose=1),
        CSVLogger(os.path.join(output_sub_dir, 'epochs.csv'))
    ]

    model.fit(tg,
              validation_data=vg,
              epochs=config['epochs'],
              verbose=1,
              callbacks=callbacks)
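
main() reads every hyperparameter from the JSON file passed as args.config. A file covering all of the keys the function touches might look like this (values illustrative; 'model' may be 'lstm', 'conv1d', or 'conv2d', and only 'SGD' is accepted as the optimizer):

{
    "model": "conv1d",
    "optimizer": "SGD",
    "learning_rate": 0.01,
    "momentum": 0.9,
    "epochs": 30,
    "batch_size": 32,
    "input_length": 16000,
    "validation_split": 0.1,
    "patience": 5
}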
Code example #7
print('train_img_nums:', len(train_img_names))
print('train_label_nums:', len(train_labels))

# with open(label_txt_path_test, 'r') as f:
#     ff = f.readlines()
#     for i, line in enumerate(ff):
#         line = line.strip()
#         img_name = line.split()[0]
#         label = line.split()[1:]
#         label = list(map(int,label))
#         img_names_test.append(img_name)
#         labels_test.append(label)
print('test_img_nums:', len(test_img_names))
print('test_label_nums:', len(test_labels))

train_generator = DataGenerator(img_root=imgs_path, list_IDs=train_img_names, labels=train_labels,
                                batch_size=batch_size, label_max_length=max_labels, n_channels=3)
test_generator = DataGenerator(img_root=imgs_path, list_IDs=test_img_names, labels=test_labels,
                               batch_size=batch_size, label_max_length=max_labels, n_channels=3)




# PE(pos, 2i)   = sin(pos / 10000^(2i/d_model))
# PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))

# pos.shape: [sentence_length, 1]
# i.shape  : [1, d_model]
# result.shape: [sentence_length, d_model]
def get_angles(pos, i, d_model):
    angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * angle_rates
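
get_angles only produces the raw angle table pos / 10000^(2i/d_model); the standard next step, matching the PE formula in the comments above (this is the layout the TensorFlow transformer tutorial uses), applies sin to the even columns and cos to the odd ones:

def positional_encoding(sentence_length, d_model):
    angle_rads = get_angles(np.arange(sentence_length)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :],
                            d_model)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])  # PE(pos, 2i)   = sin(...)
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])  # PE(pos, 2i+1) = cos(...)
    return angle_rads  # shape: [sentence_length, d_model]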
Code example #8
def train():
    generator = DataGenerator(batch_size=batch_size, epoch=epoch)
    iterator = generator.train_dataset.make_one_shot_iterator()

    image, noise_input = iterator.get_next()

    gan = SimpleGan()

    gen_loss, disc_loss = gan.train(noise_input, image)

    global_step = tf.Variable(0, trainable=False)
    gen_start_learning_rate = 0.000001
    gen_learning_rate = tf.train.exponential_decay(gen_start_learning_rate,
                                                   global_step,
                                                   100,
                                                   0.8,
                                                   staircase=True)
    disc_start_learning_rate = 0.0001
    disc_learning_rate = tf.train.exponential_decay(disc_start_learning_rate,
                                                    global_step,
                                                    100,
                                                    0.8,
                                                    staircase=True)
    train_gan = tf.contrib.layers.optimize_loss(
        loss=gen_loss,
        global_step=global_step,
        learning_rate=gen_learning_rate,
        optimizer=tf.train.AdamOptimizer,
        clip_gradients=9.0,
        summaries=["learning_rate", "loss"])
    train_disc = tf.contrib.layers.optimize_loss(
        loss=disc_loss,
        global_step=global_step,
        learning_rate=disc_learning_rate,
        optimizer=tf.train.AdamOptimizer,
        summaries=["learning_rate", "loss"])

    with tf.Session() as sess:
        saver = tf.train.Saver()
        if loading_model:
            saver.restore(sess, model_file)
        else:
            tf.global_variables_initializer().run()

        try:
            step = 0
            while True:
                # Update schedule: one discriminator step, then ten generator
                # steps, then one more discriminator step per outer iteration
                # (the commented-out lines show the schedule being tuned).
                # _, gl = sess.run([train_gan, gen_loss])
                _, dl = sess.run([train_disc, disc_loss])

                for i in range(10):
                    # _, dl = sess.run([train_disc, disc_loss])
                    _, gl = sess.run([train_gan, gen_loss])

                _, dl = sess.run([train_disc, disc_loss])

                if step % 10 == 0:
                    print(
                        "Minibatch at step %d ==== gen_loss: %.2f, disc_loss: %.2f"
                        % (step, gl, dl))
                    # print("Minibatch at step %d ==== disc_loss: %.2f" % (step, dl))
                if (step + 1) % 100 == 0 and saving_model:
                    save_path = saver.save(sess, model_file)
                    print("Model saved in %s" % save_path)

                step += 1
        except tf.errors.OutOfRangeError:
            if saving_model:
                save_path = saver.save(sess, model_file)
                print("Model saved in %s" % save_path)