def test_validate_callbacks_predefined_callbacks(self):
    supported_predefined_callbacks = [
        callbacks.TensorBoard(),
        callbacks.CSVLogger(filename='./log.csv'),
        callbacks.EarlyStopping(),
        callbacks.ModelCheckpoint(filepath='./checkpoint'),
        callbacks.TerminateOnNaN(),
        callbacks.ProgbarLogger(),
        callbacks.History(),
        callbacks.RemoteMonitor()
    ]

    # These callbacks never touch the optimizer, so validation passes.
    distributed_training_utils_v1.validate_callbacks(
        supported_predefined_callbacks, adam.Adam())

    # These callbacks mutate the learning rate, which requires a Keras
    # Optimizer V2; pairing them with a TF1 optimizer must raise.
    unsupported_predefined_callbacks = [
        callbacks.ReduceLROnPlateau(),
        callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001)
    ]

    for callback in unsupported_predefined_callbacks:
        with self.assertRaisesRegex(
                ValueError, 'You must specify a Keras Optimizer V2'):
            distributed_training_utils_v1.validate_callbacks(
                [callback], tf.compat.v1.train.AdamOptimizer())
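
The check exercised above exists because learning-rate callbacks write to the optimizer's learning rate, a hook only Keras (V2) optimizers expose. A minimal sketch of the supported pairing (the toy model and data are illustrative assumptions, not part of the test):

import tensorflow as tf
from tensorflow.keras import callbacks

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.Adam(), loss='mse')

# Accepted: Adam is a Keras Optimizer V2 with a settable learning rate.
scheduler = callbacks.LearningRateScheduler(lambda epoch: 1e-3 * 0.9 ** epoch)
model.fit(tf.zeros((8, 4)), tf.zeros((8, 1)), epochs=2,
          callbacks=[scheduler], verbose=0)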
Example No. 2
def modeler(self, name, add_dense=0):
    visualize = callbacks.RemoteMonitor(root='http://localhost:5000',
                                        path='/trainprogress',
                                        field='data',
                                        headers={'wooden': name},
                                        send_as_json=True)
    opti_stop = callbacks.EarlyStopping(monitor='val_acc',
                                        min_delta=0.01,
                                        patience=10,
                                        verbose=0,
                                        mode='auto',
                                        baseline=None,
                                        restore_best_weights=True)
    train, test = self.generators()
    model = self.large_model(add_dense)
    model.summary()
    # Persist the class-index mapping so predictions can be decoded later.
    train_map = train.class_indices
    file = self.json_dir + "/models_multi/labels_{}.json".format(name)
    with open(file, 'w') as f:
        json.dump(train_map, f)
    print(train_map)
    model.fit_generator(train,
                        steps_per_epoch=100,
                        epochs=20,
                        validation_data=test,
                        validation_steps=100,
                        callbacks=[visualize, opti_stop])
    model.save(self.json_dir + '/models_multi/model_{}.h5'.format(name))
    # scores = model.evaluate_generator(test, steps=100)
    # print("Large CNN Error: %.2f%%" % (100 - scores[1] * 100))
    del model
    print("Done")
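
Because this monitor targets a custom endpoint, a matching receiver helps when reading the snippet. A minimal Flask sketch (the app itself is an assumption; only the port, the '/trainprogress' path and send_as_json=True come from the code above):

from flask import Flask, request

app = Flask(__name__)

@app.route('/trainprogress', methods=['POST'])
def train_progress():
    # With send_as_json=True, RemoteMonitor sends the epoch logs as the
    # JSON request body rather than as a form field.
    logs = request.get_json()
    print(logs)
    return 'ok'

if __name__ == '__main__':
    app.run(port=5000)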
Example No. 3
def train(vocab):
    input_file = 'raps.txt'
    vocab_file = vocab
    with codecs.open(input_file, 'r', encoding=None) as f:
        data = f.read()
    x_text = data.split()
    word_counts = collections.Counter(x_text)
    # `words` keeps frequency order; `vocabulary_inv` is the same vocabulary
    # sorted alphabetically so word indices are stable across runs.
    words = [x[0] for x in word_counts.most_common()]
    vocab_size = len(words)
    vocabulary_inv = sorted(words)
    vocabs = {x: i for i, x in enumerate(vocabulary_inv)}

    with open(vocab_file, 'wb') as f:
        cPickle.dump((words, vocabs, vocabulary_inv), f)

    sequences = []
    next_words = []
    for i in range(0, len(x_text) - seq_length, sequences_step):
        sequences.append(x_text[i:i + seq_length])
        next_words.append(x_text[i + seq_length])

    # One-hot encode inputs and targets (bool instead of the removed np.bool).
    X = np.zeros((len(sequences), seq_length, vocab_size), dtype=bool)
    y = np.zeros((len(sequences), vocab_size), dtype=bool)
    for i, sentence in enumerate(sequences):
        for t, word in enumerate(sentence):
            X[i, t, vocabs[word]] = 1
        y[i, vocabs[next_words[i]]] = 1

    model = Sequential()
    model.add(LSTM(rnn_size, input_shape=(seq_length, vocab_size)))
    model.add(Dense(vocab_size))
    model.add(Activation('softmax'))
    optimizer = RMSprop(lr=learning_rate)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    call1 = ModelCheckpoint(neural_network,
                            monitor='loss',
                            verbose=1,
                            save_best_only=True,
                            mode='min')
    call2 = NBatchLogger(display=1)
    call3 = callbacks.RemoteMonitor(root=url,
                                    field='epic',
                                    path='/publish/epoch/')
    callbacks_list = [call1, call2, call3]
    model.fit(X,
              y,
              batch_size=batch_size,
              epochs=num_epochs,
              validation_split=0.3,
              verbose=1,
              callbacks=callbacks_list)
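
Generating text from the trained model inverts the one-hot encoding above. A sampling sketch reusing the objects built in this function (the temperature parameter is an addition, not part of the original):

import numpy as np

def sample_next_word(model, seed_words, vocabs, vocabulary_inv, seq_length,
                     temperature=1.0):
    """Predict the word following `seed_words` (a list of seq_length words)."""
    vocab_size = len(vocabulary_inv)
    x = np.zeros((1, seq_length, vocab_size), dtype=bool)
    for t, word in enumerate(seed_words):
        x[0, t, vocabs[word]] = 1
    preds = model.predict(x, verbose=0)[0]
    # Temperature sampling: rescale the softmax output before drawing.
    logits = np.log(np.maximum(preds, 1e-10)) / temperature
    probs = np.exp(logits) / np.sum(np.exp(logits))
    return vocabulary_inv[np.random.choice(vocab_size, p=probs)]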
Example No. 4
def test_RemoteMonitorWithJsonPayload():
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    cbks = [callbacks.RemoteMonitor(send_as_json=True)]

    # Patch requests.post so the test performs no real network I/O.
    with patch('requests.post'):
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
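
The patch above only swallows the request; the test could also assert what was sent. A sketch (assuming the same unittest.mock.patch import; the json keyword reflects how RemoteMonitor posts when send_as_json=True):

    with patch('requests.post') as post:
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
        assert post.called
        _, kwargs = post.call_args
        # send_as_json=True means the logs travel as the JSON request body.
        assert 'json' in kwargs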
Example No. 5
args = parser.parse_args()
# RANDOM_LABELS = args.random
NB_EPOCHS = args.nb_epochs
VGG_WEIGHTS = args.vgg16_weights
FC_MODEL = args.FC_MODEL
F_TYPE = args.F_TYPE
HARD_TRAINING_MAP = args.HARD_TRAINING_MAP
HARD_VALIDATION_MAP = args.HARD_VALIDATION_MAP
PLOT = args.PLOT
OUTDIR = args.OUTDIR + "/"

if not os.path.exists(OUTDIR):
    os.makedirs(OUTDIR)

remote = callbacks.RemoteMonitor(root='http://192.168.40.153:9000')

hera_model = HeraModel(
    {
        'id':
        'Fine-Tuning of VGG16'  # any ID you want to use to identify your model
    },
    {
        # location of the local Hera server; out of the box it's the following
        'domain': '192.168.40.153',
        'port': 4000
    })

if HARD_TRAINING_MAP is not None:
    hard_train, hard_train_labels, hard_train_images = parse_mapping(
        mapping_file=HARD_TRAINING_MAP)
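
The snippet reads its settings from a parser defined earlier in the file, which is not shown. A hypothetical reconstruction, consistent only in the attribute names the code above reads (flags and defaults are assumptions):

import argparse

parser = argparse.ArgumentParser(description='Fine-tuning of VGG16')
parser.add_argument('--nb_epochs', type=int, default=10)
parser.add_argument('--vgg16_weights')
parser.add_argument('--FC_MODEL')
parser.add_argument('--F_TYPE')
parser.add_argument('--HARD_TRAINING_MAP', default=None)
parser.add_argument('--HARD_VALIDATION_MAP', default=None)
parser.add_argument('--PLOT', action='store_true')
parser.add_argument('--OUTDIR', default='out')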
Example No. 6
from __future__ import print_function

from glob import glob
import numpy as np

from utils import load_pkl, dump_pkl
from config.global_parameters import frameWidth, frameHeight, genreLabels
from config.resources import video_resource
from video import extract_feature_video, gather_videos
from model_utils import spatial_model

from keras.utils.np_utils import to_categorical
"""testing hualos"""
from keras import callbacks
remote = callbacks.RemoteMonitor(root='http://localhost:9000')
#collect videos for each genre
#extract spatial features for each
#create your model
#train/fit it
#save the model
#test it


def gather_genre(genre, limit_videos=100):
    print("Gathering features for", genre, end=' ')
    genreFeatures = gather_videos(genre, limit_videos)
    print("OK.")
    print(genreFeatures.shape)
    dump_pkl(genreFeatures, genre + str(limit_videos))


def gather():
Example No. 7
from keras.optimizers import Adam
from keras.regularizers import l2

import sys
sys.path.insert(0, 'layers/')
from graph import GraphConvolution
from utils import *

import time
import numpy as np
import nmslib
import scipy
from sklearn.neighbors import NearestNeighbors

from keras import callbacks
remote = callbacks.RemoteMonitor(root='http://localhost:9000', headers=None)

# Define parameters
DATASET = 'email'  #'chg-miner'
FILTER = 'localpool'  # 'chebyshev'
MAX_DEGREE = 4  # maximum polynomial degree
SYM_NORM = True  # symmetric (True) vs. left-only (False) normalization
NB_EPOCH = 2000
PATIENCE = 50  # early stopping patience

# Get data
X, A, y = load_edgelist(DATASET)
y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask = get_splits(
    y)
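
For context, the 'localpool' filter selected above is the renormalized adjacency from Kipf & Welling (2017). A self-contained sketch of that preprocessing (independent of this project's utils):

import numpy as np
import scipy.sparse as sp

def localpool_filter(A):
    """A_hat = D^{-1/2} (A + I) D^{-1/2}, the renormalized adjacency."""
    A_tilde = A + sp.eye(A.shape[0])  # self-loops keep every degree >= 1
    d = np.asarray(A_tilde.sum(axis=1)).flatten()
    D_inv_sqrt = sp.diags(np.power(d, -0.5))
    return D_inv_sqrt @ A_tilde @ D_inv_sqrt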

Example No. 8
from config.global_parameters import default_model_name, number_of_classes, number_of_frames
from utils import load_pkl, augment_labels_lstm, gather_features, gather_raw_data, dump_pkl
from video import sequencify
import numpy as np
from model_utils import text_model, vis_model, good_text_model
from keras.optimizers import SGD
import keras.backend as K
from keras import callbacks
from keras.callbacks import ModelCheckpoint
from keras.models import Model, Sequential
from keras.layers import Merge, Dense, Dropout, Embedding, Input, LSTM, merge, BatchNormalization, Flatten, Reshape, Lambda
from keras.utils.visualize_util import plot
from bilinear_tensor import BilinearTensorLayer

remote = callbacks.RemoteMonitor(root='http://128.143.63.199:9009')


def bilinear_projection(inputs):
    """Flattened batched outer product: a (batch, n) and a (batch, m)
    tensor are combined into a single (batch, n * m) tensor."""
    x, y = inputs
    batch_size = K.shape(x)[0]
    outer_product = x[:, :, np.newaxis] * y[:, np.newaxis, :]
    return K.reshape(outer_product, (batch_size, -1))
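
A usage sketch for the helper above, wrapping it in a Lambda layer (the shapes are illustrative assumptions):

from keras.layers import Input, Lambda

x_in = Input(shape=(8,))
y_in = Input(shape=(8,))
# (batch, 8) x (batch, 8) -> outer product -> flattened (batch, 64)
z = Lambda(bilinear_projection, output_shape=(64,))([x_in, y_in])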


def train_classifier_video(trainVideoFeatures,
                           trainLabels,
                           valVideoFeatures=None,
                           valLabels=None):

    input_dim = 4096
Example No. 9
from __future__ import print_function
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop, Adam
from keras.utils import np_utils
from keras import callbacks
remote = callbacks.RemoteMonitor(root='http://localhost:9000',
                                 path='/publish/epoch/end/',
                                 field='data',
                                 headers=None)

import matplotlib.pyplot as plt

np.random.seed(1671)  # for reproducibility

# network and training
NB_EPOCH = 20
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10  # number of outputs = number of digits
OPTIMIZER = Adam()  # optimizer, explained in this chapter
N_HIDDEN = 256
VALIDATION_SPLIT = 0.2  # how much TRAIN is reserved for VALIDATION
DROPOUT = 0.3

# data: shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# X_train is 60000 rows of 28x28 values --> reshaped into 60000 x 784
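
The reshape the comment describes is the standard MNIST flattening; a sketch consistent with the constants above:

X_train = X_train.reshape(60000, 784).astype('float32') / 255
X_test = X_test.reshape(10000, 784).astype('float32') / 255

# one-hot encode the labels
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)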
Example No. 10
def train(model,
          train_x,
          y_train,
          x_val,
          y_val,
          batch_size=32,
          epochs=32,
          plot=False):
    """ function : train

    fit data on a model and return the trained model

    Args:
        model : keras.models.Model
            the model to evaluate
        x_train : dict
            a dictionary mapping input names to actual data
        y_train : np.ndarray
            the targets of the train data
        x_val : dict
            a dictionary mapping input names to actual data
        y_val : np.ndarray
            the targets of the validation data
        batch_size : int [optional, default: 32]
            the size of the batches to be fed into the network
        epochs : int [optional, default: 32]
            the number of epochs (times to run the network)
    Returns:
        r : list
            list of the loss and metrics specified by the model after running
            the model on the test data
    """
    history = model.fit(
        x=train_x,
        y=y_train,
        callbacks=[callbacks.RemoteMonitor(root='http://localhost:9000')],
        batch_size=batch_size,
        epochs=epochs,
        validation_data=(x_val, y_val),
        verbose=int(cfg.verbosity))

    if cfg.model_save_name != "":
        model.save("model/" + cfg.model_save_name)

    if cfg.save_images:
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('Model accuracy (lead {})'.format(cfg.current_lead))
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'], loc='upper left')
        plt.savefig("images/accuracy_lead_{}_{}.png".format(
            cfg.current_lead, cfg.t))
        plt.clf()
        # plt.show()
        # summarize history for loss
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('Model loss (lead {})'.format(cfg.current_lead))
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'], loc='upper left')
        plt.savefig("images/loss_lead_{}_{}.png".format(
            cfg.current_lead, cfg.t))
        plt.clf()
        # plt.show()
    return model
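
The two plotting blocks above differ only in the metric name; a small helper could remove the duplication (a sketch, assuming the same cfg fields and matplotlib import):

def plot_history(history, metric, lead, tag):
    plt.plot(history.history[metric])
    plt.plot(history.history['val_' + metric])
    plt.title('Model {} (lead {})'.format(metric, lead))
    plt.ylabel(metric)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig("images/{}_lead_{}_{}.png".format(metric, lead, tag))
    plt.clf()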
Example No. 11
import numpy as np
import cv2
import random
from keras.datasets import mnist
from keras.models import Sequential
#from keras.initializations import normal, identity
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
from keras.optimizers import RMSprop, Adadelta, SGD
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten
from keras.layers.wrappers import TimeDistributed
from keras.models import model_from_json
#import json
from keras.utils import np_utils
from keras import callbacks
remote = callbacks.RemoteMonitor(root='http://10.35.73.88:9000')

# for reproducibility
#np.random.seed(2016)
#random.seed(2016)

#define some run parameters
batch_size = 32
nb_epochs = 100000
maxToAdd = 25  #40
size = 60

import glob
import sys

## function to get classes for each image