Example No. 1
def test_image_data_generator_training():
    np.random.seed(1337)
    img_gen = ImageDataGenerator(rescale=1.)  # Dummy ImageDataGenerator
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit_generator(img_gen.flow(x_train, y_train, batch_size=16),
                                  epochs=10,
                                  validation_data=img_gen.flow(x_test, y_test,
                                                               batch_size=16),
                                  verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    model.evaluate_generator(img_gen.flow(x_train, y_train, batch_size=16))
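Note: in tf.keras 2.x, fit_generator and evaluate_generator are deprecated; Model.fit and Model.evaluate accept generators and Sequence objects directly. A minimal sketch of the equivalent calls, assuming the same model and img_gen as above:

history = model.fit(img_gen.flow(x_train, y_train, batch_size=16),
                    epochs=10,
                    validation_data=img_gen.flow(x_test, y_test, batch_size=16),
                    verbose=0)  # tf.keras logs the metric as 'val_accuracy' rather than 'val_acc'
model.evaluate(img_gen.flow(x_train, y_train, batch_size=16))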
Example No. 2
def train(dataReader, oneHot, oneHotAveraged, contextHashes):
	n = (Epochs + 1) * SamplesPerEpoch  # TODO + 1 should not be needed

	tokeniser = Tokenizer(nb_words=MaxWords)
	tokeniser.fit_on_texts((row[0] for row in dataReader.trainingData(n)))

	# `word_index` maps each word to its unique index
	dictionarySize = len(tokeniser.word_index) + 1

	oneHotDimension        = (1 if oneHotAveraged else SequenceLength) * dictionarySize if oneHot else 0
	contextHashesDimension = dictionarySize * 2 if contextHashes else 0

	model = Sequential()
	model.add(Dense(EmbeddingDim, input_dim=(oneHotDimension + contextHashesDimension)))
	model.add(Dense(Labels, activation='softmax'))
	model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

	trainingGenerator   = mapGenerator(dataReader.trainingData(n),   tokeniser, dictionarySize, oneHot, oneHotAveraged, contextHashes)
	validationGenerator = mapGenerator(dataReader.validationData(n), tokeniser, dictionarySize, oneHot, oneHotAveraged, contextHashes)

	model.fit_generator(trainingGenerator,
		nb_epoch=Epochs,
		samples_per_epoch=SamplesPerEpoch,
		validation_data=validationGenerator,
		nb_val_samples=SamplesPerEpoch)

	model2 = Sequential()
	model2.add(Dense(EmbeddingDim, input_dim=(oneHotDimension + contextHashesDimension), weights=model.layers[0].get_weights()))

	return model, model2, tokeniser, dictionarySize
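For reference, Tokenizer.fit_on_texts assigns word indices starting at 1 (index 0 is reserved for padding), which is why the code above adds 1 when computing dictionarySize. A small self-contained illustration (in Keras 2 the nb_words argument is named num_words):

from keras.preprocessing.text import Tokenizer

tokeniser = Tokenizer(num_words=100)
tokeniser.fit_on_texts(['the cat sat', 'the dog sat'])
print(tokeniser.word_index)  # e.g. {'the': 1, 'sat': 2, 'cat': 3, 'dog': 4}
dictionary_size = len(tokeniser.word_index) + 1  # +1 for the reserved 0 index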
Example No. 3
def train_CAE():

    encoder = containers.Sequential()
    encoder.add(Permute((3,1,2),input_shape=(h,w,ch))) # reorder input to ch, h, w (no sample axis)
    encoder.add(GaussianNoise(0.05)) # corrupt inputs slightly
    encoder.add(Convolution2D(16,3,3,init='glorot_uniform',border_mode='same'))
    encoder.add(MaxPooling2D((2,2)))
    encoder.add(Activation('tanh'))
    encoder.add(Convolution2D(32,3,3,init='glorot_uniform',border_mode='same'))
    encoder.add(MaxPooling2D((2,2)))
    encoder.add(Activation('tanh'))
    decoder = containers.Sequential()
    decoder.add(UpSampling2D((2,2),input_shape=(32,32,32)))
    decoder.add(Convolution2D(16,3,3,init='glorot_uniform',border_mode='same'))
    decoder.add(Activation('tanh'))
    decoder.add(UpSampling2D((2,2)))  # input_shape is only honoured on a container's first layer
    decoder.add(Convolution2D(3,3,3,init='glorot_uniform',border_mode='same'))
    decoder.add(Activation('tanh'))
    decoder.add(Permute((2,3,1)))  # back to h, w, ch
    autoencoder = AutoEncoder(encoder,decoder)

    model = Sequential()
    model.add(autoencoder)
    model.compile(optimizer='rmsprop', loss='mae')
    # if shapes don't match, check the output_shape of encoder/decoder
    genr = image_generator(biz_id_train['photo_id'], batch_size)
    model.fit_generator(genr, samples_per_epoch=len(biz_id_train), nb_epoch=10)
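containers.Sequential and AutoEncoder are Keras 0.x APIs that were removed long ago. A rough sketch of an equivalent denoising convolutional autoencoder in Keras 2 style, assuming the same h, w, ch and image generator as above (channels-last, so no Permute is needed):

from keras.models import Sequential
from keras.layers import GaussianNoise, Conv2D, MaxPooling2D, UpSampling2D

model = Sequential([
    GaussianNoise(0.05, input_shape=(h, w, ch)),            # corrupt inputs slightly
    Conv2D(16, (3, 3), padding='same', activation='tanh'),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), padding='same', activation='tanh'),
    MaxPooling2D((2, 2)),                                   # encoder output: (h/4, w/4, 32)
    UpSampling2D((2, 2)),
    Conv2D(16, (3, 3), padding='same', activation='tanh'),
    UpSampling2D((2, 2)),
    Conv2D(ch, (3, 3), padding='same', activation='tanh'),  # reconstruct (h, w, ch)
])
model.compile(optimizer='rmsprop', loss='mae')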
Example No. 4
class CNN(object):
    def __init__(self):
        self.model = Sequential([
            Conv2D(50, (3, 3), input_shape=(28, 28, 1), padding='same', activation='relu'),
            Conv2D(50, (3, 3), padding='same', activation='relu'),  # input_shape is only needed on the first layer
            MaxPool2D(pool_size=(4, 4), strides=(3, 3), padding='same'),

            Conv2D(32, (3, 3), padding='same', activation='relu'),
            Conv2D(32, (3, 3), padding='same', activation='relu'),
            MaxPool2D(pool_size=(7, 7), strides=(3, 3), padding='same'),

            Flatten(),
            Dropout(0.5),
            Dense(64, activation='relu'),
            Dense(10, activation='softmax')
        ])

        self.model.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])

    def train(self):
        mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)

        train_datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2)
        train_datagen.fit(mnist.train.images.reshape(-1, 28, 28, 1))

        x_test, y_test = mnist.test.images.reshape(-1, 28, 28, 1), mnist.test.labels
        self.model.fit_generator(train_datagen.flow(mnist.train.images.reshape(-1, 28, 28, 1), mnist.train.labels),
                                 # batch size is set in flow() (default 32), not in fit_generator
                                 epochs=20,
                                 verbose=1,
                                 validation_data=(x_test, y_test),
                                 callbacks=[TrainValTensorBoard(log_dir='./logs/cnn4', histogram_freq=1, write_grads=True)])

        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Loss', score[0], 'acc', score[1])
Example No. 5
def test_CallbackValData():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    train_generator = data_generator(X_train, y_train, batch_size)
    model.fit_generator(train_generator, len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
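LambdaCallback accepts ad-hoc functions for the individual training hooks; the no-op lambda x: 1 above exists only so the test can inspect cbk.validation_data afterwards. A more typical use would be:

from keras import callbacks

log_cbk = callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs: print('epoch %d: loss %.4f' % (epoch, logs['loss'])))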
Example No. 6
def test_multiprocessing_fit_error():
    batch_size = 10
    good_batches = 3

    def custom_generator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (50, 2)),
                   np.random.randint(batch_size, 2, 50))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    samples = batch_size * (good_batches + 1)

    with pytest.raises(StopIteration):
        model.fit_generator(
            custom_generator(), samples, 1,
            workers=4, use_multiprocessing=True,
        )

    with pytest.raises(StopIteration):
        model.fit_generator(
            custom_generator(), samples, 1,
            use_multiprocessing=False,
        )
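Plain Python generators cannot be shared safely across worker processes, which is why the failure above surfaces as StopIteration rather than the original RuntimeError. In Keras 2 the multiprocessing-safe alternative is keras.utils.Sequence, where batches are fetched by index instead of from shared iterator state. A minimal sketch:

import numpy as np
from keras.utils import Sequence

class RandomBatches(Sequence):
    # indexable batch source; safe with workers > 0 and use_multiprocessing=True
    def __init__(self, n_batches=3, batch_size=10):
        self.n_batches = n_batches
        self.batch_size = batch_size

    def __len__(self):
        return self.n_batches  # number of batches per epoch

    def __getitem__(self, idx):
        x = np.random.randint(0, 256, (self.batch_size, 2))
        y = np.random.randint(0, 2, self.batch_size)
        return x, y

# model.fit_generator(RandomBatches(), workers=4, use_multiprocessing=True)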
Example No. 7
def main():
    ext = extension_from_parameters()

    out_dim = 1
    loss = 'mse'
    metrics = None
    #metrics = ['accuracy'] if CATEGORICAL else None

    datagen = RegressionDataGenerator()
    train_gen = datagen.flow(batch_size=BATCH_SIZE)
    val_gen = datagen.flow(val=True, batch_size=BATCH_SIZE)
    val_gen2 = datagen.flow(val=True, batch_size=BATCH_SIZE)

    model = Sequential()
    for layer in LAYERS:
        if layer:
            model.add(Dense(layer, input_dim=datagen.input_dim, activation=ACTIVATION))
            if DROP:
                model.add(Dropout(DROP))
    model.add(Dense(out_dim))

    model.summary()
    model.compile(loss=loss, optimizer='rmsprop', metrics=metrics)

    train_samples = int(datagen.n_train/BATCH_SIZE) * BATCH_SIZE
    val_samples = int(datagen.n_val/BATCH_SIZE) * BATCH_SIZE

    history = BestLossHistory(val_gen2, val_samples, ext)
    checkpointer = ModelCheckpoint(filepath='model'+ext+'.h5', save_best_only=True)

    model.fit_generator(train_gen, train_samples,
                        nb_epoch = NB_EPOCH,
                        validation_data = val_gen,
                        nb_val_samples = val_samples,
                        callbacks=[history, checkpointer])
Example No. 8
def test_multiprocessing_fit_error():

    batch_size = 32
    good_batches = 5

    def myGenerator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (500, 2)),
                   np.random.randint(batch_size, 2, 500))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    samples = batch_size * (good_batches + 1)

    with pytest.raises(Exception):
        model.fit_generator(
            myGenerator(), samples, 1,
            nb_worker=4, pickle_safe=True,
        )

    with pytest.raises(Exception):
        model.fit_generator(
            myGenerator(), samples, 1,
            pickle_safe=False,
        )
Example No. 9
class MLP(BaseEstimator):
    def __init__(self, verbose=0, model=None, final_activation='sigmoid'):
        self.verbose = verbose
        self.model = model
        self.final_activation = final_activation

    def fit(self, X, y):
        if not self.model:
            self.model = Sequential()
            self.model.add(Dense(1000, input_dim=X.shape[1]))
            self.model.add(Activation('relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(y.shape[1]))
            self.model.add(Activation(self.final_activation))
            self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01))
        self.model.fit_generator(generator=_batch_generator(X, y, 256, True),
                                 samples_per_epoch=X.shape[0], nb_epoch=20, verbose=self.verbose)

    def predict(self, X):
        pred = self.predict_proba(X)
        return sparse.csr_matrix(pred > 0.2)

    def predict_proba(self, X):
        pred = self.model.predict_generator(generator=_batch_generatorp(X, 512), val_samples=X.shape[0])
        return pred
Example No. 10
def CNN(trainDir, validationDir, classNum):
    model = Sequential()
    model.add(Convolution2D(4, 3, 3, input_shape=(img_width, img_height, 1)))
    model.add(Activation('relu'))
    model.add(Convolution2D(4, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Convolution2D(16, 3, 3))
    # model.add(Activation('relu'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Dense(classNum))
    model.add(Activation('softmax'))
    # test
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(
            rescale=1./255,
            shear_range=0.2,
            zca_whitening=True,
            zoom_range=0.2,
            horizontal_flip=False)
    # this is the augmentation configuration we will use for testing:
    # only rescaling
    test_datagen = ImageDataGenerator(rescale=1./255, zca_whitening=True)
    train_generator = train_datagen.flow_from_directory(
            trainDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
            validationDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    model.fit_generator(
            train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=nb_epoch,
            validation_data=validation_generator,
            nb_val_samples=nb_validation_samples)
    return model
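flow_from_directory infers the class labels from subdirectory names, so trainDir and validationDir are each expected to contain one folder per class (hypothetical layout sketched below). Note also that zca_whitening only takes effect after the generator has been fit on sample images via datagen.fit(...); otherwise Keras emits a warning and skips it.

# expected (hypothetical) layout for flow_from_directory:
# trainDir/
#     class_a/  img_001.png, img_002.png, ...
#     class_b/  img_101.png, img_102.png, ...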
Example No. 11
def try_params(n_iterations, params, data=None, datamode='memory'):

    print "iterations:", n_iterations
    print_params(params)

    batchsize = 100
    if datamode == 'memory':
        X_train, Y_train = data['train']
        X_valid, Y_valid = data['valid']
        inputshape = X_train.shape[1:]
    else:
        train_generator = data['train']['gen_func'](batchsize, data['train']['path'])
        valid_generator = data['valid']['gen_func'](batchsize, data['valid']['path'])
        train_epoch_step = data['train']['n_sample'] / batchsize
        valid_epoch_step = data['valid']['n_sample'] / batchsize
        inputshape = data['train']['gen_func'](batchsize, data['train']['path']).next()[0].shape[1:]

    model = Sequential()
    model.add(Conv2D(128, (1, 24), padding='same', input_shape=inputshape, activation='relu'))
    model.add(GlobalMaxPooling2D())

    model.add(Dense(32, activation='relu'))
    model.add(Dropout(params['DROPOUT']))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    optim = Adadelta
    myoptimizer = optim(epsilon=params['DELTA'], rho=params['MOMENT'])
    mylossfunc = 'categorical_crossentropy'
    model.compile(loss=mylossfunc, optimizer=myoptimizer, metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0)

    if datamode == 'memory':
        model.fit(
            X_train,
            Y_train,
            batch_size=batchsize,
            epochs=int(round(n_iterations)),
            validation_data=(X_valid, Y_valid),
            callbacks=[early_stopping])
        score, acc = model.evaluate(X_valid, Y_valid)
    else:
        model.fit_generator(
            train_generator,
            steps_per_epoch=train_epoch_step,
            epochs=int(round(n_iterations)),
            validation_data=valid_generator,
            validation_steps=valid_epoch_step,
            callbacks=[early_stopping])
        score, acc = model.evaluate_generator(valid_generator, steps=valid_epoch_step)

    return {'loss': score, 'model': (model.to_json(), optim, myoptimizer.get_config(), mylossfunc)}
Example No. 12
def lm():
    maxlen=10
    cfig = getattr(config, 'get_config_morph')('cs')
    batch_size, nb_epoch = cfig['batch_size'], 200
    X_train, y_train = getTextFile(cfig['train_file'], cfig['train_dic'], cfig)
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen, padding='post')
    y_train = sequence.pad_sequences(y_train, maxlen=maxlen, padding='post')
    #X_train, y_train = X_train[:10050], y_train[:10050]
    print X_train.shape, y_train.shape
    #X_test, y_test = getTextFile(cfig['test_file'], cfig['train_dic'], cfig)
    #X_test = sequence.pad_sequences(X_test, maxlen=10, padding='post')
    #y_test = sequence.pad_sequences(y_test, maxlen=10, padding='post')
    '''
    y_train_tensor3 = np.zeros((len(X_train), maxlen, cfig['vocab_size']), dtype=np.bool)
    i, t = 0, 0
    for sentence in y_train:
        t = 0
        for v in sentence:
            y_train_tensor3[i][t][v] = True
            t += 1
        i += 1
    k = 0
    for i , j in generate_data(X_train, y_train, 200, cfig['vocab_size']):
        print i.shape , j.shape
        if k > 20:
            break
        k += 1
    exit(0)
    '''
    print 'Build model...'
    model = Sequential()
    model.add(Embedding(cfig['vocab_size'], 128, dropout=0.2))
    model.add(LSTM(128, return_sequences=True)) #- original
    model.add(TimeDistributedDense(cfig['vocab_size']))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    print 'Train...'
    model.fit_generator(generate_data(X_train, y_train, batch_size, cfig['vocab_size']),
                        #samples_per_epoch=len(X_train)/batch_size,
                        samples_per_epoch=1000,
                        nb_epoch=nb_epoch)
    #model.fit(X_train, y_train_tensor3)
    exit(0)
    cnt = 0
    for i , j in generate_data(X_train, y_train, 200, cfig['vocab_size']):
        #model.train_on_batch(i, j)
        history= model.fit(i, j, batch_size=10, nb_epoch=1,verbose=0)
        if cnt >= 3:
            break
        cnt += 1
Example No. 13
def train_model(genre, dir_model, MP):
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) #check gpu is being used
    
    batch_size = MP['bs']
    lstm_size = MP['lstm_size']
    seq_length = MP['seq_length']
    drop = MP['dropout']
    lr = MP['lr']
    epochs = MP['epochs']
    
    text_to_int, int_to_text, n_chars = np.load('playlists/%s/ancillary_char.npy'%genre)
    vocab_size = len(text_to_int)
    X = np.load('playlists/%s/X_sl%d_char.npy'%(genre, seq_length))
    y = np.load('playlists/%s/y_sl%d_char.npy'%(genre, seq_length))

    # randomly shuffle samples before test/valid split
    np.random.seed(40)
    ran = [i for i in range(len(X))]
    np.random.shuffle(ran)
    
    X_train, X_valid, y_train, y_valid = train_test_split(X[ran], y[ran], test_size=0.2, random_state=42)

    try:
        model = load_model(dir_model)
        print("successfully loaded previous model, continuing to train")
    except:
        print("generating new model")
        model = Sequential()
        model.add(GRU(lstm_size, dropout=drop, recurrent_dropout=drop, return_sequences=True,
                      input_shape=(seq_length, vocab_size)))
        for i in range(MP['n_layers'] - 1):
            model.add(GRU(lstm_size, dropout=drop, recurrent_dropout=drop, return_sequences=True))
        model.add(TimeDistributed(Dense(vocab_size, activation='softmax'))) #output shape=(bs, sl, vocab)

        decay = 0.5*lr/epochs
        optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay, clipvalue=1)
        #optimizer = RMSprop(lr=lr, decay=decay)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['categorical_accuracy'])
    print(model.summary())

    # callbacks
    checkpoint = ModelCheckpoint(dir_model, monitor='loss', save_best_only=True, mode='min')
    #earlystop = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=3)
    callbacks_list = [checkpoint]

    # train
    model.fit_generator(one_hot_gen(X_train, y_train, vocab_size, seq_length, batch_size),
                        steps_per_epoch=len(X_train) // batch_size,  # step counts must be integers
                        epochs=epochs, callbacks=callbacks_list,
                        validation_data=one_hot_gen(X_valid, y_valid, vocab_size, seq_length, batch_size),
                        validation_steps=len(X_valid) // batch_size)
    model.save(dir_model)
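one_hot_gen is defined elsewhere in the original script. A plausible minimal version, assuming X_train and y_train are integer-encoded arrays of shape (n_samples, seq_length), would be:

import numpy as np

def one_hot_gen(X, y, vocab_size, seq_length, batch_size):
    # yields one-hot encoded (input, target) batches indefinitely
    while True:
        for i in range(0, len(X) - batch_size + 1, batch_size):
            xb = np.zeros((batch_size, seq_length, vocab_size), dtype=np.float32)
            yb = np.zeros((batch_size, seq_length, vocab_size), dtype=np.float32)
            for j in range(batch_size):
                for t in range(seq_length):
                    xb[j, t, X[i + j][t]] = 1.0
                    yb[j, t, y[i + j][t]] = 1.0
            yield xb, yb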
Example No. 14
def test_sequential_fit_generator():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=False)
    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=True)
    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=False, validation_data=(X_test, y_test))
    model.fit_generator(data_generator(True), len(X_train), nb_epoch, show_accuracy=True, validation_data=(X_test, y_test))

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.9)
Example No. 15
def test_sequential_fit_generator():
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(x_train) // batch_size
        else:
            max_batch_index = len(x_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (x_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (x_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.pop()
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), 5, epochs)
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=(x_test, y_test))
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=data_generator(False),
                        validation_steps=3)
    model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
    model.evaluate(x_train, y_train)
Example No. 16
def model(datagen, X_train, Y_train, X_test, Y_test):
    batch_size = 32
    nb_epoch = 200

    # input image dimensions
    img_rows, img_cols = 32, 32
    # the CIFAR10 images are RGB
    img_channels = 3

    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # fit the model on the batches generated by datagen.flow()
    model.fit_generator(datagen.flow(X_train, Y_train,
                        batch_size=batch_size),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=nb_epoch,
                        validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
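The double-brace {{uniform(0, 1)}} placeholders are hyperas template syntax, not valid Python on their own: hyperas rewrites the function body and substitutes a freshly sampled value on every trial. A sketch of how such a model function is typically driven, assuming a companion data() function whose return values match the model function's signature:

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())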
Example No. 17
def test_multiprocessing_training_fromfile():

    reached_end = False

    arr_data = np.random.randint(0,256, (500, 200))
    arr_labels = np.random.randint(0, 2, 500)
    np.savez("data.npz", **{"data": arr_data, "labels": arr_labels})

    def myGenerator():

        batch_size = 32
        n_samples = 500

        arr = np.load("data.npz")

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr["data"][start: end]
            y = arr["labels"][start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(10, input_shape=(200, )))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('linear'))
    model.compile(loss='mse', optimizer='adadelta')

    model.fit_generator(myGenerator(),
                        samples_per_epoch=320,
                        nb_epoch=1,
                        verbose=1,
                        max_q_size=10,
                        nb_worker=2,
                        pickle_safe=True)

    model.fit_generator(myGenerator(),
                        samples_per_epoch=320,
                        nb_epoch=1,
                        verbose=1,
                        max_q_size=10,
                        pickle_safe=False)
    reached_end = True

    assert reached_end
Example No. 18
def test_TerminateOnNaN():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    history = model.fit_generator(data_generator(X_train, y_train, batch_size),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0])
Example No. 19
def Convolution(test, df, flip_indices):
    img_rows, img_cols = 96, 96 # images are 96x96 pixels
    img_channels = 1 # images are grey scale - if RGB use img_channels = 3
    nb_filter = 32 # common and efficient to use multiples of 2 for filters
    nb_epoch = 1
    batch_size = 32
    X, y = load(df, test)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=99)
    
    model = Sequential()
    model.add(Convolution2D(nb_filter = nb_filter, nb_row = 3, nb_col = 3, 
#                            border_mode = 'same', # this caused a crash; note the default is actually 'valid'
                            input_shape = X_train.shape[1:])) # must set input shape for the first Convolution2D layer
    model.add(Activation('relu')) # default convolutional activation function - could also try Leaky ReLU and PReLU : http://arxiv.org/abs/1502.01852
    model.add(MaxPooling2D()) # 2x2 pooling with stride 2 is the default - halves the matrix in both dimensions

    # A newer paper questions the use of max pooling, finding that replacing it with convolutional layers of increased stride works better
    
    model.add(Dropout(0.1))
    model.add(Convolution2D(nb_filter = nb_filter * 2, nb_row = 2, nb_col = 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D())
    model.add(Dropout(0.2))
    model.add(Convolution2D(nb_filter = nb_filter * 4, nb_row = 2, nb_col = 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D())
    model.add(Dropout(0.3))
    model.add(Flatten()) # flatten the data before fully connected layer (reg. neural net)
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    if len(flip_indices) == 12:
        model.add(Dense(30))
    else:
        model.add(Dense(8))
    model.compile(loss = 'mean_squared_error', # other objectives : http://keras.io/objectives/
                  optimizer = 'Adam') # discussion : http://cs231n.github.io/neural-networks-3/ : "Adam is currently recommended as the default algorithm to use" - cs231n
    dataGen = createDataGen(flip_indices = flip_indices, horizontal_flip = True)
    checkpointer = ModelCheckpoint(filepath="/tmp/weights.hdf5", verbose=1, save_best_only=True)
    earlystopping = EarlyStopping(monitor = 'val_loss', patience = 20, verbose = 0, mode = 'auto')
    model.fit_generator(dataGen.flow(X_train, y_train, batch_size = batch_size), nb_epoch = nb_epoch,
                                     samples_per_epoch = X_train.shape[0],
                                     validation_data = (X_test, y_test),
                                     callbacks = [checkpointer, earlystopping])
Example No. 20
class TrainAllCnn(object):

    def __init__(self):
        # prepare training data
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_test.shape[0], 'test samples')

        self.Y_train = np_utils.to_categorical(y_train, 10)
        self.Y_test = np_utils.to_categorical(y_test, 10)

        self.X_train = X_train.astype('float32')
        self.X_test = X_test.astype('float32')
        self.X_train /= 255
        self.X_test /= 255

    def build_model(self):
        # we want a Sequential model
        self.model = Sequential()

        self.model.add(Convolution2D(96, 3, 3, border_mode='same', input_shape=(3, 32, 32)))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(96, 3, 3, border_mode='same'))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(96, 3, 3, subsample=(2,2), border_mode='same'))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(192, 3, 3, border_mode='same'))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(192, 3, 3, border_mode='same'))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(96, 3, 3, subsample=(2, 2), border_mode='same'))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(3, 3), strides=(2,2)))
        self.model.add(Convolution2D(192, 3, 3))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(192, 1, 1))
        self.model.add(Activation('relu'))
        self.model.add(Convolution2D(10, 1, 1))
        self.model.add(Activation('relu'))
        self.model.add(Flatten())
        self.model.add(Activation('softmax'))

        # now we compile the model; as optimizer we use stochastic gradient descent with momentum
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    def train(self, batch, epoch):
        self.build_model()
        # now we fit the model
        # with 200 epochs this takes a while ...
        return self.model.fit(self.X_train, self.Y_train, batch_size=batch, nb_epoch=epoch, validation_data=(self.X_test, self.Y_test), shuffle=True)

    def train_argument_images(self, batch, epoch):
        self.build_model()

        datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False, zoom_range=0.1)
        datagen.fit(self.X_train)
        return self.model.fit_generator(datagen.flow(self.X_train, self.Y_train, batch_size=batch), samples_per_epoch=self.X_train.shape[0], nb_epoch=epoch, validation_data=(self.X_test, self.Y_test)).model
Example No. 21
def test_CallbackValData():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit_generator(data_generator(True), len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
Example No. 22
def main():
    ext = extension_from_parameters()

    out_dim = 1
    loss = 'mse'
    metrics = None
    #metrics = ['accuracy'] if CATEGORICAL else None

    reshape = LOCALLY_CONNECTED_LAYERS is not None

    datagen = RegressionDataGenerator()
    train_gen = datagen.flow(batch_size=BATCH_SIZE, reshape=reshape)
    val_gen = datagen.flow(val=True, batch_size=BATCH_SIZE, reshape=reshape)
    val_gen2 = datagen.flow(val=True, batch_size=BATCH_SIZE, reshape=reshape)

    model = Sequential()

    if LOCALLY_CONNECTED_LAYERS:
        for layer in LOCALLY_CONNECTED_LAYERS:
            if layer:
                model.add(LocallyConnected1D(*layer, input_shape=(datagen.input_dim, 1), activation=ACTIVATION))
                if POOL:
                    model.add(MaxPooling1D(pool_length=POOL))
        model.add(Flatten())

    for layer in DENSE_LAYERS:
        if layer:
            model.add(Dense(layer, input_dim=datagen.input_dim, activation=ACTIVATION))
            if DROP:
                model.add(Dropout(DROP))
    model.add(Dense(out_dim))

    model.summary()
    model.compile(loss=loss, optimizer='sgd', metrics=metrics)

    train_samples = int(datagen.n_train/BATCH_SIZE) * BATCH_SIZE
    val_samples = int(datagen.n_val/BATCH_SIZE) * BATCH_SIZE

    history = BestLossHistory(val_gen2, val_samples, ext)
    checkpointer = ModelCheckpoint(filepath='model'+ext+'.h5', save_best_only=True)

    model.fit_generator(train_gen, train_samples,
                        nb_epoch = NB_EPOCH,
                        validation_data = val_gen,
                        nb_val_samples = val_samples,
                        callbacks=[history, checkpointer])
Example No. 23
def test_multiprocessing_training():

    reached_end = False

    arr_data = np.random.randint(0, 256, (500, 2))
    arr_labels = np.random.randint(0, 2, 500)

    def myGenerator():

        batch_size = 32
        n_samples = 500

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    model.fit_generator(myGenerator(),
                        samples_per_epoch=320,
                        nb_epoch=1,
                        verbose=1,
                        max_q_size=10,
                        nb_worker=4,
                        pickle_safe=True)

    model.fit_generator(myGenerator(),
                        samples_per_epoch=320,
                        nb_epoch=1,
                        verbose=1,
                        max_q_size=10,
                        pickle_safe=False)

    reached_end = True

    assert reached_end
Example No. 24
def train(train_generator, train_size, input_num, dims_num):
    print("Start Train Job! ")
    start = time.time()
    inputs = InputLayer(input_shape=(input_num, dims_num), batch_size=batch_size)
    layer1 = LSTM(128)
    output = Dense(2, activation="softmax", name="Output")
    optimizer = Adam()
    model = Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(Dropout(0.5))
    model.add(output)
    call = TensorBoard(log_dir=log_dir, write_grads=True, histogram_freq=1)
    model.compile(optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit_generator(train_generator, steps_per_epoch=train_size // batch_size, epochs=epochs_num, callbacks=[call])
#    model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end = time.time()
    print("Over train job in %f s" % (end - start))
Example No. 25
class SequenceTaggingMachine(object):

    def __init__(self, mask ):

        self.model = None
        self.mask = mask


    def train(self, train_corpus, valid_corpus, learning_param):

        self.model = Sequential()

        self.model.add(Embedding(train_corpus.source_cell_num(), 256, mask_zero = True))

        self.model.add(Bidirectional(LSTM(128, return_sequences=True)))
        self.model.add(Bidirectional(LSTM(128, return_sequences=True)))

        self.model.add(TimeDistributed(Dense(input_dim=128, output_dim=train_corpus.target_cell_num())))
        self.model.add(Activation('softmax'))

        self.model.compile(loss=lambda output,target: masked_categorical_crossentropy(output, target, self.mask),
                           optimizer='rmsprop',
                           metrics=[lambda y_true, y_pred: masked_categorical_accuracy(y_true, y_pred, self.mask)])


        logging.debug("Preparing data iter")
        problem = SequenceTaggingProblem(train_corpus)
        data_train = BucketIter(problem, learning_param.batch_size, max_pad_num=learning_param.max_pad)

        val_problem = SequenceTaggingProblem(valid_corpus)
        data_val = BucketIter(val_problem, learning_param.batch_size, max_pad_num=learning_param.max_pad)

        checkpointer = ModelCheckpoint(filepath="weights.{epoch:03d}-{val_loss:.2f}.hdf5", verbose=1)

        logging.debug("Begin train model")
        self.model.fit_generator(bucket_iter_adapter(data_train,train_corpus.target_cell_num()),
                                 samples_per_epoch=train_corpus.corpus_size(), nb_epoch=100, verbose=1,
                                 validation_data=bucket_iter_adapter(data_val, train_corpus.target_cell_num()),
                                 nb_val_samples = valid_corpus.corpus_size(), callbacks=[checkpointer])

        print "Model is trained"
Example No. 26
def test_multiprocessing_training_fromfile(in_tmpdir):
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)
    np.savez('data.npz', **{'data': arr_data, 'labels': arr_labels})

    def custom_generator():

        batch_size = 10
        n_samples = 50

        arr = np.load('data.npz')

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr['data'][start: end]
            y = arr['labels'][start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    model.fit_generator(custom_generator(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        workers=2,
                        use_multiprocessing=True)

    model.fit_generator(custom_generator(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False)

    os.remove('data.npz')
Example No. 27
def rnn_gru(float_data, lookback=1440, step=6):
    model = Sequential()
    model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
    model.add(layers.Dense(1))

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=500,
                                  epochs=20,
                                  validation_data=val_gen,
                                  validation_steps=val_steps)
    draw_histroy(history)
Example No. 28
def test_multithreading_from_file():
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)
    np.savez('data_threads.npz', **{'data': arr_data, 'labels': arr_labels})

    @threadsafe_generator
    def custom_generator():
        batch_size = 10
        n_samples = 50

        arr = np.load('data_threads.npz')

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr['data'][start: end]
            y = arr['labels'][start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker threads, consume on main thread:
    #   - All worker threads share the SAME generator
    model.fit_generator(custom_generator(),
                        steps_per_epoch=STEPS_PER_EPOCH,
                        epochs=1,
                        verbose=1,
                        validation_steps=None,
                        max_queue_size=10,
                        workers=WORKERS,
                        use_multiprocessing=False)

    # - Produce data on 1 worker thread, consume on main thread:
    #   - Worker thread is the only thread running the generator
    model.fit_generator(custom_generator(),
                        steps_per_epoch=STEPS_PER_EPOCH,
                        epochs=1,
                        verbose=1,
                        validation_steps=None,
                        max_queue_size=10,
                        workers=1,
                        use_multiprocessing=False)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    model.fit_generator(custom_generator(),
                        steps_per_epoch=STEPS_PER_EPOCH,
                        epochs=1,
                        verbose=1,
                        validation_steps=None,
                        max_queue_size=10,
                        workers=0,
                        use_multiprocessing=False)

    os.remove('data_threads.npz')
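threadsafe_generator is not shown in this excerpt; since all worker threads share the same generator (see the first comment above), the usual implementation serializes next() with a lock, along these lines:

import threading

class ThreadsafeIterator:
    # wraps an iterator so that concurrent next() calls are serialized
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return next(self.it)

def threadsafe_generator(fn):
    def wrapped(*args, **kwargs):
        return ThreadsafeIterator(fn(*args, **kwargs))
    return wrapped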
Example No. 29
def train_xy(epochs=50, batch_size=32, h=256, w=256, ch=3, train_p=0.8, valid_p=0.1):
    print("Compiling Model")
    t_comp = time()
    model = Sequential()
    # reshape input to ch, h, w (no sample axis)
    model.add(Reshape(dims=(h, w, ch), input_shape=(ch * h * w,)))
    model.add(Permute((3, 1, 2)))
    # add conv layers
    model.add(Convolution2D(16, 3, 3, init="glorot_uniform", activation="relu", subsample=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 3, 3, init="glorot_uniform", activation="relu", subsample=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, init="glorot_uniform", activation="relu", subsample=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, init="glorot_uniform", activation="relu", subsample=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(output_dim=2000, init="glorot_uniform", activation="relu", W_regularizer=l2(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=2000, init="glorot_uniform", activation="relu", W_regularizer=l2(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=4, init="glorot_uniform", activation="relu"))
    model.compile(optimizer="rmsprop", loss="mse")
    t_train = time()
    print("Took %.1fs" % (t_train - t_comp))
    # split dataset
    i_test = int(train_p * nrow) / batch_size * batch_size
    i_valid = int(i_test * (1 - valid_p)) / batch_size * batch_size
    X_train, Y_train = X_[:i_valid,], Y_[:i_valid,]
    X_valid, Y_valid = X_[i_valid:i_test,], Y_[i_valid:i_test,]

    # naive fitting to lower rmse faster
    hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10, verbose=1, validation_split=0.1)
    print(hist)
    # fit by batch using generator!
    img_aug = image_augmentor(X_, Y_, i_valid)
    hist = model.fit_generator(
        generator=img_aug,
        samples_per_epoch=i_valid,
        nb_epoch=5000,
        verbose=1,
        validation_data=(X_valid, Y_valid),
        nb_worker=1,
    )
    rmse_test = model.evaluate(X_[i_test:,], Y_[i_test:,])
    print("Test RMSE: %.4f" % rmse_test)

    # save model
    model_json = model.to_json()
    open(path_img + "locate/model_116.json", "w").write(model_json)
    model.save_weights(path_img + "locate/model_116_weights.h5")
Example No. 30
def test_stop_training_csv(tmpdir):
    np.random.seed(1337)
    fp = str(tmpdir / 'test.csv')
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
    model = Sequential()
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
        i = 0
        max_batch_index = len(X_train) // batch_size
        tot = 0
        while 1:
            if tot > 3 * len(X_train):
                yield (np.ones([batch_size, input_dim]) * np.nan,
                       np.ones([batch_size, num_classes]) * np.nan)
            else:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            tot += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train) // batch_size,
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
        for x in reader(f):
            values.append(x)

    assert 'nan' in values[-1], 'The last epoch was not logged.'
    os.remove(fp)
Example No. 31
def train_a_model(trainfile):
    '''
    Train a small CNN emotion classifier (fear / happy / sad) on a CSV file
    of "emotion,pixels" rows.
    :param trainfile: path to the training CSV
    :return: the trained Keras model
    '''
    # read the dataset as raw text; each line is "emotion,pix0 pix1 pix2 ..."

    with open(trainfile) as f:
        content = f.readlines()

    lines = np.array(content)

    num_of_instances = lines.size
    print("number of instances: ", num_of_instances)

    #save the data in train and validation data
    x_train, y_train = [], []

    for i in range(1, num_of_instances):
        try:
            emotion = lines[i].split(",")[0]

            val = lines[i].split(",")[1:]

            pixels = np.array(val, 'float32')

            y_train.append(emotion)
            x_train.append(pixels)
        except Exception:
            pass  # skip malformed rows

    # data transformation for train and test sets
    x_train = np.array(x_train, 'float32')

    x_train /= 255  #normalize inputs between [0, 1]

    x_train = x_train.reshape(x_train.shape[0], 48, 48, 1)
    x_train = x_train.astype('float32')

    print(x_train.shape[0], 'train samples')

    # labels were already collected in the first pass above; encode the
    # string labels as integers, then one-hot them (fear, happy, sad)
    le = LabelEncoder()
    y_train = le.fit_transform(y_train)
    y_train = keras.utils.to_categorical(y_train, 3)
    y_train = np.array(y_train, 'float32')

    #variables
    num_classes = 3
    #fear, happy, sad
    batch_size = 256
    epochs = 10

    #construct CNN structure
    model = Sequential()

    #1st convolution layer
    model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))

    #2nd convolution layer
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    #3rd convolution layer
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Flatten())

    #fully connected neural networks
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))

    #-----------------------------
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.3))
    #-----------------------------

    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))

    model.add(Dense(num_classes, activation='softmax'))

    ################################
    #------------------------------
    #batch process
    gen = ImageDataGenerator()
    train_generator = gen.flow(x_train, y_train, batch_size=batch_size)

    #------------------------------

    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])

    #------------------------------

    print('Training start, please wait....')
    #model.fit(x_train, y_train, epochs=epochs)  # train on the full training set at once
    model.fit_generator(train_generator,
                        steps_per_epoch=batch_size,  # note: batch_size random batches per epoch, not the full set
                        epochs=epochs)

    print("model Training completed")
    model.save('models.h5')

    return model
Example No. 32
#adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
#adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)

# chose categorical_crossentropy to avoid slow learning for the sigma derivative.
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
#Create the generators for the test and train batches
training_gen = generate(batch_size, True)
valid_gen = generate(batch_size, False)

#fit the model

history = model.fit_generator(generator=training_gen,
                              verbose=1,
                              epochs=epochs,
                              steps_per_epoch=8000 // batch_size,
                              validation_data=valid_gen,
                              validation_steps=2000 // batch_size)

#score = model.evaluate(x_test, y_test, verbose=0)
#print('Test loss:', score[0])
#print('Test accuracy:', score[1])
model.save('./chest_xray_net.h5')

print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
Example No. 33
restnet.trainable = True
set_trainable = False
for layer in restnet.layers:
    if layer.name in ['res5c_branch2b', 'res5c_branch2c', 'activation_49']:
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
layers = [(layer, layer.name, layer.trainable) for layer in restnet.layers]
pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable'])

model_finetuned = Sequential()
model_finetuned.add(restnet)
model_finetuned.add(Dense(512, activation='relu'))  # input shape is inherited from restnet's output; input_dim (an int, not a tuple) is not needed here
model_finetuned.add(Dropout(0.3))
model_finetuned.add(Dense(512, activation='relu'))
model_finetuned.add(Dropout(0.3))
model_finetuned.add(Dense(1, activation='sigmoid'))
model_finetuned.compile(loss='binary_crossentropy',
                        optimizer=optimizers.RMSprop(lr=1e-5),
                        metrics=['accuracy'])
model_finetuned.summary()

history_1 = model_finetuned.fit_generator(train_generator,
                                          steps_per_epoch=100,
                                          epochs=2,
                                          validation_data=val_generator,
                                          validation_steps=100,
                                          verbose=1)
model_finetuned.save('men_women_tlearn_finetune_img_aug_restnet50.h5')
Example No. 34
print("video: training ", model_name)
weights_path = "models_video/" + model_name + "_" + "{}".format(
    deformation) + ".hdf5"
# base_model = cnnModels(model_name, image_shape=(64, 64, 1))

# print(base_model.model.summary())

load_weights = False

train_generator, validation_generator, steps_per_epoch = get_generators()

# Helper: TensorBoard
tb = TensorBoard(log_dir='./cnn_video/logs/' + '{}'.format(deformation))

# Helper: Stop when we stop learning.
early_stopper = EarlyStopping(patience=10)

# # Helper: Save results.
# timestamp = time.time()
# csv_logger = CSVLogger('./cnn_video/logs/' + model_name + '-' + 'training-' + \
#                        str(timestamp) + '.log')

model.fit_generator(train_generator,
                    steps_per_epoch=steps_per_epoch,
                    validation_data=validation_generator,
                    validation_steps=10,
                    callbacks=[tb, early_stopper],
                    epochs=epochs)

model.save_weights(weights_path, overwrite=True)
Ejemplo n.º 35
class Ghouzam(DigitClassifier):
    '''Ghouzam digit classifier. Source: https://www.kaggle.com/yassineghouzam/introduction-to-cnn-keras-0-997-top-6'''
    def __init__(self, warm_start=None, epochs=10, batch_size=86):
        super().__init__(warm_start)

        self.epochs = epochs
        self.batch_size = batch_size

        if self.model is None:
            self.model = Sequential()

            self.model.add(
                Conv2D(filters=32,
                       kernel_size=(5, 5),
                       padding='same',
                       activation='relu',
                       input_shape=(28, 28, 1)))
            self.model.add(
                Conv2D(filters=32,
                       kernel_size=(5, 5),
                       padding='same',
                       activation='relu'))
            self.model.add(MaxPool2D(pool_size=(2, 2)))
            self.model.add(Dropout(0.25))
            self.model.add(
                Conv2D(filters=64,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu'))
            self.model.add(
                Conv2D(filters=64,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu'))
            self.model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
            self.model.add(Dropout(0.25))

            self.model.add(Flatten())
            self.model.add(Dense(256, activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(10, activation='softmax'))

            # Optimizer
            optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

            # Compile model
            self.model.compile(optimizer=optimizer,
                               loss='categorical_crossentropy',
                               metrics=['accuracy'])

            # Data augmentation to prevent overfitting
            self.datagen = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by the dataset std
                samplewise_std_normalization=False,  # divide each input by its own std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
                zoom_range=0.1,  # randomly zoom images
                width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=False,  # do not randomly flip images horizontally
                vertical_flip=False  # do not randomly flip images vertically
            )

    def fit(self, X_LS, y_LS, X_VS, y_VS):
        '''Fits model.'''

        # One-hot encoding
        y_LS = to_categorical(y_LS)
        y_VS = to_categorical(y_VS)

        # Fit
        self.datagen.fit(X_LS)

        learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                                    patience=3,
                                                    verbose=1,
                                                    factor=0.5,
                                                    min_lr=0.00001)

        self.model.fit_generator(self.datagen.flow(X_LS,
                                                   y_LS,
                                                   batch_size=self.batch_size),
                                 epochs=self.epochs,
                                 validation_data=(X_VS, y_VS),
                                 verbose=2,
                                 steps_per_epoch=X_LS.shape[0] // self.batch_size,
                                 callbacks=[learning_rate_reduction])

        return self
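A hedged usage sketch of the class above (the array names are assumptions; the network expects MNIST-style (n, 28, 28, 1) inputs and integer labels):

clf = Ghouzam(epochs=10, batch_size=86)
clf.fit(X_train, y_train, X_valid, y_valid)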
Ejemplo n.º 36
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

#model.load_weights(networkFile)

history = model.fit_generator(train_generator,
                              steps_per_epoch=nb_train_samples // batch_size,
                              epochs=epochs,
                              validation_data=validation_generator,
                              validation_steps=nb_validation_samples //
                              batch_size)
printHistory(history, insulators)
model.save_weights(networkFile)
Ejemplo n.º 37
print(model.summary())

# ### Learning 1: New layers

epochs = 10
workers = 4
use_multiprocessing = False

print('Training for', epochs, 'epochs with', workers,
      'workers, use_multiprocessing is', use_multiprocessing)

history = model.fit_generator(train_generator,
                              steps_per_epoch=nimages_train // batch_size,
                              epochs=epochs,
                              validation_data=validation_generator,
                              validation_steps=nimages_validation //
                              batch_size,
                              verbose=2,
                              callbacks=callbacks,
                              use_multiprocessing=use_multiprocessing,
                              workers=workers)

fname = "gtsrb-vgg16-reuse.h5"
print('Saving model to', fname)
model.save(fname)

# ### Learning 2: Fine-tuning
#
# Once the top layers have learned some reasonable weights, we can
# continue training by unfreezing the last convolution block of VGG16
# (`block5`) so that it may adapt to our data. The learning rate
# should be smaller than usual.
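The code for this fine-tuning step is not part of the snippet; a minimal sketch under common Keras conventions (the layer name of the embedded VGG16 base is an assumption):

from keras import optimizers

conv_base = model.get_layer('vgg16')  # assumed name of the VGG16 base layer
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':  # unfreeze everything from block5 on
        set_trainable = True
    layer.trainable = set_trainable

# Recompile with a smaller learning rate so block5 adapts without
# destroying the pre-trained weights.
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-5),
              metrics=['accuracy'])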
Ejemplo n.º 38
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
from learning_keras.text_and_sequence.weather_data import float_data, train_gen, val_gen, val_steps
import matplotlib.pyplot as plt

model = Sequential()
model.add(
    layers.GRU(32,
               dropout=0.2,
               recurrent_dropout=0.2,
               input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
                              steps_per_epoch=500,
                              epochs=40,
                              validation_data=val_gen,
                              validation_steps=val_steps)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Ejemplo n.º 39
def main(_):
    ##
    # Load Data
    ##

    with open(FLAGS.data_path, 'r') as f:
        reader = csv.reader(f)
        # data is a list of tuples (img path, steering angle)
        data = np.array([row for row in reader])

    # Split train and validation data
    np.random.shuffle(data)
    split_i = int(len(data) * 0.9)
    X_train, y_train = list(zip(*data[:split_i]))
    X_val, y_val = list(zip(*data[split_i:]))

    X_train, y_train = np.array(X_train), np.array(y_train)
    X_val, y_val = np.array(X_val), np.array(y_val)

    ##
    # Define Model
    ##

    model = Sequential([
        Conv2D(32, (3, 3),
               input_shape=(32, 128, 3),
               padding='same',
               activation='relu'),
        Conv2D(64, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(),
        Dropout(0.5),
        Conv2D(128, (3, 3), padding='same', activation='relu'),
        Conv2D(128, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(),
        Dropout(0.5),
        # Conv2D(1024, (3, 3), padding='same', activation='relu'),
        # Conv2D(1024, (3, 3), padding='same', activation='relu'),
        # MaxPooling2D(),
        # Dropout(0.5),
        # Conv2D(2048, (3, 3), padding='same', activation='relu'),
        # Conv2D(2048, (3, 3), padding='same', activation='relu'),
        # MaxPooling2D(),
        # Dropout(0.5),
        Flatten(),
        Dense(1024, activation='relu'),
        Dense(512, activation='relu'),
        Dense(128, activation='relu'),
        Dense(1, name='output', activation='tanh'),
    ])
    model.compile(optimizer=Adam(lr=FLAGS.lrate), loss='mse')

    ##
    # Train
    ##

    history = model.fit_generator(
        gen_batches(X_train, y_train, FLAGS.batch_size),
        steps_per_epoch=len(X_train) // FLAGS.batch_size,
        epochs=FLAGS.num_epochs,
        validation_data=gen_batches(X_val, y_val, FLAGS.batch_size),
        validation_steps=len(X_val) // FLAGS.batch_size)

    ##
    # Save model
    ##

    if not os.path.exists(FLAGS.save_dir):
        os.makedirs(FLAGS.save_dir)

    json = model.to_json()
    model.save_weights(os.path.join(FLAGS.save_dir, 'model.h5'))
    with open(os.path.join(FLAGS.save_dir, 'model.json'), 'w') as f:
        f.write(json)

Ejemplo n.º 40
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch)
validation_generator = generator(validation_samples, batch_size=batch)

model = Sequential()
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((60, 20), (0, 0))))
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Dropout(0.3))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator,
                    steps_per_epoch=np.ceil(len(train_samples) / batch),
                    validation_data=validation_generator,
                    validation_steps=np.ceil(len(validation_samples) / batch),
                    epochs=5)
model.save('model.h5')
Ejemplo n.º 41
class Model():
    def __init__(self):
        self.model = Sequential()

    def build_model(self, configs):
        timer = Timer()
        timer.start()

        for layer in configs['model']['layers']:
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = layer[
                'input_timesteps'] if 'input_timesteps' in layer else None
            input_dim = layer['input_dim'] if 'input_dim' in layer else None

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            if layer['type'] == 'lstm':
                self.model.add(
                    CuDNNLSTM(neurons,
                              input_shape=(input_timesteps, input_dim),
                              return_sequences=return_seq))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))
            if layer['type'] == 'BatchNormalization':
                self.model.add(BatchNormalization())

        self.model.compile(loss=configs['model']['loss'],
                           optimizer=configs['model']['optimizer'],
                           metrics=['accuracy'])

        print('MODEL Compiled')
        timer.stop()

    def train(self, X, y, epochs, batch_size, save_dir, logs):
        timer = Timer()
        timer.start()
        print('MODEL Training Started')
        print(f'MODEL {epochs} epochs, {batch_size} batch size')

        save_fname = os.path.join(
            save_dir,
            f'{dt.datetime.now().strftime("%d%m%Y-%H%M%S")}-e{epochs}.h5')

        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True),
            TensorBoard(
                log_dir=
                f'{logs}/{dt.datetime.now().strftime("%d%m%Y-%H%M%S")}-e{epochs}'
            )
        ]

        self.model.fit(X,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks,
                       shuffle=False)
        self.model.save(save_fname)

        print(f'MODEL Training Completed. Model saved as {save_fname}')
        timer.stop()

    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        timer = Timer()
        timer.start()
        print('MODEL Out-of-Memory Training Started')
        print(
            f'MODEL {epochs} epochs, {batch_size} batch size, {steps_per_epoch} batches per epoch'
        )

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))

        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]

        self.model.fit_generator(data_gen,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 workers=1)

        print(
            f'MODEL Out-of-Memory Training Completed. Model saved as {save_fname}'
        )
        timer.stop()

    def predict(self, x):
        print('MODEL Predicting Point-by-Point:')
        predicted = self.model.predict(x)
        predicted = np.reshape(predicted, (predicted.size, ))
        return predicted

    def predict_sequences_multiple(self, data, window_size, prediction_len):
        print('MODEL Predicting Sequences Multiple:')
        prediction_seqs = []
        for i in range(int(len(data) / prediction_len)):
            curr_frame = data[i * prediction_len]
            predicted = []
            for j in range(prediction_len):
                predicted.append(
                    self.model.predict(curr_frame[np.newaxis, :, :])[0, 0])
                curr_frame = curr_frame[1:]
                curr_frame = np.insert(curr_frame, [window_size - 2],
                                       predicted[-1],
                                       axis=0)
            prediction_seqs.append(predicted)
        return prediction_seqs

    def evaluate(self, x, y):
        return self.model.evaluate(x, y, verbose=0)
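For reference, a sketch of the configs dict that build_model above expects, inferred from its key lookups (the concrete values are illustrative assumptions, not from the source):

configs = {
    'model': {
        'loss': 'mse',
        'optimizer': 'adam',
        'layers': [
            {'type': 'lstm', 'neurons': 100,
             'input_timesteps': 49, 'input_dim': 2, 'return_seq': True},
            {'type': 'dropout', 'rate': 0.2},
            {'type': 'lstm', 'neurons': 100, 'return_seq': False},
            {'type': 'BatchNormalization'},
            {'type': 'dense', 'neurons': 1, 'activation': 'linear'},
        ],
    },
}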
Ejemplo n.º 42
for layer in base_model.layers:
    layer.trainable = False

model = Sequential()
model.add(Flatten(input_shape=base_model.output_shape[1:]))
model.add(Dense(128, activation='relu'))

model.add(Dropout(0.5))
model.add(Dense(5, activation='sigmoid'))

model = Model(inputs=base_model.input,
              outputs=model(base_model.output))  # new network = pre-trained base + custom top layers

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
              metrics=['accuracy'])
"""
train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)  # 训练数据预处理器,随机水平翻转
test_datagen = ImageDataGenerator(rescale=1. / 255)  # 测试数据预处理器
train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width),
                                                    batch_size=batch_size, class_mode='binary')  # 训练数据生成器
validation_generator = test_datagen.flow_from_directory(validation_data_dir, target_size=(img_height, img_width),
                                                        batch_size=batch_size, class_mode='binary',
                                                        shuffle=False)  # 验证数据生成器
checkpointer = ModelCheckpoint(filepath='dogcatmodel.h5', verbose=1, save_best_only=True)  # 保存最优模型

# 训练&评估
model.fit_generator(train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs,
                    validation_data=validation_generator, validation_steps=nb_validation_samples // batch_size,
                    verbose=2, workers=12, callbacks=[checkpointer])
"""
test_loss, test_acc = model.evaluate(x_test, y_test)
Ejemplo n.º 43
        yield np.array(images), np.array(steer_angles)


model = Sequential()
model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=INPUT_SHAPE))
model.add(Lambda(lambda var: var / 127.5 - 1.0))
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Dropout(KEEP_PROB))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
#model.summary()

model.compile(loss='mean_squared_error', optimizer=Adam(lr=LEARNING_RATE))

model.fit_generator(image_loader(x_train, y_train, BATCH_SIZE, True),
                    steps_per_epoch=len(x_train) // BATCH_SIZE,
                    epochs=EPOCHS,
                    max_queue_size=1,
                    validation_data=image_loader(x_valid, y_valid, BATCH_SIZE,
                                                 False),
                    validation_steps=len(x_valid) // BATCH_SIZE)

model.save('model.h5')
Ejemplo n.º 44
path = os.getcwd()

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
        rescale=1./255)
        #width_shift_range=0.1,
        #height_shift_range=0.1,
        #zoom_range=0.2,
        #horizontal_flip=True)


train_generator = train_datagen.flow_from_directory(
        path+'/Train',  # this is the target directory
        target_size=(256,256),  # all images will be resized to 256x256
        batch_size=batch_size,
        class_mode='categorical')  # categorical labels to match the categorical_crossentropy loss

val_datagen = ImageDataGenerator(
        rescale=1./255)


val_generator = val_datagen.flow_from_directory(
        path+'/Test',  # this is the target directory
        target_size=(256,256),  # all images will be resized to 256x256
        batch_size=batch_size,
        class_mode='categorical')  # categorical labels to match the categorical_crossentropy loss

model_3a.fit_generator(
        train_generator,
        steps_per_epoch=2000 // batch_size,validation_data=val_generator,validation_steps=100,
        epochs=50,callbacks=callbacks)

# confirm the iterator works
#batchX, batchy = train_generator.next()
#print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
#batchX, batchy = validation_generator.next()
#print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
#batchX, batchy = test_generator.next()
#print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))

# simple early stopping
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=30)
mc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True)
  
model.fit_generator(train_generator, 
    steps_per_epoch = train_generator.samples // batch_size, callbacks=[es, mc], 
    epochs = epochs, validation_data = validation_generator, 
    validation_steps = validation_generator.samples // batch_size)
"""
    epochs = epochs, validation_data = validation_generator, 
    validation_steps = nb_validation_samples // batch_size) 
"""

# load the saved model
saved_model = load_model('best_model.h5')

# make a prediction
test_generator.reset()
yhat = saved_model.predict_generator(test_generator, steps=nb_test_samples)
print(len(yhat))
  
#model.save_weights('model_saved.h5')
Ejemplo n.º 46
class Model:
    def __init__(self):
        self.model = None
        self.history = LossHistory()

    # Modeling
    def build_model(self, dataset, nb_classes=3):

        # Build an empty network model: a linear stack in which each layer is
        # added sequentially (hence the name Sequential, or linear stack, model).
        self.model = Sequential()

        # The following code adds the layers the CNN needs, one at a time.
        self.model.add(
            Convolution2D(
                32, 3, 3, border_mode='same',
                input_shape=dataset.input_shape))  # Layer 1: 2D convolutional layer
        self.model.add(Activation('relu'))  # Layer 1: activation function

        self.model.add(Convolution2D(32, 3, 3))  # Layer 2: 2D convolutional layer
        self.model.add(Activation('relu'))  # Layer 2: activation function
        # Role of the pooling layer:
        # 1. invariance: translation, rotation, scale
        # 2. reduces dimensionality while keeping the main features, preventing
        #    overfitting and improving the model's ability to generalize
        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # Layer 3: pooling layer
        self.model.add(
            Dropout(0.25))  # Layer 3: Dropout; each node is deactivated with probability 25%

        self.model.add(Convolution2D(64, 3, 3,
                                     border_mode='same'))  # Layer 4: 2D convolutional layer
        self.model.add(Activation('relu'))  # Layer 4: activation function

        self.model.add(Convolution2D(64, 3, 3))  # Layer 5: 2D convolutional layer
        self.model.add(Activation('relu'))  # Layer 5: activation function

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # Layer 6: pooling layer
        self.model.add(Dropout(0.25))  # Layer 6: Dropout

        self.model.add(Flatten())  # Layer 7: Flatten() turns the multi-dimensional input into 1D
        self.model.add(Dense(512))  # Layer 7: Dense, a fully connected layer
        self.model.add(Activation('relu'))  # Layer 7: activation function
        self.model.add(Dropout(0.5))  # Layer 7: Dropout

        self.model.add(Dense(nb_classes))  # Layer 8: Dense layer
        self.model.add(Activation('softmax'))  # Layer 8: classification layer, outputs the final result

        # Print a model summary
        self.model.summary()

    # Training method
    def train(self,
              train_images,
              train_labels,
              valid_images,
              valid_labels,
              batch_size=20,
              nb_epoch=10,
              data_augmentation=False):
        # Use an SGD + momentum optimizer for training; create the optimizer object first
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])  # complete the actual model configuration

        # Without data augmentation. Augmentation creates new training data from
        # the provided data via rotation, flipping, added noise, and so on,
        # deliberately increasing the size of the training set.
        if not data_augmentation:
            self.model.fit(train_images,
                           train_labels,
                           batch_size=batch_size,
                           nb_epoch=nb_epoch,
                           validation_data=(valid_images, valid_labels),
                           shuffle=True,
                           callbacks=[self.history])
        # Improve using data augmented on the fly
        else:
            # Define a data generator for augmentation. It returns a generator
            # object, datagen; each call to datagen yields a batch of data
            # (generated on demand), which saves memory. It is really just a
            # Python data generator.
            datagen = ImageDataGenerator(
                featurewise_center=False,  # whether to center the input data (dataset mean becomes 0)
                samplewise_center=False,  # whether to set each sample's mean to 0
                featurewise_std_normalization=False,  # whether to divide inputs by the std of the dataset
                samplewise_std_normalization=False,  # whether to divide each sample by its own std
                zca_whitening=False,  # whether to apply ZCA whitening to the input
                rotation_range=20,  # random rotation angle for augmented images (range 0-180 degrees)
                width_shift_range=0.2,  # horizontal shift amplitude (fraction of image width, float 0-1)
                height_shift_range=0.2,  # same as above, but vertical
                horizontal_flip=True,  # whether to perform random horizontal flips
                vertical_flip=False)  # whether to perform random vertical flips

            # Compute statistics over the whole training set needed for
            # feature-wise normalization, ZCA whitening, and so on.
            datagen.fit(train_images)

            # Start training the model with the generator
            self.model.fit_generator(datagen.flow(train_images,
                                                  train_labels,
                                                  batch_size=batch_size),
                                     samples_per_epoch=train_images.shape[0],
                                     nb_epoch=nb_epoch,
                                     validation_data=(valid_images,
                                                      valid_labels),
                                     callbacks=[self.history])

    MODEL_PATH = os_path + '/data/model/aggregate.face.model.h5'

    def save_model(self, file_path=MODEL_PATH):
        self.model.save(file_path)

    def load_model(self, file_path=MODEL_PATH):
        self.model = load_model(file_path)

    def evaluate(self, test_images, test_labels):
        score = self.model.evaluate(test_images, test_labels, verbose=1)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))
Ejemplo n.º 47
#ch, row, col = 3, 80, 320  # Trimmed image format

model = Sequential()
model.add(Lambda(lambda x: (x / 127.5) - 1., input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dense(1))

model.compile(optimizer='adam', loss='mse')
model.fit_generator(
    train_generator,
    steps_per_epoch=math.ceil(len(train_samples) / batch_size * 6),
    validation_data=validation_generator,
    validation_steps=math.ceil(len(validation_samples) / batch_size * 6),
    epochs=3,
    verbose=1)
#model.fit(X_train, y_train, validation_split = 0.2, shuffle = True, nb_epoch = 3)

model.save('model.h5')
Ejemplo n.º 48
test_datagen = ImageDataGenerator(rescale=1. / 255)  # Preprocess images

training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')

test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')

classifier.fit_generator(training_set,
                         steps_per_epoch=8000 // 32,
                         epochs=25,
                         validation_data=test_set,
                         validation_steps=2000 // 32)

from keras.preprocessing import image
import numpy as np

# Single image prediction
test_img = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg',
                          target_size=(64, 64))
test_img = image.img_to_array(test_img)
test_img = np.expand_dims(test_img, axis=0)  # Preprocess image

result = classifier.predict(test_img)
training_set.class_indices  # show indices for classification of cat / dog
# Convert binary result to String prediction result
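The conversion itself is cut off here; a minimal sketch, assuming the usual {'cats': 0, 'dogs': 1} mapping reported by training_set.class_indices:

if result[0][0] == 1:
    prediction = 'dog'  # assumes dogs were indexed as 1
else:
    prediction = 'cat'
print(prediction)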
Ejemplo n.º 49
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
# compile
model.compile(loss='categorical_crossentropy', optimizer=OPTIM,
              metrics=['accuracy'])
# train
history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
                              validation_data=(X_test, y_test),
                              steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
                              epochs=NB_EPOCH, verbose=VERBOSE)
# evaluate
score = model.evaluate(X_test, y_test,
                       batch_size=BATCH_SIZE, verbose=VERBOSE)
print("Test score:", score[0])
print('Test accuracy:', score[1])
Ejemplo n.º 50
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        epochs=epochs,
                        steps_per_epoch=len(x_train) // batch_size * 2,
                        validation_data=(x_test, y_test),
                        workers=4)

# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
Ejemplo n.º 51
#model.add(Dropout(0.4))

model.add(Dense(units=1024))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(units=1024))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(units=512))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(units=7))
model.add(Activation('softmax'))
model.summary()
model.compile(optimizer="Adamax",loss='categorical_crossentropy',metrics=['accuracy'])

ep = 689

model.fit_generator(datagen.flow(x, y, batch_size=batch),
                    steps_per_epoch=len(x) // batch,
                    epochs=ep)
ev = model.evaluate(xv, yv, batch_size=batch)
print(' ', ev[1], '\n')

print('save')
model.summary()

model.save('save.h5')

Ejemplo n.º 52
class OCR:
    def __init__(self, test_dir=dir_path + '/train_images/'):
        self.learning_rate = 0.001
        self.test_dir = test_dir
        self.data = []

    def format_y(self, y):

        res = [0] * 128

        if y is None:
            return res

        for i in range(len(y)):

            if y[i] is None:
                continue

            res[y[i] - 2304] = 1  # 2304 = 0x0900, start of the 128-code-point Devanagari Unicode block

        return res

    def get_character(self, text):

        chars = []
        split = text.split('_')
        split = split[3:]

        if len(split) <= 0:
            print('Image label not found for ', text)
            return

        split[-1] = split[-1][:-4]  #remove .png from last split

        for c in range(len(split)):
            chars.append(int(split[c]))

        return chars
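    # Example (the filename format is an assumption inferred from the parsing
    # above: three leading metadata fields, then character codes):
    #   get_character('img_01_00_2309_2325.png') -> [2309, 2325]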

    def load_data(self):

        if not self.test_dir:
            print(
                "Test directory could not be found. Please specify the correct path."
            )
            return

        images_name = [
            f for f in listdir(self.test_dir) if isfile(join(self.test_dir, f))
        ]
        X = []
        y = []

        for file_name in images_name:
            file_path = self.test_dir
            img = cv2.imread(file_path + file_name, 0)

            img = self.preprocess(img)
            X.append(img.reshape(64, 64, 1))

            chars = self.get_character(file_name)
            y.append(self.format_y(chars))

        return np.array(X), np.array(y)

    def preprocess(self, img):
        # Stretch intensities to the full 0-255 range
        cv2.normalize(img, img, 0, 255, cv2.NORM_MINMAX)
        # Smooth with a 5x5 averaging kernel
        kernel = np.ones((5, 5), np.float32) / 25
        img = cv2.filter2D(img, -1, kernel)
        # Otsu threshold (inverted), then resize to the network input size
        ret, ret_img = cv2.threshold(img, 0, 255,
                                     cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        resz_img = cv2.resize(ret_img, (64, 64))

        # Thin strokes slightly with a 3x3 erosion
        kernel1 = np.ones((3, 3), np.uint8)
        erosion = cv2.erode(resz_img, kernel1, iterations=1)

        # Close small holes with an elliptical structuring element
        # (note: `closing` is computed but not used below)
        kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        closing = cv2.morphologyEx(erosion, cv2.MORPH_CLOSE, kernel2)
        # Re-invert and blur the resized image before returning it
        ret, res_img = cv2.threshold(resz_img, 0, 255, cv2.THRESH_BINARY_INV)
        res_img = cv2.filter2D(res_img, -1, kernel)
        return res_img

    def make_model(self):
        self.model = Sequential()

        self.model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   activation='relu',
                   input_shape=(64, 64, 1)))
        self.model.add(Dropout(0.2))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Conv2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Conv2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Conv2D(512, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Flatten())
        self.model.add(Dropout(0.2))
        self.model.add(Dense(1024, activation='relu'))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(128, activation='sigmoid'))

        self.model.compile(loss='binary_crossentropy',
                           optimizer=Adam(lr=self.learning_rate),
                           metrics=[categorical_accuracy])

    def train(self):
        X, Y = self.load_data(
        )  #loads train data, preprocess it and return in desired format

        X = X.astype('float32')

        train_datagen = ImageDataGenerator(rotation_range=20,
                                           rescale=1. / 255,
                                           width_shift_range=0.1,
                                           height_shift_range=0.1,
                                           shear_range=0.2,
                                           zoom_range=0.1,
                                           horizontal_flip=False,
                                           fill_mode='nearest')

        X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                            Y,
                                                            test_size=0.3)
        X_test, X_validate, Y_test, Y_validate = train_test_split(
            X_test, Y_test, test_size=0.5)
        X_test = X_test / 255.
        train_generator = train_datagen.flow(X_train, Y_train, batch_size=32)
        validation_generator = train_datagen.flow(X_validate,
                                                  Y_validate,
                                                  batch_size=32)

        if isfile(join(dir_path, 'my_model_new.h5')):  # a leading slash in the second argument would discard dir_path
            self.model = load_model('my_model_new.h5')
        else:
            self.make_model()

        self.model.fit_generator(train_generator,
                                 steps_per_epoch=len(X_train) // 32,
                                 epochs=50,
                                 validation_data=validation_generator,
                                 validation_steps=len(X_validate) // 32)

        score, acc = self.model.evaluate(X_test, Y_test)
        self.model.save('my_model_new.h5')
        print('score: ', score, 'accuracy: ', acc)
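A hedged usage sketch of the OCR class (the directory layout and filename format are assumptions):

ocr = OCR()  # reads dir_path + '/train_images/' by default
ocr.train()  # preprocesses, splits, augments, trains, and saves my_model_new.h5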
Ejemplo n.º 53
        'pred_prob': pred_probs,
    }
    for c, col in zip(class_cols, class_probs):
        frame_dict[c] = col

    table = pandas.DataFrame(frame_dict, columns=all_cols)

    number_cols = ['true_prob', 'pred_prob'] + class_cols
    table[number_cols] = table[number_cols].apply(pandas.to_numeric)
    #from IPython import embed; embed()

    return table


class ResultsDataFrameCallback(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        run.summary["results"] = results_data_frame(test_datagen, model)


model.fit_generator(train_generator,
                    steps_per_epoch=len(train_generator),
                    epochs=config.epochs,
                    workers=4,
                    callbacks=[ResultsDataFrameCallback()],
                    validation_data=test_generator,
                    validation_steps=len(test_generator))

if config.epochs == 0:
    #run.summary["results"] = results_data_frame(test_datagen, model)
    run.summary.update({"results2": results_data_frame(test_datagen, model)})
Ejemplo n.º 54
model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
numpy.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable

filepath = weights_filename
checkpoint = callbacks.ModelCheckpoint(filepath,
                                       monitor='val_acc',
                                       verbose=1,
                                       save_best_only=True,
                                       mode='max')
callbacks_list = [checkpoint]
# Training
hist = model.fit_generator(train_it,
                           steps_per_epoch=5703,
                           validation_data=val_it,
                           validation_steps=1140,
                           epochs=epochs,
                           callbacks=callbacks_list)

# Evaluating the model
score = model.evaluate_generator(test_it, steps=3421)
#score = model.evaluate(X_test, y_test, verbose=0)
print('Test Loss:', score[0])
print('Test accuracy:', score[1])
######################################################
Ejemplo n.º 55
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(2, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

print(model.summary())
# callbacks = [LearningRateScheduler(poly_decay)]
history = model.fit_generator(trainGen,
                              steps_per_epoch=totalTrain // BS,
                              validation_data=valGen,
                              validation_steps=totalVal // BS,
                              epochs=NUM_EPOCHS,
                              verbose=1
                              # callbacks=callbacks
                              )

print("[INFO] evaluating model...")
testGen.reset()
# predIdxs = model.predict_generator(testGen,
#     steps=(totalTest//BS) + 1)

# predIdxs = np.argmax(predIdxs, axis=1)

score = model.evaluate_generator(testGen, verbose=1)

model.save("saved_model.model")
Ejemplo n.º 56
class Model():
    """A class for an building and inferencing an lstm model"""
    def __init__(self):
        self.model = Sequential()

    def load_model(self, filepath):
        print('[Model] Loading model from file %s' % filepath)
        self.model = load_model(filepath)

    def build_model(self, configs):
        timer = Timer()
        timer.start()

        for layer in configs['model']['layers']:
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = layer[
                'input_timesteps'] if 'input_timesteps' in layer else None
            input_dim = layer['input_dim'] if 'input_dim' in layer else None

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            if layer['type'] == 'lstm':
                self.model.add(
                    LSTM(neurons,
                         input_shape=(input_timesteps, input_dim),
                         return_sequences=return_seq))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs['model']['loss'],
                           optimizer=configs['model']['optimizer'])

        print('[Model] Model Compiled')
        timer.stop()

    def train(self, x, y, epochs, batch_size, save_dir):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True)
        ]
        self.model.fit(x,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks)
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]
        self.model.fit_generator(data_gen,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 workers=1)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

    def predict_point_by_point(self, data):
        #Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
        print('[Model] Predicting Point-by-Point...')
        predicted = self.model.predict(data)
        predicted = np.reshape(predicted, (predicted.size, ))
        return predicted

    def predict_sequences_multiple(self, data, window_size, prediction_len):
        #Predict sequence of 50 steps before shifting prediction run forward by 50 steps
        print('[Model] Predicting Sequences Multiple...')
        prediction_seqs = []
        for i in range(int(len(data) / prediction_len)):
            curr_frame = data[i * prediction_len]
            predicted = []
            for j in range(prediction_len):
                predicted.append(
                    self.model.predict(curr_frame[newaxis, :, :])[0, 0])
                curr_frame = curr_frame[1:]
                curr_frame = np.insert(curr_frame, [window_size - 2],
                                       predicted[-1],
                                       axis=0)
            prediction_seqs.append(predicted)
        return prediction_seqs

    def predict_sequence_full(self, data, window_size, prediction_len):
        #Shift the window by 1 new prediction each time, re-run predictions on new window
        print('[Model] Predicting Sequences Full...')
        curr_frame = data[0]
        predicted = []
        for i in range(len(data)):
            predicted.append(
                self.model.predict(curr_frame[newaxis, :, :])[0, 0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size - 2],
                                   predicted[-1],
                                   axis=0)
        return predicted
Ejemplo n.º 57
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
    'data/train',  # this is the target directory
    target_size=(150, 150),  # all images will be resized to 150x150
    batch_size=batch_size,
    class_mode='binary'
)  # since we use binary_crossentropy loss, we need binary labels

# this is a similar generator, for validation data
validation_generator = test_datagen.flow_from_directory('data/validation',
                                                        target_size=(150, 150),
                                                        batch_size=batch_size,
                                                        class_mode='binary')

model.fit_generator(train_generator,
                    steps_per_epoch=2000 // batch_size,
                    epochs=50,
                    validation_data=validation_generator,
                    validation_steps=800 // batch_size)
model.save_weights(
    'first_try.h5'
)  # always save your weights after training or during training
Ejemplo n.º 58
             nb_batches=nb_val_samples // batch_size,
             batch_size=batch_size)

# In[67]:

r = next(traingen)
r[0].shape, r[1].shape, len(r)

# In[74]:

for iteration in range(1):
    print('Iteration', iteration)
    print("1")
    h = model.fit_generator(traingen,
                            steps_per_epoch=nb_train_samples // batch_size,
                            epochs=1,
                            verbose=1,
                            validation_data=valgen,
                            validation_steps=nb_val_samples)
    print("2")
    for k, v in h.history.items():
        history[k] = history.get(k, []) + v
    print("3")
    with open('%s.history.pkl' % FN, 'wb') as fp:
        pickle.dump(history, fp, -1)
    print("4")
    model.save_weights('%s.hdf5' % FN, overwrite=True)
    print("5")
    gensamples(batch_size=batch_size)

# In[ ]:
Ejemplo n.º 59
training_set = train_datagen.flow_from_directory(
    'train',
    target_size=(150, 150),
    batch_size=16,
    class_mode='binary')

test_set = test_datagen.flow_from_directory(
    'test',
    target_size=(150, 150),
    batch_size=16,
    class_mode='binary')

model_saved = model.fit_generator(
    training_set,
    epochs=10,
    validation_data=test_set,
)

model.save('mymodel.h5')  # model.save() takes just the path; passing the History object did nothing useful

# To test for individual images

mymodel = load_model('mymodel.h5')
# test_image=image.load_img('C:/Users/Karan/Desktop/ML Datasets/Face Mask Detection/Dataset/test/without_mask/30.jpg',target_size=(150,150,3))
test_image = image.load_img(r'C:/Users/karan/Desktop/FaceMaskDetector/test/with_mask/1-with-mask.jpg',
                            target_size=(150, 150, 3))
test_image
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
mymodel.predict(test_image)[0][0]
Ejemplo n.º 60
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1001, activation='softmax'))
#model.add(Activation('softmax'))

model.summary()
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
nb_epoch = 1
nb_train_samples = 63
nb_validation_samples = 55

model.fit_generator(train_generator,
                    samples_per_epoch=nb_train_samples,
                    nb_epoch=nb_epoch,
                    validation_data=validation_generator,
                    nb_val_samples=nb_validation_samples)
model.evaluate_generator(validation_generator, nb_validation_samples)

model.save('/home/garima/Music/dogs-vs-cats/models/fromsiraj.h5')
new_model = keras.models.load_model(
    '/home/garima/Music/dogs-vs-cats/models/fromsiraj.h5')

converter = tf.lite.TocoConverter.from_keras_model_file(
    '/home/garima/Music/dogs-vs-cats/models/fromsiraj.h5')
tflite_model = converter.convert()
with open(
        "/home/garima/Downloads/examples-master/lite/examples/image_classification/android/app/src/main/assets/from2.tflite",
        "wb") as f:
    f.write(tflite_model)