Example #1
def train(starting_epoch=0):
  """This module sets all hyper-parametes of the model and optimisers,
  creates an instance of the Keras Model class and partially trains it using
  the trainer module.

  Args:
    starting_epoch: Specifies at which epoch do we want to start. (Integer)

  Returns:
    None
  """
  global JUMP

  model = create_model() # Creates an instance of the Model class

  if starting_epoch: # Load saved weights when resuming from a non-zero epoch
    model = load_model_weight(model, 'model_weights.pkl')
  
  (x_train, y_train, x_valid, y_valid, x_test, y_test) = load_data()
  print ("Training Data Shape: ", x_train.shape)
  print ("Testing Data Shape: ", x_test.shape)

  for i in range(starting_epoch, 300000, JUMP): # The paper trained to 300000 
    model = trainer(model,
                    x_train,
                    y_train,
                    x_valid,
                    y_valid,
                    initial_epoch=i)
    # try:
    #     save_model_weight(model, 'model_weights.pkl')
    # except OSError:
    #     print("Cannot save the model")
    evaluate(model=model, x_test=x_test, y_test=y_test)
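Several of these examples lean on load_model_weight / save_model_weight helpers that the excerpts never show. A minimal sketch of what they could look like, assuming the weights are pickled as the plain list that Keras' get_weights returns; only the helper names come from the snippets, the pickle format is an assumption:

import pickle

def save_model_weight(model, path):
    # Assumption: weights are stored as a pickled list of numpy arrays.
    with open(path, 'wb') as f:
        pickle.dump(model.get_weights(), f)

def load_model_weight(model, path):
    # Restore the pickled weight list into an architecture-compatible model.
    with open(path, 'rb') as f:
        model.set_weights(pickle.load(f))
    return model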
Example #2
def main():
    model = create_model()
    model = load_model_weight(model, "model_weights.pkl")
    x_train, y_train, _, _, x_test, y_test = load_data()
    print(x_train.shape)
    print(x_test.shape)

    evaluate(model, x_test, y_test)
Example #3
def evaluate(model=None, x_test=None, y_test=None):
    if model is None:
        model = create_model()
        model = load_model_weight(model, "model_weights.pkl")
    if x_test is None or y_test is None:
        _, _, _, _, x_test, y_test = load_data()
    out = evaluate_process(model, x_test, y_test)
    print("The accuracy is: ", out)
    print("The total number of samples: ", y_test.shape[0])


# evaluate()
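evaluate_process is likewise not shown in the excerpt. Assuming the model was compiled with an accuracy metric, it could be a thin wrapper over Keras' model.evaluate; the unpacking below assumes exactly one metric, which is an assumption:

def evaluate_process(model, x_test, y_test):
    # Assumption: the model was compiled with metrics=['accuracy'],
    # so evaluate() returns [loss, accuracy].
    loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
    return accuracy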
Example #4
def run_iteration(conf, ds, datasets_bin, sanity):
    """
    """
    model = create_model(conf)
    callbacks = create_callbacks(conf)
    class_weights = get_class_weights(ds["train"], conf)

    start_time = time.time()
    history = model.fit(
            ds["train"],
            steps_per_epoch=conf["steps"]["train"],
            epochs=conf["num_epochs"],
            validation_data=ds["test"],
            validation_steps=conf["steps"]["test"],
            validation_freq=1,
            class_weight=class_weights,
            callbacks=callbacks,
            verbose=1
    )
    if conf["verbosity"]:
        print ("Time spent on training: {:.2f} minutes.".format(np.round(time.time() - start_time)/60))

    evaluate_model(model, history, ds, conf)

    count = {"findings": 0, "total": 0}
    pseudo = {"pred_list": [], "lab_list": [], "name_list": []}

    pseudo, count = generate_labels(pseudo, count, ds["unlab"], model, conf)

    # Sort in order of highest confidence to lowest
    pseudo_sorted = custom_sort(pseudo)

    checkout_findings(pseudo_sorted, conf, show=False)

    datasets_bin, added_samples = resample_and_combine(ds, conf, pseudo, pseudo_sorted, datasets_bin, limit=conf["class_limit"])

    # Update unlab_ds
    ds["unlab"] = reduce_dataset(ds["unlab"], remove=added_samples)

    sanity, conf = update_sanity(sanity, len(added_samples), datasets_bin, conf)
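custom_sort is described only by its comment ("sort in order of highest confidence to lowest"). A sketch under that reading, assuming pred_list holds per-sample class-probability vectors and that the three lists stay index-aligned; the dict keys come from the snippet, everything else is an assumption:

import numpy as np

def custom_sort(pseudo):
    # Confidence of a prediction = its highest class probability.
    conf = [np.max(p) for p in pseudo["pred_list"]]
    order = np.argsort(conf)[::-1]  # highest confidence first
    return {key: [values[i] for i in order] for key, values in pseudo.items()}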
Example #5
GAMMA = 0.99 # decay rate of past observations
OBSERVATION = 25. # timesteps to observe before training
EXPLORE = 3000000. # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # final value of epsilon
INITIAL_EPSILON = 0.1 # starting value of epsilon
REPLAY_MEMORY = 100 # number of previous transitions to remember
BATCH = 20 # size of minibatch
FRAME_PER_ACTION = 1

img_rows, img_cols = 80, 80
img_channels = 3 # We stack 3 frames

LEARNING_RATE = 1e-4
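
The constants above are the standard DQN exploration schedule: act randomly for OBSERVATION timesteps, then linearly anneal epsilon from INITIAL_EPSILON down to FINAL_EPSILON over EXPLORE frames. The loop that consumes them is not part of the excerpt; a minimal sketch, assuming linear annealing (the helper name is hypothetical):

def annealed_epsilon(t):
    # Hypothetical helper: fully random while observing, then linearly
    # anneal epsilon from INITIAL_EPSILON to FINAL_EPSILON over EXPLORE
    # frames, as the constants above suggest.
    if t <= OBSERVATION:
        return INITIAL_EPSILON
    fraction = min((t - OBSERVATION) / EXPLORE, 1.0)
    return INITIAL_EPSILON + fraction * (FINAL_EPSILON - INITIAL_EPSILON)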


model = create_model(keep_prob=1)

print ("Now we load weight")
model.load_weights("model42.h5")
adam = Adam(lr=LEARNING_RATE)
model.compile(loss='mse',optimizer=adam)
print ("Weight load successfully")    


def prepare_image(im):
    im = im.resize((INPUT_WIDTH, INPUT_HEIGHT))
    im_arr = np.frombuffer(im.tobytes(), dtype=np.uint8)
    im_arr = im_arr.reshape((INPUT_HEIGHT, INPUT_WIDTH, INPUT_CHANNELS))
    im_arr = np.expand_dims(im_arr, axis=0)
    return im_arr
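prepare_image expects a PIL image plus module-level INPUT_WIDTH / INPUT_HEIGHT / INPUT_CHANNELS constants that the excerpt does not define. A hypothetical usage, assuming 80x80 RGB frames to match img_rows / img_cols / img_channels above:

from PIL import Image

# Hypothetical values matching img_rows / img_cols / img_channels above.
INPUT_WIDTH, INPUT_HEIGHT, INPUT_CHANNELS = 80, 80, 3

frame = Image.open("frame.png").convert("RGB")
q_values = model.predict(prepare_image(frame))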
Example #6
    # (snippet truncated above; the augmentation generator is reconstructed
    # from the two arguments that survive in the excerpt)
    train_datagen = ImageDataGenerator(height_shift_range=0.2,
                                       horizontal_flip=True)
    valid_datagen = ImageDataGenerator()

    train_datagen.fit(x_reshape[2870:, :, :, :])
    valid_datagen.fit(x_reshape[0:2870, :, :, :])

    test_generator = valid_datagen.flow(x_reshape[0:2870, :, :, :],
                                        y_cat[0:2870, :],
                                        batch_size=512)
    train_generator = train_datagen.flow(x_reshape[2870:, :, :, :],
                                         y_cat[2870:, :],
                                         batch_size=512)

    model = create_model(input_shape=(48, 48, 1),
                         alpha=0.5,
                         depth_multiplier=1,
                         classes=7)
    adam = optimizers.Adam(lr=0.0002,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=None,
                           decay=0.0,
                           amsgrad=False)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['categorical_accuracy'])

    filepath = "model/m-{epoch:02d}-{categorical_accuracy:.3f}-{val_categorical_accuracy:.3f}.h5"

    # (snippet truncated below; the call is completed here monitoring the
    # validation metric referenced in the checkpoint filename)
    checkpoint = callbacks.ModelCheckpoint(filepath,
                                           monitor='val_categorical_accuracy',
                                           save_best_only=True)
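With the generators and checkpoint in place, the truncated remainder presumably wires them together. A sketch in the same period Keras API; the epoch count is hypothetical, the step counts just divide the slice sizes by the batch size used above:

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=(x_reshape.shape[0] - 2870) // 512,
                                  epochs=100,  # hypothetical epoch count
                                  validation_data=test_generator,
                                  validation_steps=2870 // 512,
                                  callbacks=[checkpoint])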
Example #7
# (snippet truncated above; the branch and call opener are reconstructed to
# mirror the else branch below; the condition itself is an assumption)
if emb_weight is None:
    model = create_model(n_classes=n_classes,
                         vocab_size=vocab_size + 1,
                         inp_len=train_t.shape[1],
                         emb_dim=hid_dim,
                         seq_model=seq_model,
                         nnet_model=nnet_model,
                         pool_mode=pool,
                         dropout_inp=dropout_inp,
                         dropout_hid=dropout_hid)
else:
    # n_classes, vocab_size, inp_len, emb_dim,
    # seq_model='lstm', nnet_model='highway', pool_mode='mean',
    # dropout_inp=False, dropout_hid=True
    model = create_model(n_classes=n_classes,
                         vocab_size=vocab_size + 1,
                         inp_len=train_t.shape[-1],
                         emb_dim=hid_dim,
                         seq_model=seq_model,
                         nnet_model=nnet_model,
                         pool_mode=pool,
                         dropout_inp=dropout_inp,
                         dropout_hid=dropout_hid,
                         emb_weight=emb_weight)

model.summary()
json_string = model.to_json()
with open('models/' + saving + '.json', 'w') as fModel:
    fModel.write(json_string)

opt = RMSprop(lr=0.01)
model.compile(optimizer=opt, loss=loss)

train_y = numpy.expand_dims(train_y, -1)
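
Since the architecture is saved as JSON here, the trained model can later be rebuilt with Keras' model_from_json and a separate weights file. A sketch; the .h5 weights filename is hypothetical, only the JSON path comes from the snippet:

from keras.models import model_from_json

with open('models/' + saving + '.json') as fModel:
    model = model_from_json(fModel.read())
model.load_weights('models/' + saving + '.h5')  # hypothetical weights file
model.compile(optimizer=RMSprop(lr=0.01), loss=loss)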
Example #8
import csv
import cv2
import numpy as np
from create_model import *
from functions import *

# Two generators for training and validation
train_gen = generate_next_batch()
validation_gen = generate_next_batch()

model = create_model()
history = model.fit_generator(train_gen,
                              samples_per_epoch=number_of_samples_per_epoch,
                              nb_epoch=number_of_epochs,
                              validation_data=validation_gen,
                              nb_val_samples=number_of_validation_samples,
                              verbose=1)

model.save('model.h5')
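
generate_next_batch comes from the starred functions import and is not shown. For a setup like this one it is typically an infinite generator yielding (images, labels) batches, since fit_generator expects a generator that never exhausts. A hypothetical sketch; every name inside is an assumption:

def generate_next_batch(batch_size=64):
    # Hypothetical: loop forever, yielding random batches of images and
    # target values, as fit_generator expects an endless generator.
    while True:
        images, targets = [], []
        for _ in range(batch_size):
            image, target = get_random_training_sample()  # hypothetical helper
            images.append(image)
            targets.append(target)
        yield np.array(images), np.array(targets)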