Example 1
    def create(self, charset, max_length=120,
               latent_rep_size=292, weights_file=None,
               optimizer='Adam', activation='relu',
               filter_size=9, kernel_size=9,
               learning_rate=0.001):

        charset_length = len(charset)

        # Create a keras optimizer
        kerasDefaults = candle.keras_default_config()
        # This next line should be dynamically set based on gParameters
        # kerasDefaults['momentum_sgd'] = gParameters['momentum']
        k_optimizer = candle.build_optimizer(optimizer,
                                             learning_rate,
                                             kerasDefaults)

        # Build the encoder
        x = Input(shape=(max_length, charset_length))
        _, z = self._buildEncoder(x, latent_rep_size, max_length,
                                  activation=activation,
                                  filter=filter_size,
                                  kernel_size=kernel_size)
        self.encoder = Model(x, z)

        # Build the decoder
        encoded_input = Input(shape=(latent_rep_size,))
        self.decoder = Model(
            encoded_input,
            self._buildDecoder(
                encoded_input,
                latent_rep_size,
                max_length,
                charset_length
            )
        )

        # Build the autoencoder (encoder + decoder)
        x1 = Input(shape=(max_length, charset_length))
        vae_loss, z1 = self._buildEncoder(x1, latent_rep_size, max_length,
                                          filter=filter_size,
                                          kernel_size=kernel_size)
        self.autoencoder = Model(
            x1,
            self._buildDecoder(
                z1,
                latent_rep_size,
                max_length,
                charset_length,
                activation=activation
            )
        )

        if weights_file:
            self.autoencoder.load_weights(weights_file)
            self.encoder.load_weights(weights_file, by_name=True)
            self.decoder.load_weights(weights_file, by_name=True)

        print("compiling autoencoder with optimizer =", k_optimizer)
        self.autoencoder.compile(optimizer=k_optimizer,
                                 loss=vae_loss,
                                 metrics=['accuracy'])
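
A minimal usage sketch for the create method above. The MoleculeVAE class name and the toy charset are assumptions for illustration only; they are not part of the snippet:

    charset = ['C', 'N', 'O', '(', ')', '=', '1', '2', ' ']
    vae = MoleculeVAE()  # hypothetical class providing create/_buildEncoder/_buildDecoder
    vae.create(charset, max_length=120, latent_rep_size=292,
               optimizer='Adam', learning_rate=0.001)
    # vae.encoder, vae.decoder and vae.autoencoder are now built;
    # only the autoencoder is compiled for training.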
Example 2
def run(gParameters):

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.keras')
    logfile = (gParameters['logfile'] if gParameters['logfile']
               else gParameters['output_dir'] + ext + '.log')
    p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = candle.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = \
        p1b2.load_data_one_hot(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    input_dim = X_train.shape[1]
    input_vector = Input(shape=(input_dim, ))
    output_dim = y_train.shape[1]

    # Initialize weights and learning rule
    initializer_weights = candle.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = candle.build_initializer('constant', kerasDefaults, 0.)

    activation = gParameters['activation']

    # Define MLP architecture
    layers = gParameters['dense']

    if layers is not None:
        if not isinstance(layers, list):
            layers = list(layers)
        for i, l in enumerate(layers):
            if i == 0:
                x = Dense(l,
                          activation=activation,
                          kernel_initializer=initializer_weights,
                          bias_initializer=initializer_bias,
                          kernel_regularizer=l2(gParameters['penalty']),
                          activity_regularizer=l2(
                              gParameters['penalty']))(input_vector)
            else:
                x = Dense(l,
                          activation=activation,
                          kernel_initializer=initializer_weights,
                          bias_initializer=initializer_bias,
                          kernel_regularizer=l2(gParameters['penalty']),
                          activity_regularizer=l2(gParameters['penalty']))(x)
            if gParameters['drop']:
                x = Dropout(gParameters['drop'])(x)
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(x)
    else:
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(input_vector)

    # Build MLP model
    mlp = Model(outputs=output, inputs=input_vector)
    p1b2.logger.debug('Model: {}'.format(mlp.to_json()))

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    # Compile and display model
    mlp.compile(loss=gParameters['loss'],
                optimizer=optimizer,
                metrics=['accuracy'])
    mlp.summary()

    # Seed random generator for training
    np.random.seed(seed)

    mlp.fit(X_train,
            y_train,
            batch_size=gParameters['batch_size'],
            epochs=gParameters['epochs'],
            validation_data=(X_val, y_val))

    # model save
    #save_filepath = "model_mlp_W_" + ext
    #mlp.save_weights(save_filepath)

    # Evaluate model on test set
    y_pred = mlp.predict(X_test)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
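
The evaluate_accuracy_one_hot helper lives in the benchmark's p1b2 module and is not shown above; a minimal sketch of what such a one-hot accuracy computation typically looks like (an assumption, not the module's exact code):

    import numpy as np

    def evaluate_accuracy_one_hot(y_pred, y_test):
        # Collapse one-hot / probability rows to class indices and compare.
        pred_classes = np.argmax(y_pred, axis=1)
        true_classes = np.argmax(y_test, axis=1)
        return {'accuracy': float(np.mean(pred_classes == true_classes))}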
Example 3
def run(GP):

    # set the seed
    if GP['seed']:
        np.random.seed(GP['seed'])
    else:
        np.random.seed(np.random.randint(10000))

    # Set paths
    if not os.path.isdir(GP['home_dir']):
        print('Keras home directory not set')
        sys.exit(0)
    sys.path.append(GP['home_dir'])

    # Set up logging
    args = candle.ArgumentStruct(**GP)
    #    set_seed(args.rng_seed)
    #    ext = extension_from_parameters(args)
    candle.verify_path(args.save_path)
    prefix = args.save_path  # + ext
    logfile = args.logfile if args.logfile else prefix + '.log'
    candle.set_up_logger(logfile, logger, False)  #args.verbose
    logger.info('Params: {}'.format(GP))

    import p2b1 as hf
    reload(hf)

    #import keras_model_utils as KEU
    #reload(KEU)
    #reload(p2ck)
    #reload(p2ck.optimizers)
    maps = hf.autoencoder_preprocess()

    from keras.optimizers import SGD, RMSprop, Adam
    from keras.datasets import mnist
    from keras.callbacks import LearningRateScheduler, ModelCheckpoint
    from keras import callbacks
    from keras.layers.advanced_activations import ELU
    from keras.preprocessing.image import ImageDataGenerator

    #    GP=hf.ReadConfig(opts.config_file)
    batch_size = GP['batch_size']
    learning_rate = GP['learning_rate']
    kerasDefaults = candle.keras_default_config()

    ##### Read Data ########
    import helper
    (data_files, fields) = p2b1.get_list_of_data_files(GP)
    # Read from local directory
    #(data_files, fields) = helper.get_local_files('/p/gscratchr/brainusr/datasets/cancer/pilot2/3k_run16_10us.35fs-DPPC.20-DIPC.60-CHOL.20.dir/')
    #(data_files, fields) = helper.get_local_files('3k_run16', '/p/lscratchf/brainusr/datasets/cancer/pilot2/')

    # Define datagenerator
    datagen = hf.ImageNoiseDataGenerator(corruption_level=GP['noise_factor'])

    # get data dimension ##
    num_samples = 0
    for f in data_files:

        # Separate the different arrays in the data
        (X, nbrs, resnums) = helper.get_data_arrays(f)

        num_samples += X.shape[0]

    (X, nbrs, resnums) = helper.get_data_arrays(data_files[0])
    print('\nData chunk shape: ', X.shape)

    molecular_hidden_layers = GP['molecular_num_hidden']
    if not molecular_hidden_layers:
        X_train = hf.get_data(X, case=GP['case'])
        input_dim = X_train.shape[1]
    else:
        # computing input dimension for outer AE
        input_dim = X.shape[1] * molecular_hidden_layers[-1]

    print('\nState AE input/output dimension: ', input_dim)

    # get data dimension for molecular autoencoder
    molecular_nbrs = int(GP['molecular_nbrs'])
    num_molecules = X.shape[1]
    num_beads = X.shape[2]

    if GP['nbr_type'] == 'relative':
        # relative x, y, z positions
        num_loc_features = 3
        loc_feat_vect = ['rel_x', 'rel_y', 'rel_z']
    elif GP['nbr_type'] == 'invariant':
        # relative distance and angle
        num_loc_features = 2
        loc_feat_vect = ['rel_dist', 'rel_angle']
    else:
        print('Invalid nbr_type!!')
        sys.exit(1)

    if not GP['type_bool']:
        # only consider molecular location coordinates
        num_type_features = 0
        type_feat_vect = []
    else:
        num_type_features = 5
        type_feat_vect = list(fields.keys())[3:8]

    num_features = num_loc_features + num_type_features + num_beads
    dim = np.prod([num_beads, num_features, molecular_nbrs + 1])
    bead_kernel_size = num_features
    molecular_input_dim = dim
    mol_kernel_size = num_beads

    feature_vector = loc_feat_vect + type_feat_vect + list(fields.keys())[8:]

    print('\nMolecular AE input/output dimension: ', molecular_input_dim)

    print(
        '\nData Format:\n[Frames (%s), Molecules (%s), Beads (%s), %s (%s)]' %
        (num_samples, num_molecules, num_beads, feature_vector, num_features))

    ### Define Model, Solver and Compile ##########
    print('\nDefine the model and compile')
    opt = candle.build_optimizer(GP['optimizer'], learning_rate, kerasDefaults)
    model_type = 'mlp'
    memo = '%s_%s' % (GP['base_memo'], model_type)

    ######## Define Molecular Model, Solver and Compile #########
    molecular_nonlinearity = GP['molecular_nonlinearity']

    len_molecular_hidden_layers = len(molecular_hidden_layers)
    conv_bool = GP['conv_bool']
    full_conv_bool = GP['full_conv_bool']
    if conv_bool:
        molecular_model, molecular_encoder = AE_models.conv_dense_mol_auto(
            bead_k_size=bead_kernel_size,
            mol_k_size=mol_kernel_size,
            weights_path=None,
            input_shape=(1, molecular_input_dim, 1),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['drop_prob']))
    elif full_conv_bool:
        molecular_model, molecular_encoder = AE_models.full_conv_mol_auto(
            bead_k_size=bead_kernel_size,
            mol_k_size=mol_kernel_size,
            weights_path=None,
            input_shape=(1, molecular_input_dim, 1),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['drop_prob']))

    else:
        molecular_model, molecular_encoder = AE_models.dense_auto(
            weights_path=None,
            input_shape=(molecular_input_dim, ),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['drop_prob']))

    if GP['loss'] == 'mse':
        loss_func = 'mse'
    elif GP['loss'] == 'custom':
        loss_func = helper.combined_loss

    molecular_model.compile(
        optimizer=opt,
        loss=loss_func,
        metrics=['mean_squared_error', 'mean_absolute_error'])
    print('\nModel Summary: \n')
    molecular_model.summary()
    ##### set up callbacks and cooling for the molecular_model ##########
    drop = 0.5
    mb_epochs = GP['epochs']
    initial_lrate = GP['learning_rate']
    epochs_drop = 1 + int(np.floor(mb_epochs / 3))

    def step_decay(epoch):
        # initial_lrate, epochs_drop and drop are captured from the enclosing
        # run() scope; declaring them global here would make Python look for
        # module-level names that do not exist.
        lrate = initial_lrate * np.power(drop,
                                         np.floor((1 + epoch) / epochs_drop))
        return lrate

    lr_scheduler = LearningRateScheduler(step_decay)
    history = callbacks.History()
    # callbacks=[history,lr_scheduler]

    history_logger = candle.LoggingCallback(logger.debug)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=GP)
    timeoutMonitor = candle.TerminateOnTimeOut(TIMEOUT)
    callbacks = [history, history_logger, candleRemoteMonitor, timeoutMonitor]
    loss = 0.

    #### Save the Model to disk
    if GP['save_path'] is not None:
        save_path = GP['save_path']
        if not os.path.exists(save_path):
            os.makedirs(save_path)
    else:
        save_path = '.'

    model_json = molecular_model.to_json()
    with open(save_path + '/model.json', "w") as json_file:
        json_file.write(model_json)

    encoder_json = molecular_encoder.to_json()
    with open(save_path + '/encoder.json', "w") as json_file:
        json_file.write(encoder_json)

    print('Saved model to disk')

    #### Train the Model
    if GP['train_bool']:
        ct = hf.Candle_Molecular_Train(
            molecular_model,
            molecular_encoder,
            data_files,
            mb_epochs,
            callbacks,
            batch_size=batch_size,
            nbr_type=GP['nbr_type'],
            save_path=GP['save_path'],
            len_molecular_hidden_layers=len_molecular_hidden_layers,
            molecular_nbrs=molecular_nbrs,
            conv_bool=conv_bool,
            full_conv_bool=full_conv_bool,
            type_bool=GP['type_bool'],
            sampling_density=GP['sampling_density'])
        frame_loss, frame_mse = ct.train_ac()
    else:
        frame_mse = []
        frame_loss = []

    return frame_loss, frame_mse
Example 4
def run(gParameters, data_path):

    kerasDefaults = candle.keras_default_config()

    rnn_size = gParameters['rnn_size']
    n_layers = gParameters['n_layers']
    learning_rate = gParameters['learning_rate']
    dropout = gParameters['drop']
    recurrent_dropout = gParameters['recurrent_dropout']
    n_epochs = gParameters['epochs']
    data_train = data_path + '/data.pkl'
    verbose = gParameters['verbose']
    savedir = gParameters['output_dir']
    do_sample = gParameters['do_sample']
    temperature = gParameters['temperature']
    primetext = gParameters['primetext']
    length = gParameters['length']

    # load data from pickle
    f = open(data_train, 'rb')

    if (sys.version_info > (3, 0)):
        classes = pickle.load(f, encoding='latin1')
        chars = pickle.load(f, encoding='latin1')
        char_indices = pickle.load(f, encoding='latin1')
        indices_char = pickle.load(f, encoding='latin1')

        maxlen = pickle.load(f, encoding='latin1')
        step = pickle.load(f, encoding='latin1')

        X_ind = pickle.load(f, encoding='latin1')
        y_ind = pickle.load(f, encoding='latin1')
    else:
        classes = pickle.load(f)
        chars = pickle.load(f)
        char_indices = pickle.load(f)
        indices_char = pickle.load(f)

        maxlen = pickle.load(f)
        step = pickle.load(f)

        X_ind = pickle.load(f)
        y_ind = pickle.load(f)

    f.close()

    [s1, s2] = X_ind.shape
    print(X_ind.shape)
    print(y_ind.shape)
    print(maxlen)
    print(len(chars))

    X = np.zeros((s1, s2, len(chars)), dtype=bool)
    y = np.zeros((s1, len(chars)), dtype=bool)

    for i in range(s1):
        for t in range(s2):
            X[i, t, X_ind[i, t]] = 1
        y[i, y_ind[i]] = 1

    # build the model: a single LSTM
    if verbose:
        print('Build model...')

    model = Sequential()

    # for rnn_size in rnn_sizes:
    for k in range(n_layers):
        if k < n_layers - 1:
            ret_seq = True
        else:
            ret_seq = False

        if k == 0:
            model.add(
                LSTM(rnn_size,
                     input_shape=(maxlen, len(chars)),
                     return_sequences=ret_seq,
                     dropout=dropout,
                     recurrent_dropout=recurrent_dropout))
        else:
            model.add(
                LSTM(rnn_size,
                     dropout=dropout,
                     recurrent_dropout=recurrent_dropout,
                     return_sequences=ret_seq))

    model.add(Dense(len(chars)))
    model.add(Activation(gParameters['activation']))

    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    model.compile(loss=gParameters['loss'], optimizer=optimizer)

    if verbose:
        model.summary()

    for iteration in range(1, n_epochs + 1):
        if verbose:
            print()
            print('-' * 50)
            print('Iteration', iteration)

        history = LossHistory()
        model.fit(X, y, batch_size=100, epochs=1, callbacks=[history])

        loss = history.losses[-1]
        if verbose:
            print(loss)

        dirname = savedir
        if len(dirname) > 0 and not dirname.endswith('/'):
            dirname = dirname + '/'

        if not os.path.exists(dirname):
            os.makedirs(dirname)

        # serialize model to JSON
        model_json = model.to_json()
        with open(
                dirname + "/model_" + str(iteration) + "_" +
                "{:f}".format(loss) + ".json", "w") as json_file:
            json_file.write(model_json)

        # serialize weights to HDF5
        model.save_weights(dirname + "/model_" + str(iteration) + "_" +
                           "{:f}".format(loss) + ".h5")

        if verbose:
            print("Checkpoint saved.")

        if do_sample:
            outtext = open(dirname + "/example_" + str(iteration) + "_" +
                           "{:f}".format(loss) + ".txt",
                           "w",
                           encoding='utf-8')

            diversity = temperature

            outtext.write('----- diversity:' + str(diversity) + "\n")

            generated = ''
            seedstr = primetext

            outtext.write('----- Generating with seed: "' + seedstr + '"' +
                          "\n")

            sentence = " " * maxlen

            # class_index = 0
            generated += sentence
            outtext.write(generated)

            for c in seedstr:
                sentence = sentence[1:] + c
                x = np.zeros((1, maxlen, len(chars)))
                for t, char in enumerate(sentence):
                    x[0, t, char_indices[char]] = 1.

                preds = model.predict(x, verbose=verbose)[0]
                next_index = sample(preds, diversity)
                next_char = indices_char[next_index]

                generated += c

                outtext.write(c)

            for i in range(length):
                x = np.zeros((1, maxlen, len(chars)))
                for t, char in enumerate(sentence):
                    x[0, t, char_indices[char]] = 1.

                preds = model.predict(x, verbose=verbose)[0]
                next_index = sample(preds, diversity)
                next_char = indices_char[next_index]

                generated += next_char
                sentence = sentence[1:] + next_char

            if (sys.version_info > (3, 0)):
                outtext.write(generated + '\n')
            else:
                outtext.write(generated.decode('utf-8').encode('utf-8') + '\n')

            outtext.close()
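
The sampling loop above relies on a sample(preds, temperature) helper that is defined elsewhere in the benchmark; a minimal sketch of the usual temperature-based sampling (an assumption, not necessarily the benchmark's exact implementation):

    import numpy as np

    def sample(preds, temperature=1.0):
        # Re-weight the predicted character distribution by temperature
        # and draw a single index from the resulting multinomial.
        preds = np.asarray(preds).astype('float64')
        preds = np.log(preds + 1e-7) / temperature
        exp_preds = np.exp(preds)
        preds = exp_preds / np.sum(exp_preds)
        probas = np.random.multinomial(1, preds, 1)
        return np.argmax(probas)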
Example 5
def build_model(gParameters,
                kerasDefaults,
                shared_nnet_spec,
                individual_nnet_spec,
                input_dim,
                Y_train,
                Y_test,
                verbose=False):

    labels_train = []
    labels_test = []

    n_out_nodes = []

    for l in range(len(Y_train)):
        truth_train = np.array(Y_train[l], dtype='int32')
        truth_test = np.array(Y_test[l], dtype='int32')

        mv = int(np.max(truth_train))

        label_train = np.zeros((len(truth_train), mv + 1))
        for i in range(len(truth_train)):
            label_train[i, truth_train[i]] = 1

        label_test = np.zeros((len(truth_test), mv + 1))
        for i in range(len(truth_test)):
            label_test[i, truth_test[i]] = 1

        labels_train.append(label_train)
        labels_test.append(label_test)

        n_out_nodes.append(mv + 1)

    shared_layers = []

    # input layer
    layer = Input(shape=(input_dim, ), name='input')
    shared_layers.append(layer)

    # shared layers
    for k in range(len(shared_nnet_spec)):
        layer = Dense(shared_nnet_spec[k],
                      activation=gParameters['activation'],
                      name='shared_layer_' + str(k))(shared_layers[-1])
        # keep the dense layer, then optionally stack dropout on top of it,
        # mirroring the individual-layer construction below
        shared_layers.append(layer)
        if gParameters['drop'] > 0:
            layer = Dropout(gParameters['drop'])(shared_layers[-1])
            shared_layers.append(layer)

    # individual layers
    indiv_layers_arr = []
    models = []

    trainable_count = 0
    non_trainable_count = 0

    for l in range(len(individual_nnet_spec)):
        indiv_layers = [shared_layers[-1]]
        for k in range(len(individual_nnet_spec[l]) + 1):
            if k < len(individual_nnet_spec[l]):
                layer = Dense(individual_nnet_spec[l][k],
                              activation=gParameters['activation'],
                              name='indiv_layer_' + str(l) + '_' + str(k))(
                                  indiv_layers[-1])
                indiv_layers.append(layer)
                if gParameters['drop'] > 0:
                    layer = Dropout(gParameters['drop'])(indiv_layers[-1])
                    indiv_layers.append(layer)
            else:
                layer = Dense(n_out_nodes[l],
                              activation=gParameters['out_activation'],
                              name='out_' + str(l))(indiv_layers[-1])
                indiv_layers.append(layer)

        indiv_layers_arr.append(indiv_layers)

        model = Model(inputs=[shared_layers[0]], outputs=[indiv_layers[-1]])

        # calculate trainable/non-trainable param count for each model
        param_counts = candle.compute_trainable_params(model)
        trainable_count += param_counts['trainable_params']
        non_trainable_count += param_counts['non_trainable_params']

        models.append(model)

    # capture total param counts
    gParameters['trainable_params'] = trainable_count
    gParameters['non_trainable_params'] = non_trainable_count
    gParameters['total_params'] = trainable_count + non_trainable_count

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    # DEBUG - verify
    if verbose:
        for k in range(len(models)):
            model = models[k]
            print('Model: ', k)
            model.summary()

    for k in range(len(models)):
        model = models[k]
        model.compile(loss=gParameters['loss'],
                      optimizer=optimizer,
                      metrics=[gParameters['metrics']])

    return models, labels_train, labels_test
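
A minimal sketch of calling build_model with two tasks (all values below are illustrative assumptions, not taken from the benchmark's configuration):

    import numpy as np

    kerasDefaults = candle.keras_default_config()
    gParams = {'activation': 'relu', 'out_activation': 'softmax', 'drop': 0.0,
               'optimizer': 'sgd', 'learning_rate': 0.01,
               'loss': 'categorical_crossentropy', 'metrics': 'accuracy'}
    Y_train = [np.random.randint(0, 4, 100), np.random.randint(0, 2, 100)]
    Y_test = [np.random.randint(0, 4, 20), np.random.randint(0, 2, 20)]
    models, labels_train, labels_test = build_model(
        gParams, kerasDefaults, shared_nnet_spec=[128],
        individual_nnet_spec=[[64], [64]], input_dim=400,
        Y_train=Y_train, Y_test=Y_test)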
Example 6
def run(gParameters):
    print('gParameters: ', gParameters)

    EPOCH = gParameters['epochs']
    BATCH = gParameters['batch_size']
    nb_classes = gParameters['classes']
    DR = gParameters['drop']
    ACTIVATION = gParameters['activation']
    outdir = gParameters['output_dir']
    kerasDefaults = candle_keras.keras_default_config()
    kerasDefaults['momentum_sgd'] = gParameters['momentum']
    OPTIMIZER = candle_keras.build_optimizer(gParameters['optimizer'],
                                             gParameters['learning_rate'],
                                             kerasDefaults)
    PL     = 6213   # 38 + 60483
    PS     = 6212   # 60483

    X_train, Y_train, X_test, Y_test = load_data(nb_classes, PL, gParameters)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)


    inputs = Input(shape=(PS,))

    x = Dense(2000, activation=ACTIVATION)(inputs)
    x = Dense(1000, activation=ACTIVATION)(x)

    for i in range(gParameters['connections']):
        x = f(x, gParameters, distance=gParameters['distance'] )

    x = Dropout(DR)(x)

    x = Dense(500, activation=ACTIVATION)(x)
    x = Dropout(DR)(x)
    x = Dense(250, activation=ACTIVATION)(x)
    x = Dropout(DR)(x)
    x = Dense(125, activation=ACTIVATION)(x)
    x = Dropout(DR)(x)
    x = Dense(62, activation=ACTIVATION)(x)
    x = Dropout(DR)(x)
    x = Dense(30, activation=ACTIVATION)(x)
    x = Dropout(DR)(x)
    outputs = Dense(2, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer=OPTIMIZER,
                  metrics=['accuracy'])

    # set up a bunch of callbacks to do work during model training.

    checkpointer = ModelCheckpoint(filepath=outdir + '/t29res.autosave.model.h5',
                                   verbose=0, save_weights_only=False,
                                   save_best_only=True)
    csv_logger = CSVLogger(outdir + '/t29res.training.log')
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=10,
                                  verbose=1, mode='auto', epsilon=0.0001,
                                  cooldown=3, min_lr=0.000000001)
    callbacks = [checkpointer, csv_logger, reduce_lr]

    def warmup_scheduler(epoch):
        lr = gParameters['learning_rate']
        if epoch <= 4:
            K.set_value(model.optimizer.lr, (lr * (epoch + 1) / 5))
        print('Epoch {}: lr={}'.format(epoch, K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    if 'warmup_lr' in gParameters:

        warmup_lr = LearningRateScheduler(warmup_scheduler)
        print("adding LearningRateScheduler")
        callbacks.append(warmup_lr)


    history = model.fit(X_train, Y_train,
                        batch_size=BATCH,
                        epochs=EPOCH,
                        verbose=1,
                        validation_data=(X_test, Y_test),
                        callbacks=callbacks)

    score = model.evaluate(X_test, Y_test, verbose=0)

    # summarize history for accuracy
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model Accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')

    plt.savefig(outdir+'/t29res.accuracy.png', bbox_inches='tight')
    plt.savefig(outdir+'/t29res.accuracy.pdf', bbox_inches='tight')

    plt.close()

    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')

    plt.savefig(outdir+'/t29res.loss.png', bbox_inches='tight')
    plt.savefig(outdir+'/t29res.loss.pdf', bbox_inches='tight')

    print('Test val_loss:', score[0])
    print('Test accuracy:', score[1])

    # serialize model to JSON
    model_json = model.to_json()
    with open(outdir+"/t29res.model.json", "w") as json_file:
        json_file.write(model_json)

    # serialize model to YAML
    model_yaml = model.to_yaml()
    with open(outdir+"/t29res.model.yaml", "w") as yaml_file:
        yaml_file.write(model_yaml)

    # serialize weights to HDF5
    model.save_weights(outdir+"/t29res.model.h5")
    print("Saved model to disk")

    # load json and create model
    json_file = open(outdir+'/t29res.model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model_json = model_from_json(loaded_model_json)

    # load yaml and create model
    yaml_file = open(outdir+'/t29res.model.yaml', 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    loaded_model_yaml = model_from_yaml(loaded_model_yaml)

    # load weights into new model
    loaded_model_json.load_weights(outdir+"/t29res.model.h5")
    print("Loaded json model from disk")

    # evaluate json loaded model on test data
    loaded_model_json.compile(loss='binary_crossentropy', optimizer=gParameters['optimizer'], metrics=['accuracy'])
    score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

    print('json Validation loss:', score_json[0])
    print('json Validation accuracy:', score_json[1])
    print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1]*100))

    # load weights into new model
    loaded_model_yaml.load_weights(outdir+"/t29res.model.h5")
    print("Loaded yaml model from disk")

    # evaluate loaded model on test data
    loaded_model_yaml.compile(loss='binary_crossentropy', optimizer=gParameters['optimizer'], metrics=['accuracy'])
    score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

    print('yaml Validation loss:', score_yaml[0])
    print('yaml Validation accuracy:', score_yaml[1])
    print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1]*100))

    # predict using loaded yaml model on test and training data
    predict_yaml_train = loaded_model_yaml.predict(X_train)
    predict_yaml_test = loaded_model_yaml.predict(X_test)

    print('Yaml_train_shape:', predict_yaml_train.shape)
    print('Yaml_test_shape:', predict_yaml_test.shape)

    predict_yaml_train_classes = np.argmax(predict_yaml_train, axis=1)
    predict_yaml_test_classes = np.argmax(predict_yaml_test, axis=1)

    np.savetxt(outdir+"/predict_yaml_train.csv", predict_yaml_train, delimiter=",", fmt="%.3f")
    np.savetxt(outdir+"/predict_yaml_test.csv", predict_yaml_test, delimiter=",", fmt="%.3f")

    np.savetxt(outdir+"/predict_yaml_train_classes.csv", predict_yaml_train_classes, delimiter=",",fmt="%d")
    np.savetxt(outdir+"/predict_yaml_test_classes.csv", predict_yaml_test_classes, delimiter=",",fmt="%d")

    return history
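
The loop in this example calls a helper f(x, gParameters, distance=...) that is not shown; a hypothetical sketch of such a skip-connection block, assuming it stacks dense layers and adds the block input back onto the output (the benchmark's real helper may differ):

    from keras.layers import Dense, Add

    def f(x, gParameters, distance=1):
        # Stack `distance` Dense(1000) layers (matching the 1000-unit tensor
        # fed to f above) and add the original input back in as a residual.
        shortcut = x
        for _ in range(distance):
            x = Dense(1000, activation=gParameters['activation'])(x)
        return Add()([shortcut, x])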
Example 7
#model.add(Activation('relu'))
#model.add(MaxPooling1D(pool_size=10))
#model.add(Flatten())
#model.add(Dense(200))
#model.add(Activation('relu'))
#model.add(Dropout(0.1))
#model.add(Dense(20))
#model.add(Activation('relu'))
#model.add(Dropout(0.1))
#model.add(Dense(CLASSES))
#model.add(Activation('softmax'))

kerasDefaults = candle.keras_default_config()

# Define optimizer
optimizer = candle.build_optimizer(hyperparams['optimizer'],
                                   hyperparams['learning_rate'], kerasDefaults)

model.summary()
model.compile(loss=hyperparams['loss'],
              optimizer=optimizer,
              metrics=[hyperparams['metrics']])

output_dir = hyperparams['save']

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# calculate trainable and non-trainable params
hyperparams.update(candle.compute_trainable_params(model))

# set up a bunch of callbacks to do work during model training..
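
This fragment stops just before the callbacks are created. Based on the fuller Example 8 below, the setup typically continues along these lines (the 'timeout' key and the exact callback choices are carried over from that example and are assumptions here):

    from keras.callbacks import CSVLogger, ReduceLROnPlateau

    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                                  verbose=1, mode='auto')
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=hyperparams)
    timeoutMonitor = candle.TerminateOnTimeOut(hyperparams['timeout'])
    callbacks = [csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor]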
Example 8
def run(gParameters):

    print('Params:', gParameters)

    file_train = gParameters['train_data']
    file_test = gParameters['test_data']
    url = gParameters['data_url']

    train_file = candle.get_file(file_train, url+file_train, cache_subdir='Pilot1')
    test_file = candle.get_file(file_test, url+file_test, cache_subdir='Pilot1')

    X_train, Y_train, X_test, Y_test = load_data(train_file, test_file, gParameters)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)

    x_train_len = X_train.shape[1]

    # this reshaping is critical for the Conv1D to work

    X_train = np.expand_dims(X_train, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    model = Sequential()

    layer_list = list(range(0, len(gParameters['conv']), 3))
    for l, i in enumerate(layer_list):
        filters = gParameters['conv'][i]
        filter_len = gParameters['conv'][i+1]
        stride = gParameters['conv'][i+2]
        print(int(i/3), filters, filter_len, stride)
        if gParameters['pool']:
            pool_list = gParameters['pool']
            if not isinstance(pool_list, list):
                pool_list = list(pool_list)

        if filters <= 0 or filter_len <= 0 or stride <= 0:
            break
        if 'locally_connected' in gParameters:
            model.add(LocallyConnected1D(filters, filter_len, strides=stride,
                                         padding='valid',
                                         input_shape=(x_train_len, 1)))
        else:
            # input layer
            if i == 0:
                model.add(Conv1D(filters=filters, kernel_size=filter_len,
                                 strides=stride, padding='valid',
                                 input_shape=(x_train_len, 1)))
            else:
                model.add(Conv1D(filters=filters, kernel_size=filter_len,
                                 strides=stride, padding='valid'))
        model.add(Activation(gParameters['activation']))
        if gParameters['pool']:
            model.add(MaxPooling1D(pool_size=pool_list[int(i / 3)]))

    model.add(Flatten())

    for layer in gParameters['dense']:
        if layer:
            model.add(Dense(layer))
            model.add(Activation(gParameters['activation']))
            if gParameters['drop']:
                model.add(Dropout(gParameters['drop']))
    model.add(Dense(gParameters['classes']))
    model.add(Activation(gParameters['out_act']))

#Reference case
#model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
#model.add(Activation('relu'))
#model.add(MaxPooling1D(pool_size=1))
#model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
#model.add(Activation('relu'))
#model.add(MaxPooling1D(pool_size=10))
#model.add(Flatten())
#model.add(Dense(200))
#model.add(Activation('relu'))
#model.add(Dropout(0.1))
#model.add(Dense(20))
#model.add(Activation('relu'))
#model.add(Dropout(0.1))
#model.add(Dense(CLASSES))
#model.add(Activation('softmax'))

    kerasDefaults = candle.keras_default_config()

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    model.summary()
    model.compile(loss=gParameters['loss'],
                  optimizer=optimizer,
                  metrics=[gParameters['metrics']])

    output_dir = gParameters['save']

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    gParameters.update(candle.compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = gParameters['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    # checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                                  verbose=1, mode='auto', epsilon=0.0001,
                                  cooldown=0, min_lr=0)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
    timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])
    history = model.fit(X_train, Y_train,
                        batch_size=gParameters['batch_size'],
                        epochs=gParameters['epochs'],
                        verbose=1,
                        validation_data=(X_test, Y_test),
                        callbacks=[csv_logger, reduce_lr, candleRemoteMonitor,
                                   timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)

    if False:
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # serialize model to JSON
        model_json = model.to_json()
        with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
            json_file.write(model_json)

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
            yaml_file.write(model_yaml)

        # serialize weights to HDF5
        model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
        print("Saved model to disk")

        # load json and create model
        json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model_json = model_from_json(loaded_model_json)


        # load yaml and create model
        yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        loaded_model_yaml = model_from_yaml(loaded_model_yaml)


        # load weights into new model
        loaded_model_json.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
        print("Loaded json model from disk")

        # evaluate json loaded model on test data
        loaded_model_json.compile(loss=gParameters['loss'],
            optimizer=gParameters['optimizer'],
            metrics=[gParameters['metrics']])
        score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

        print('json Test score:', score_json[0])
        print('json Test accuracy:', score_json[1])

        print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1]*100))

        # load weights into new model
        loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
        print("Loaded yaml model from disk")

        # evaluate loaded model on test data
        loaded_model_yaml.compile(loss=gParameters['loss'],
            optimizer=gParameters['optimizer'],
            metrics=[gParameters['metrics']])
        score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

        print('yaml Test score:', score_yaml[0])
        print('yaml Test accuracy:', score_yaml[1])

        print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1]*100))

    return history
Example 9
def run_cnn(GP,
            train_x,
            train_y,
            test_x,
            test_y,
            learning_rate=0.01,
            batch_size=10,
            epochs=10,
            dropout=0.5,
            optimizer='adam',
            wv_len=300,
            filter_sizes=[3, 4, 5],
            num_filters=[300, 300, 300],
            emb_l2=0.001,
            w_l2=0.01):

    max_vocab = np.max(train_x)
    max_vocab2 = np.max(test_x)
    if max_vocab2 > max_vocab:
        max_vocab = max_vocab2

    wv_mat = np.random.randn(max_vocab + 1, wv_len).astype('float32') * 0.1

    num_classes = []
    num_classes.append(np.max(train_y[:, 0]) + 1)
    num_classes.append(np.max(train_y[:, 1]) + 1)
    num_classes.append(np.max(train_y[:, 2]) + 1)
    num_classes.append(np.max(train_y[:, 3]) + 1)

    kerasDefaults = candle.keras_default_config()
    optimizer_run = candle.build_optimizer(optimizer, learning_rate,
                                           kerasDefaults)

    cnn = keras_mt_shared_cnn.init_export_network(num_classes=num_classes,
                                                  in_seq_len=1500,
                                                  vocab_size=len(wv_mat),
                                                  wv_space=wv_len,
                                                  filter_sizes=filter_sizes,
                                                  num_filters=num_filters,
                                                  concat_dropout_prob=dropout,
                                                  emb_l2=emb_l2,
                                                  w_l2=w_l2,
                                                  optimizer=optimizer_run)

    print(cnn.summary())

    validation_data = ({
        'Input': test_x
    }, {
        'Dense0': test_y[:, 0],
        'Dense1': test_y[:, 1],
        'Dense2': test_y[:, 2],
        'Dense3': test_y[:, 3]
    })

    # candleRemoteMonitor = CandleRemoteMonitor(params= GP)
    # timeoutMonitor = TerminateOnTimeOut(TIMEOUT)

    candleRemoteMonitor = candle.CandleRemoteMonitor(params=GP)
    timeoutMonitor = candle.TerminateOnTimeOut(GP['timeout'])

    history = cnn.fit(x=np.array(train_x),
                      y=[
                          np.array(train_y[:, 0]),
                          np.array(train_y[:, 1]),
                          np.array(train_y[:, 2]),
                          np.array(train_y[:, 3])
                      ],
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=2,
                      validation_data=validation_data,
                      callbacks=[candleRemoteMonitor, timeoutMonitor])

    return history
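
A minimal sketch of calling run_cnn on synthetic token data (the shapes, vocabulary size, and GP contents below are assumptions for illustration; the real benchmark supplies its own preprocessed sequences and parameters):

    import numpy as np

    GP = {'timeout': 3600}  # only the key used by TerminateOnTimeOut above
    train_x = np.random.randint(0, 5000, size=(64, 1500))
    test_x = np.random.randint(0, 5000, size=(16, 1500))
    train_y = np.random.randint(0, 4, size=(64, 4))
    test_y = np.random.randint(0, 4, size=(16, 4))
    history = run_cnn(GP, train_x, train_y, test_x, test_y,
                      batch_size=16, epochs=1, wv_len=50)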