Example #1
def get_discriminator():

    model = Sequential()

    # converts from 3-dimension to 1-dimension
    model.add(Flatten(input_shape=img_shape))

    # first block. input dimension: 784     output dimension: 512
    model.add(Dense(512, kernel_regularizer=l2(WEIGHT_DECAY)))
    model.add(LeakyReLU(alpha=0.2))

    # second block. input dimension: 512    output dimension: 256
    model.add(Dense(256, kernel_regularizer=l2(WEIGHT_DECAY)))
    model.add(LeakyReLU(alpha=0.2))

    # third block. input dimension: 256     output dimension: 128
    model.add(Dense(128, kernel_regularizer=l2(WEIGHT_DECAY)))
    model.add(LeakyReLU(alpha=0.2))

    # final block. input dimension: 128     output dimension: 1
    model.add(
        Dense(1, activation="sigmoid", kernel_regularizer=l2(WEIGHT_DECAY)))

    model.compile(loss=BinaryCrossentropy(),
                  optimizer=OPTIMIZER,
                  metrics=["accuracy"])
    return model
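The snippet depends on three module-level globals that are not shown: img_shape, WEIGHT_DECAY, and OPTIMIZER. A minimal usage sketch, with assumed values for all three:

import numpy as np
from tensorflow.keras.optimizers import Adam

img_shape = (28, 28, 1)   # assumption: MNIST-sized grayscale images (28*28 = 784)
WEIGHT_DECAY = 1e-4       # assumption: small L2 penalty
OPTIMIZER = Adam(2e-4)    # assumption: a common discriminator optimizer

disc = get_discriminator()
fake_batch = np.random.rand(4, *img_shape).astype("float32")
print(disc.predict(fake_batch).shape)  # (4, 1): one real/fake probability per image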
Example #2
def create_sequential(input_size,
                      activation='relu',
                      hidden_layer_size=32,
                      loss=BinaryCrossentropy(),
                      optimizer='adam',
                      metrics=['accuracy'],
                      learning_rate=0.001):
    """ 
    Costruisce un modello Feed Forward con la seguente struttura
    
    Strato input:           ( , 82)
    ------------------------
    Strato Dense Hidden:    Input   ( ,82)                       Output  ( , hidden_layer_size)
    ------------------------
    Strato Dense output:    Input   ( , hidden_layer_size)       Output  ( , 1)
    """
    model = Sequential()

    model.add(
        Dense(hidden_layer_size, activation=activation,
              input_shape=input_size))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss=loss,
                  metrics=metrics)

    return model
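Per the docstring, the network expects 82 input features. A quick smoke test, using random data purely for illustration:

import numpy as np

model = create_sequential(input_size=(82, ))
x = np.random.rand(16, 82)                 # 16 illustrative samples
y = np.random.randint(0, 2, size=(16, 1))  # illustrative binary labels
model.fit(x, y, epochs=1, verbose=0)
print(model.predict(x).shape)              # (16, 1)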
Example #3
    def create_model(self):
        input_A = Input(shape=IMAGE_SHAPE)
        input_B = Input(shape=IMAGE_SHAPE)

        preTrained = VGG16(include_top=False,
                           weights='imagenet',
                           input_shape=IMAGE_SHAPE)

        #for l in preTrained.layers[:-3]:
        #l.trainable = False

        flatten1 = Flatten()(preTrained.layers[-2].output)
        dense1 = Dense(512, activation='relu')(flatten1)

        modifiedPreTrained = Model(preTrained.input, dense1)

        output_A = modifiedPreTrained(input_A)
        output_B = modifiedPreTrained(input_B)

        l = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
        l_out = l([output_A, output_B])

        #dense2 = Dense(512, activation='relu')(l_out)
        output = Dense(1, activation='sigmoid')(l_out)

        model = Model(inputs=[input_A, input_B], outputs=output)
        model.compile(loss=BinaryCrossentropy(),
                      metrics=['acc'],
                      optimizer=Adam(learning_rate=LEARNING_RATE))

        return model
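This is a Siamese setup: both inputs share one modified VGG16 tower, and the Lambda layer scores the absolute difference of the two feature vectors. A shape-level inference sketch; IMAGE_SHAPE and LEARNING_RATE are constants assumed elsewhere, and net_builder stands in for the unshown owner of create_model:

import numpy as np

IMAGE_SHAPE = (224, 224, 3)  # assumption: any valid VGG16 input shape works
LEARNING_RATE = 1e-4         # assumption

siamese = net_builder.create_model()  # net_builder: hypothetical owner instance
a = np.random.rand(1, *IMAGE_SHAPE).astype("float32")
b = np.random.rand(1, *IMAGE_SHAPE).astype("float32")
print(siamese.predict([a, b]))  # probability that the two images match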
Example #4
def train_model(x_train, y_train, model, epochs, batch_size, path):

    # define callbacks
    checkpoint = ModelCheckpoint(path,
                                 save_weights_only=True,
                                 monitor='val_acc',
                                 mode='max',
                                 save_best_only=True,
                                 verbose=1)

    # define parameters
    optimizer = Adam(learning_rate=0.001,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-07,
                     amsgrad=False)
    loss = BinaryCrossentropy()

    # initiate training
    model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])

    # start training
    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        validation_split=0.2,
                        epochs=epochs,
                        shuffle=True,
                        verbose=1,
                        callbacks=[checkpoint])

    return history, model
Example #5
 def __init__(self, modelType='nb', opt=None, inputDim=None):
     if (modelType == 'nb'):
         from sklearn.naive_bayes import MultinomialNB
         self.model = MultinomialNB()
         self.modelType = 'Naive Bayes'
     if (modelType == 'svm'):
         from sklearn import svm
         kernelType = 'rbf'  # default kernel for SVMs
         if opt:
             kernelType = opt
         self.model = svm.SVC(kernel=kernelType)
         self.modelType = 'Support Vector Machine'
     if (modelType == 'lr'):
         from sklearn.linear_model import LogisticRegression
         self.model = LogisticRegression(max_iter=1000)
         self.modelType = 'Logistic Regression'
     if (modelType == 'nn'):
         from keras.models import Sequential
         from keras.layers import Dense
         from keras.losses import BinaryCrossentropy
         # the final layer applies softmax, so the model emits probabilities,
         # not logits; from_logits must therefore be False
         loss = BinaryCrossentropy()
         self.model = Sequential()
         self.model.add(
             Dense(512, input_shape=(inputDim, ), activation='relu'))
         self.model.add(Dense(256, activation='relu'))
         self.model.add(Dense(128, activation='relu'))
         self.model.add(Dense(2, activation='softmax'))
         self.model.compile(loss=loss,
                            optimizer='adam',
                            metrics=['accuracy'])
         self.model.summary()
         self.modelType = 'Neural Network'
Example #6
def classify(message):
    sequence = TOKENIZER.texts_to_sequences([message])
    sequence = pad_sequences(sequence, maxlen=250)

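    # NOTE: compiling on every call is redundant; Model.predict does not
    # require a compiled model, so this could be done once at load time.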
    MODEL.compile(loss=BinaryCrossentropy(), optimizer=Adam(), metrics=[AUC()])
    prediction = MODEL.predict(sequence)

    return prediction[0]
Example #7
    def generator_loss(self, fake_output):
        '''
        Like the above, but for the generator.
        :return: Generator loss value
        '''

        cross_entropy = BinaryCrossentropy(from_logits=True)
        return cross_entropy(tf.ones_like(fake_output), fake_output)  # targets of ones: the generator wants fakes scored as real
Example #8
    def build_gan(self):
        z = Input(shape=self.g.mp['input_shape'])
        generated_out = self.g.model(z)
        self.d.model.trainable = False
        is_real = self.d.model(generated_out)

        combined = Model(z, is_real)
        combined.compile(loss=BinaryCrossentropy(from_logits=True), optimizer='rmsprop')
        return combined
Example #9
 def discriminator_loss(self, real_output, fake_output):
     '''
     Defines the loss function for the discriminator.
     Uses the binary cross-entropy (log-loss) helper from Keras,
     'BinaryCrossentropy'. Returns the combined loss.
     '''
     cross_entropy = BinaryCrossentropy(from_logits=True)
     real_loss = cross_entropy(tf.ones_like(real_output), real_output)  # targets of one for real samples
     fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)  # targets of zero for fake samples
     total_loss = real_loss + fake_loss
     return total_loss
Example #10
def get_gan(gen, disc):

    # disabling the training of the model
    disc.trainable = False

    # combining the generator and discriminator model
    model = Sequential()
    model.add(gen)
    model.add(disc)

    model.compile(loss=BinaryCrossentropy(), optimizer=OPTIMIZER)
    return model
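The stacked model trains the generator through the frozen discriminator. A hedged sketch of the usual alternating loop, assuming disc was compiled before being frozen (as in Example #1), a latent size of 100, and a hypothetical real_batch() helper that yields real images:

import numpy as np

latent_dim = 100  # assumption
gan = get_gan(gen, disc)
for step in range(1000):
    noise = np.random.normal(size=(32, latent_dim))
    fake = gen.predict(noise, verbose=0)
    # 1) update the discriminator on real and fake batches
    disc.train_on_batch(real_batch(32), np.ones((32, 1)))  # real_batch: hypothetical
    disc.train_on_batch(fake, np.zeros((32, 1)))
    # 2) update the generator through the frozen discriminator
    gan.train_on_batch(noise, np.ones((32, 1)))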
Example #11
def eval_model(model, gens):
    '''
    Evaluate model comparing performance against different generators

    param:
    model - Keras neural network
    gens - list of Keras ImageDataGenerator

    return:
    None
    '''
    loss_list = []
    auc_list = []
    gen_names = []

    acc = BinaryAccuracy()
    bce = BinaryCrossentropy()

    for gen in gens:
        filename = gen.filenames[0]
        first_index = filename.index('.')
        try:
            second_index = filename.index('.', first_index + 1)
            gen_name = filename[first_index + 1:second_index]

            if (gen_name == 'steg'):
                gen_name = 'Basic LSB'
        except ValueError:  # no second '.' in the filename
            gen_name = 'None'

        gen_names.append(gen_name)

        print(f"Evaluating: {gen_name}")

        predictions = model.predict(gen, verbose=1)
        acc.update_state(gen.labels, predictions)
        loss_list.append((bce(gen.labels,
                              predictions).numpy(), acc.result().numpy()))
        acc.reset_states()

    plt.figure('Model Performance vs LSB Type')

    plt.subplot(1, 2, 1)
    plt.bar(gen_names, [loss[0] for loss in loss_list])
    plt.xlabel('LSB Generator Type')
    plt.ylabel('Binary Crossentropy Loss')

    plt.subplot(1, 2, 2)
    plt.bar(gen_names, [loss[1] for loss in loss_list])
    plt.xlabel('LSB Generator Type')
    plt.ylabel('Accuracy')

    plt.show()
Example #12
def individual_evaluator(individual: MLPIndividual, trn: Proben1Split,
                         tst: Proben1Split, **kwargs):
    """Evaluate an individual.

    :param individual: current individual to evaluate.
    :param trn: training data and labels.
    :param tst: validation data and labels.
    :param multi_class: ``True`` if the dataset is for multiclass
        classification.
    :returns: the fitness values.

    """
    multi_class = kwargs.get("multi_class", False)
    start_time = time.perf_counter()
    units_size_list = [
        layer.config["units"] for layer in individual.layers[:-1]
    ]
    DGPLOGGER.debug(
        f"    Evaluating individual with neuron number: {units_size_list}")
    # Create the model with the individual configuration
    model = Sequential()

    for layer_index, layer in enumerate(individual.layers):
        model.add(Dense.from_config(layer.config))
        model.layers[layer_index].set_weights([layer.weights, layer.bias])

    model.compile(
        optimizer=SGD(learning_rate=0.01),
        loss=CategoricalCrossentropy()
        if multi_class else BinaryCrossentropy(),
    )

    model.fit(trn.X, trn.y_cat, epochs=100, batch_size=16, verbose=0)

    # Predict the scores
    predicted_y = model.predict_classes(tst.X)
    f2_score = fbeta_score(
        tst.y,
        predicted_y,
        beta=2,
        average="micro" if multi_class else "binary",
    )
    error_perc = (1.0 -
                  accuracy_score(tst.y, predicted_y, normalize=True)) * 100
    neuron_layer_score = sum(units_size_list) * len(units_size_list)
    DGPLOGGER.debug(
        f"        error%={error_perc:.2f}\n"
        f"        neuron/layer-score={neuron_layer_score:.2f}\n"
        f"        f2-score={f2_score:.5f}\n"
        f"        evaluation time={time.perf_counter() - start_time: .2f} sec")

    return (error_perc, neuron_layer_score, f2_score)
Example #13
def train(train_sets: tuple,
          test_sets: tuple,
          input_shape: tuple = (1, 128, 128, 1),
          model_version="1.0.0",
          epochs: int = 100,
          classes: int = 2,
          batch_size: int = 1,
          verbose=1,
          out_dir: str = "saved_models"):
    """
    The function to train the model.

    Parameters:
        train_sets (tuple): A tuple of np.array for train images and train labels.
        test_sets (tuple): A tuple of np.array for test images and test labels.
        input_shape (tuple): Input shape of the model. It should be in the form of (1, ..., ...).
        model_version (str): The version of the model in d.d.d format.
        epochs (int): The number of epochs.
        classes (int): The number of classes.
        batch_size (int): The number of batch size.
        verbose (bool): Whether to show the progress of each epoch.
        out_dir (str): The output dir for saving the model in.
    """
    (x_train, y_train), (x_test, y_test) = train_sets, test_sets
    y_train = keras.utils.to_categorical(y_train, classes)
    y_test = keras.utils.to_categorical(y_test, classes)
    m = get_model(model_version)
    if not m:
        return
    model = m.build_model(input_shape)
    model.compile(loss=BinaryCrossentropy(),
                  optimizer=RMSprop(learning_rate=0.0001),
                  metrics=['accuracy'])
    saver = ModelSaver(out_dir)
    csv_logger = CSVLogger(
        "%s/%s/log.csv" %
        (out_dir, datetime.datetime.now().date().strftime("%Y_%m_%d")),
        append=True,
        separator=',')
    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=verbose,
                        validation_data=(x_test, y_test),
                        callbacks=[saver, csv_logger])
    model.save("%s/%s/final.hd5" %
               (out_dir, datetime.datetime.now().date().strftime("%Y_%m_%d")))
    print("Model saved in %s as final.hd5" % out_dir)
    plot_results(history, epochs, out_dir)
Example #14
File: kmlp.py  Project: kqf/lish-moa
def create_model(input_units, output_units, hidden_units=512, lr=1e-3):
    model = Sequential()
    model.add(_dense(hidden_units, input_shape=(input_units, )))
    model.add(_dense(hidden_units // 2))
    model.add(Dropout(rate=0.3))
    model.add(_dense(output_units, activation="sigmoid"))
    model.compile(loss=BinaryCrossentropy(label_smoothing=0.000),
                  optimizer=Adam(
                      learning_rate=lr,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-8,
                      amsgrad=False,
                  ))
    return model
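The _dense helper comes from kmlp.py and is not included in the snippet. A plausible reconstruction, consistent with how it is called above (units first, optional activation and input_shape keywords), might be:

from tensorflow.keras.layers import Dense

def _dense(units, activation="relu", **kwargs):
    # hypothetical reconstruction; the original helper may add more
    # (e.g. batch norm or regularizers)
    return Dense(units, activation=activation, **kwargs)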
Example #15
    def build_discriminator(self):
        """
            Construye el discriminador de la GAN
            Basado en la DCGAN
        """
        input_layer = Input(shape=self.input_shape)
        discriminator = dcgan_discriminator_stem(input_layer,
                                                 self.d_initial_filters)
        discriminator = dcgan_discriminator_learner(discriminator,
                                                    self.d_params)
        discriminator = dcgan_discriminator_task(discriminator)
        d_model = Model(input_layer, discriminator)
        d_model.compile(loss=BinaryCrossentropy(label_smoothing=0.1),
                        optimizer=Adam(self.lr, 0.5),
                        metrics=['accuracy'])

        return d_model
Example #16
def modelBuilder(hp):
    """
    Assign input and output tensors, build neural net and compile model
    :param hp: hyperparameters, argument needed to call this function from evaluateBestParams function
               (see also https://keras.io/api/keras_tuner/hyperparameters/)
    :return: model, compiled neural net model
    """
    ## keras model
    u = Input(shape=(1, ))
    s = Input(shape=(1, ))
    u_embedding = Embedding(N, K)(u)  ## (N, 1, K)
    s_embedding = Embedding(M, K)(s)  ## (N, 1, K)
    u_embedding = Flatten()(u_embedding)  ## (N, K)
    s_embedding = Flatten()(s_embedding)  ## (N, K)
    x = Concatenate()([u_embedding, s_embedding])  ## (N, 2K)

    ## Tune the number of units in the first Dense layer
    ## Choose an optimal value between 32-512
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    x = Dense(units=hp_units)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(100)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dense(1, activation="sigmoid")(x)

    ## define model and compile. Use BinaryCrossEntropy for binary classification approach
    model = Model(inputs=[u, s], outputs=x)
    model.compile(
        # the sigmoid output layer already yields probabilities, so the loss
        # must not treat them as logits
        loss=BinaryCrossentropy(),
        optimizer=SGD(learning_rate=0.08, momentum=0.9),
        metrics=[
            AUC(thresholds=[0.0, 0.5, 1.0]),
            BinaryAccuracy(threshold=0.5),
            Precision(),
            Recall()
        ],
    )

    ## print model summary
    # model.summary()

    return model
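As the docstring notes, modelBuilder is meant to be handed to Keras Tuner. A hedged search sketch; N, M, K, the training arrays, and the tuning budget are all assumptions:

import keras_tuner as kt

tuner = kt.Hyperband(modelBuilder,
                     objective=kt.Objective("val_auc", direction="max"),
                     max_epochs=10)
tuner.search([u_train, s_train], y_train,  # hypothetical user/song/label arrays
             validation_split=0.2)
best_model = tuner.get_best_models(num_models=1)[0]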
Example #17
def create_model(depth=5, breadth=20, learning_rate=0.001):
    """
    A model for predicting whether the current player in a game of hex will win.
    Applies a sequence of convolutional layers to the input. Each convolutional unit is average-pooled,
    then a dense layer connects these pools to the output.
    The network is provided four representations of the current state of the board,
    by applying 180 degree rotational symmetry and diagonal reflection + player swapping symmetry.
    The second symmetry is not quite a true symmetry since swapping players also changes who the current player is.
    For this reason, the final dense layer has different weights for the player-swapped inputs, so that this difference
    can be taken into account. The convolutional layers use the same weights in all four cases.
    Output of the network should be interpreted as a logit
    representing the probability that the current player will win.
    Functions for constructing input data for these models can be found in the model_input module.

    depth: number of convolutional layers applied to each of the four representations of the board state.
    breadth: number of units in each convolutional layer.
    learning_rate: learning rate for Adam optimizer.
    """
    input_tensors = [Input(shape=(board_size + 1, board_size + 1, 2), name=input_names[k])
                     for k in itertools.product((0, 1), (False, True))]
    out_components = []
    tensors = input_tensors
    pool = GlobalAveragePooling2D()

    for i in range(depth):
        conv_layer = Conv2D(breadth, 3, padding="same", activation="relu")
        tensors = list(map(conv_layer, tensors))
        dense_layer = Dense(1, kernel_initializer="zeros")
        out_components += [dense_layer(pool(t)) for t in tensors[:2]]
        dense_layer = Dense(1, kernel_initializer="zeros")
        out_components += [dense_layer(pool(t)) for t in tensors[2:]]

    output_tensor = Add(name="winners")(out_components)

    model = Model(input_tensors, [output_tensor])

    optimizer = Adam(learning_rate=learning_rate)

    model.compile(
        loss=BinaryCrossentropy(from_logits=True),
        optimizer=optimizer,
        metrics=[BinaryAccuracy(threshold=0.0)]
    )
    return model
Example #18
def load_pickle_model(location,
                      input_size,
                      activation='relu',
                      hidden_layer_size=32,
                      loss=BinaryCrossentropy(),
                      optimizer='adam',
                      metrics=['accuracy'],
                      learning_rate=0.001):
    """ Inizializza FFNN con iperparametri in input e pesi caricati dal file pickle indicati in location """

    model = create_sequential(input_size, activation, hidden_layer_size, loss,
                              optimizer, metrics, learning_rate)
    with open(location, "rb") as file:
        # read the weight arrays
        weights = pickle.load(file)
        # load the weights into the model
        model.set_weights(weights)

    return model
Example #19
    def build_gan(self):
        """
            Construye la GAN. Compuesta por generador y discriminador
        """
        self.d_model.trainable = False

        noise_input = Input((self.latent_dim, ))

        # Layer that produces the synthetic images
        gen_img = self.g_model(noise_input)

        # The discriminator receives the images
        discriminator = self.d_model(gen_img)

        gan_model = Model(noise_input, discriminator)
        gan_model.compile(loss=BinaryCrossentropy(),
                          optimizer=Adam(self.lr, 0.5),
                          metrics=['accuracy'])
        return gan_model
Example #20
def create_model(learning_rate,
                 metrics,
                 feature_columns,
                 regularization=False):
    model = Sequential()

    model.add(DenseFeatures(feature_columns))
    model.add(Dense(units=100, activation='relu', name='Hidden1'))
    if (regularization):
        model.add(Dropout(rate=0.05, name='Hidden2'))
    model.add(Dense(units=100, activation='relu', name='Hidden3'))
    model.add(Dense(units=1, activation='sigmoid', name='Output'))

    optimizer = Adam(learning_rate=learning_rate)
    loss = BinaryCrossentropy(reduction=Reduction.NONE)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    return model

print("Defined the create_model function.")
Example #21
def assignModel(N, M, K):
    """
    Assign input and output tensors, build neural net and compile model
    :param N: integer, number of users
    :param M: integer, number of songs
    :param K: integer, latent dimensionality
    :return: model, compiled neural net model
    """
    ## keras model
    u = Input(shape=(1,))
    s = Input(shape=(1,))
    u_embedding = Embedding(N, K)(u)   ## (N, 1, K)
    s_embedding = Embedding(M, K)(s)   ## (N, 1, K)
    u_embedding = Flatten()(u_embedding)  ## (N, K)
    s_embedding = Flatten()(s_embedding)  ## (N, K)
    x = Concatenate()([u_embedding, s_embedding])  ## (N, 2K)

    ## the neural network (use sigmoid activation function in output layer for binary classification)
    x = Dense(400)(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(100)(x)
    x = BatchNormalization()(x)
    # x = Activation('sigmoid')(x)
    x = Dense(1, activation="sigmoid")(x)

    ## define model and compile. Use BinaryCrossEntropy for binary classification approach
    model = Model(inputs=[u, s], outputs=x)
    model.compile(
      # the sigmoid output layer already yields probabilities, so the loss
      # must not treat them as logits
      loss=BinaryCrossentropy(),
      optimizer=SGD(learning_rate=0.08, momentum=0.9),
      metrics=[AUC(thresholds=[0.0, 0.5, 1.0]),
               BinaryAccuracy(threshold=0.5),
               Precision(),
               Recall()],
    )

    return model
Example #22
def loss(y_true, y_pred):
    return BinaryCrossentropy()(y_true, y_pred) + dice_coef_loss(y_true, y_pred)
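dice_coef_loss is not part of the snippet. A common soft-Dice formulation that satisfies this signature (the smoothing constant is an assumption; the project's actual version may differ):

from tensorflow.keras import backend as K

def dice_coef_loss(y_true, y_pred, smooth=1.0):
    # common soft-Dice formulation; hypothetical reconstruction
    y_true_f = K.flatten(K.cast(y_true, 'float32'))
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    dice = (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return 1.0 - dice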
Example #23
                          conf.kernel_size,
                          strides=2,
                          activation='tanh',
                          padding='same',
                          use_bias=False)(gen)

    model = Model([noise_start, noise_end, noise, in_label], gen)
    # model = Model([noise_start, noise_end], gen)
    # model = Model(noise, gen)

    model.summary()
    return model


# Helper to compute the cross-entropy loss for both networks
cross_entropy = BinaryCrossentropy(from_logits=True)


def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)


def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss


discriminator = define_discriminator()
generator = define_generator()
Example #24
def classify(input_size):
    plot_callback = PlotLearning()
    datasets = [{'train': pd.read_csv(f'{ten_fold_data_path}{i + 1}_train.csv'),
                 'test': pd.read_csv(f'{ten_fold_data_path}{i + 1}_test.csv')} for i in range(10)]
    metrics = {}
    bce = BinaryCrossentropy()

    progress = 0
    # NOTE: the loops below slice each hyperparameter list with [:1], so this
    # total overestimates the number of models actually trained
    tot_models = len(model_configs) * len(hyperparameters['lr']) * len(hyperparameters['epochs']) * len(
        hyperparameters['optimizer']) * 10
    start = time()
    now = time() - start
    print(f'{round(100 * progress / tot_models, 2)}% Time spent: {timedelta(seconds=round(now))}')
    for lr in hyperparameters['lr'][:1]:
        for epochs in hyperparameters['epochs'][:1]:
            for optimizer in hyperparameters['optimizer'][:1]:
                models = create_models(input_size)

                for j, model in enumerate(models):
                    accs = []
                    precs = []
                    recs = []
                    aucs = []
                    losses = []
                    for i, d in enumerate(datasets):
                        y_col = d['train'].columns[0]
                        train_X = d['train'].drop(columns=y_col)
                        train_y = d['train'][y_col]
                        test_X = d['test'].drop(columns=y_col)
                        test_y = d['test'][y_col]
                        assert input_size == len(train_X.columns)

                        model.compile(optimizer(lr), loss='binary_crossentropy',
                                      metrics=['accuracy'])

                        history = model.fit(train_X, train_y, batch_size=5, epochs=epochs, verbose=0)
                        progress += 1
                        clear_output()
                        now = time() - start
                        print(f'{round(100 * progress / tot_models, 2)}% Time spent: {timedelta(seconds=round(now))}')

                        classification = [0 if p < 0.5 else 1 for p in model.predict(test_X)]

                        accs.append(accuracy_score(test_y, classification))
                        precs.append(precision_score(test_y, classification))
                        recs.append(recall_score(test_y, classification))
                        aucs.append(roc_auc_score(test_y, classification))
                        losses.append(float(bce(test_y, classification).numpy()))

                    configuration_name = f'{model.name} LR{lr} E{epochs} {optimizer.__name__}'
                    metrics[configuration_name] = {}
                    metrics[configuration_name]['lr'] = lr
                    metrics[configuration_name]['epochs'] = epochs
                    metrics[configuration_name]['optimizer'] = optimizer.__name__
                    metrics[configuration_name]['hidden layers'] = model_configs[j]['hidden']
                    metrics[configuration_name]['density factor'] = model_configs[j]['neuron_numerosity']
                    metrics[configuration_name]['activation'] = model_configs[j]['activation'].__name__

                    metrics[configuration_name]['accuracy'] = (mean(accs), stdev(accs))
                    metrics[configuration_name]['precision'] = (mean(precs), stdev(precs))
                    metrics[configuration_name]['recall'] = (mean(recs), stdev(recs))
                    metrics[configuration_name]['roc_auc'] = (mean(aucs), stdev(aucs))
                    metrics[configuration_name]['loss'] = (mean(losses), stdev(losses))

    return metrics
Example #25
 channels = 3
 n_frames = 30
 ppf.frames_per_video = n_frames
 print("4")
 saved_model_path = "weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
 checkpoint = ModelCheckpoint(saved_model_path,
                              monitor="val_accuracy",
                              verbose=1,
                              save_best_only=True)
 earlystop = EarlyStopping(monitor="val_accuracy",
                           min_delta=0.01,
                           patience=5,
                           restore_best_weights=True)
 callbacks_list = [checkpoint, earlystop]
 optimizer = Adam()
 binloss = BinaryCrossentropy()
 acc = Accuracy()
 print("5")
 # # # Run the training job
 try:
     zipfiles = dppvm.extract_zips(dppvm.zip_down_dir)
     print("download succeeded")
 except Exception:
     print("download failed")
 DATA = Path("download")
 DEST = dppvm.DEST
 print("6")
 logging.basicConfig(filename='extract.log', level=logging.INFO)
 zipfiles = sorted(list(DATA.glob('dfdc_train_part_*.zip')),
                   key=lambda x: x.stem)
 # Extract the zip files
Example #26
valid_y = []
for i in range(len(x_test)):
    valid_x.append(to_categorical(x_test[i], num_classes=args.X_shape))
    valid_y.append(y_test[i])
valid_x = np.asarray(valid_x)
valid_y = np.asarray(valid_y)
print('negative rate: ', 1-(y_test.sum() / len(y_test)))
valid_x = np.expand_dims(valid_x, -1)


# Model training
my_gcn = MYGCN(A, args.X_shape, [args.hidden_dim_1, args.hidden_dim_2])
my_gcn.build(input_shape=[args.X_shape, 1])
tb = TensorBoard()

my_gcn.compile(optimizer=Adam(learning_rate=args.lr), loss=BinaryCrossentropy(), metrics=['accuracy'])
hist = my_gcn.fit(x=train_x, y=train_y, batch_size=100, validation_data=(valid_x, valid_y), shuffle=True, validation_freq=1, epochs=epoch_num, callbacks=[tb])


# plot train process

import matplotlib.pyplot as plt

def plot_train(history):
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend(['Train', 'valid'], loc='upper left')
    plt.savefig(args.logs_dir + "loss_{}.png".format(os.getpid()))
Example #27
class GoftNet:
    _OPTIMIZERS = {'adam': Adam, 'SGD': SGD, 'RMSprop': RMSprop}

    _LOSS_FUNCTIONS = {

        # TODO Why does categorical crossentropy give me a constant zero loss??
        'categorical_crossentropy': CategoricalCrossentropy(),
        'binary_crossentropy': BinaryCrossentropy(),
        'mse': MSE,
    }

    def __init__(self, config):

        self._num_classes = config['data']['num_classes']
        self._class_labels = config['data']['labels']
        self._class_labels_dict = {
            i: self._class_labels[i]
            for i in range(len(self._class_labels))
        }
        self._input_dim = config['model']['input_dim']

        # Feature Extractor Blocks
        # Assume the user didn't mess around with these arguments...
        self._num_features = config['model']['num_features']
        self._kernel_shapes = config['model']['kernel_shapes']
        self._num_conv_layers = config['model']['num_conv_layers']

        # Classifier Layers
        self._units = config['model']['units']
        self._last_layer_activation = config['model'].get(
            'last_layer_activation', None)

        # Training parameters.
        self._optimizer = config['train']['optimizer']
        self._loss_function = config['train']['loss_function']
        self._epochs = config['train']['epochs']

        # General
        self._output_dir = config['general'][
            'output_dir']  # Here Tensorboard logs will be written.
        self._summary = True

        # Eval
        self._model_path = config['eval']['model_path']

        # define paths
        timestamp = str(int(time()))
        self.model_dir_path = os.path.join(self._output_dir, timestamp)
        self.model_path = os.path.join(self.model_dir_path, 'model.h5')
        self.log_dir_path = os.path.join(self._output_dir, timestamp, 'logs')

        os.makedirs(self.model_dir_path, exist_ok=True)
        os.makedirs(self.log_dir_path, exist_ok=True)

        self._create_model()

    def _create_block(self,
                      num_features,
                      kernel_shape,
                      number_conv_layers,
                      first_layer,
                      last_layer,
                      padding='same'):

        for _ in range(number_conv_layers):

            if first_layer:
                self._model.add(
                    Conv2D(num_features,
                           kernel_shape,
                           padding=padding,
                           input_shape=self._input_dim))
            else:
                self._model.add(
                    Conv2D(num_features, kernel_shape, padding=padding))

            self._model.add(BatchNormalization())
            # self._model.add(Dropout(0.3))
            self._model.add(Activation('relu'))

        if not last_layer:
            self._model.add(MaxPooling2D(pool_size=(2, 2)))

    def _get_optimizer(self):
        name = self._optimizer['name']
        opt_params = self._optimizer['params']

        return self._OPTIMIZERS[name](**opt_params)

    def _compile(self):

        optimizer = self._get_optimizer()
        loss_function = self._LOSS_FUNCTIONS[self._loss_function]

        self._model.compile(loss=loss_function,
                            optimizer=optimizer,
                            metrics=['accuracy'])

    def _create_model(self, print_color='yellow'):

        print(colored('###################################', print_color))
        print(colored('######### CREATING MODEL #########', print_color))
        print(colored('###################################', print_color))

        # build the CNN architecture with Keras Sequential API
        self._model = Sequential()

        # ---------------------------------------- #
        # --------- DEFINE F.E BLOCKS ------------ #
        # ---------------------------------------- #
        for i, (num_features, kernel_shape, num_conv_layers) in enumerate(
                zip(self._num_features, self._kernel_shapes,
                    self._num_conv_layers)):
            if i == 0:  # First Layer. Need to define input layer
                first_layer, last_layer = True, False

            elif i == len(
                    self._num_features) - 1:  # Last Layer. No max pooling.
                first_layer, last_layer = False, True

            else:
                first_layer, last_layer = False, False

            self._create_block(num_features,
                               kernel_shape,
                               num_conv_layers,
                               first_layer=first_layer,
                               last_layer=last_layer)

        # ---------------------------------------- #
        # ----- DEFINE CLASSIFIER BLOCKS --------- #
        # ---------------------------------------- #
        self._model.add(Flatten())

        for units in self._units:
            self._model.add(Dense(units))
            self._model.add(Activation('relu'))

        self._model.add(Dense(self._num_classes))
        if self._last_layer_activation is not None:
            # If the last-layer activation is None, the loss will be calculated directly on logits.
            self._model.add(Activation(self._last_layer_activation))

        # Compile the model with chosen optimizer.
        self._compile()

        # Summary
        if self._summary:
            self._model.summary()

    def train(self, train_data, val_data):

        callbacks = [
            ModelCheckpoint(filepath=self.model_path,
                            monitor='val_accuracy',
                            mode='max',
                            save_best_only=True),
            TensorBoard(log_dir=self.log_dir_path)
        ]

        train_log = self._model.fit(train_data,
                                    validation_data=val_data,
                                    epochs=self._epochs,
                                    callbacks=callbacks,
                                    verbose=1)

        self.plot_log(train_log=train_log, model_dir_path=self.model_dir_path)

    def load_model(self, ):
        self._model = load_model(self._model_path)
        self._compile()

    def inference_on_data(self, test_data):
        results = self._model.predict(test_data, verbose=1)
        results = [
            np.eye(self._num_classes)[np.argmax(res)] for res in results
        ]  # Turn results to one hot.
        return results

    def print_metrics(self, y_pred, y_test):

        y_pred_labels = [
            self._class_labels_dict[class_num]
            for class_num in np.argmax(y_pred, axis=1)
        ]
        y_test_labels = [
            self._class_labels_dict[class_num]
            for class_num in np.argmax(y_test, axis=1)
        ]

        cm = confusion_matrix(y_test_labels,
                              y_pred_labels,
                              labels=np.unique(y_test_labels))
        cm = pd.DataFrame(cm,
                          index=np.unique(y_test_labels),
                          columns=np.unique(y_test_labels))

        report = classification_report(y_test, y_pred)

        print(
            colored("\n===================================================",
                    'yellow'))
        print(
            colored("============== CLASSIFICATION REPORT ==============",
                    'yellow'))
        print(
            colored("===================================================",
                    'yellow'))
        print(colored(report + '\n', 'yellow'))
        print(colored(cm, 'yellow'))

    @staticmethod
    def plot_log(train_log, model_dir_path):

        # Plot training & validation accuracy values
        f = plt.figure(1)
        plt.plot(train_log.history['accuracy'])
        plt.plot(train_log.history['val_accuracy'])
        plt.title('Model accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Val'], loc='upper left')
        f.savefig(os.path.join(model_dir_path, 'acc.png'))

        # Plot training & validation loss values
        g = plt.figure(2)
        plt.plot(train_log.history['loss'])
        plt.plot(train_log.history['val_loss'])
        plt.title('Model loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Val'], loc='upper left')
        g.savefig(os.path.join(model_dir_path, 'loss.png'))
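GoftNet is driven entirely by a nested config dict. An illustrative minimal config (every value here is an assumption, chosen only to satisfy the keys the class reads):

config = {
    'data': {'num_classes': 2, 'labels': ['cat', 'dog']},
    'model': {
        'input_dim': (64, 64, 3),
        'num_features': [32, 64],          # one entry per F.E. block
        'kernel_shapes': [(3, 3), (3, 3)],
        'num_conv_layers': [2, 2],
        'units': [128],
        'last_layer_activation': 'softmax',
    },
    'train': {
        'optimizer': {'name': 'adam', 'params': {'learning_rate': 1e-3}},
        'loss_function': 'categorical_crossentropy',
        'epochs': 10,
    },
    'general': {'output_dir': 'runs'},
    'eval': {'model_path': 'runs/model.h5'},
}
net = GoftNet(config)  # builds, compiles, and summarizes the CNN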
Example #28
    def fit(self, trainData, trainLabels):
        trainData = np.array(trainData)
        trainLabels = np.array(trainLabels)

        # split training set into training and validation sets
        from sklearn.model_selection import train_test_split
        trainData, valData, trainLabels, valLabels = train_test_split(
            trainData, trainLabels, test_size=self.valSplit, random_state=42)

        self.vec = Vectorizer(mode=self.vecMode,
                              maxFeatures=self.maxVecFeatures)
        self.vec.fit(trainData)

        #print(self.vec.transform([trainData[0]])[0].shape[1])
        numFeatures = len(self.vec.vec.vocabulary_.keys())
        print('Vectorizer fit complete with', numFeatures,
              'features in each vector')

        trainDataGenerator = DataLoader(data=trainData,
                                        labels=trainLabels,
                                        vec=self.vec,
                                        batchSize=self.batchSize)

        valDataGenerator = DataLoader(data=valData,
                                      labels=valLabels,
                                      vec=self.vec,
                                      batchSize=self.batchSize)

        # the softmax output layer yields probabilities, not logits, so
        # from_logits is not appropriate here
        loss = BinaryCrossentropy()
        self.model = Sequential()
        self.model.add(
            Dense(self.modelN * self.modelNFactor * 2,
                  input_shape=(numFeatures, ),
                  activation='relu'))
        self.model.add(
            Dense(self.modelN * self.modelNFactor, activation='relu'))
        self.model.add(Dense(self.modelN, activation='relu'))
        self.model.add(Dense(2, activation='softmax'))
        self.model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
        if (self.showModelSummary):
            self.model.summary()
        callbacks = [
            ModelCheckpoint("model.h5", verbose=1, save_best_model=True),
            ReduceLROnPlateau(monitor="val_loss",
                              patience=3,
                              factor=0.1,
                              verbose=1,
                              min_lr=1e-6),
            EarlyStopping(monitor="val_loss", patience=5, verbose=1)
        ]

        print('Shape of the training dataset: (%i,%i)' %
              (len(trainData), numFeatures))

        print(
            'Training and validation data loaders initialized with batch size:',
            self.batchSize)
        results = self.model.fit(trainDataGenerator,
                                 validation_data=valDataGenerator,
                                 workers=8,
                                 callbacks=callbacks,
                                 epochs=self.epochs)

        return (results)
Example #29
DEFAULT_DISCRIMINATOR = {
    # Architecture
    'input_shape': (2, ),
    'h1_size': 16,
    'h1_activation': LeakyReLU(alpha=0.2),
    'h2_size': 16,
    'h2_activation': LeakyReLU(alpha=0.2),
    'h3_size': 2,
    'h3_activation': 'linear',
    'output': 1,
    'output_activation': 'linear',

    # Optimizer/loss
    'optimizer': 'rmsprop',
    'loss': BinaryCrossentropy(from_logits=True)
}


class PVDGenerator:
    def __init__(self, model_params: dict = DEFAULT_GENERATOR):
        """Generator neural network.

        Arguments:
            model_params: A dictionary containing the model configuration. See the defaults above
                for format.
        """
        self.mp: dict = model_params
        self.model: Model = self.initialize_model()

    def initialize_model(self) -> Model:
Example #30
                 activation='tanh'),
        L.GlobalMaxPool1D(),
        L.Dense(256, activation='relu'),
        L.Dense(1, activation='sigmoid')
    ])
    return cnn


class EvalCallback(Callback):
    def __init__(self):
        super(EvalCallback, self).__init__()
        self.acc = 0

    def on_epoch_end(self, epoch, logs=None):
        if logs['val_accuracy'] > self.acc:
            self.acc = logs['val_accuracy']
            self.model.save_weights('best-weights.weights')


if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=NUM_WORDS)
    cnn = build_cnn_model()
    cnn.summary()
    cnn.compile(loss=BinaryCrossentropy(),
                optimizer=Adam(learning_rate),
                metrics=['accuracy', Recall()])
    cnn.fit(IMDBDataLoader(x_train, y_train, max_len=MAX_LEN),
            validation_data=IMDBDataLoader(x_test, y_test, max_len=MAX_LEN),
            epochs=EPOCH,
            callbacks=[EvalCallback(), TensorBoard()])