Example #1
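These snippets come from different projects and assume TensorFlow 2.x with the Keras submodules already imported. A minimal preamble along these lines (an assumption, not part of any original source) makes most of them runnable:

import tensorflow as tf
from tensorflow.keras import (activations, callbacks, layers, losses,
                              metrics, models, optimizers, regularizers)
from tensorflow.keras.losses import Reduction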
def _discriminator_loss(discriminator_real_output,
                        discriminator_fake_output):
    real_loss = losses.BinaryCrossentropy(from_logits=True)(
        tf.ones_like(discriminator_real_output), discriminator_real_output)
    fake_loss = losses.BinaryCrossentropy(from_logits=True)(
        tf.zeros_like(discriminator_fake_output),
        discriminator_fake_output)
    return real_loss + fake_loss
Example #2
def discriminator_loss(real, generated):
    real_loss = losses.BinaryCrossentropy(from_logits=True,
                                          reduction=losses.Reduction.NONE)(
                                              tf.ones_like(real), real)
    generated_loss = losses.BinaryCrossentropy(
        from_logits=True,
        reduction=losses.Reduction.NONE)(tf.zeros_like(generated), generated)
    total_disc_loss = real_loss + generated_loss
    return total_disc_loss * 0.5
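Either variant slots into a standard TF2 GradientTape training step. A minimal sketch using `_discriminator_loss` from Example #1; `generator`, `discriminator`, and `disc_optimizer` are hypothetical stand-ins, and `from_logits=True` assumes the discriminator's final layer emits raw logits (no sigmoid):

@tf.function
def train_discriminator_step(real_images, noise):
    with tf.GradientTape() as tape:
        fake_images = generator(noise, training=True)
        real_output = discriminator(real_images, training=True)
        fake_output = discriminator(fake_images, training=True)
        loss = _discriminator_loss(real_output, fake_output)
    # update only the discriminator's weights
    grads = tape.gradient(loss, discriminator.trainable_variables)
    disc_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))
    return loss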
Example #3
def get_vgg16_model(trainable=False,
                    num_nodes=NUM_NODES,
                    dropout_rate=DROPOUT_RATE,
                    learning_rate=LEARNING_RATE,
                    print_summary=False):
    """
    Builds and compiles VGG16 transfer learning model.
    """
    transfer_model = VGG16(
        input_shape=INPUT_SHAPE,
        include_top=False,  # leave out the last fully connected layer
        weights='imagenet')
    for layer in transfer_model.layers:
        layer.trainable = trainable
    hidden_layers = []
    hidden_layers.append(layers.Flatten()(transfer_model.output))
    hidden_layers.append(
        layers.Dense(num_nodes, activation="relu")(hidden_layers[-1]))
    hidden_layers.append(layers.Dropout(dropout_rate)(hidden_layers[-1]))
    output_layer = layers.Dense(1, activation="sigmoid")(hidden_layers[-1])
    model5 = Model(transfer_model.input, output_layer)
    model5.compile(loss=losses.BinaryCrossentropy(),
                   optimizer=optimizers.Adam(learning_rate=learning_rate),
                   metrics=METRICS)
    if print_summary:
        model5.summary()
    return model5
Example #4
def get_compiled_model():
    model = drcn.DRCN()
    model.compile(
        optimizer=optimizers.Adam(),
        loss=[losses.BinaryCrossentropy(),
              losses.MeanSquaredError()])
    return model
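With a list of losses, Keras matches them positionally, one per model output. Assuming `drcn.DRCN` follows the usual Deep Reconstruction-Classification Network layout (a classification head plus a head reconstructing the input; an assumption about the external class), a fit call would look like this, with `x` and `y_class` as hypothetical training tensors:

model = get_compiled_model()
# classification labels go to BinaryCrossentropy, and the input itself
# is the target for the MeanSquaredError reconstruction head
model.fit(x, [y_class, x], epochs=5)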
Example #5
def autoencoder_model():
    input_img = layers.Input(shape=(28, 28, 1))

    # Encoder: 28x28x1 -> 14x14x32 -> 7x7x32
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
    x = layers.MaxPooling2D((2, 2), padding='same')(x)
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    encoded = layers.MaxPooling2D((2, 2), padding='same')(x)

    # Decoder: 7x7x32 -> 14x14x32 -> 28x28x1
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
    x = layers.UpSampling2D((2, 2))(x)
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = layers.UpSampling2D((2, 2))(x)
    decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    autoencoder = models.Model(input_img, decoded)
    autoencoder.compile(optimizer=optimizers.Adam(learning_rate=0.002),
                        metrics=[metrics.BinaryAccuracy()],
                        loss=losses.BinaryCrossentropy())

    return autoencoder
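A usage sketch: since this is a plain reconstruction autoencoder, the input doubles as the target. MNIST is assumed here purely for illustration (its 28x28x1 images match the input layer):

(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype("float32")[..., None] / 255.0  # (N, 28, 28, 1)
x_test = x_test.astype("float32")[..., None] / 255.0

autoencoder = autoencoder_model()
autoencoder.fit(x_train, x_train,
                epochs=5,
                batch_size=128,
                validation_data=(x_test, x_test))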
Example #6
def generate_clf(X_train,
                 y_train,
                 X_test,
                 y_test,
                 theme_base,
                 epochs,
                 batch,
                 seed,
                 dropout):
    clf = pred_models.NewTownClassifier(theme_base=theme_base, seed=seed, dropout=dropout)
    clf.compile(optimizer='adam',
                loss=losses.BinaryCrossentropy(),
                metrics=['binary_accuracy'])
    # prepare path and check for previously trained model
    dir_path = pathlib.Path(weights_path / f'predictive_weights/{clf.theme}')
    if not dir_path.exists():
        # prepare callbacks
        callbacks = [
            TensorBoard(
                log_dir=str(
                    logs_path / f'{datetime.now().strftime("%Hh%Mm%Ss")}_{clf.theme}'),
                histogram_freq=1,
                write_graph=True,
                write_images=True,
                update_freq='epoch',
                profile_batch=2,
                embeddings_freq=0,
                embeddings_metadata=None),
            ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.1,
                patience=5,
                verbose=1,
                mode='auto',
                min_delta=0.0001,
                cooldown=0,
                min_lr=0),
            TerminateOnNaN(),
            ModelCheckpoint(
                str(dir_path / 'weights'),
                monitor='val_loss',
                verbose=1,
                save_best_only=True,
                save_weights_only=True,
                mode='auto',
                save_freq='epoch')
        ]
        # train
        clf.fit(x=X_train,
                y=y_train,
                batch_size=batch,
                epochs=epochs,
                verbose=1,
                validation_data=(X_test, y_test),
                shuffle=True,
                callbacks=callbacks)
    else:
        clf.load_weights(str(dir_path / 'weights'))
    return clf
Example #7
def train(opts, model):

    task_opts = opts['task']
    sess_opts = opts['session']

    # Optimizer and criterion.
    opt = optimizers.SGD(learning_rate=0.1)
    crit = losses.BinaryCrossentropy()

    # Accuracy metric.
    train_acc = metrics.CategoricalAccuracy(name='train_acc')

    # Generate batch for selected task.
    src, tgt = Task.generate_batch(task_opts, sess_opts.batch_size)

    for epoch in range(sess_opts.epochs):
        train_acc.reset_states()

        p, loss = train_step(model, src, tgt, crit, opt)
        train_acc(tgt, p)

        if epoch % 500 == 0:
            fig, ax = plt.subplots(2, 1)
            ax[0].imshow(src[0].T)
            ax[1].imshow(p.numpy()[0].T)
            plt.show()

        print(f'Epoch {epoch + 1}, '
              f'Loss: {loss}, '
              f'Accuracy: {train_acc.result() * 100} ')
Example #8
def model_definition(input_shape):
    model = models.Sequential()

    model.add(
        layers.Dense(units=100,
                     input_shape=(input_shape, ),
                     use_bias=True,
                     activation=activations.relu,
                     activity_regularizer=regularizers.l2(0.010)))

    model.add(layers.Dropout(0.50))

    model.add(layers.BatchNormalization())

    model.add(
        layers.Dense(units=30,
                     use_bias=True,
                     activation=activations.relu,
                     activity_regularizer=regularizers.l2(0.010)))

    model.add(layers.Dropout(0.50))

    model.add(
        layers.Dense(units=1,
                     activation=activations.sigmoid,
                     activity_regularizer=regularizers.l2(0.010),
                     use_bias=True))

    model.compile(optimizer=optimizers.SGD(0.001),
                  loss=losses.BinaryCrossentropy(),
                  metrics=[metrics.binary_accuracy])

    return model
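A quick smoke test with synthetic data (the shapes are assumptions; any (n_samples, input_shape) float matrix with 0/1 labels works):

import numpy as np

model = model_definition(input_shape=20)
X = np.random.rand(256, 20).astype("float32")
y = np.random.randint(0, 2, size=(256, 1)).astype("float32")
model.fit(X, y, epochs=3, batch_size=32, validation_split=0.2)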
Example #9
def get_proj3_model(learning_rate=LEARNING_RATE, print_summary=False):
    """
    Builds and compiles our TensorFlow model from Project 3.
    """
    model1 = tf.keras.Sequential()
    model1.add(
        layers.Conv2D(filters=10,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      activation='relu',
                      input_shape=INPUT_SHAPE))
    model1.add(
        layers.Conv2D(filters=10,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      activation='relu'))
    model1.add(layers.Flatten())
    model1.add(layers.Dense(64, activation="relu"))
    model1.add(layers.Dense(1, activation="sigmoid"))
    model1.compile(loss=losses.BinaryCrossentropy(),
                   optimizer=optimizers.Adam(learning_rate=learning_rate),
                   metrics=METRICS)
    if print_summary:
        model1.summary()
    return model1
Example #10
    def __init__(self, args, config_path, database, network_cls):
        self.config = parse_config(config_path)
        config = self.config
        self.args = args
        self.database = database
        self.network_cls = network_cls

        # Initialize an optimizer and a loss function
        if self.config["clip_norm"] == 0.0:
            opt_dict = {"learning_rate": self.config["lr"]}
        elif self.config["clip_norm"] > 0.0:
            opt_dict = {"learning_rate": self.config["lr"],
                        "clipnorm": self.config["clip_norm"]}
        else:
            raise ValueError(
                "clip_norm must be 0.0 (no clipping) or a positive float")

        self.optimizer = getattr(optimizers, args.optimizer)(**opt_dict)

        if self.args.binary:
            self.loss_func = losses.BinaryCrossentropy(from_logits=True)
        else:
            self.loss_func = losses.SparseCategoricalCrossentropy(
                from_logits=True)

        self.ckpt_cb = tf.keras.callbacks.ModelCheckpoint(
            os.path.join(config["model_path"], config["log_path"],
                         config["ckpt_path"], f"{args.encoder_name}.ckpt"))
        self.csv_log_cb = tf.keras.callbacks.CSVLogger(
            os.path.join(config["model_path"], config["log_path"],
                         config["csv_path"], "log.csv"))
        self.tb_cb = tf.keras.callbacks.TensorBoard(
            os.path.join(config["model_path"], config["log_path"],
                         config["tb_path"]))
Example #11
    def __init__(self, X, metrics, dropout=True, rate=0.25):
        self.model = models.Sequential()
        self.model.add(layers.Flatten(input_shape=X[0].shape))
        self.model.add(layers.Dense(300, activation='relu'))
        self.model.add(layers.Dense(100, activation='relu'))
        self.model.add(layers.Dense(1, activation='sigmoid'))
        self.es = tf.keras.callbacks.EarlyStopping(monitor='val_auc',
                                                   patience=10,
                                                   mode='max',
                                                   restore_best_weights=True)

        self.model.compile(tf.keras.optimizers.Adam(learning_rate=1e-4),
                           loss=losses.BinaryCrossentropy(),
                           metrics=metrics)
Example #12
    def get_loss_fn(self, **kwargs):
        """
        Define a sigmoid cross entropy loss
        Additionally can pass in record positions to handle positional bias

        Returns
        -------
        function
            Function to compute sigmoid cross entropy loss

        Notes
        -----
            Uses `mask` field to exclude padded records from contributing
            to the loss
        """
        bce = losses.BinaryCrossentropy(
            reduction=Reduction.SUM_OVER_BATCH_SIZE)
        mask = kwargs.get("mask")

        def _loss_fn(y_true, y_pred):
            # Mask the predictions to ignore padded records
            y_true = tf.gather_nd(y_true,
                                  tf.where(tf.equal(mask, tf.constant(1.0))))
            y_pred = tf.gather_nd(y_pred,
                                  tf.where(tf.equal(mask, tf.constant(1.0))))

            return bce(y_true, y_pred)

        return _loss_fn
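A toy invocation of the returned loss; `loss_provider` is a hypothetical object exposing `get_loss_fn`, and real usage would pass the batch's padding mask. The same masking pattern recurs in the ListNet variants further down:

mask = tf.constant([1.0, 1.0, 0.0])             # third record is padding
loss_fn = loss_provider.get_loss_fn(mask=mask)
y_true = tf.constant([1.0, 0.0, 0.0])
y_pred = tf.constant([0.9, 0.2, 0.7])
print(loss_fn(y_true, y_pred))                  # computed over the first two records only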
Example #13
def get_model(input_shape=(800, 12)):
    input1 = tf.keras.Input(shape=input_shape)
    input2 = tf.keras.Input(shape=(2,))  # input shape spec; input1 is (time axis, 12 leads)
    # block1

    C = Conv1D(filters=32, kernel_size=5, strides=1)(input1)

    C11 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(C)
    A11 = Activation("relu")(C11)
    C12 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(A11)
    S11 = Add()([C12, C])
    A12 = Activation("relu")(S11)
    M11 = MaxPooling1D(pool_size=5, strides=2)(A12)

    C21 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(M11)
    A21 = Activation("relu")(C21)
    C22 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(A21)
    S21 = Add()([C22, M11])
    A22 = Activation("relu")(S21)
    M21 = MaxPooling1D(pool_size=5, strides=2)(A22)

    C31 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(M21)
    A31 = Activation("relu")(C31)
    C32 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(A31)
    S31 = Add()([C32, M21])
    A32 = Activation("relu")(S31)
    M31 = MaxPooling1D(pool_size=5, strides=2)(A32)

    C41 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(M31)
    A41 = Activation("relu")(C41)
    C42 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(A41)
    S41 = Add()([C42, M31])
    A42 = Activation("relu")(S41)
    M41 = MaxPooling1D(pool_size=5, strides=2)(A42)

    C51 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(M41)
    A51 = Activation("relu")(C51)
    C52 = Conv1D(filters=32, kernel_size=5, strides=1, padding='same')(A51)
    S51 = Add()([C52, M41])
    A52 = Activation("relu")(S51)
    M51 = MaxPooling1D(pool_size=5, strides=2)(A52)

    x = tf.keras.layers.GlobalAveragePooling1D()(M51)

    y = tf.keras.layers.Dense(2)(input2)
    y = tf.keras.layers.Activation("relu")(y)

    combined = tf.keras.layers.concatenate([x, y])
    z = Dense(32, activation="tanh")(combined)
    z = Dense(1, activation="sigmoid")(z)

    model = core_model.SAMModel(inputs=[input1, input2], outputs=z)

    opt = Adam(learning_rate=1e-3)
    opt = tfa.optimizers.SWA(opt)
    model.compile(loss=losses.BinaryCrossentropy(label_smoothing=0.001),
                  metrics=['AUC'],
                  optimizer=opt)

    return model
Example #14
def train_test(epochs=20):
    db_train, db_test = f_get_data()
    model1 = MyRNN(units1=64)  # units1=64 is the state-vector length
    model1.compile(optimizer=optimizers.Adam(0.001), loss=losses.BinaryCrossentropy(), metrics=['accuracy'])
    model1.fit(db_train, epochs=epochs, validation_data=db_test)
    result_test = model1.evaluate(db_test)
    print(result_test)
Example #15
    def __init__(self, config: Config):
        self.config = config
        self.strategy = tf.distribute.get_strategy()

        self.generator_optimizer = optimizers.Adam(
            config.generator_lr, config.generator_beta1)
        self.discriminator_optimizer = optimizers.Adam(
            config.discriminator_lr, config.discriminator_beta1)

        self.generator = build_generator(config)
        self.discriminator = build_discriminator(config)

        self.loss_object = losses.BinaryCrossentropy(
            from_logits=True,
            reduction=losses.Reduction.NONE
        )

        self.checkpoint_prefix = config.output_dir / 'checkpoints' / 'ckpt'
        self.checkpoint = tf.train.Checkpoint(
            generator_optimizer=self.generator_optimizer,
            discriminator_optimizer=self.discriminator_optimizer,
            generator=self.generator,
            discriminator=self.discriminator
        )

        self.loss_gs = []
        self.loss_ds = []
        self.d_reals = []
        self.d_fakes = []
        self.images = []
Example #16
    def get_loss_fn(self, **kwargs):
        """
        Define a masked rank 1 ListNet loss
        Additionally can pass in record positions to handle positional bias

        Returns
        -------
        function
            Function to compute top 1 listnet loss

        Notes
        -----
            Uses `mask` field to exclude padded records from contributing
            to the loss
        """
        bce = losses.BinaryCrossentropy(reduction=Reduction.SUM)
        mask = kwargs.get("mask")

        def _loss_fn(y_true, y_pred):
            batch_size = tf.cast(tf.shape(y_true)[0], tf.float32)

            # Mask the padded records
            y_true = tf.gather_nd(y_true,
                                  tf.where(tf.equal(mask, tf.constant(1.0))))
            y_pred = tf.gather_nd(y_pred,
                                  tf.where(tf.equal(mask, tf.constant(1.0))))

            # Reshape the tensors
            y_true = tf.expand_dims(tf.squeeze(y_true), axis=-1)
            y_pred = tf.expand_dims(tf.squeeze(y_pred), axis=-1)

            return tf.math.divide(bce(y_true, y_pred), batch_size)

        return _loss_fn
Example #17
File: ff_models.py Project: KorfLab/genDL
    def build(self):
        model = tf.keras.Sequential()
        model.add(layers.Flatten(input_shape=(42, 4)))

        for i in range(self.layers):
            if self.reg:
                model.add(
                    layers.Dense(self.sizes[i],
                                 activation='elu',
                                 kernel_regularizer=regularizers.l2(
                                     self.reg[i])))
            else:
                model.add(layers.Dense(self.sizes[i], activation='elu'))

            if self.dropout:
                model.add(layers.Dropout(self.dropout[i]))

        model.add(layers.Dense(1, activation='sigmoid'))

        model.compile(optimizer=optimizers.Adam(learning_rate=self.lr),
                      loss=losses.BinaryCrossentropy(),
                      metrics=[
                          'binary_accuracy',
                          metrics.TruePositives(name='tp'),
                          metrics.FalseNegatives(name='fn'),
                          metrics.TrueNegatives(name='tn'),
                          metrics.FalsePositives(name='fp'),
                          metrics.Recall(name='recall'),
                          metrics.Precision(name='precision')
                      ])

        return model
Example #18
def discriminator_loss(real_output, fake_output):
    cross_entropy = losses.BinaryCrossentropy(from_logits=True)
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss

    return total_loss
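For completeness, the matching generator loss under the same from_logits convention is typically the mirror image (a standard sketch, not taken from this example's source):

def generator_loss(fake_output):
    # the generator wants the discriminator to label its fakes as real
    cross_entropy = losses.BinaryCrossentropy(from_logits=True)
    return cross_entropy(tf.ones_like(fake_output), fake_output)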
Example #19
def cross_n_prob_loss(GT, PRED_PLOT, PRED_PROB):
    total_plot_error = 0
    total_prob_error = 0
    count = 0
    for Y, Y_pred, Y_prob_pred in zip(GT, PRED_PLOT, PRED_PROB):
        Y = Y[Y[:, 0] == 1][:, 1:]
        M = Y.shape[-2]
        N = Y_pred.shape[-2]
        Y = tf.expand_dims(Y, axis=-2)
        Y_pred = tf.expand_dims(Y_pred, axis=-2)
        new_Y = tf.tile(Y, (1, N, 1))
        new_Y_pred = tf.tile(Y_pred, (1, M, 1))
        new_Y_pred = tf.transpose(new_Y_pred, perm=[1, 0, 2])
        chamf_dis = tf.norm(tf.subtract(new_Y, new_Y_pred), axis=-1)
        minval_sum = tf.reduce_sum(tf.reduce_min(chamf_dis, axis=-1))
        minval_idx = tf.argmin(chamf_dis, axis=-1)
        total_plot_error += minval_sum

        Y_prob = tf.zeros(N, dtype=tf.float32)
        minval_idx, _ = tf.unique(minval_idx)
        Y_prob = tf.tensor_scatter_nd_update(
            Y_prob, tf.expand_dims(minval_idx, -1),
            tf.ones_like(minval_idx, dtype=tf.float32))

        total_prob_error += losses.BinaryCrossentropy()(Y_prob, Y_prob_pred)
        count += 1

    return total_plot_error / count, total_prob_error / count
Example #20
    def get_loss_fn(self, **kwargs):
        """
        Define a rank 1 ListNet loss
        Additionally can pass in record positions to handle positional bias

        """
        bce = losses.BinaryCrossentropy(reduction=Reduction.SUM)
        mask = kwargs.get("mask")

        def _loss_fn(y_true, y_pred):
            batch_size = tf.cast(tf.shape(y_true)[0], tf.float32)

            # Mask the padded records
            y_true = tf.gather_nd(y_true,
                                  tf.where(tf.equal(mask, tf.constant(1.0))))
            y_pred = tf.gather_nd(y_pred,
                                  tf.where(tf.equal(mask, tf.constant(1.0))))

            # Reshape the tensors
            y_true = tf.expand_dims(tf.squeeze(y_true), axis=-1)
            y_pred = tf.expand_dims(tf.squeeze(y_pred), axis=-1)

            return tf.math.divide(bce(y_true, y_pred), batch_size)

        return _loss_fn
Example #21
def test_rotate(knowledge_graph):
    margin = 2.34
    norm_order = 1.234

    # this test creates a random untrained model and predicts every possible edge in the graph, and
    # compares that to a direct implementation of the scoring method in the paper
    gen = KGTripleGenerator(knowledge_graph, 3)

    # use a random initializer with a large range, so that any differences are obvious
    init = initializers.RandomUniform(-1, 1)
    rotate_model = RotatE(
        gen, 5, margin=margin, norm_order=norm_order, embeddings_initializer=init
    )
    x_inp, x_out = rotate_model.in_out_tensors()

    model = Model(x_inp, x_out)

    model.compile(loss=tf_losses.BinaryCrossentropy(from_logits=True))

    every_edge = itertools.product(
        knowledge_graph.nodes(),
        knowledge_graph._edges.types.pandas_index,
        knowledge_graph.nodes(),
    )
    df = triple_df(*every_edge)

    # check the model can be trained on a few (uneven) batches
    model.fit(
        gen.flow(df.iloc[:7], negative_samples=2),
        validation_data=gen.flow(df.iloc[7:14], negative_samples=3),
    )

    # compute the exact values based on the model by extracting the embeddings for each element and
    # computing the RotatE score margin - ||e_s * w_r - e_o|| directly
    s_idx = knowledge_graph.node_ids_to_ilocs(df.source)
    r_idx = knowledge_graph._edges.types.to_iloc(df.label)
    o_idx = knowledge_graph.node_ids_to_ilocs(df.target)

    nodes, edge_types = rotate_model.embeddings()
    # the rows correspond to the embeddings for the given edge, so we can do bulk operations
    e_s = nodes[s_idx, :]
    w_r = edge_types[r_idx, :]
    e_o = nodes[o_idx, :]

    # every edge-type embedding should be a unit rotation
    np.testing.assert_allclose(np.abs(w_r), 1)

    actual = margin - np.linalg.norm(e_s * w_r - e_o, ord=norm_order, axis=1)

    # predict every edge using the model
    prediction = model.predict(gen.flow(df))

    # (use an absolute tolerance to allow for catastrophic cancellation around very small values)
    np.testing.assert_allclose(prediction[:, 0], actual, rtol=1e-3, atol=1e-14)

    # the model is stateful (i.e. it holds the weights permanently) so the predictions with a second
    # 'build' should be the same as the original one
    model2 = Model(*rotate_model.in_out_tensors())
    prediction2 = model2.predict(gen.flow(df))
    np.testing.assert_array_equal(prediction, prediction2)
Example #22
def build(config, vocab_size):
    model = Sequential()
    model.add(Embedding(vocab_size, config.output_dim))
    for i in range(config.n_lstm):
        # intermediate LSTM layers must return sequences so they can be stacked
        model.add(LSTM(128, return_sequences=(i < config.n_lstm - 1)))
    model.add(Dense(config.n_class, activation='softmax'))

    # define loss function
    if config.loss == 'binary_crossentropy':
        loss = losses.BinaryCrossentropy()
    elif config.loss == 'categorical_crossentropy':
        loss = losses.CategoricalCrossentropy()
    else:
        raise ValueError(f'unsupported loss: {config.loss}')

    model.compile(optimizer=config.optimizer, loss=loss)

    es = EarlyStopping(monitor=config.metric,
                       mode='auto',
                       verbose=1,
                       patience=config.patience)
    mc = ModelCheckpoint(os.path.join(config.save_directory, config.ckpt_name),
                         monitor=config.metric,
                         mode='auto',
                         verbose=1,
                         save_best_only=config.best_only)
    callback = [es, mc]
    model.summary()
    return model, callback
Example #23
    def __init__(self, vocab_size, embedding_size, input_length,
                 n_punct_classes) -> None:
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.input_length = input_length

        inputs = keras.Input((self.input_length, ))
        model_head = self.get_model_head()(inputs)
        has_punct_out = self.get_mlp_model(2, 'has_p')(model_head)
        punct_out = self.get_mlp_model(n_punct_classes, 'p')(model_head)
        # punct_mask = self.get_punct_mask(n_punct_classes, 'p')({'has_p': has_punct_out, 'p_inter': punct_out})
        start_quote_out = self.get_mlp_model(2, 'sq')(model_head)
        end_quote_out = self.get_mlp_model(2, 'eq')(model_head)
        dash_out = self.get_mlp_model(2, 'd')(model_head)
        self.model = keras.Model(inputs=inputs,
                                 outputs={
                                     'has_p': has_punct_out,
                                     'p': punct_out,
                                     'sq': start_quote_out,
                                     'eq': end_quote_out,
                                     'd': dash_out,
                                 })
        my_losses = {
            "has_p": losses.BinaryCrossentropy(from_logits=False),
            "p": losses.CategoricalCrossentropy(from_logits=False),
            "sq": losses.BinaryCrossentropy(from_logits=False),
            "eq": losses.BinaryCrossentropy(from_logits=False),
            "d": losses.BinaryCrossentropy(from_logits=False),
        }
        my_metrics = {
            "has_p":
            [metrics.Recall(class_id=1),
             metrics.Precision(class_id=1)],
            "p": [metrics.Recall(class_id=1),
                  metrics.Precision(class_id=1)],
            "sq": [metrics.Recall(class_id=1),
                   metrics.Precision(class_id=1)],
            "eq": [metrics.Recall(class_id=1),
                   metrics.Precision(class_id=1)],
            "d": [metrics.Recall(class_id=1),
                  metrics.Precision(class_id=1)],
        }
        self.model.compile(
            optimizer=optimizers.Adam(),
            loss=my_losses,
            metrics=my_metrics)
        self.model.summary()
Example #24
def generator_loss(fake_output, kl_fake_output):
    cross_entropy = losses.BinaryCrossentropy(from_logits=True)

    uniform_dist = tf.ones([BATCH_SIZE, num_classes]) * (1.0 / num_classes)
    kl = losses.KLDivergence()
    kl_loss = kl(kl_fake_output, uniform_dist) * num_classes

    return cross_entropy(tf.ones_like(fake_output), fake_output) + kl_loss
Example #25
File: app.py Project: gtg7784/GAN-study
def discriminator_loss(real, fake):
    cross_entropy = losses.BinaryCrossentropy(from_logits=True)

    real_loss = cross_entropy(tf.ones_like(real), real)
    fake_loss = cross_entropy(tf.zeros_like(fake), fake)

    total_loss = real_loss + fake_loss

    return total_loss
Example #26
def test_complex(knowledge_graph, sample_strategy):
    # this test creates a random untrained model and predicts every possible edge in the graph, and
    # compares that to a direct implementation of the scoring method in the paper
    gen = KGTripleGenerator(knowledge_graph, 3)

    # use a random initializer with a large positive range, so that any differences are obvious
    init = initializers.RandomUniform(-1, 1)
    complex_model = ComplEx(gen, 5, embeddings_initializer=init)
    x_inp, x_out = complex_model.in_out_tensors()

    model = Model(x_inp, x_out)
    if sample_strategy == "uniform":
        loss = tf_losses.BinaryCrossentropy(from_logits=True)
    else:
        loss = sg_losses.SelfAdversarialNegativeSampling()

    model.compile(loss=loss)

    every_edge = itertools.product(
        knowledge_graph.nodes(),
        knowledge_graph._edges.types.pandas_index,
        knowledge_graph.nodes(),
    )
    df = triple_df(*every_edge)

    # check the model can be trained on a few (uneven) batches
    model.fit(
        gen.flow(df.iloc[:7], negative_samples=2, sample_strategy=sample_strategy),
        validation_data=gen.flow(
            df.iloc[7:14], negative_samples=3, sample_strategy=sample_strategy
        ),
    )

    # compute the exact values based on the model by extracting the embeddings for each element and
    # doing the Re(<e_s, w_r, conj(e_o)>) inner product
    s_idx = knowledge_graph.node_ids_to_ilocs(df.source)
    r_idx = knowledge_graph._edges.types.to_iloc(df.label)
    o_idx = knowledge_graph.node_ids_to_ilocs(df.target)

    nodes, edge_types = complex_model.embeddings()
    # the rows correspond to the embeddings for the given edge, so we can do bulk operations
    e_s = nodes[s_idx, :]
    w_r = edge_types[r_idx, :]
    e_o = nodes[o_idx, :]
    actual = (e_s * w_r * e_o.conj()).sum(axis=1).real

    # predict every edge using the model
    prediction = model.predict(gen.flow(df))

    # (use an absolute tolerance to allow for catastrophic cancellation around very small values)
    np.testing.assert_allclose(prediction[:, 0], actual, rtol=1e-3, atol=1e-6)

    # the model is stateful (i.e. it holds the weights permanently) so the predictions with a second
    # 'build' should be the same as the original one
    model2 = Model(*complex_model.in_out_tensors())
    prediction2 = model2.predict(gen.flow(df))
    np.testing.assert_array_equal(prediction, prediction2)
Example #27
def main():
    units = 64
    epochs = 5
    model = MyRNN(units)
    model.compile(optimizer=optimizers.Adam(learning_rate=0.0005),
                  loss=losses.BinaryCrossentropy(),
                  metrics=['accuracy'],
                  experimental_run_tf_function=False)
    model.fit(train_db, epochs=epochs, validation_data=test_db)
    model.evaluate(test_db)
Example #28
    def __init__(self, batch_size, iou_type):
        super(YOLOv4Loss, self).__init__(name="YOLOv4Loss")
        if iou_type == "ciou":
            self.bbox_xiou = bbox_ciou

        self.prob_cross_entropy = losses.BinaryCrossentropy(
            reduction=losses.Reduction.NONE)

        self.batch_size = batch_size
        self.while_cond = lambda i, iou: tf.less(i, self.batch_size)
Example #29
def Simple(input_data):
    # Simple
    model = models.Sequential()
    model.add(
        layers.LSTM(256,
                    activation='tanh',
                    recurrent_activation='sigmoid',
                    recurrent_dropout=0,
                    unroll=False,
                    use_bias=True,
                    return_sequences=False,
                    time_major=False))
    model.add(layers.Dense(number_of_outputs, activation='sigmoid'))

    loss_fn = losses.BinaryCrossentropy()
    opt = optimizers.Adam(learning_rate=.005)

    myMetric = metrics.CategoricalCrossentropy()

    model.compile(optimizer=opt,
                  loss=loss_fn,
                  metrics=[myMetric, metrics.FalseNegatives()])

    inputPath = None
    outputPath = rootDir + getUniqueName("Simple")

    if inputPath is not None:
        print("Loading Model From A file")
        print(inputPath)
        model.load_weights(inputPath)

    # down-weight class 0 and spread the remaining weight evenly across the others
    class_weights = {0: 0.15}
    non_zero_class_weight = (1 - class_weights[0]) / (number_of_outputs - 1)
    for i in range(1, number_of_outputs):
        class_weights[i] = non_zero_class_weight

    cp_callback = callbacks.ModelCheckpoint(filepath=outputPath,
                                            save_weights_only=True,
                                            save_best_only=True,
                                            verbose=1)

    print(outputPath)

    model.fit(
        input_data,
        epochs=50000,
        callbacks=[cp_callback],
        class_weight=class_weights,
    )
Example #30
def configure_model(model):
    opt = optimizers.Adam(learning_rate=1e-3)
    loss = losses.BinaryCrossentropy()
    model.compile(loss=loss, optimizer=opt)
    return model