Code example #1
    def critic_optimizer(self):
        discounted_reward = K.placeholder(shape=(None, ))

        value = self.critic.output

        loss = K.mean(K.square(discounted_reward - value))

        optimizer = Adam(lr=self.critic_lr)
        updates = optimizer.get_updates(self.critic.trainable_weights, [], loss)
        train = K.function([self.critic.input, discounted_reward], [], updates=updates)
        return train
Code example #2
    def optimizer(self):
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])

        # Calculate cross entropy error function
        action_prob = K.sum(action * self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)

        # create training function
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights, [],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train
Code example #3
    def actor_optimizer(self):
        action = K.placeholder(shape=(None, self.action_size))
        advantages = K.placeholder(shape=(None, ))

        policy = self.actor.output

        good_prob = K.sum(action * policy, axis=1)
        eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
        loss = -K.sum(eligibility)

        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

        actor_loss = loss + 0.01*entropy

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
        train = K.function([self.actor.input, action, advantages], [], updates=updates)
        return train
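Examples #1-#3 all follow the same pattern: define placeholders for the targets, build a symbolic loss on the model output, ask Adam for its update ops via get_updates, and wrap everything in a K.function that performs one gradient step per call. Below is a minimal, self-contained sketch of building and invoking such a training function. It is an assumption for illustration, not taken from the original agents, and it uses the older three-argument get_updates(params, constraints, loss) signature these snippets rely on (newer Keras versions use get_updates(loss, params)).

import numpy as np
from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model
from keras.optimizers import Adam

state_size, action_size = 4, 5                       # assumed sizes for this sketch

inp = Input(shape=(state_size,))
out = Dense(action_size, activation='softmax')(inp)
policy_model = Model(inp, out)

action = K.placeholder(shape=(None, action_size))    # one-hot actions
discounted_rewards = K.placeholder(shape=(None,))
action_prob = K.sum(action * policy_model.output, axis=1)
loss = -K.sum(K.log(action_prob + 1e-10) * discounted_rewards)

updates = Adam(lr=0.001).get_updates(policy_model.trainable_weights, [], loss)
train = K.function([policy_model.input, action, discounted_rewards], [], updates=updates)

# one Adam step on a fake batch of 32 transitions
states = np.random.rand(32, state_size).astype('float32')
actions = np.eye(action_size)[np.random.randint(action_size, size=32)]
rewards = np.random.rand(32).astype('float32')
train([states, actions, rewards])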
Code example #4
model = Model(inputs=vgg.input, outputs=predictions)

# set trainable
for layer in model.layers[:19]:
    layer.trainable = False
for layer in model.layers[19:]:
    layer.trainable = True

layers = [(layer, layer.name, layer.trainable) for layer in model.layers]
print("layer, layer.name, layer.trainable")
for layer in layers:
    print(layer)

Epochs = 10
stepPerEpochs = int(trainSplSize / batchSize)
model.compile(Adam(lr=.003),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print("model.summary()")
print(model.summary())
history = model.fit_generator(train_generator,
                              steps_per_epoch=stepPerEpochs,
                              epochs=Epochs,
                              validation_data=val_generator,
                              validation_steps=50,
                              verbose=1)

model.save('ASL_vgg16ft_r5.h5')

# results
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
Code example #5
File: began.py  Project: aasensio/DNHazel
    def gan(self):
        # initialize a GAN trainer

        # this is the fastest way to train a GAN in Keras:
        # the two models are updated simultaneously in one pass

        noise = Input(shape=self.generator.input_shape[1:])
        real_data = Input(shape=self.discriminator.input_shape[1:])

        generated = self.generator(noise)
        gscore = self.discriminator(generated)
        rscore = self.discriminator(real_data)

        def log_eps(i):
            return K.log(i+1e-11)

        # single side label smoothing: replace 1.0 with 0.9
        dloss = - K.mean(log_eps(1-gscore) + .1 * log_eps(1-rscore) + .9 * log_eps(rscore))
        gloss = - K.mean(log_eps(gscore))

        Adam = tf.train.AdamOptimizer

        lr, b1 = 1e-4, .2  # otherwise won't converge.
        optimizer = Adam(lr)

        grad_loss_wd = optimizer.compute_gradients(dloss, self.discriminator.trainable_weights)
        update_wd = optimizer.apply_gradients(grad_loss_wd)

        grad_loss_wg = optimizer.compute_gradients(gloss, self.generator.trainable_weights)
        update_wg = optimizer.apply_gradients(grad_loss_wg)

        def get_internal_updates(model):
            # get all internal update ops (like moving averages) of a model
            inbound_nodes = model.inbound_nodes
            input_tensors = []
            for ibn in inbound_nodes:
                input_tensors+= ibn.input_tensors
            updates = [model.get_updates_for(i) for i in input_tensors]
            return updates

        other_parameter_updates = [get_internal_updates(m) for m in [self.discriminator,self.generator]]
        # these updates include batch norm moving averages.

        print('other_parameter_updates for the models(mainly for batch norm):')
        print(other_parameter_updates)

        train_step = [update_wd, update_wg, other_parameter_updates]
        losses = [dloss,gloss]

        learning_phase = K.learning_phase()

        def gan_feed(sess,batch_image,z_input):
            # actual GAN trainer
            nonlocal train_step,losses,noise,real_data,learning_phase

            res = sess.run([train_step, losses], feed_dict={
                noise: z_input,
                real_data: batch_image,
                learning_phase: True,
                # Keras layers need to know whether this run is
                # training or testing (batch norm and dropout behave differently)
            })

            loss_values = res[1]
            return loss_values #[dloss,gloss]

        return gan_feed
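The method returns a closure rather than training directly. A hedged usage sketch of how gan_feed would typically be driven from a plain TensorFlow session loop follows; it is an assumption, not part of the original project, and `trainer` and `next_image_batch` are hypothetical stand-ins.

import numpy as np
from keras import backend as K

gan_feed = trainer.gan()                  # trainer: hypothetical instance of the class above
sess = K.get_session()                    # reuse the session Keras already created

for step in range(1000):
    batch_image = next_image_batch()      # hypothetical data source, matching the discriminator input shape
    z_input = np.random.normal(size=(len(batch_image),) + trainer.generator.input_shape[1:])
    dloss, gloss = gan_feed(sess, batch_image, z_input)
    if step % 100 == 0:
        print(step, dloss, gloss)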
Code example #6
x = base_model.output
x = attach_attention_module(x, attention_module)
x = GlobalAveragePooling2D()(x)
x = Dropout(dropout)(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(dropout)(x)
predictions = Dense(no_of_classes, activation='softmax')(x)

lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=2,
                               min_lr=0.5e-6)

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer=Adam(lr_schedule(0)),
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy', 'accuracy'])

filepath = output_models_dir + experiment_name + "_{epoch:02d}_{val_acc:.2f}.h5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=False,
                             save_weights_only=False,
                             mode='auto',
                             period=1)
checkpoints = [checkpoint]
model.fit_generator(train_generator,
                    epochs=epochs,
                    steps_per_epoch=87,
Code example #7
File: job13.py  Project: boringlee24/keras_old
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(
        10, activation='softmax'))  #, kernel_initializer='he_uniform'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])

    #model.summary()
    print(model_type)

#pdb.set_trace()

current_epoch = 0

################### connects interrupt signal to the process #####################


def terminateProcess(signalNumber, frame):
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
Code example #8
def model_train(model, batch_size=32, epochs=200, data_path=None, data_augmentation=True, subtract_pixel_mean=True):
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = load_data(data_path)

    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255

    # If subtract pixel mean is enabled
    if subtract_pixel_mean:
        x_train_mean = np.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean

    # Convert class vectors to binary class matrices.
    y_train = np_utils.to_categorical(y_train, num_classes)
    y_test = np_utils.to_categorical(y_test, num_classes)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr_schedule(0)),
                  metrics=['accuracy'])

    checkpoint = ModelCheckpoint(filepath='./weights/resnet20_cifar10_weights.{epoch:03d}.h5',
                                 monitor='loss',
                                 save_best_only=True,
                                 save_weights_only=True)
    lr_scheduler = LearningRateScheduler(lr_schedule)
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)
    csv_logger = CSVLogger('./results/training_resnet20_cifar10.csv')

    callbacks = [checkpoint, lr_reducer, lr_scheduler, csv_logger]

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(x_train, y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(x_test, y_test),
                  shuffle=True,
                  callbacks=callbacks)
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(featurewise_center=False,# set input mean to 0 over the dataset
                                     samplewise_center=False,# set each sample mean to 0
                                     featurewise_std_normalization=False,# divide inputs by std of dataset
                                     samplewise_std_normalization=False,# divide each input by its std
                                     zca_whitening=False,# apply ZCA whitening
                                     rotation_range=0,# randomly rotate images in the range (deg 0 to 180)
                                     width_shift_range=0.1,# randomly shift images horizontally
                                     height_shift_range=0.1,# randomly shift images vertically
                                     horizontal_flip=True,# randomly flip images
                                     vertical_flip=False)# randomly flip images

        # Compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)

        # Fit the model on the batches generated by datagen.flow().
        model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                            validation_data=(x_test, y_test),
                            epochs=epochs,
                            verbose=1,
                            workers=8,
                            use_multiprocessing=True,
                            callbacks=callbacks)
Code example #9
def main():

    dataList = []
    #plt.imshow(preprocessImage(img, transform))
    #plt.show()

    #showSamplesCompared(img, transform, '', '', '')
    #plt.xkcd()
    np.random.seed(0)
    #data = pd.read_csv('/home/jjordening/data/dataset_sim_000_km_few_laps/driving_log.csv',
    #                   header = None, names=['center','left', 'right', 'steering','throttle', 'brake', 'speed', 'position', 'orientation'])
    #data['positionX'], data['positionY'], data['positionZ'] = data['position'].apply(retrieveVectors)
    #data['orientationX'], data['orientationY'], data['orientationZ'] = data['orientation'].apply(retrieveVectors)
    #data['center'] = '/home/jjordening/data/dataset_sim_000_km_few_laps/'+data['center'].apply(lambda x: x.strip())

    #data1 = pd.read_csv('/home/jjordening/data/udacity-day-01-exported-1102/output_processed.txt')
    #data1['path'] = '/home/jjordening/data/udacity-day-01-exported-1102/'+data1['path'].apply(lambda x: x.strip())

    #data2 = pd.read_csv('/home/jjordening/data/udacity-day-01-exported-1109/output_processed.txt')
    #data2['path'] = '/home/jjordening/data/udacity-day-01-exported-1109/'+data2['path'].apply(lambda x: x.strip())

    if ALL:
        data3 = pd.read_csv('/home/jjordening/data/1538/output_processed.txt')
        data3['path'] = '/home/jjordening/data/1538/' + data3['path'].apply(
            lambda x: x.strip())
        print('data3', np.max(data3['steering']), np.min(data3['steering']))
        dataList.append(data3)

        data4 = pd.read_csv('/home/jjordening/data/1543/output_processed.txt')
        data4['path'] = '/home/jjordening/data/1543/' + data4['path'].apply(
            lambda x: x.strip())
        print('data4', np.max(data4['steering']), np.min(data4['steering']))
        dataList.append(data4)

        data5 = pd.read_csv('/home/jjordening/data/1610/output_processed.txt')
        data5['path'] = '/home/jjordening/data/1610/' + data5['path'].apply(
            lambda x: x.strip())
        print('data5', np.max(data5['steering']), np.min(data5['steering']))
        dataList.append(data5)

        data6 = pd.read_csv('/home/jjordening/data/1645/output_processed.txt')
        data6['path'] = '/home/jjordening/data/1645/' + data6['path'].apply(
            lambda x: x.strip())
        print('data6', np.max(data6['steering']), np.min(data6['steering']))
        dataList.append(data6)

        data7 = pd.read_csv('/home/jjordening/data/1702/output_processed.txt')
        data7['path'] = '/home/jjordening/data/1702/' + data7['path'].apply(
            lambda x: x.strip())
        print('data7', np.max(data7['steering']), np.min(data7['steering']))
        dataList.append(data7)

        data8 = pd.read_csv('/home/jjordening/data/1708/output_processed.txt')
        data8['path'] = '/home/jjordening/data/1708/' + data8['path'].apply(
            lambda x: x.strip())
        print('data8', np.max(data8['steering']), np.min(data8['steering']))
        dataList.append(data8)

    data9 = pd.read_csv('/home/jjordening/data/1045/output_processed.txt')
    data9['path'] = '/home/jjordening/data/1045/' + data9['path'].apply(
        lambda x: x.strip())
    print('data9', np.max(data9['steering']), np.min(data9['steering']))
    dataList.append(data9)

    data10 = pd.read_csv('/home/jjordening/data/1050/output_processed.txt')
    data10['path'] = '/home/jjordening/data/1050/' + data10['path'].apply(
        lambda x: x.strip())
    print('data10', np.max(data10['steering']), np.min(data10['steering']))
    dataList.append(data10)

    data11 = pd.read_csv('/home/jjordening/data/1426/output_processed.txt')
    data11['path'] = '/home/jjordening/data/1426/' + data11['path'].apply(
        lambda x: x.strip())
    print('data11', np.max(data11['steering']), np.min(data11['steering']))
    dataList.append(data11)

    data12 = pd.read_csv('/home/jjordening/data/1516/output_processed.txt')
    data12['path'] = '/home/jjordening/data/1516/' + data12['path'].apply(
        lambda x: x.strip())
    print('data12', np.max(data12['steering']), np.min(data12['steering']))
    dataList.append(data12)

    data13 = pd.read_csv('/home/jjordening/data/1634/outputNew.txt')
    data13['path'] = '/home/jjordening/data/1634/' + data13['path'].apply(
        lambda x: x.strip())
    print('data13', np.max(data13['steering']), np.min(data13['steering']))
    dataList.append(data13)

    print(data9['brake'].unique())
    """data3 = pd.read_csv('/home/jjordening/data/dataset_polysync_1464552951979919/output_processed.txt', header = None, 
                        names = ['path','heading','longitude','latitude','quarternion0','quarternion1','quarternion2','quarternion3','vel0','vel1',
                                'vel2','steering','throttle','brake','speed'], skiprows = 500)
    data3 = data3.ix[0:1500].append(data3.ix[2600:])
    data3 = data3.ix[-500:]
    data3['path'] = '/home/jjordening/data/dataset_polysync_1464552951979919/'+data3['path'].apply(lambda x: x.strip())
    data3['throttle'] = 0"""

    #data['right'] = '../simulator/data/data/'+data['right'].apply(lambda x: x.strip())
    #data['left'] = '../simulator/data/data/'+data['left'].apply(lambda x: x.strip())
    angles = []
    dataNew = pd.DataFrame()
    offset = 0
    #print(data3['steering'])
    #print(data1['longitude'])
    """for dat in [data3,data4,data5,data6,data7]:
        angles.extend(dat['steering'].values)
        for row in dat.iterrows():
            dat.loc[row[0], 'angleIndex'] = row[0]+ offset
            #images.append(preprocessImage(mpimg.imread(row[1]['center'].strip())))
            #images.append(transform(mpimg.imread(row[1]['center'].strip())))
        offset+=100
        dataNew = dataNew.append(dat.ix[100:])"""
    #dataNew['throttle'] = dataNew['accel'].apply(lambda x: max(x,0)/np.max(dataNew['accel']))
    for dat in dataList:
        dataNew = dataNew.append(dat.ix[30:])
        del dat

    print('Len dataNew: ', len(dataNew))
    dataNew = dataNew.loc[pd.notnull(dataNew['throttle'])]
    dataNew = dataNew.loc[pd.notnull(dataNew['brake'])]
    dataNew = dataNew.loc[pd.notnull(dataNew['steering'])]
    print('Len dataNew: ', len(dataNew))
    print(np.max(dataNew['throttle']), np.min(dataNew['throttle']))
    # TODO: Normalisation of position and orientation
    #del data3,data4,data5,data6,data7
    print(len(dataNew), dataNew.columns)
    print(np.histogram(dataNew['throttle'], bins=31))
    hist, edges = np.histogram(dataNew['steering'], bins=31)
    print(hist, edges, len(dataNew))
    hist = 1. / np.array([
        val if val > len(dataNew) / 40. else len(dataNew) / 40. for val in hist
    ])
    hist *= len(dataNew) / 40.
    print(hist, edges, len(dataNew))
    dataNew['norm'] = dataNew['steering'].apply(
        lambda x: getNormFactor(x, hist, edges))
    print(dataNew['norm'].unique())
    print(np.min(dataNew['steering']), np.max(dataNew['steering']))
    print(np.min(dataNew['throttle']), np.max(dataNew['throttle']))
    print(np.min(dataNew['brake']), np.max(dataNew['brake']))

    dataNew['speed'] = dataNew['speed'].apply(lambda x: x / 40. - 1)

    dataNew = shuffle(dataNew, random_state=0)
    #plt.figure(1, figsize=(8,4))
    #plt.hist(dataNew['steering'], bins =31)

    #plt.show()

    dataTrain, dataTest = train_test_split(dataNew, test_size=.1)
    dataTrain, dataVal = train_test_split(dataTrain, test_size=.1)

    file = open(dataTrain['path'].iloc[0], 'rb')
    # Use the PIL raw decoder to read the data (decoded here as raw RGB, 960x480).
    img = np.array(Image.frombytes('RGB', [960, 480], file.read(), 'raw'))
    file.close()

    imShape = preprocessImage(img).shape
    print(imShape)

    batchSize = 128
    epochBatchSize = 8192
    trainGenerator = generateImagesFromPaths(dataTrain, batchSize, imShape,
                                             [3], True)
    t = time.time()
    trainGenerator.__next__()
    print("Time to build train batch: ", time.time() - t)
    valGenerator = generateImagesFromPaths(dataVal, batchSize, imShape, [3])
    t = time.time()
    valGenerator.__next__()
    print("Time to build validation batch: ", time.time() - t)
    stopCallback = EarlyStopping(monitor='val_loss',
                                 patience=20,
                                 min_delta=0.01)
    checkCallback = ModelCheckpoint('psyncModel.ckpt',
                                    monitor='val_loss',
                                    save_best_only=True)
    visCallback = TensorBoard(log_dir='./logs/%d' % int(time.time()),
                              histogram_freq=0,
                              write_graph=True,
                              write_images=True)
    model = load_model('psyncModelBase.h5',
                       custom_objects={'customLoss': customLoss})
    name = 'psyncPosNet%d.h5' % time.time()
    if LOADMODEL:
        endModel = load_model('psyncPosNet.h5',
                              custom_objects={'customLoss': customLoss})
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=100,
            samples_per_epoch=epochBatchSize,
            max_q_size=8,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save(name)

    else:
        inpC = Input(shape=(imShape[0], imShape[1], imShape[2]),
                     name='inputImg')
        xC = Convolution2D(24,
                           8,
                           8,
                           border_mode='valid',
                           subsample=(2, 2),
                           name='conv1')(inpC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(36,
                           5,
                           5,
                           border_mode='valid',
                           subsample=(2, 2),
                           name='conv2')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(48,
                           5,
                           5,
                           border_mode='valid',
                           subsample=(2, 2),
                           name='conv3')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(64, 5, 5, border_mode='valid', name='conv4')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(
            64,
            5,
            5,
            border_mode='valid',
            name='conv5',
        )(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xOut = Flatten()(xC)
        print(xOut.get_shape())

        #Cut for transfer learning is here:
        speedInp = Input(shape=(1, ), name='inputSpeed')

        xOut = Lambda(lambda x: K.concatenate(x, axis=1))([xOut, speedInp])
        xOut = Dense(200)(xOut)
        xOut = BatchNormalization()(xOut)
        xOut = Activation('elu')(xOut)
        xOut = Dense(200)(xOut)
        xOut = BatchNormalization()(xOut)
        xEnd = Activation('elu')(xOut)

        xOutSteer = Dense(100)(xEnd)
        xOutSteer = BatchNormalization()(xOutSteer)
        xOutSteer = Activation('elu')(xOutSteer)
        xOutSteer = Dropout(.2)(xOutSteer)
        xOutSteer = Dense(30)(xOutSteer)
        xOutSteer = BatchNormalization()(xOutSteer)
        xOutSteer = Activation('elu')(xOutSteer)
        xOutSteer = Dense(1, activation='sigmoid')(xOutSteer)
        xOutSteer = Lambda(lambda x: x * 10 - 5, name='outputSteer')(xOutSteer)

        xOutThr = Dense(100, name='thr1')(xEnd)
        xOutThr = BatchNormalization(name='thr2')(xOutThr)
        xOutThr = Activation('elu')(xOutThr)
        xOutThr = Dropout(.2)(xOutThr)
        xOutThr = Dense(30, name='thr3')(xOutThr)
        xOutThr = BatchNormalization(name='thr4')(xOutThr)
        xOutThr = Activation('elu')(xOutThr)
        xOutThr = Dense(1, activation='sigmoid', name='thr5')(xOutThr)
        xOutThr = Lambda(lambda x: x * 2 - 1, name='outputThr')(xOutThr)

        xOutPos = Dropout(.3)(xEnd)
        xOutPos = Dense(1, activation='sigmoid', name='pos5')(xOutPos)
        xOutPos = Lambda(lambda x: x * 2 - 1, name='outputPos')(xOutPos)

        endModel = Model((inpC, speedInp), (xOutSteer, xOutThr, xOutPos))
        endModel.compile(optimizer=Adam(lr=1e-4), loss='mse', metrics=['mse'])
        #endModel.fit_generator(trainGenerator, callbacks = [visCallback],
        #                       nb_epoch=50, samples_per_epoch=epochBatchSize,
        #                       max_q_size=24, nb_worker=8, pickle_safe=True)
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=30,
            samples_per_epoch=epochBatchSize,
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save(name)
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=30,
            samples_per_epoch=epochBatchSize,
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save(name)
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=40,
            samples_per_epoch=epochBatchSize,
            max_q_size=24,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save(name)

    endModel = load_model(name, custom_objects={'customLoss': customLoss})

    print(endModel.evaluate_generator(valGenerator, val_samples=len(dataVal)))
    print(
        endModel.evaluate_generator(generateImagesFromPaths(
            dataTest, batchSize, imShape, [3], angles),
                                    val_samples=len(dataTest)))
Code example #10
        outputs.append(out)

    # Step 3: Create model instance taking three inputs and returning the list of outputs. (≈ 1 line)
    model = Model(inputs=[X, s0, c0], outputs=outputs)

    ### END CODE HERE ###

    return model


#Create the model

model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
model.summary()

opt = Adam()
model.compile(optimizer=opt,
              loss="categorical_crossentropy",
              metrics=["accuracy"])

s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
outputs = list(Yoh.swapaxes(0, 1))

model.fit([Xoh, s0, c0], outputs, epochs=1, batch_size=100)
model.load_weights('models/model.h5')

EXAMPLES = [
    '3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007',
    'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001'
]
Code example #11
def get_net():
    inputs = Input(shape=(img_h, img_w, N_channels))
    # network architecture definition
    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)
    conv10 = Conv2D(C, (1, 1),
                    activation='relu',
                    kernel_initializer='he_normal')(conv9)

    reshape = Reshape((C, img_h * img_w),
                      input_shape=(C, img_h, img_w))(conv10)
    reshape = Permute((2, 1))(reshape)

    activation = Activation('softmax')(reshape)

    model = Model(inputs=inputs, outputs=activation)

    model.compile(optimizer=Adam(lr=1.0e-4),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Code example #12
def training_game():
    env = Environment(
        map_name="ForceField",
        visualize=True,
        game_steps_per_episode=150,
        agent_interface_format=features.AgentInterfaceFormat(
            feature_dimensions=features.Dimensions(screen=64, minimap=32)))

    input_shape = (_SIZE, _SIZE, 1)
    nb_actions = 12  # Number of actions

    model = neural_network_model(input_shape, nb_actions)
    memory = SequentialMemory(limit=5000, window_length=_WINDOW_LENGTH)

    processor = SC2Proc()

    # Policy

    policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
                                  attr="eps",
                                  value_max=1,
                                  value_min=0.2,
                                  value_test=.0,
                                  nb_steps=1e2)

    # Agent

    dqn = DQNAgent(
        model=model,
        nb_actions=nb_actions,
        memory=memory,
        enable_double_dqn=True,
        enable_dueling_network=True,
        # 2019-07-12 GU Zhan (Sam)
        #                   nb_steps_warmup=500, target_model_update=1e-2, policy=policy,
        nb_steps_warmup=2000,
        target_model_update=1e-2,
        policy=policy,
        batch_size=150,
        processor=processor,
        delta_clip=1)

    dqn.compile(Adam(lr=.001), metrics=["mae", "acc"])

    # Tensorboard callback

    timestamp = f"{datetime.datetime.now():%Y-%m-%d %I:%M%p}"
    # 2019-07-12 GU Zhan (Sam) folder name for Lunux:
    #    callbacks = keras.callbacks.TensorBoard(log_dir='./Graph/'+ timestamp, histogram_freq=0,
    #                                write_graph=True, write_images=False)

    # 2019-07-12 GU Zhan (Sam) folder name for Windows:
    callbacks = keras.callbacks.TensorBoard(log_dir=r'.\Graph\issgz',
                                            histogram_freq=0,
                                            write_graph=True,
                                            write_images=False)

    # Save the parameters and upload them when needed

    name = "agent"
    w_file = "dqn_{}_weights.h5f".format(name)
    check_w_file = "train_w" + name + "_weights.h5f"

    if SAVE_MODEL:
        check_w_file = "train_w" + name + "_weights_{step}.h5f"

    log_file = "training_w_{}_log.json".format(name)

    if LOAD_MODEL:
        dqn.load_weights(w_file)

    class Saver(Callback):
        def on_episode_end(self, episode, logs={}):
            if episode % 200 == 0:
                self.model.save_weights(w_file, overwrite=True)

    s = Saver()
    logs = FileLogger('DQN_Agent_log.csv', interval=1)

    #    dqn.fit(env, callbacks=[callbacks,s,logs], nb_steps=600, action_repetition=2,
    dqn.fit(env,
            callbacks=[callbacks, s, logs],
            nb_steps=10000,
            action_repetition=2,
            log_interval=1e4,
            verbose=2)

    dqn.save_weights(w_file, overwrite=True)
    dqn.test(env, action_repetition=2, nb_episodes=30, visualize=False)
Code example #13
                                      aspect_ratios_per_layer=None,
                                      two_boxes_for_ar1=two_boxes_for_ar1,
                                      limit_boxes=limit_boxes,
                                      variances=variances,
                                      coords=coords,
                                      normalize_coords=normalize_coords)
model.load_weights('./ssd7_0_weights.h5')
#model = load_model('./ssd7_0.h5')

### Set up training

batch_size = 4

# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model

adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=5e-05)

ssd_loss = SSDLoss(neg_pos_ratio=3, n_neg_min=0, alpha=1.0)

model.compile(optimizer=adam, loss=ssd_loss.compute_loss)

# 4: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function 

ssd_box_encoder = SSDBoxEncoder(img_height=img_height,
                                img_width=img_width,
                                n_classes=n_classes, 
                                predictor_sizes=predictor_sizes,
                                min_scale=min_scale,
                                max_scale=max_scale,
                                scales=scales,
                                aspect_ratios_global=aspect_ratios,
Code example #14
def multihead_seq_model(tasks,
                        filters,
                        n_dil_layers,
                        conv1_kernel_size,
                        tconv_kernel_size,
                        b_loss_weight=1,
                        c_loss_weight=1,
                        p_loss_weight=1,
                        c_splines=20,
                        p_splines=0,
                        merge_profile_reg=False,
                        lr=0.004,
                        padding='same',
                        batchnorm=False,
                        use_bias=False,
                        n_profile_bias_tracks=2,
                        n_bias_tracks=2,
                        seqlen=None,
                        skip_type='residual'):
    from basepair.seqmodel import SeqModel
    from basepair.layers import DilatedConv1D, DeConv1D, GlobalAvgPoolFCN
    from basepair.metrics import BPNetMetricSingleProfile
    from basepair.heads import ScalarHead, ProfileHead
    from gin_train.metrics import ClassificationMetrics, RegressionMetrics
    from basepair.losses import mc_multinomial_nll_2, CountsMultinomialNLL
    from basepair.exp.paper.config import peak_pred_metric
    from basepair.activations import clipped_exp
    from basepair.functions import softmax

    assert p_loss_weight >= 0
    assert c_loss_weight >= 0
    assert b_loss_weight >= 0

    # Heads -------------------------------------------------
    heads = []
    # Profile prediction
    if p_loss_weight > 0:
        if not merge_profile_reg:
            heads.append(
                ProfileHead(target_name='{task}/profile',
                            net=DeConv1D(n_tasks=2,
                                         filters=filters,
                                         tconv_kernel_size=tconv_kernel_size,
                                         padding=padding,
                                         n_hidden=0,
                                         batchnorm=batchnorm),
                            loss=mc_multinomial_nll_2,
                            loss_weight=p_loss_weight,
                            postproc_fn=softmax,
                            use_bias=use_bias,
                            bias_input='bias/{task}/profile',
                            bias_shape=(None, n_profile_bias_tracks),
                            metric=peak_pred_metric))
        else:
            heads.append(
                ProfileHead(
                    target_name='{task}/profile',
                    net=DeConv1D(
                        n_tasks=2,
                        filters=filters,
                        tconv_kernel_size=tconv_kernel_size,
                        padding=padding,
                        n_hidden=1,  # use 1 hidden layer in that case
                        batchnorm=batchnorm),
                    activation=clipped_exp,
                    loss=CountsMultinomialNLL(2, c_task_weight=c_loss_weight),
                    loss_weight=p_loss_weight,
                    bias_input='bias/{task}/profile',
                    use_bias=use_bias,
                    bias_shape=(None, n_profile_bias_tracks),
                    metric=BPNetMetricSingleProfile(
                        count_metric=RegressionMetrics(),
                        profile_metric=peak_pred_metric)))
            c_loss_weight = 0  # don't need to use the other count loss

    # Count regression
    if c_loss_weight > 0:
        heads.append(
            ScalarHead(
                target_name='{task}/counts',
                net=GlobalAvgPoolFCN(n_tasks=2,
                                     n_splines=p_splines,
                                     batchnorm=batchnorm),
                activation=None,
                loss='mse',
                loss_weight=c_loss_weight,
                bias_input='bias/{task}/counts',
                use_bias=use_bias,
                bias_shape=(n_bias_tracks, ),
                metric=RegressionMetrics(),
            ))

    # Binary classification
    if b_loss_weight > 0:
        heads.append(
            ScalarHead(
                target_name='{task}/class',
                net=GlobalAvgPoolFCN(n_tasks=1,
                                     n_splines=c_splines,
                                     batchnorm=batchnorm),
                activation='sigmoid',
                loss='binary_crossentropy',
                loss_weight=b_loss_weight,
                metric=ClassificationMetrics(),
            ))
    # -------------------------------------------------
    m = SeqModel(
        body=DilatedConv1D(filters=filters,
                           conv1_kernel_size=conv1_kernel_size,
                           n_dil_layers=n_dil_layers,
                           padding=padding,
                           batchnorm=batchnorm,
                           skip_type=skip_type),
        heads=heads,
        tasks=tasks,
        optimizer=Adam(lr=lr),
        seqlen=seqlen,
    )
    return m
Code example #15
    def __init__(self,
                 numOfClasses,
                 batch_size=32,
                 grayScale=True,
                 leaky=False,
                 swish=False,
                 attention_middle=False,
                 attention_last=False,
                 conditional=False):
        self.img_rows = 64
        self.img_cols = 64
        if grayScale == True:
            self.channels = 1
        else:
            self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        #self.latent_dim = 100
        self.num_classes = numOfClasses
        self.latent_dim = 80
        self.losslog = []
        self.batch_size = batch_size
        self.grayScale = grayScale
        self.leaky = leaky
        self.swish = swish
        self.attention_middle = attention_middle
        self.attention_last = attention_last
        self.conditional = conditional

        self.imagPath = "images"

        # Following parameter and optimizer set as recommended in paper
        self.n_critic = 2  #original 2

        if self.leaky:
            optimizer_gen = Adam(lr=5e-05,
                                 beta_1=0.0,
                                 beta_2=0.7,
                                 epsilon=1e-08)  # working
            optimizer_cri = Adam(lr=2e-04,
                                 beta_1=0.0,
                                 beta_2=0.7,
                                 epsilon=1e-08)  # working
        elif self.swish:
            optimizer_gen = Adam(lr=5e-05,
                                 beta_1=0.0,
                                 beta_2=0.7,
                                 epsilon=1e-08)  # working
            optimizer_cri = Adam(lr=2e-04,
                                 beta_1=0.0,
                                 beta_2=0.7,
                                 epsilon=1e-08)  # working
        else:
            optimizer_gen = Adam(lr=5e-05,
                                 beta_1=0.0,
                                 beta_2=0.7,
                                 epsilon=1e-08)  # working
            optimizer_cri = Adam(lr=2e-04,
                                 beta_1=0.0,
                                 beta_2=0.7,
                                 epsilon=1e-08)  # working

        # Build the generator and critic
        self.generator = self.build_generator()
        self.critic = self.build_critic()

        #keras2ascii(self.generator)
        #plot_model(self.generator, to_file='generator_model.png')

        #keras2ascii(self.critic)
        #plot_model(self.critic, to_file='generator_model.png')

        #-------------------------------
        # Construct Computational Graph
        #       for the Critic
        #-------------------------------

        # Freeze generator's layers while training critic
        self.generator.trainable = False

        # Image input (real sample)
        real_img = Input(shape=self.img_shape)

        # Noise input
        z_disc = Input(shape=(self.latent_dim, ))
        # ADDING LABEL TO MAKE IT DWGAN
        if self.conditional:
            label = Input(shape=(1, ))
            # Generate an image based on noise (fake sample)
            fake_img = self.generator([z_disc, label])
            # Discriminator determines validity of the real and fake images
            fake = self.critic([fake_img, label])

            valid = self.critic([real_img, label])
            # Construct weighted average between real and fake images
            interpolated_img = RandomWeightedAverage()([real_img, fake_img])
            # Determine validity of weighted sample
            validity_interpolated = self.critic([interpolated_img, label])

            # Use Python partial to provide loss function with additional
            # 'averaged_samples' argument
            partial_gp_loss = partial(self.gradient_penalty_loss,
                                      averaged_samples=interpolated_img)
            partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires function names

            self.critic_model = Model(
                inputs=[real_img, label, z_disc],
                outputs=[valid, fake, validity_interpolated])
            self.critic_model.compile(loss=[
                self.wasserstein_loss, self.wasserstein_loss, partial_gp_loss
            ],
                                      optimizer=optimizer_cri,
                                      loss_weights=[1, 1, 10])
        else:
            fake_img = self.generator(z_disc)
            # Discriminator determines validity of the real and fake images
            fake = self.critic(fake_img)
            valid = self.critic(real_img)

            # Construct weighted average between real and fake images
            interpolated_img = RandomWeightedAverage()([real_img, fake_img])
            # Determine validity of weighted sample
            validity_interpolated = self.critic(interpolated_img)

            # Use Python partial to provide loss function with additional
            # 'averaged_samples' argument
            partial_gp_loss = partial(self.gradient_penalty_loss,
                                      averaged_samples=interpolated_img)
            partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires function names

            self.critic_model = Model(
                inputs=[real_img, z_disc],
                outputs=[valid, fake, validity_interpolated])

            self.critic_model.compile(loss=[
                self.wasserstein_loss, self.wasserstein_loss, partial_gp_loss
            ],
                                      optimizer=optimizer_cri,
                                      loss_weights=[1, 1, 10])

        #-------------------------------
        # Construct Computational Graph
        #         for Generator
        #-------------------------------

        # For the generator we freeze the critic's layers
        self.critic.trainable = False
        self.generator.trainable = True

        # Sampled noise for input to generator
        z_gen = Input(shape=(self.latent_dim, ))

        if self.conditional:
            # add label to the input
            label = Input(shape=(1, ))
            # Generate images based on noise
            #img = self.generator(z_gen)
            img = self.generator([z_gen, label])
            # Discriminator determines validity
            valid = self.critic([img, label])
            # Defines generator model
            self.generator_model = Model([z_gen, label], valid)
        else:
            # Generate images based on noise
            img = self.generator(z_gen)
            # Discriminator determines validity
            valid = self.critic(img)
            # Defines generator model
            self.generator_model = Model(z_gen, valid)

        self.generator_model.compile(loss=self.wasserstein_loss,
                                     optimizer=optimizer_gen)
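The constructor above relies on three helpers that are not shown in the snippet: RandomWeightedAverage, wasserstein_loss, and gradient_penalty_loss (the latter two are methods on the class). A sketch of typical definitions follows, based on the widely circulated Keras WGAN-GP example; it is an assumption about their shape, not the original project's code.

import numpy as np
from keras import backend as K
from keras.layers.merge import _Merge


class RandomWeightedAverage(_Merge):
    """Returns a random point on the line between each real/fake image pair."""
    def _merge_function(self, inputs):
        # batch size is assumed fixed at 32 here; it must match the training batch size
        alpha = K.random_uniform((32, 1, 1, 1))
        return alpha * inputs[0] + (1 - alpha) * inputs[1]


def wasserstein_loss(y_true, y_pred):
    # critic outputs unbounded scores; labels are +1 (real) / -1 (fake)
    return K.mean(y_true * y_pred)


def gradient_penalty_loss(y_true, y_pred, averaged_samples):
    # penalize the critic when its gradient norm at interpolated samples deviates from 1
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr_sum = K.sum(K.square(gradients),
                              axis=np.arange(1, len(gradients.shape)))
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    return K.mean(K.square(1 - gradient_l2_norm))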
Code example #16
x_fake = g_model(z_in)
x_inter = Lambda(interpolating)([x_in, x_fake])
x_real_score = d_model(x_in)
x_fake_score = d_model(x_fake)
x_inter_score = d_model(x_inter)

grads = K.gradients(x_inter_score, [x_inter])[0]
grad_norms = K.sqrt(K.sum(grads**2, range(1, K.ndim(grads))) + 1e-9)

d_train_model = Model([x_in, z_in],
                      [x_real_score, x_fake_score, x_inter_score])

d_loss = K.mean(-(x_real_score - x_fake_score)) + 10 * K.mean(
    (grad_norms - 1)**2)
d_train_model.add_loss(d_loss)
d_train_model.compile(optimizer=Adam(2e-4, 0.5))

# Combined model (for training the generator)
g_model.trainable = True
d_model.trainable = False
x_fake_score = d_model(g_model(z_in))

g_train_model = Model(z_in, x_fake_score)
g_train_model.add_loss(K.mean(-x_fake_score))
g_train_model.compile(optimizer=Adam(2e-4, 0.5))

# Inspect the model structures
d_train_model.summary()
g_train_model.summary()

Code example #17
env.seed(123)
nb_actions = env.action_space.n

model = Sequential()
model.add(Flatten(input_shape=(1, ) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())

memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model,
               nb_actions=nb_actions,
               memory=memory,
               nb_steps_warmup=10,
               target_model_update=1e-2,
               policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])

dqn.fit(env, nb_steps=1000, visualize=True, verbose=2)

dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)

dqn.test(env, nb_episodes=5, visualize=True)
Code example #18
def unet_model_3d(input_shape,
                  pool_size=(2, 2, 2),
                  n_labels=1,
                  initial_learning_rate=0.00001,
                  deconvolution=False,
                  depth=4,
                  n_base_filters=32,
                  include_label_wise_dice_coefficients=False,
                  metrics=dice_coefficient,
                  batch_normalization=False,
                  activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_channels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution (deconvolution) instead of up-sampling. This
    increases the amount of memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(
            input_layer=current_layer,
            n_filters=n_base_filters * (2**layer_depth),
            batch_normalization=batch_normalization)
        layer2 = create_convolution_block(
            input_layer=layer1,
            n_filters=n_base_filters * (2**layer_depth) * 2,
            batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = get_up_convolution(
            pool_size=pool_size,
            deconvolution=deconvolution,
            n_filters=current_layer._keras_shape[1])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=concat,
            batch_normalization=batch_normalization)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=current_layer,
            batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [
            get_label_dice_coefficient_function(index)
            for index in range(n_labels)
        ]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss=dice_coefficient_loss,
                  metrics=metrics)
    return model
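A hedged usage sketch (an assumption, not from the original module): per the docstring, with pool_size=(2, 2, 2) and depth=4 the spatial dimensions should be divisible by 2**4 = 16, so a single-channel 64x64x64 volume in channels-first layout is a valid input shape.

model = unet_model_3d(input_shape=(1, 64, 64, 64),
                      n_labels=1,
                      depth=4,
                      n_base_filters=32,
                      batch_normalization=True,
                      initial_learning_rate=1e-5)
model.summary()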
Code example #19
File: train.py  Project: ShanJuQiuMing-S/yolo4_tiny
        if Cosine_scheduler:
            # warm-up period
            warmup_epoch = int((Freeze_epoch-Init_epoch)*0.2)
            # total number of training steps
            total_steps = int((Freeze_epoch-Init_epoch) * num_train / batch_size)
            # number of warm-up steps
            warmup_steps = int(warmup_epoch * num_train / batch_size)
            # learning rate scheduler
            reduce_lr = WarmUpCosineDecayScheduler(learning_rate_base=learning_rate_base,
                                                        total_steps=total_steps,
                                                        warmup_learning_rate=1e-4,
                                                        warmup_steps=warmup_steps,
                                                        hold_base_rate_steps=num_train,
                                                        min_learn_rate=1e-6
                                                        )
            model.compile(optimizer=Adam(), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        else:
            reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1)
            model.compile(optimizer=Adam(learning_rate_base), loss={'yolo_loss': lambda y_true, y_pred: y_pred})

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, mosaic=mosaic),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, mosaic=False),
                validation_steps=max(1, num_val//batch_size),
                epochs=Freeze_epoch,
                initial_epoch=Init_epoch,
                callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    for i in range(freeze_layers): model_body.layers[i].trainable = True
Code example #20
        x = Dense(256, activation='relu')(x)

        predictions = Dense(num_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)

        base_model.trainable = True
        set_trainable = False
        for layer in base_model.layers:
            if layer.name == 'conv2d_90':
                set_trainable = True
            if set_trainable:
                layer.trainable = True
            else:
                layer.trainable = False

        model.compile(optimizer=Adam(lr=0.00001),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()

        # checkpoints
        stem_id = "7_2_1_InceptionV3-unfrozen1-mask-imagenet-imgsize"
        filepath = stem_id + str(
            size) + ".best_{epoch:02d}-{val_accuracy:.2f}.hdf5"
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='val_accuracy',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max')
        filepath = stem_id + str(size) + ".last_auto4.hdf5"
        checkpoint_all = ModelCheckpoint(filepath,
コード例 #21
0
 total_weight_per_user = train.nnz / float(num_users)
 train_csr, user_weights = train.tocsr(), []
 for u in range(num_users):
     #user_weights.append(total_weight_per_user / float(train_csr.getrow(u).nnz))
     user_weights.append(1)
 print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d" 
       %(time()-t1, num_users, num_items, train.nnz, len(testRatings)))
 
 # Build model
 model = get_model(num_users, num_items, mf_dim, layers, reg_layers, reg_mf, enable_dropout)
 if learner.lower() == "adagrad": 
     model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
 elif learner.lower() == "rmsprop":
     model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
 elif learner.lower() == "adam":
     model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
 else:
     model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
 
 # Load pretrain model
 if mf_pretrain != '' and mlp_pretrain != '':
     gmf_model = GMFlogistic.get_model(num_users,num_items,mf_dim)
     gmf_model.load_weights(mf_pretrain)
     mlp_model = MLPlogistic.get_model(num_users,num_items, layers, reg_layers)
     mlp_model.load_weights(mlp_pretrain)
     model = load_pretrain_model(model, gmf_model, mlp_model, len(layers))
     print("Load pretrained GMF (%s) and MLP (%s) models done. " %(mf_pretrain, mlp_pretrain))
     
 # Init performance
 (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
 hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
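
evaluate_model is not shown above; in the usual leave-one-out protocol for this kind of recommender, each test user's held-out item is ranked against a set of sampled negatives and scored with a hit ratio and an NDCG roughly as below. This is a sketch of the assumed metric definitions, not the project's exact code.

import math

def get_hit_ratio(ranklist, true_item):
    # HR@K: 1 if the held-out item appears in the top-K ranked list, else 0
    return 1.0 if true_item in ranklist else 0.0

def get_ndcg(ranklist, true_item):
    # NDCG@K: discounted gain of the held-out item's rank position (0 if absent)
    for i, item in enumerate(ranklist):
        if item == true_item:
            return math.log(2) / math.log(i + 2)
    return 0.0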
コード例 #22
0
model.add(Dense(50, activation = 'elu', name = 'FC2'))
model.add(Dropout(0.2))
model.add(Dense(10, activation = 'elu', name = 'FC3'))
model.add(Dropout(0.2))
model.add(Dense(1, activation = 'elu', name = 'FC4'))
model.summary()

# checkpoints
checkpoint = ModelCheckpoint("./model_nvidia- {epoch:003d}.h5",
                             monitor = 'val_loss',
                             verbose = 1,
                             save_best_only = True,
                             mode = 'auto')

# compile
opt = Adam(lr = 0.001)
model.compile(optimizer = opt, loss = 'mse', metrics = [])

class LifecycleCallback(keras.callbacks.Callback):
    
    def on_epoch_begin(self, epoch, logs = {}):
        pass
    
    def on_epoch_end(self, epoch, logs = {}):
        global threshold
        threshold = 1 / (epoch  + 1)
        
    def on_batch_begin(self, batch, logs = {}):
        pass
    
    def on_batch_end(self, batch, logs = {}):
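
The callback definition is cut off at this point. Its visible effect is to shrink a global threshold as 1/(epoch+1); in behavioural-cloning pipelines of this style such a threshold is typically read by the batch generator to decide whether to keep near-zero-steering samples. The rule and the 0.1 cut-off below are assumptions, not the original code.

import numpy as np

def keep_sample(steering_angle):
    # assumed rule: as the global threshold decays epoch by epoch, progressively more
    # low-steering (straight-road) samples survive into the training batches
    global threshold
    if abs(steering_angle) < 0.1 and np.random.uniform() < threshold:
        return False
    return True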
コード例 #23
0
ファイル: evaluate_dl6.py プロジェクト: enricca/TFM
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Runs a trained in a preprocessed dataset (npz)')
    parser.add_argument('-input_model', help='path of the model')
    parser.add_argument('-input_data', help='path of the input data')
    parser.add_argument('-output_csv', help='path of the output csv')
    parser.add_argument('--roi_statistics_csv', default = '', help=' (OPTIONAL) Annotate statistics')
    parser.add_argument('--threshold', type = float, default = -1, help=' (OPTIONAL) Discard patches with less than that.')
    parser.add_argument('--overwrite',  action='store_true', help=' (OPTIONAL) Overwrite Default none.')
    parser.add_argument('--convertToFloat',  action='store_true', help=' (OPTIONAL) Transform the images to float. Dunno why, but some networks only work with one kind (Mingot ones with float, new ones with int16).')
    args = parser.parse_args()

    #Load the network
    K.set_image_dim_ordering('th')
    model = ResnetBuilder().build_resnet_50((3,40,40),1)
    model.compile(optimizer=Adam(lr=1e-4), loss='mse', metrics=['mse'])
    logging.info('Loading existing model %s...' % args.input_model)
    model.load_weights(args.input_model)
    
    #Create a dataframe for the ROIS
    stats_roi_pd = pd.DataFrame()
    
    #Get the patient files
    if os.path.isdir(args.input_data):
        patientFiles = [os.path.join(args.input_data, s) for s in os.listdir(args.input_data) if s.endswith('.npz')]
    else:
        patientFiles = []
        with open(args.input_data, 'r') as f:
            for line in f:
                patientFiles.append(line.strip())
コード例 #24
0
def train_result(x_train, y_train, x_test, y_test, name, Results):
    inputs = Input((224, 224, 1))

    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

    up6 = concatenate([
        Conv2DTranspose(256,
                        (2, 2), strides=(2, 2), padding='same')(conv5), conv4
    ],
                      axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

    up7 = concatenate([
        Conv2DTranspose(128,
                        (2, 2), strides=(2, 2), padding='same')(conv6), conv3
    ],
                      axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up8 = concatenate([
        Conv2DTranspose(64,
                        (2, 2), strides=(2, 2), padding='same')(conv7), conv2
    ],
                      axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up9 = concatenate([
        Conv2DTranspose(32,
                        (2, 2), strides=(2, 2), padding='same')(conv8), conv1
    ],
                      axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5),
                  loss=jaccard_distance_loss,
                  metrics=[dice_coef])
    history = model.fit(x_train,
                        y_train,
                        batch_size=8,
                        validation_data=(x_test, y_test),
                        shuffle=True,
                        epochs=140)
    History.append(history)
    y_pred = model.predict(x_test)
    print(y_pred.shape)
    score = dice_score(y_test, y_pred)
    print("dice score: ", score)
    Dice_Scores.append((name, score))
    for i in range(y_pred.shape[0]):
        y_pred[i, :, :, :][y_pred[i, :, :, :] > 0.5] = 1
        y_pred[i, :, :, :][y_pred[i, :, :, :] <= 0.5] = 0
    Results = save_result(y_pred, name, Results)
    return Results
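
train_result depends on jaccard_distance_loss, dice_coef and dice_score, none of which appear in the excerpt. The sketch below shows common definitions for these helpers; they are assumed, and the project's own versions may differ slightly.

import numpy as np
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def jaccard_distance_loss(y_true, y_pred, smooth=100.0):
    # smoothed 1 - IoU; the smoothing keeps the gradient finite for empty masks
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    union = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) - intersection
    jac = (intersection + smooth) / (union + smooth)
    return (1.0 - jac) * smooth

def dice_score(y_true, y_pred, threshold=0.5, smooth=1.0):
    # numpy Dice on thresholded predictions, used only for reporting
    y_pred_bin = (y_pred > threshold).astype(np.float32)
    intersection = np.sum(y_true * y_pred_bin)
    return (2.0 * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred_bin) + smooth)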
コード例 #25
0
def create_model(seed, epochs, batch_size):
    train_generator2 = train_datagen2.flow_from_directory(
        'data/train',
        target_size=img_size,
        batch_size=batch_size,
        class_mode='categorical',
        seed=seed)
    validation_generator2 = test_datagen.flow_from_directory(
        'data/validation',
        target_size=img_size,
        batch_size=batch_size,
        class_mode='categorical',
        seed=seed)

    reset_random_seeds(seed)
    model = Sequential([
        Conv2D(baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay),
               input_shape=(32, 32, 3)),
        Activation('relu'),
        BatchNormalization(),
        Conv2D(baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.2),

        Conv2D(2 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        Conv2D(2 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.3),

        Conv2D(4 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        Conv2D(4 * baseMapNum, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)),
        Activation('relu'),
        BatchNormalization(),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.4),
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.4),
        Dense(num_classes, activation='softmax')
    ])

    lrr = ReduceLROnPlateau(
        monitor='val_accuracy',
        factor=.5,
        patience=8,
        min_lr=1e-4,
        verbose=1)
    opt_adam = Adam(learning_rate=0.002, beta_1=0.9, beta_2=0.999)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt_adam,
                  metrics=['accuracy'])
    history = model.fit(train_generator2, epochs=epochs, validation_data=validation_generator2, callbacks=[lrr])
    loss, acc = model.evaluate(validation_generator2)
    return model, history, loss, acc
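
create_model reads several module-level names (baseMapNum, weight_decay, num_classes, img_size, the two data generators and reset_random_seeds) that are defined elsewhere in the script. Illustrative values and an example call, assuming a CIFAR-10-style setup:

# assumed module-level configuration (illustrative values only)
baseMapNum = 32
weight_decay = 1e-4
num_classes = 10
img_size = (32, 32)

# example call; train_datagen2, test_datagen and reset_random_seeds must already exist at module scope
model, history, loss, acc = create_model(seed=42, epochs=100, batch_size=64)
print('validation loss %.4f, accuracy %.4f' % (loss, acc))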
コード例 #26
0
 #model.add(Dropout(0.6))
 #model.add(Conv1D(activation="relu", padding="valid", strides=1, filters=640, kernel_size=3, kernel_initializer='glorot_uniform', kernel_regularizer=l2(0.001)))
 #model.add(MaxPooling1D(pool_size=2))
 #model.add(Dropout(0.5))
 #model.add(Flatten())
 #model.summary()
 model.add(Dense(units=512, input_dim=2080, activation="relu", kernel_initializer='glorot_uniform'))
 model.add(Dropout(0.5))
 #model.add(Dense(units=512, input_dim=512,  activation="relu", kernel_initializer='glorot_uniform',kernel_regularizer=l2(0.001)))
 #model.add(Dropout(0.5))
 model.add(Dense(units=180, activation="relu",kernel_initializer='glorot_uniform'))
 model.add(Dropout(0.5))
 model.add(Dense(units=70, activation="relu",kernel_initializer='glorot_uniform'))
 model.add(Dense(units=1, activation="sigmoid"))
 model.summary()
 adam = Adam(lr=0.0001)
 sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
 model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
 print('running at most 60 epochs')
 checkpointer = ModelCheckpoint(filepath="HistoneMark_H3K27ac.hdf5", verbose=1, save_best_only=True)
 earlystopper = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
 model.fit(X_train_H3K27ac, Y_train_H3K27ac, batch_size=128, epochs=50, shuffle=True, validation_data=( X_val_H3K27ac, Y_val_H3K27ac), callbacks=[checkpointer,earlystopper])
 #model.fit(X_train_s, Y_train_s, batch_size=12, epochs=50, shuffle=True, validation_data=( X_val_s, Y_val_s), callbacks=[checkpointer,earlystopper])
 y_pred = model.predict(X_test_H3K27ac)
 #y_pred = model.predict(X_test_s)
 #tresults = model.evaluate(X_test_s, Y_test_s)
 np.savetxt('H3K27ac_true.csv', Y_test_H3K27ac, delimiter=",")
 np.savetxt('H3K27ac_pred.csv', y_pred, delimiter=",")
 tresults = model.evaluate(X_test_H3K27ac, Y_test_H3K27ac)
 print(tresults)
 model.summary()		
コード例 #27
0
ファイル: cnn_att.py プロジェクト: ybxgood/QA_Model
# read config
sentence_words_num = int(getConfig('cnn', 'WORDS_DIM'))
word_vector_dim = int(getConfig('cnn', 'VEC_DIM'))
# end config

model = Sequential()
model.add(Conv1D(500, 3, activation='relu', input_shape=(300, 900)))
model.add(MaxPooling1D(3))
model.add(Dropout(1.0))  # note: a dropout rate of 1.0 zeroes every activation; a value below 1 (e.g. 0.5) is almost certainly intended

model.add(Flatten())
model.add(Dense(11, activation='softmax'))

# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

# test
early_stopping = EarlyStopping(monitor='acc', patience=3, mode='max')
model.fit(x_train, y_train, batch_size=128, epochs=50, callbacks=[early_stopping])
score_test = model.evaluate(x_test, y_test, batch_size=64)
print('loss=', score_test[0], ' acc=', score_test[1])

# dev
score_dev = model.evaluate(x_dev, y_dev, batch_size=64)
print('dev loss=', score_dev[0], ' acc=', score_dev[1])

now_time = GetNowTime()
# logging
logging.basicConfig(level=logging.DEBUG,
コード例 #28
0
    MaxPooling2D(pool_size=2),
    Flatten(),
    Dense(1024, activation='relu'),
    Dropout(0.2),
    BatchNormalization(),
    Dense(1024, activation='relu'),
    Dropout(0.2),
    BatchNormalization(),
    Dense(256, activation='relu'),
    Dropout(0.2),
    BatchNormalization(),
    Dense(NUM_CLASSES, activation='softmax')
])

cnn_model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])

# summary
cnn_model.summary()

# Fitting the model
cnn_model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCHS,
              verbose=1,
              validation_data=(x_validate, y_validate))

cnn_model.save('alexNet_model.h5')
コード例 #29
0
GAMMA=1  	# GAMMA of our cumulative reward function
STEPS_PER_EPISODE = 30  	# No. of time-steps per episode

# configure and compile our agent by using built-in Keras optimizers and the metrics!

# allocate the memory by specifying the maximum no. of samples to store
memory = SequentialMemory(limit=300000, window_length=1)
# random process for exploration noise
random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=theta, mu=0., dt=0.01, sigma=sigma)
# define the DDPG agent
agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                  memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                  random_process=random_process, gamma=GAMMA, target_model_update=1e-3)
# compile the model as follows
agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mse'])

callbacks = common_func.build_callbacks(ENV_NAME, log_filename_pre, filename_exp)
# ----------------------------------------------------------------------------------------------------------------------------------------
# Training phase
# fitting the agent. After training is done, save the final weights.
# 240000

# agent.fit(env, nb_steps=300000, visualize=False, callbacks=callbacks, verbose=1, nb_max_episode_steps=STEPS_PER_EPISODE, process_noise_std=process_noise_std)
# agent.save_weights(log_filename_pre+filename_exp+'/ddpg_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# common_func.save_process_noise(ENV_NAME, log_filename_pre, filename_exp, process_noise_std, theta)

# -----------------------------------------------------------------------------------------------------------------------------------------
# Testing phase
# std_dev_noise: actuator noise while testing
agent.load_weights(log_filename_pre+filename_exp+'/ddpg_{}_weights.h5f'.format(ENV_NAME))
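
The DDPG snippet assumes actor, critic, action_input, nb_actions and env were built earlier in the script from a gym environment. A minimal sketch of how these are usually constructed for keras-rl's DDPGAgent is shown below; the layer sizes are illustrative assumptions.

from keras.models import Model, Sequential
from keras.layers import Dense, Flatten, Input, Concatenate

# actor: maps an observation window (window_length=1) to a bounded action vector
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
actor.add(Dense(64, activation='relu'))
actor.add(Dense(64, activation='relu'))
actor.add(Dense(nb_actions, activation='tanh'))

# critic: scores a (state, action) pair; its action input is handed to the agent
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
x = Concatenate()([action_input, Flatten()(observation_input)])
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(1, activation='linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)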
コード例 #30
0
ファイル: addition_lstm.py プロジェクト: lxastro/lxnn
    params += _params
    regularizers += _regularizers
    constraints += _consts
    updates += _updates

print("parameters:")
print(params)
print("regularizers:")
print(regularizers)
print("constrains:")
print(constraints)
print("updates:")
print(updates)

"""updates"""
optimizer = Adam()
_updates = optimizer.get_updates(params, constraints, train_loss)
updates += _updates

print("after Adam, updates:")
for update in updates:
    print(update)

train_ins = [X_train, y, weights]
test_ins = [X_test, y, weights]
predict_ins = [X_test]

"""Get functions"""
print("complie: _train")
_train = K.function(train_ins, [train_loss], updates=updates)
print("complie: _train_with_acc")
コード例 #31
0
def run_cifar10(batch_size,
                nb_epoch,
                depth,
                nb_dense_block,
                nb_filter,
                growth_rate,
                dropout_rate,
                learning_rate,
                weight_decay,
                plot_architecture):
    """ Run CIFAR10 experiments

    :param batch_size: int -- batch size
    :param nb_epoch: int -- number of training epochs
    :param depth: int -- network depth
    :param nb_dense_block: int -- number of dense blocks
    :param nb_filter: int -- initial number of conv filter
    :param growth_rate: int -- number of new filters added by conv layers
    :param dropout_rate: float -- dropout rate
    :param learning_rate: float -- learning rate
    :param weight_decay: float -- weight decay
    :param plot_architecture: bool -- whether to plot network architecture

    """

    ###################
    # Data processing #
    ###################

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    nb_classes = len(np.unique(y_train))
    img_dim = X_train.shape[1:]

    if K.image_dim_ordering() == "th":
        n_channels = X_train.shape[1]
    else:
        n_channels = X_train.shape[-1]

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    # Normalisation
    X = np.vstack((X_train, X_test))
    # 2 cases depending on the image ordering
    if K.image_dim_ordering() == "th":
        for i in range(n_channels):
            mean = np.mean(X[:, i, :, :])
            std = np.std(X[:, i, :, :])
            X_train[:, i, :, :] = (X_train[:, i, :, :] - mean) / std
            X_test[:, i, :, :] = (X_test[:, i, :, :] - mean) / std

    elif K.image_dim_ordering() == "tf":
        for i in range(n_channels):
            mean = np.mean(X[:, :, :, i])
            std = np.std(X[:, :, :, i])
            X_train[:, :, :, i] = (X_train[:, :, :, i] - mean) / std
            X_test[:, :, :, i] = (X_test[:, :, :, i] - mean) / std

    ###################
    # Construct model #
    ###################

    model = densenet.DenseNet(nb_classes,
                              img_dim,
                              depth,
                              nb_dense_block,
                              growth_rate,
                              nb_filter,
                              dropout_rate=dropout_rate,
                              weight_decay=weight_decay)
    # Model output
    model.summary()

    # Build optimizer
    opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=["accuracy"])

    if plot_architecture:
        from keras.utils.visualize_util import plot
        plot(model, to_file='./figures/densenet_archi.png', show_shapes=True)

    ####################
    # Network training #
    ####################

    print("Training")

    list_train_loss = []
    list_test_loss = []
    list_learning_rate = []

    for e in range(nb_epoch):

        if e == int(0.5 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 10.))

        if e == int(0.75 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 100.))

        split_size = batch_size
        num_splits = X_train.shape[0] // split_size  # integer division so np.array_split receives an int
        arr_splits = np.array_split(np.arange(X_train.shape[0]), num_splits)

        l_train_loss = []
        start = time.time()

        for batch_idx in arr_splits:

            X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
            train_logloss, train_acc = model.train_on_batch(X_batch, Y_batch)

            l_train_loss.append([train_logloss, train_acc])

        test_logloss, test_acc = model.evaluate(X_test,
                                                Y_test,
                                                verbose=0,
                                                batch_size=64)
        list_train_loss.append(np.mean(np.array(l_train_loss), 0).tolist())
        list_test_loss.append([test_logloss, test_acc])
        list_learning_rate.append(float(K.get_value(model.optimizer.lr)))
        # to convert numpy array to json serializable
        print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))

        d_log = {}
        d_log["batch_size"] = batch_size
        d_log["nb_epoch"] = nb_epoch
        d_log["optimizer"] = opt.get_config()
        d_log["train_loss"] = list_train_loss
        d_log["test_loss"] = list_test_loss
        d_log["learning_rate"] = list_learning_rate

        json_file = os.path.join('./log/experiment_log_cifar10.json')
        with open(json_file, 'w') as fp:
            json.dump(d_log, fp, indent=4, sort_keys=True)
コード例 #32
0
ファイル: wgan_gp_ownloss.py プロジェクト: white1107/GAN
    def __init__(self):
        #self.path = "/volumes/data/dataset/gan/MNIST/wgan-gp/wgan-gp_generated_images/"
        self.path = "images/"
        # input image size for the MNIST data
        self.img_rows = 28 
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        
        # dimensionality of the latent variable
        self.z_dim = 5

        self.n_critic = 5


        # number of rows and columns used when saving image grids
        self.row = 5
        self.col = 5
        self.row2 = 1 # for latent-space interpolation
        self.col2 = 10 # for latent-space interpolation

        
        # fixed latent vectors used for image generation
        self.noise_fix1 = np.random.normal(0, 1, (self.row * self.col, self.z_dim)) 
        # start and end latent vectors used when interpolating continuously
        self.noise_fix2 = np.random.normal(0, 1, (1, self.z_dim))
        self.noise_fix3 = np.random.normal(0, 1, (1, self.z_dim))

        # np.ndarray buffers for plotting metrics against the iteration count
        self.g_loss_array = np.array([])
        self.d_loss_array = np.array([])
        self.d_accuracy_array = np.array([])
        self.d_predict_true_num_array = np.array([])
        self.c_predict_class_list = []

        #discriminator_optimizer = Adam(lr=1e-5, beta_1=0.1)
        combined_optimizer = Adam(lr=1e-4, beta_1=0.5, beta_2=0.9)

        # discriminator model
        self.discriminator = self.build_discriminator()


        # generator model
        self.generator = self.build_generator()

        # freeze the discriminator while training the combined model
        for layer in self.discriminator.layers:
            layer.trainable = False
        self.discriminator.trainable = False

        self.netG_model, self.netG_train = self.build_combined()

        for layer in self.discriminator.layers:
            layer.trainable = True
        for layer in self.generator.layers:
            layer.trainable = False
        self.discriminator.trainable = True
        self.generator.trainable = False

        # classifier model
        self.classifier = self.build_classifier()

        self.netD_train = self.build_discriminator_with_own_loss()
コード例 #33
0
ファイル: keras-LSTM.py プロジェクト: niubifuwei/keras-mnist
print(type(x_train))

# scale pixel values to the range [0, 1]
x_train /= 255
x_test /= 255

#print(x_train[0])

print(y_train[0])
# convert integer labels to one-hot arrays so the categorical loss can be computed
y_train = keras.utils.to_categorical(y_train, n_classes)
y_test = keras.utils.to_categorical(y_test, n_classes)
print(y_train[0])

# build the model
model=Sequential()
model.add(LSTM(n_hidden,batch_input_shape=(None,n_step,n_input),unroll=True))
model.add(Dense(n_classes)) # the unit count must equal the number of output classes
model.add(Activation('softmax'))

adam=Adam(lr=learning_rate)
model.summary()
model.compile(optimizer=adam,
             loss='categorical_crossentropy',
             metrics=['accuracy'])

model.fit(x_train,y_train,batch_size=batch_size,epochs=training_iters,verbose=1,validation_data=(x_test,y_test))

score=model.evaluate(x_test,y_test,verbose=0)
print('LSTM test score:',score[0])
print('LSTM test accuracy:',score[1])
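
The script above depends on hyper-parameters set earlier in the file. For a row-by-row MNIST LSTM the usual choices look like the following; the exact values used by this project are assumptions.

# assumed hyper-parameters: each 28x28 image is fed as 28 time steps of 28 pixels
n_input = 28          # features per step (pixels per image row)
n_step = 28           # time steps (image rows)
n_hidden = 128        # LSTM units
n_classes = 10        # digit classes
learning_rate = 0.001
batch_size = 128
training_iters = 20   # epochs passed to model.fit above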
コード例 #34
0
    def get_mitosegnet(self, wmap, lr):

        inputs = Input(shape=(self.img_rows, self.img_cols, 1))
        print(inputs.get_shape(), type(inputs))

        # core mitosegnet (modified u-net) architecture
        # batchnorm architecture (batchnorm before activation)
        ######################################################################

        conv1 = Conv2D(64, 3, padding='same',
                       kernel_initializer=gauss())(inputs)
        print("conv1 shape:", conv1.shape)
        batch1 = BatchNormalization()(conv1)
        act1 = Activation("relu")(batch1)

        conv1 = Conv2D(64,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 / (9 * 64))))(
                           act1)  # conv1
        print("conv1 shape:", conv1.shape)
        batch1 = BatchNormalization()(conv1)
        act1 = Activation("relu")(batch1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(act1)
        print("pool1 shape:", pool1.shape)
        ########

        ########
        conv2 = Conv2D(128,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 64))))(pool1)
        print("conv2 shape:", conv2.shape)
        batch2 = BatchNormalization()(conv2)
        act2 = Activation("relu")(batch2)

        conv2 = Conv2D(128,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 / (9 * 128))))(
                           act2)  # conv2
        print("conv2 shape:", conv2.shape)
        batch2 = BatchNormalization()(conv2)
        act2 = Activation("relu")(batch2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(act2)
        print("pool2 shape:", pool2.shape)
        ########

        ########
        conv3 = Conv2D(256,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 128))))(pool2)
        print("conv3 shape:", conv3.shape)
        batch3 = BatchNormalization()(conv3)
        act3 = Activation("relu")(batch3)

        conv3 = Conv2D(256,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 / (9 * 256))))(
                           act3)  # conv3
        print("conv3 shape:", conv3.shape)
        batch3 = BatchNormalization()(conv3)
        act3 = Activation("relu")(batch3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(act3)
        print("pool3 shape:", pool3.shape)
        ########

        ########
        conv4 = Conv2D(512,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 256))))(pool3)
        batch4 = BatchNormalization()(conv4)
        act4 = Activation("relu")(batch4)

        conv4 = Conv2D(512,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 / (9 * 512))))(
                           act4)  # conv4
        batch4 = BatchNormalization()(conv4)
        act4 = Activation("relu")(batch4)

        pool4 = MaxPooling2D(pool_size=(2, 2))(act4)
        ########

        ########
        conv5 = Conv2D(1024,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 512))))(pool4)
        batch5 = BatchNormalization()(conv5)
        act5 = Activation("relu")(batch5)

        conv5 = Conv2D(1024,
                       3,
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 / (9 * 1024))))(
                           act5)  # conv5
        batch5 = BatchNormalization()(conv5)
        act5 = Activation("relu")(batch5)

        ########

        up6 = Conv2D(512,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer=gauss(stddev=sqrt(2 / (9 * 1024))))(
                         UpSampling2D(size=(2, 2))(act5))

        merge6 = concatenate([conv4, up6], axis=3)

        conv6 = Conv2D(
            512,
            3,
            activation='relu',
            padding='same',
            kernel_initializer=gauss(stddev=sqrt(2 / (9 * 512))))(merge6)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 512))))(conv6)

        up7 = Conv2D(256,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer=gauss(stddev=sqrt(2 / (9 * 512))))(
                         UpSampling2D(size=(2, 2))(conv6))

        merge7 = concatenate([conv3, up7], axis=3)

        conv7 = Conv2D(
            256,
            3,
            activation='relu',
            padding='same',
            kernel_initializer=gauss(stddev=sqrt(2 / (9 * 256))))(merge7)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 256))))(conv7)

        up8 = Conv2D(128,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer=gauss(stddev=sqrt(2 / (9 * 256))))(
                         UpSampling2D(size=(2, 2))(conv7))

        merge8 = concatenate([conv2, up8], axis=3)

        conv8 = Conv2D(
            128,
            3,
            activation='relu',
            padding='same',
            kernel_initializer=gauss(stddev=sqrt(2 / (9 * 128))))(merge8)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 128))))(conv8)

        up9 = Conv2D(64,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer=gauss(stddev=sqrt(2 / (9 * 128))))(
                         UpSampling2D(size=(2, 2))(conv8))

        merge9 = concatenate([conv1, up9], axis=3)

        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 64))))(merge9)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 64))))(conv9)

        conv9 = Conv2D(2,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer=gauss(stddev=sqrt(2 /
                                                            (9 * 64))))(conv9)

        ######################################################################

        conv10 = Conv2D(1,
                        1,
                        activation='sigmoid',
                        kernel_initializer=gauss(stddev=sqrt(2 /
                                                             (9 * 2))))(conv9)

        if wmap == False:
            input = inputs
            loss = self.pixelwise_crossentropy()
        else:
            weights = Input(shape=(self.img_rows, self.img_cols, 1))
            input = [inputs, weights]

            loss = self.weighted_pixelwise_crossentropy(input[1])

        model = Model(inputs=input, outputs=conv10)

        model.compile(optimizer=Adam(lr=lr),
                      loss=loss,
                      metrics=['accuracy', self.dice_coefficient])

        return model
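
get_mitosegnet calls self.pixelwise_crossentropy, self.weighted_pixelwise_crossentropy and self.dice_coefficient, which are defined elsewhere on the class. A minimal sketch of such helpers follows, written as free functions for brevity (in the original they would be methods taking self); these are assumed definitions, not the project's exact code.

from keras import backend as K

def pixelwise_crossentropy():
    def loss(y_true, y_pred):
        return K.mean(K.binary_crossentropy(y_true, y_pred))
    return loss

def weighted_pixelwise_crossentropy(weight_map):
    def loss(y_true, y_pred):
        # per-pixel binary crossentropy scaled by the precomputed weight map
        return K.mean(K.binary_crossentropy(y_true, y_pred) * weight_map)
    return loss

def dice_coefficient(y_true, y_pred, smooth=1.0):
    intersection = K.sum(y_true * y_pred)
    return (2.0 * intersection + smooth) / (K.sum(y_true) + K.sum(y_pred) + smooth)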
コード例 #35
0
# Using the Glove embedding:
i = 0
for word in vocabulary:
    embedding_vector = embeddings_index.get(word[0])
    
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
    i += 1

# *******************************************************************
# Keras model of the chatbot: 
# *******************************************************************

ad = Adam(lr=0.00005) 

input_context = Input(shape=(maxlen_input,), dtype='int32')#, name='input_context')
input_answer = Input(shape=(maxlen_input,), dtype='int32')#, name='input_answer')
LSTM_encoder = LSTM(sentence_embedding_size, kernel_initializer= 'lecun_uniform')
LSTM_decoder = LSTM(sentence_embedding_size, kernel_initializer= 'lecun_uniform')
if os.path.isfile(weights_file):
    Shared_Embedding = Embedding(output_dim=word_embedding_size, input_dim=dictionary_size, input_length=maxlen_input)
else:
    Shared_Embedding = Embedding(output_dim=word_embedding_size, input_dim=dictionary_size, weights=[embedding_matrix], input_length=maxlen_input)
word_embedding_context = Shared_Embedding(input_context)
context_embedding = LSTM_encoder(word_embedding_context)

# LSTM_encoder_topic = LSTM(topic_embedding_size, kernel_initializer='lecun_uniform')
LSTM_encoder_topic = Dense(topic_embedding_size, activation="relu")
topic_embedding = LSTM_encoder_topic(context_embedding)
コード例 #36
0
        if X_train_seq_res_std[i, j] == 0:
            X_train_seq_res[:,i, j] = 0
        else:
            X_train_seq_res[:, i, j] = (X_train_seq_res[:, i, j] - X_train_seq_res_mean[i, j])/X_train_seq_res_std[i, j]


#########################
#      MODEL SETUP      #
#########################

# Set training hyper-parameters.
epochs = 22
batch_size = 64
learn_rate = 0.001
drop_prob = 0.75
optimiser = Adam(lr=learn_rate)

branch1 = Sequential()

branch1.add(BatchNormalization(input_shape=(num_seq_steps, num_seq_features)))
branch1.add(Conv1D(50, 5, kernel_initializer='he_uniform', activation='relu',
                   input_shape=(num_seq_steps, num_seq_features)))
branch1.add(MaxPooling1D(2))
branch1.add(Dropout(drop_prob))

branch1.add(BatchNormalization())
branch1.add(Conv1D(50, 11, kernel_initializer='he_uniform', activation='relu'))
branch1.add(MaxPooling1D(2))
branch1.add(Dropout(drop_prob))

branch1.add(BatchNormalization())