Example #1
0
def save_activation(activation, L1):
    """Build, train, and activation-log a small dense network on the IB dataset.

    Constructs a tanh MLP with hidden widths cfg['LAYER_DIMS'], trains it
    with Adam on the data returned by utils.get_IB_data, and saves layer
    activations on selected epochs via loggingreporter.LoggingReporter.

    Args:
        activation: unused in the current body — NOTE(review): the
            hard-coded cfg['ACTIVATION'] = 'tanh' probably should use
            this argument; confirm before changing.
        L1: unused in the current body — TODO confirm intent (name
            suggests an L1-regularization coefficient for the removed
            activity_regularizer).

    Returns:
        The Keras History object produced by model.fit.
    """
    # Experiment hyper-parameters, bundled in a dict so the reporter can
    # persist them alongside the activations.
    cfg = {}
    cfg['SGD_BATCHSIZE'] = 256
    cfg['SGD_LEARNINGRATE'] = 0.0004
    cfg['NUM_EPOCHS'] = 5000
    cfg['FULL_MI'] = True

    cfg['ACTIVATION'] = 'tanh'

    # Hidden-layer widths; ARCH_NAME encodes them into the save directory.
    cfg['LAYER_DIMS'] = [10, 7, 5, 4, 3]
    ARCH_NAME = '-'.join(map(str, cfg['LAYER_DIMS']))

    trn, tst = utils.get_IB_data('2017_12_21_16_51_3_275766')

    cfg['SAVE_DIR'] = 'rawdata/' + cfg['ACTIVATION'] + '_' + ARCH_NAME

    input_layer = keras.layers.Input((trn.X.shape[1], ))
    clayer = input_layer
    # Stack the hidden layers; each is initialized from a truncated normal
    # whose stddev scales as 1/sqrt(width).
    for n in cfg['LAYER_DIMS']:
        clayer = keras.layers.Dense(
            n,
            activation=cfg['ACTIVATION'],
            kernel_initializer=keras.initializers.truncated_normal(
                mean=0.0, stddev=1 / np.sqrt(float(n))),
            bias_initializer='zeros',
        )(clayer)

    output_layer = keras.layers.Dense(trn.nb_classes,
                                      activation='softmax')(clayer)

    model = keras.models.Model(inputs=input_layer, outputs=output_layer)
    # Despite the SGD_* key names, training actually uses TF's Adam.
    optimizer = keras.optimizers.TFOptimizer(
        tf.train.AdamOptimizer(learning_rate=cfg['SGD_LEARNINGRATE']))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    def do_report(epoch):
        # Log densely at first, then progressively less often to keep the
        # activation dumps manageable over 5000 epochs.
        if epoch < 20:
            return True
        elif epoch < 100:
            return (epoch % 5 == 0)
        elif epoch < 2000:
            return (epoch % 20 == 0)
        else:
            return (epoch % 100 == 0)

    reporter = loggingreporter.LoggingReporter(cfg=cfg,
                                               trn=trn,
                                               tst=tst,
                                               do_save_func=do_report)

    r = model.fit(x=trn.X,
                  y=trn.Y,
                  verbose=2,
                  batch_size=cfg['SGD_BATCHSIZE'],
                  epochs=cfg['NUM_EPOCHS'],
                  callbacks=[
                      reporter,
                  ])
    # Return the history so callers can inspect loss/accuracy curves
    # (the original silently discarded it).
    return r
# NOTE(review): top-level duplicate of the training setup — relies on
# input_layer, output_layer and cfg existing at module scope, which this
# file does not define at top level. Looks like a notebook export;
# verify before running as a plain script.
model = keras.models.Model(inputs=input_layer, outputs=output_layer)
# Plain SGD here, unlike the Adam-based variant inside save_activation.
optimizer = keras.optimizers.SGD(lr=cfg['SGD_LEARNINGRATE'])

model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# In[ ]:


def do_report(epoch):
    """Return True when activations should be logged for ``epoch``.

    Logging thins out as training progresses: every epoch below 20,
    every 5th below 100, every 10th below 200, then every 100th.
    This keeps the activity logs small so runs stay fast.
    """
    # (upper bound, logging interval) pairs, scanned in order.
    schedule = ((20, 1), (100, 5), (200, 10))
    for bound, interval in schedule:
        if epoch < bound:
            return epoch % interval == 0
    return epoch % 100 == 0

# Callback that dumps activations on the epochs selected by do_report;
# NOTE(review): cfg/trn/tst are not defined at top level in this file —
# presumably a notebook export, confirm before running as a script.
reporter = loggingreporter.LoggingReporter(cfg=cfg,
                                          trn=trn,
                                          tst=tst,
                                          do_save_func=do_report)
r = model.fit(x=trn.X, y=trn.Y,
              verbose    = 2,
              batch_size = cfg['SGD_BATCHSIZE'],
              epochs     = cfg['NUM_EPOCHS'],
              # validation_data=(tst.X, tst.Y),
              callbacks  = [reporter,])
Example #3
0
def do_report(epoch):
    """Return True when activity should be logged for ``epoch``.

    Dense logging early, sparse later: all of the first 20 epochs,
    every 5th up to 100, every 10th up to 200, then only every 1000th.
    Mainly this is to make long runs faster.
    """
    # Find the first bucket whose upper bound exceeds epoch; the paired
    # value is the logging interval for that bucket.
    buckets = ((20, 1), (100, 5), (200, 10), (float('inf'), 1000))
    interval = next(step for bound, step in buckets if epoch < bound)
    return epoch % interval == 0
    
# Ensure the TensorBoard output directory exists. makedirs(exist_ok=True)
# replaces the original isdir/mkdir pair: it avoids the check-then-create
# race and also creates args.save_dir itself if it is missing (mkdir would
# raise FileNotFoundError in that case).
os.makedirs(os.path.join(args.save_dir, 'tensorboard'), exist_ok=True)
    
# Activation-logging callback; note this variant passes args= where the
# earlier examples pass cfg= — presumably a different LoggingReporter
# signature, verify against the loggingreporter module.
reporter = loggingreporter.LoggingReporter(args=args, 
                                          trn=trn, 
                                          tst=tst, 
                                          do_save_func=do_report)
                                          
                               
# Checkpoint the full model (weights + architecture) every 1000 epochs;
# save_best_only=False means every scheduled save overwrites chkpt.h5
# regardless of val_loss. NOTE(review): the `period=` argument was
# deprecated in later Keras releases in favor of `save_freq` — confirm
# against the Keras version in use.
saver = keras.callbacks.ModelCheckpoint(os.path.join(args.save_dir,'chkpt.h5'), 
                                        monitor='val_loss', 
                                        verbose=0, 
                                        save_best_only=False, 
                                        save_weights_only=False, 
                                        mode='auto', 
                                        period=1000)

scheduler = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', 
                                            factor=0.75, 
                                            patience=20, 
                                            verbose=0,