Example 1
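A PyTorch module constructor, apparently from a teacher-student (distillation) setup: it builds an MLP critic mapping the state to a scalar, attaches an Adam optimizer, and freezes the teacher's parameters (NN and T appear to be project aliases for torch.nn and torch).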
 def __init__(self,
              state_size=128,
              correct=1,
              gamma=1,
              ewma=0.7,
              input_size=(3, 70, 70),
              teacher=None):
     NN.Module.__init__(self)  # NN is presumably an alias for torch.nn
     self.correct = correct
     # Critic: an MLP mapping the state representation to a scalar score
     self.critic = cuda(
         build_mlp(input_size=state_size, layer_sizes=[state_size, 1]))
     self.opt = T.optim.Adam(self.critic.parameters())  # T: torch
     self.input_size = input_size
     self.teacher = teacher
     self.gamma = gamma
     # Freeze the teacher so only the critic/student side is trained
     # (assumes teacher is not None despite the default)
     for p in teacher.parameters():
         p.requires_grad = False
Example 2
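Policy and value-function setup for a policy-gradient agent: a diagonal-Gaussian policy is built for Box action spaces and a multinomial policy for Discrete ones, the value function is an MLP over the observation plus one extra input, and observations are passed through a z-score filter clipped to [-5, 5].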
vf_args = (observation_space.shape[0] + 1, vf_hidden_dims, 1)

# Initialize the policy
if type(action_space) is Box:
    policy_args = (observation_space.shape[0], policy_hidden_dims,
                   action_space.shape[0])
    policy = build_diag_gauss_policy(*policy_args)
elif type(action_space) is Discrete:
    policy_args = (observation_space.shape[0], policy_hidden_dims,
                   action_space.n)
    policy = build_multinomial_policy(*policy_args)
else:
    raise NotImplementedError

# Initialize the value function
value_fun = build_mlp(*vf_args)
policy.to(device)
value_fun.to(device)

# Initialize the state transformation
z_filter = ZFilter()
state_bound = Bound(-5, 5)
state_filter = Transform(state_bound, z_filter)

# Initialize the simulator
n_trajectories = config['n_trajectories']
max_timesteps = config['max_timesteps']
# Optional per-environment kwargs; default to an empty dict if absent
env_args = config.get('env_args', {})
Example 3
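A command-line training script in which the --model flag selects a CNN, a plain MLP, or a 'student' MLP trained (apparently via knowledge distillation) with temperature 2.0 and mixing weight 0.5.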
                              width=args.width,
                              height=args.height,
                              max_=args.max,
                              verbose=True)  # TODO: honor args.verbose

    if args.model == 'cnn':
        label = '10cnn_%d_%d_%d' % (32, 128, args.epochs)
        model = build_cnn(training_data,
                          width=args.width,
                          height=args.height,
                          verbose=True)  # TODO: honor args.verbose
        train(model, label, training_data, epochs=args.epochs)
    elif args.model == 'mlp':
        label = '10mlp_%d_%d' % (32, args.epochs)
        model = build_mlp(training_data,
                          width=args.width,
                          height=args.height,
                          verbose=True)  # TODO: honor args.verbose
        train(model, label, training_data, epochs=args.epochs)
    elif args.model == 'student':
        model = build_mlp(training_data,
                          width=args.width,
                          height=args.height,
                          verbose=True)  # TODO: honor args.verbose
        temp = 2.0
        lamb = 0.5
        label = '10student_mlp_%d_%d_lambda%s_temp%s' % (32, args.epochs,
                                                         str(lamb), str(temp))

        train_student(model,
                      label,
                      training_data,
Example 4
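A Keras mixed-input regression model: an MLP over tabular attributes and a CNN over 64x64x3 images are concatenated, passed through two Dense layers, and trained to predict prices (scaled by the training-set maximum) with MAPE loss.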
images = images / 255.0

# Split the data
(trainAttrX, testAttrX, trainImagesX,
 testImagesX) = train_test_split(df, images, test_size=0.25, random_state=42)

# Normalization
max_price = trainAttrX["price"].max()
trainY = trainAttrX["price"] / max_price
testY = testAttrX["price"] / max_price

(trainAttrX, testAttrX) = datasets.process_attributes(df, trainAttrX,
                                                      testAttrX)

# Create network
mlp = models.build_mlp(trainAttrX.shape[1], False)
cnn = models.build_cnn(64, 64, 3, regress=False)

concatenated = concatenate([mlp.output, cnn.output])

x = Dense(4, activation="relu")(concatenated)
x = Dense(1, activation="linear")(x)

model = Model(inputs=[mlp.input, cnn.input], outputs=x)

optimizer = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=optimizer)

model.fit([trainAttrX, trainImagesX],
          trainY,
          validation_data=([testAttrX, testImagesX], testY),
Example 5
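A Keras experiment driver: args.arch picks the architecture (MLP, CNN, ResNet, Siamese, or autoencoder), algorithm names starting with 'n' wrap the optimizer with gradient noise, and each branch compiles with a matching loss and returns training/validation curves.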
def exp( args ):
    '''
    alg     -- elu, qelu, relu, qrelu
    lrate   -- learning rate
    dropout -- whether to use dropout
    batch_size -- number of samples in a mini-batch
    epochs  -- how many epochs to train
    '''

    counter = Counter()
    callbacks = [ counter ]

    if args.arch == 'mlp':
        model = models.build_mlp( args, counter )

    elif args.arch == 'cnn':
        model = models.build_cnn( args, counter )

    elif args.arch == 'resnet':
        model = models.build_resnet( args, counter )

    elif args.arch == 'siamese':
        model = models.build_siamese( args, counter )

    elif args.arch == 'ae':
        model = models.build_ae( args, counter )

    else:
        raise RuntimeError( 'unknown architecture' )

    if args.arch == 'resnet':
        if args.alg[0] == 'n':
            optimizer = add_gradient_noise( keras.optimizers.Adam )( lr=lr_schedule(0), noise_eta=args.init_v, counter=counter )
        else:
            optimizer = keras.optimizers.Adam( lr=lr_schedule(0) )

        lr_scheduler = LearningRateScheduler( lr_schedule )
        lr_reducer = ReduceLROnPlateau( factor=np.sqrt(0.1),
                                         cooldown=0,
                                         patience=5,
                                         min_lr=0.5e-6 )
        callbacks += [ lr_reducer, lr_scheduler ]

    elif args.alg[0] == 'n':
        optimizer = add_gradient_noise( keras.optimizers.SGD )( lr=args.lrate, decay=1e-6, momentum=0.5, nesterov=False, noise_eta=args.init_v, counter=counter )

    else:
        optimizer = keras.optimizers.SGD( lr=args.lrate, decay=1e-6, momentum=0.5, nesterov=False )

    if args.arch == 'siamese':
        model.compile( loss=contrastive_loss, optimizer=optimizer, metrics=[siamese_accuracy] )

        tr_pairs, tr_y = data.train_generator
        te_pairs, te_y = data.test_generator

        history = model.fit( [tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
                             batch_size=args.batch_size,
                             epochs=args.epochs,
                             callbacks=callbacks,
                             validation_data=( [te_pairs[:, 0], te_pairs[:, 1]], te_y),
                             verbose=args.verbose )

        return history.history['loss'], history.history['val_siamese_accuracy']

    elif args.arch == 'ae':
        model.compile( loss=keras.losses.mse,
                       optimizer=optimizer )
        history = model.fit_generator(
                        data.train_generator,
                        steps_per_epoch=(data.x_train.shape[0]//args.batch_size+1),
                        epochs=args.epochs,
                        callbacks=callbacks,
                        validation_data=data.test_generator,
                        validation_steps=data.x_test.shape[0]//args.test_batch_size,
                        verbose=args.verbose )

        return history.history['loss'], history.history['val_loss']

    else:
        model.compile( loss=keras.losses.categorical_crossentropy,
                       optimizer=optimizer,
                       metrics=['accuracy'] )
        history = model.fit_generator(
                        data.train_generator,
                        steps_per_epoch=(data.x_train.shape[0]//args.batch_size+1),
                        epochs=args.epochs,
                        callbacks=callbacks,
                        validation_data=data.test_generator,
                        validation_steps=data.x_test.shape[0]//args.test_batch_size,
                        verbose=args.verbose )

        return history.history['loss'], history.history['val_acc']
Example 6
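Setup for Constrained Policy Optimization (CPO): a diagonal-Gaussian policy plus MLP value and cost functions are built from a config dict and handed to a single-path simulator and the CPO optimizer; the snippet is truncated above, so the state_dim and model_name reads below are reconstructed assumptions.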
state_dim = config['state_dim']  # assumed: snippet is truncated above; read like the other entries
action_dim = config['action_dim']

n_episodes = config['n_episodes']
env_name = config['env_name']
n_trajectories = config['n_trajectories']
trajectory_len = config['max_timesteps']
policy_dims = config['policy_hidden_dims']
vf_dims = config['vf_hidden_dims']
cf_dims = config['cf_hidden_dims']
max_constraint_val = config['max_constraint_val']
bias_red_cost = config['bias_red_cost']
model_name = config['model_name']  # assumed: used below but never defined in the snippet
device = get_device()

policy = build_diag_gauss_policy(state_dim, policy_dims, action_dim)
value_fun = build_mlp(state_dim + 1, vf_dims, 1)
cost_fun = build_mlp(state_dim + 1, cf_dims, 1)

policy.to(device)
value_fun.to(device)
cost_fun.to(device)

simulator = SinglePathSimulator(env_name, policy, n_trajectories,
                                trajectory_len)
cpo = CPO(policy,
          value_fun,
          cost_fun,
          simulator,
          model_name=model_name,
          bias_red_cost=bias_red_cost,
          max_constraint_val=max_constraint_val)
Example 7
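A hybrid modeling pipeline: a pretrained MLP is restored behind an input-scaling layer, a discrete ordinal classifier layer is built and set trainable, and both are stacked with an inspection array into an RNN model.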
    # Load the initialized multi-layer perceptron
    mlp_weight_path = "./trained_models/approximate_plane_case10/cp.ckpt"

    trainingSet_delgrs = pd.read_csv('./data/approximate_plane_set.csv')
    inputsMLPTrain = trainingSet_delgrs[[
        'Dkappa', 'dynamicLoads', 'bearingTemp'
    ]]
    inputsMLPTrain_min = inputsMLPTrain.min(axis=0)
    inputsMLPTrain_range = inputsMLPTrain.max(axis=0) - inputsMLPTrain_min
    dLInputScaling = getScalingDenseLayer(inputsMLPTrain_min,
                                          inputsMLPTrain_range)

    lowBounds_delgrs = np.asarray([0.05 / 25920])
    upBounds_delgrs = np.asarray([np.max(trainingSet_delgrs['delDkappa'])])

    mlp_model = build_mlp(dLInputScaling)
    mlp_model.load_weights(mlp_weight_path)
    mlp_model.trainable = True

    # Get the discrete ordinal classifier
    cl_model = discrete_ordinal_classifier(
        np.expand_dims(np.transpose(multipleInspections), -1).shape, myDtype)
    cl_model.build(np.expand_dims(np.transpose(multipleInspections), -1).shape)
    cl_layer = cl_model.layers[0]
    cl_layer.trainable = True

    # Build stacked RNN model
    RNNmodel = create_stacked_rnn(cl_layer,
                                  inspectionArray,
                                  mlp_model,
                                  d0RNN,