Example 1
def run(path, train, val, parameters, train_out=None, val_out=None):
    if 'learn' in mode:
        if train_out is None:
            train_out = train
        if val_out is None:
            val_out = val
        ae, _, _ = simple_genetic_search(
            curry(nn_task, latplan.model.get(default_parameters["aeclass"]),
                  path,
                  train, train_out, val, val_out), # noise data is used for tuning metric
            default_parameters,
            parameters,
            path,
            limit=300,
            report_best= lambda net: net.save(),
        )
    elif 'reproduce' in mode:   # reproduce the best result from the grid search log
        if train_out is None:
            train_out = train
        if val_out is None:
            val_out = val
        ae, _, _ = reproduce(
            curry(nn_task, latplan.model.get(default_parameters["aeclass"]),
                  path,
                  train, train_out, val, val_out), # noise data is used for tuning metric
            default_parameters,
            parameters,
            path,
        )
        ae.save()
    else:
        ae = latplan.model.load(path)
    return ae
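
Every example on this page passes curry(nn_task, <network class>, <log path>, train_in, train_out, val_in, val_out) to a search routine (grid_search, simple_genetic_search, or reproduce), which then only has to supply a hyperparameter dictionary per candidate. Below is a minimal, self-contained sketch of that calling convention, assuming curry behaves like functools.partial and using a hypothetical train_stub/ToyNet in place of latplan's nn_task and network classes:

from functools import partial
import numpy as np

class ToyNet:
    # Hypothetical stand-in for a latplan network class; it only records a loss.
    def __init__(self, path):
        self.path = path
    def fit(self, x, y, lr, batch_size):
        self.loss = float(np.mean((x - y) ** 2))  # pretend training step
        return self

def train_stub(network_class, path, train_in, train_out, val_in, val_out, parameters):
    # Hypothetical stand-in for latplan's nn_task: build one network for one
    # hyperparameter set, train it, and hand it back to the search routine.
    net = network_class(path)
    net.fit(train_in, train_out, **parameters)
    return net

x = np.random.rand(100, 8)
# curry(...) above is assumed to behave like functools.partial: everything
# except the hyperparameter dict is frozen up front, so the search routine
# only calls task(parameters) once per candidate configuration.
task = partial(train_stub, ToyNet, "logs/run1/", x, x, x, x)
net = task({'lr': 0.001, 'batch_size': 1000})
print(net.loss)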
Example 2
def main(directory, mode, input_type=prepare_oae_per_action_PU3):
    directory_ad = "{}/_ads/".format(directory)
    print(directory, mode, input_type)

    try:
        if 'learn' in mode:
            raise Exception('learn') # force a retrain: skip loading and fall into the except branch
        if input_type is prepare_oae_validated:
            discriminator = default_networks['Discriminator'](
                directory_ad).load()
        else:
            discriminator = default_networks['PUDiscriminator'](
                directory_ad).load()
    except Exception: # no saved model, or a retrain was requested
        data = np.loadtxt("{}/actions.csv".format(directory), dtype=np.int8)
        load_ae(directory)
        discriminators = []
        evaluations = []
        for i, train_test_data in enumerate(input_type(data)):
            print("label", i)
            try:
                d, _, e = grid_search(
                    curry(nn_task, default_networks['PUDiscriminator'],
                          directory_ad + str(i) + "/", *train_test_data),
                    default_parameters, parameters)
                discriminators.append(d)
                evaluations.append(e)
            except Exception as e:
                print(e)
                discriminators.append(None)
                evaluations.append(None)
            print(evaluations)
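
Example 2 (and Example 6 below) wraps the load call in a try block and raises immediately when 'learn' appears in mode, so a requested retrain and a missing saved model both fall through to the same training branch. Here is a small self-contained sketch of that idiom, with a hypothetical JSON "model" standing in for the real networks:

import json, os

def load_or_train(path, mode, train_fn):
    try:
        if 'learn' in mode:
            # Raising here skips the load below, so retraining and a missing
            # model file are handled by the same except branch.
            raise RuntimeError('learn requested')
        with open(os.path.join(path, "model.json")) as f:
            return json.load(f)
    except (RuntimeError, FileNotFoundError):
        model = train_fn()
        os.makedirs(path, exist_ok=True)
        with open(os.path.join(path, "model.json"), "w") as f:
            json.dump(model, f)
        return model

model = load_or_train("/tmp/demo_model", "learn", lambda: {"weights": [0.1, 0.2]})
print(model)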
Example 3
def run(path, train, test, parameters):
    if 'learn' in mode:
        from latplan.util import curry
        ae, _, _ = grid_search(
            curry(nn_task, default_networks[encoder], path, train, train,
                  gaussian(test),
                  test),  # noise data is used for tuning metric
            default_parameters,
            parameters,
            report=lambda ae: ae.report(train, train, test, test),
            report_best=lambda ae: dump_autoencoding_image(ae, test, train))
        ae.save()
    else:
        ae = default_networks[encoder](path).load()
    return ae
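
In Example 3 the validation inputs are passed through gaussian(test) while the targets stay clean, so the score used for hyperparameter tuning measures denoising rather than plain reconstruction. A minimal sketch of such a noise helper follows; the 0.1 scale is an illustrative assumption, not latplan's value:

import numpy as np

def gaussian_noise(x, scale=0.1):
    # Corrupt the inputs with zero-mean Gaussian noise; the clean array stays
    # the target, so validation rewards denoising ability.
    return x + np.random.normal(0.0, scale, size=x.shape)

test = np.random.rand(16, 48, 48)
noisy_test = gaussian_noise(test)  # plays the role of gaussian(test) above
print(noisy_test.shape)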
Example 4
def learn(input_type):
    global discriminator
    if "hanoi" in sae.path:
        data = np.loadtxt(sae.local("all_actions.csv"), dtype=np.int8)
    else:
        data = np.loadtxt(sae.local("actions.csv"), dtype=np.int8)
    network, train_in, train_out, test_in, test_out = input_type(data)
    discriminator, _, _ = grid_search(
        curry(
            nn_task,
            network,
            sae.local("_ad/"),
            train_in,
            train_out,
            test_in,
            test_out,
        ), default_parameters, parameters)
    discriminator.save()
Example 5
def learn(path):
    network = latplan.model.get('PUDiscriminator')
    true_actions = np.loadtxt(sae.local("actions.csv"), dtype=np.int8)
    fake_actions = np.loadtxt(aae.local("fake_actions.csv"), dtype=np.int8)
    train_in, train_out, val_in, val_out = prepare_binary_classification_data(
        true_actions, fake_actions)
    discriminator, _, _ = grid_search(
        curry(
            nn_task,
            network,
            path,
            train_in,
            train_out,
            val_in,
            val_out,
        ),
        default_parameters,
        parameters,
        path,
    )
    discriminator.save()
    return discriminator
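
Example 5 trains a PU discriminator on true transitions (positives) against AAE-generated fake transitions (negatives). The sketch below shows one plausible way such a binary dataset could be assembled and split; it is only an illustration of the idea, not latplan's actual prepare_binary_classification_data:

import numpy as np

def make_binary_dataset(positives, negatives, val_ratio=0.1, seed=0):
    # Stack positives (label 1) and negatives (label 0), shuffle them together,
    # and hold out a validation slice. Purely illustrative helper.
    x = np.concatenate([positives, negatives], axis=0)
    y = np.concatenate([np.ones(len(positives)), np.zeros(len(negatives))])
    order = np.random.default_rng(seed).permutation(len(x))
    x, y = x[order], y[order]
    n_val = int(len(x) * val_ratio)
    return x[n_val:], y[n_val:], x[:n_val], y[:n_val]

true_actions = np.random.randint(0, 2, (900, 100)).astype(np.int8)
fake_actions = np.random.randint(0, 2, (900, 100)).astype(np.int8)
train_in, train_out, val_in, val_out = make_binary_dataset(true_actions, fake_actions)
print(train_in.shape, val_in.shape)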
Example 6
        'batch_size' :[2000],
        'full_epoch' :[1000],
        'epoch'      :[1000],
        'encoder_activation' :['relu'], # 'tanh'
        'decoder_activation' :['relu'], # 'tanh',
        # quick eval
        'lr'         :[0.001],
    }
    print(data.shape)
    try:
        if 'learn' in mode:
            raise Exception('learn') # force a retrain: skip loading and fall into the except branch
        aae = ActionAE(directory_aae).load()
    except Exception: # no saved model, or a retrain was requested
        aae,_,_ = grid_search(curry(nn_task, ActionAE, directory_aae,
                                    data[:int(len(data)*0.9)], data[:int(len(data)*0.9)],
                                    data[int(len(data)*0.9):], data[int(len(data)*0.9):],),
                              default_parameters,
                              parameters)
        aae.save()

    N = data.shape[1]//2
    
    actions = aae.encode_action(data, batch_size=1000).round()
    histogram = np.squeeze(actions.sum(axis=0,dtype=int))
    all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1], actions.shape[2]), dtype=int)
    for i, pos in enumerate(np.where(histogram > 0)[0]):
        all_labels[i][0][pos] = 1
    
    if 'plot' in mode:
        aae.plot(data[:8], "aae_train.png")
Example 7
def learn(method):
    global cae, discriminator
    default_parameters = {
        'lr'              : 0.001,
        'batch_size'      : 1000,
        'epoch'           : 1000,
        'max_temperature' : 2.0,
        'min_temperature' : 0.1,
        'M'               : 2,
        'min_grad'        : 0.0,
        'optimizer'       : 'radam',
        'dropout'         : 0.4,
    }
    data_valid = np.loadtxt(sae.local("states.csv"),dtype=np.int8)
    train_in, train_out, test_in, test_out, data_valid, data_mixed = prepare(data_valid,sae)
    sae.plot_autodecode(data_mixed[:8], sae.local("_sd3/fake_samples.png"))

    def save(net):
        net.parameters["method"] = method
        net.save()

    if method == "feature":
        # decode into image, extract features and learn from it
        train_image, test_image = sae.decode(train_in), sae.decode(test_in)
        train_in2, test_in2 = sae.get_features(train_image), sae.get_features(test_image)
        discriminator,_,_ = grid_search(curry(nn_task, latplan.model.get('PUDiscriminator'), sae.local("_sd3/"),
                                              train_in2, train_out, test_in2, test_out,),
                                        default_parameters,
                                        {
                                            'num_layers' :[1,2],
                                            'layer'      :[300,1000],
                                            'clayer'     :[16],
                                            'activation' :['relu','tanh'],
                                        },
                                        sae.local("_sd3/"),
                                        report_best= save, shuffle=False,
        )
    if method == "cae":
        # decode into image, learn a separate cae and learn from it
        train_image, test_image = sae.decode(train_in), sae.decode(test_in)
        cae,_,_ = grid_search(curry(nn_task, latplan.model.get('SimpleCAE'),
                                    sae.local("_cae"),
                                    train_image, train_image, test_image, test_image),
                              default_parameters,
                              {
                                  'num_layers' :[1,2],
                                  'layer'      :[300,1000],
                                  'clayer'     :[16],
                                  'activation' :['relu','tanh'],
                              },
                              sae.local("_cae/"),
                              report_best= save, shuffle=False,
        )
        cae.save()
        train_in2, test_in2 = cae.encode(train_image), cae.encode(test_image)
        discriminator,_,_ = grid_search(curry(nn_task, latplan.model.get('PUDiscriminator'), sae.local("_sd3/"),
                                              train_in2, train_out, test_in2, test_out,),
                                        default_parameters,
                                        {
                                            'num_layers' :[1,2],
                                            'layer'      :[300,1000],
                                            'clayer'     :[16],
                                            'activation' :['relu','tanh'],
                                        },
                                        sae.local("_sd3/"),
                                        report_best= save, shuffle=False,
        )
    if method == "direct":
        # learn directly from the latent encoding
        discriminator,_,_ = grid_search(curry(nn_task, latplan.model.get('PUDiscriminator'), sae.local("_sd3/"),
                                              train_in, train_out, test_in, test_out,),
                                        default_parameters,
                                        {
                                            'num_layers' :[1,2],
                                            'layer'      :[300,1000],# [400,4000],
                                            'activation' :['relu','tanh'],
                                        },
                                        sae.local("_sd3/"),
                                        report_best= save, shuffle=False,
        )
    if method == "image":
        # learn directly from the image
        train_image, test_image = sae.decode(train_in), sae.decode(test_in)
        discriminator,_,_ = grid_search(curry(nn_task, latplan.model.get('PUDiscriminator'), sae.local("_sd3/"),
                                              train_image, train_out, test_image, test_out,),
                                        default_parameters,
                                        {
                                            'num_layers' :[1,2],
                                            'layer'      :[300,1000],# [400,4000],
                                            'activation' :['relu','tanh'],
                                        },
                                        sae.local("_sd3/"),
                                        report_best= save, shuffle=False,
        )
Example 8
    data = np.loadtxt("{}/actions.csv".format(directory),dtype=np.int8)
    train_in, train_out, test_in, test_out = prepare(data)

    oae = ActionAE(directory_oae).load()

    train_pre, train_action = oae.encode(train_in)
    test_pre, test_action = oae.encode(test_in)

    print(train_pre.shape,train_action.shape)
    train_in2 = np.concatenate([train_pre,np.squeeze(train_action)],axis=1)
    test_in2 = np.concatenate([test_pre,np.squeeze(test_action)],axis=1)
    
    try:
        discriminator = Discriminator(directory_ad).load()
    except (FileNotFoundError, ValueError):
        discriminator,_,_ = grid_search(curry(nn_task, Discriminator, directory_ad,
                                              train_in2, train_out, test_in2, test_out,),
                                        default_parameters,
                                        parameters)
    show_n = 30
    
    for y,_y in zip(discriminator.discriminate(test_in2)[:show_n], # the discriminator was trained on the *_in2 encoding
                    test_out[:show_n]):
        print(y,_y)

    # test if the learned action is correct

    actions_valid = np.loadtxt("{}/all_actions.csv".format(directory),dtype=int)
    
    from latplan.util import get_ae_type
    ae = default_networks[get_ae_type(directory)](directory).load()
    N = ae.parameters["N"]
Example 9
print(data.shape)
N = data.shape[1] // 2
train = data[:int(len(data) * 0.9)]
val = data[int(len(data) * 0.9):int(len(data) * 0.95)]
test = data[int(len(data) * 0.95):]

if 'learn' in mode:
    print("start training")
    if num_actions is not None:
        parameters['M'] = [num_actions]
    aae, _, _ = simple_genetic_search(
        curry(
            nn_task,
            eval(aeclass),
            sae.local("_{}_{}/".format(aeclass, num_actions)),
            train,
            train,
            val,
            val,
        ),
        default_parameters,
        parameters,
        sae.local("_{}_{}/".format(aeclass, num_actions)),
        limit=100,
        report_best=lambda net: net.save(),
    )
elif 'reproduce' in mode:
    aae, _, _ = reproduce(
        curry(
            nn_task,
            eval(aeclass),
Example 10
def learn(method):
    global cae, discriminator
    default_parameters = {
        'lr': 0.0001,
        'batch_size': 2000,
        'full_epoch': 1000,
        'epoch': 1000,
        'max_temperature': 2.0,
        'min_temperature': 0.1,
        'M': 2,
        'min_grad': 0.0,
    }
    data_valid = np.loadtxt(sae.local("states.csv"), dtype=np.int8)
    train_in, train_out, test_in, test_out, data_valid, data_mixed = prepare(
        data_valid, sae)
    sae.plot_autodecode(data_mixed[:8], "_sd3/fake_samples.png")

    if method == "feature":
        # decode into image, extract features and learn from it
        train_image, test_image = sae.decode_binary(
            train_in), sae.decode_binary(test_in)
        train_in2, test_in2 = sae.get_features(train_image), sae.get_features(
            test_image)
        discriminator, _, _ = grid_search(
            curry(
                nn_task,
                default_networks['PUDiscriminator'],
                sae.local("_sd3/"),
                train_in2,
                train_out,
                test_in2,
                test_out,
            ), default_parameters, {
                'num_layers': [1],
                'layer': [50],
                'clayer': [16],
                'dropout': [0.8],
                'batch_size': [1000],
                'full_epoch': [1000],
                'activation': ['relu'],
                'epoch': [3000],
                'lr': [0.0001],
            })
    if method == "cae":
        # decode into image, learn a separate cae and learn from it
        train_image, test_image = sae.decode_binary(
            train_in), sae.decode_binary(test_in)
        cae, _, _ = grid_search(
            curry(nn_task, default_networks['SimpleCAE'], sae.local("_cae"),
                  train_image, train_image, test_image, test_image),
            default_parameters, {
                'num_layers': [2],
                'layer': [500],
                'clayer': [16],
                'dropout': [0.4],
                'batch_size': [4000],
                'full_epoch': [1000],
                'activation': ['relu'],
                'epoch': [30],
                'lr': [0.001],
            })
        cae.save()
        train_in2, test_in2 = cae.encode(train_image), cae.encode(test_image)
        discriminator, _, _ = grid_search(
            curry(
                nn_task,
                default_networks['PUDiscriminator'],
                sae.local("_sd3/"),
                train_in2,
                train_out,
                test_in2,
                test_out,
            ), default_parameters, {
                'num_layers': [1],
                'layer': [50],
                'clayer': [16],
                'dropout': [0.8],
                'batch_size': [1000],
                'full_epoch': [1000],
                'activation': ['relu'],
                'epoch': [3000],
                'lr': [0.0001],
            })
    if method == "direct":
        # learn directly from the latent encoding
        discriminator, _, _ = grid_search(
            curry(
                nn_task,
                default_networks['PUDiscriminator'],
                sae.local("_sd3/"),
                train_in,
                train_out,
                test_in,
                test_out,
            ),
            default_parameters,
            {
                'layer': [300],  # [400,4000],
                'dropout': [0.1],  #[0.1,0.4],
                'num_layers': [2],
                'batch_size': [1000],
                'full_epoch': [1000],
                'activation': ['tanh'],
                # quick eval
                'epoch': [200],
                'lr': [0.0001],
            })
    if method == "image":
        # learn directly from the image
        train_image, test_image = sae.decode_binary(
            train_in), sae.decode_binary(test_in)
        discriminator, _, _ = grid_search(
            curry(
                nn_task,
                default_networks['PUDiscriminator'],
                sae.local("_sd3/"),
                train_image,
                train_out,
                test_image,
                test_out,
            ),
            default_parameters,
            {
                'layer': [300],  # [400,4000],
                'dropout': [0.1],  #[0.1,0.4],
                'num_layers': [2],
                'batch_size': [1000],
                'full_epoch': [1000],
                'activation': ['tanh'],
                # quick eval
                'epoch': [200],
                'lr': [0.0001],
            })
    discriminator.parameters["method"] = method
    discriminator.save()