Example #1
import sys

import tensorflow as tf  # TF 1.x API (tf.reset_default_graph / tf.Session)

# `inst` (experiment generation) and `model` (Autoencoder) are project-local modules.


def main():

    # Load experiment specification from the command line:
    # argv[1] = precursor, argv[2] = experiment ID, argv[3] = ID offset factor
    precursor = sys.argv[1]
    ID = int(sys.argv[2])
    factor = int(sys.argv[3])

    ID += factor * 1000

    opt = inst.gen_tune_exp(precursor)[ID]
    opt.reinitialize_paths()

    print(opt)

    # Train
    tf.reset_default_graph()
    if opt.mode == "train" or opt.mode == "both":
        train_model = model.Autoencoder(opt)
        with tf.Session() as sess:
            train_model.train(sess)

    # Test
    tf.reset_default_graph()
    if opt.mode == "test" or opt.mode == "both":
        test_model = model.Autoencoder(opt)
        with tf.Session() as sess:
            test_model.tester(sess)
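Note that `tf.reset_default_graph()` and `tf.Session()` exist only in the TensorFlow 1.x API. Under TensorFlow 2 the same graph-mode pattern is reachable through the compatibility module; this is a minimal sketch of the equivalent calls, not part of the original example:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()   # required for graph/placeholder-style code
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
    pass  # build and run the graph-mode model here, e.g. model.Autoencoder(opt)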
Example #2
    def __init__(self, model_name="model.pt", data=None):
        # Either build a fresh Autoencoder or load a serialized one from disk;
        # `dir_path` is defined elsewhere in the module.
        if model_name is None:
            import model
            self.enc = model.Autoencoder()
        else:
            self.enc = torch.load(os.path.join(dir_path, model_name),
                                  map_location='cpu')
        self.enc.eval()
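The snippet references `dir_path` without showing its definition. A common pattern, assumed here rather than taken from the example, is to resolve it from the module's own location so the checkpoint loads regardless of the working directory:

import os

# Assumed definition of dir_path; the original example does not show it.
dir_path = os.path.dirname(os.path.abspath(__file__))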
Example #3
def main():
    m = model.Autoencoder()
    m.load_state_dict(torch.load("autoencoder_save.pth"))

    # Decode random latent vectors (uniform in [-1, 1)) into CIFAR-10-sized images.
    decoder = m.decoder.eval()
    for i in range(100):  # 100 random samples
        rnd_latent_vector = torch.rand(1, 64, 1, 1) * 2 - 1
        output = decoder(rnd_latent_vector)
        image = ld.resize_from_3x32x32_to_32x32x3(
            np.squeeze(output.data.numpy(), 0))
        image = (image * 255).astype(np.uint8)
        ld.save_img(image, "generated data/cifar10_generated_image" + str(i))
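`ld` is a project-local helper module; a minimal stand-in using NumPy and Pillow (an assumption, not the example's actual helpers) could look like this:

import numpy as np
from PIL import Image

def save_decoded_image(output, path):
    """Convert a (1, 3, 32, 32) decoder output in [0, 1] to a PNG file."""
    img = np.squeeze(output.detach().numpy(), 0)   # (3, 32, 32)
    img = np.transpose(img, (1, 2, 0))             # (32, 32, 3)
    img = np.clip(img * 255, 0, 255).astype(np.uint8)
    Image.fromarray(img).save(path + ".png")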
Example #4
def train_and_evaluate(args):
    Xgbmodel = models.Xgb(args)
    AEmodel = models.Autoencoder(args)
    bst, train_seen_bidids = train(args, Xgbmodel, AEmodel)
    auc, valid_seen_bidids = evaluate(args, Xgbmodel, AEmodel, bst,
                                      train_seen_bidids)
    print('Validation AUC: {:.5f}'.format(auc))
    with open(args.filepath, 'a') as f:
        f.write('Validation AUC: %.5f\n' % auc)
    if not args.cv:
        auc, _ = evaluate(args, Xgbmodel, AEmodel, bst, train_seen_bidids,
                          valid_seen_bidids, False)
        print('Test AUC: {:.5f}'.format(auc))
        with open(args.filepath, 'a') as f:
            f.write('Test AUC: %.5f\n' % auc)
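The example assumes an `args` namespace exposing at least `filepath` and `cv`; the models and the train/evaluate helpers will need further fields. A hypothetical argparse setup, with flag names inferred only from the attributes used above:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--filepath', default='results.txt',
                    help='file to append AUC results to')
parser.add_argument('--cv', action='store_true',
                    help='cross-validation mode: skip the final test evaluation')
args = parser.parse_args()

train_and_evaluate(args)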
Example #5
the games of the masters from the website "https://www.pgnmentor.com/players/"
"""


def get_move(line):
    line = str(line)
    game = []  # list of moves in pgn notation
    for move in re.findall(r"[^{\[.}\]]+ ",
                           line.replace("?", "").replace(
                               "!", "")):  # Extract all moves without comments
        game.extend(move.strip().split(
            " "))  # Sometimes one string contains two moves separated by a space
    return game


coder = model.Autoencoder(settings.BOARD_SHAPE,
                          settings.LATENT_SIZE).to(settings.DEVICE)
coder.load_state_dict(
    torch.load(settings.CODER_PATH, map_location=settings.DEVICE))
coder = coder.coder
coder.eval()
inf = Inference(settings.DEVICE, coder)
csv_name = "positions_lite.csv"
ID = 0

with open(csv_name, "w", newline="") as file:
    writer = csv.writer(file, delimiter=";")
    writer.writerow(["ID", "Author", "Number", "Move", "Embedding"])
    games_csv = open(os.path.join(os.getcwd(), 'games_lite.csv'))
    for row in games_csv:
        try:
            data = row[:-2].split(";")
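The snippet is cut off inside the loop, but the output format is already fixed by the header row above. A minimal sketch for reading `positions_lite.csv` back, assuming one row per position in that column order:

import csv

with open("positions_lite.csv", newline="") as f:
    reader = csv.reader(f, delimiter=";")
    header = next(reader)   # ["ID", "Author", "Number", "Move", "Embedding"]
    for row in reader:
        position_id, author, number, move, embedding = row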
Example #6
from sklearn import preprocessing

import model  # project-local module that defines the Autoencoder

# df: ratings DataFrame with 'user_id', 'item_id' and 'rating' columns, loaded earlier.

# Normalize ratings into [0, 1]

r = df['rating'].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(r.reshape(-1, 1))
df['rating'] = x_scaled.ravel()  # assign back as a flat array to avoid index misalignment

# Convert DataFrame in user-item matrix

matrix = df.pivot(index='user_id', columns='item_id', values='rating')
matrix.fillna(0, inplace=True)

# Users and items ordered as they are in matrix

users = matrix.index.tolist()
items = matrix.columns.tolist()

matrix = matrix.values

print("Matrix shape: {}".format(matrix.shape))

# num_users = matrix.shape[0]
# num_items = matrix.shape[1]
# print("USERS: {} ITEMS: {}".format(num_users, num_items))

#%% Define and train model
mymodel = model.Autoencoder(input_size=len(items), hidden_layer_size=100)
mymodel.fit(X=matrix, epochs=200)
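The `users` and `items` lists keep the row and column order of the pivoted matrix, so indexes map straight back to IDs. A small illustration of that mapping (the fitted model's prediction API is not shown above, so only the indexing is sketched):

# Rows of `matrix` follow `users`, columns follow `items`.
user_index = 0
user_id = users[user_index]
user_vector = matrix[user_index]   # normalized ratings of that user over all items
print(user_id, user_vector.shape)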
Example #7
#!/usr/bin/env python3
import numpy as np
import sys, os
import pickle, gzip
import pandas as pd
import gc
import model
import torch

enc = model.Autoencoder()
enc.eval()


def encode(x):
    x = torch.from_numpy(x)
    emb = enc.autoencode_1.encode(x[None, None, :])[0, :, 0].detach().numpy()
    return emb


test_labels = pd.read_csv("test_labels.csv.zip")
test_labels.set_index("id", inplace=True)

frame_length = 2**11 + 1
filename = ""
labels = None
results = []

#f= open("test_emb.csv","w+")
f = gzip.open('test_emb_enc.csv.gz', 'wt')
f.write("sample, segment, frame," + ",".join(map(str, range(frame_length))) +
        " \n")
Example #8
import model
import mxnet as mx
# implementation

# dataset: "MNIST" or "FashionMNIST"
result = model.Autoencoder(epoch=1,
                           batch_size=128,
                           save_period=100,
                           load_period=100,
                           weight_decay=0.0001,
                           learning_rate=0.001,
                           dataset="MNIST",
                           ctx=mx.gpu(0))
print("///" + result + "///")
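The `ctx=mx.gpu(0)` argument assumes a CUDA-enabled MXNet build. On a CPU-only machine the same call can be pointed at the CPU context; this is a sketch of that variant, not part of the original example:

result = model.Autoencoder(epoch=1,
                           batch_size=128,
                           save_period=100,
                           load_period=100,
                           weight_decay=0.0001,
                           learning_rate=0.001,
                           dataset="MNIST",
                           ctx=mx.cpu(0))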
Example #9
import model
import mxnet as mx
# implementation

# dataset: "MNIST" or "FashionMNIST"
result = model.Autoencoder(epoch=0,
                           batch_size=128,
                           save_period=100,
                           load_period=100,
                           optimizer="adam",
                           learning_rate=0.001,
                           dataset="FashionMNIST",
                           ctx=mx.gpu(0))
print("///" + result + "///")
Example #10
'''
Import
'''

import idx2numpy as idx
import numpy as np

import scipy.misc
import img_dataset as ig
import tensorflow as tf
import model as md

print('beginning training')
mnist = ig.ImgDataset("train-images.idx3-ubyte")

ds = md.Autoencoder()

ds.train()

print('training complete')
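`img_dataset` is a project-local wrapper. For comparison, the same IDX file can be read directly with the already-imported idx2numpy; a brief sketch, where the shape comment assumes the standard MNIST training file:

images = idx.convert_from_file("train-images.idx3-ubyte")  # shape (60000, 28, 28), uint8
print(images.shape, images.dtype)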