import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D

from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression

def warn(*_args, **_kwargs):
    """No-op stand-in for ``warnings.warn`` — accepts any call signature."""
    return None
import warnings
# Monkey-patch the warnings module so noisy library warnings (e.g. from
# sklearn) are silenced for the whole run.
warnings.warn = warn


# Paths to the pre-generated arithmetic dataset splits (1e2 = 10^2 examples).
dataset_training = 'datasets/arithmetic/1e2/training.txt'
dataset_testing = 'datasets/arithmetic/1e2/testing.txt'

# Dataset is a project-local loader (defined outside this chunk) that
# vectorises the text files; it exposes `vector_size` below.
dataset = Dataset(dataset_training,dataset_testing)

# Architecture hyper-parameters — must match the checkpoint loaded below.
num_layers = 1
hidden_size = 100
num_epochs = 5
input_size = dataset.vector_size
PATH = 'models/arithmetic_l_'+str(num_layers)+'_h_'+str(hidden_size)+'_ep_'+str(num_epochs)

# Restore the trained GatedGRU (project-local class) and freeze it for analysis.
model = GatedGRU(dataset.vector_size,hidden_size,output_size=1)
model.load_state_dict(torch.load(PATH))
model.eval()
#0 - sign, 1 - sign/hundreds, 2 - sign/tens, 3 - ones, 4-plus,
#5 - sign, 6 - sign/hundreds, 7 - sign/tens, 8 - ones, 9-equals
# Per-timestep accumulators keyed by the character positions described above.
temporal_decoding = {0:[],1:[],2:[],3:[],4:[],
                     5:[],6:[],7:[],8:[],9:[]}
# NOTE(review): this literal is truncated in this chunk — it continues past
# the visible source.
temporal_labels_first_num = {0:[],1:[],2:[],3:[],4:[],
# Beispiel #2   (scrape artifact: example-separator heading from the original listing)
# 0             (scrape artifact: score/vote count from the listing — not code)
import random

from tqdm import tqdm

import matplotlib.pyplot as plt
from matplotlib.colors import Normalize

from sklearn.decomposition import PCA
from textwrap import wrap

from tqdm import tqdm

# Fixed-length (L4) arithmetic dataset with 10^3 examples.
dataset_training = 'datasets/arithmetic/fixed_L4_1e3/training.txt'
dataset_testing = 'datasets/arithmetic/fixed_L4_1e3/testing.txt'

# Dataset is a project-local loader defined outside this chunk.
dataset = Dataset(dataset_training, dataset_testing)

# Hidden sizes to evaluate; each entry needs a matching checkpoint on disk.
hidden_sizes = [100]

losses = []
for hidden in hidden_sizes:
    print('Testing:', hidden)
    total_loss = 0

    # Checkpoint hyper-parameters — must agree with how the model was trained.
    num_layers = 1
    hidden_size = hidden
    num_epochs = 249
    input_size = dataset.vector_size
    PATH = 'models/arithmetic_L4_1e3_fixed_l_' + str(num_layers) + '_h_' + str(
        hidden_size) + '_ep_' + str(num_epochs)
    # NOTE(review): the loop body is truncated here — the remainder (model
    # loading / evaluation) lies outside this chunk.
# Command-line options (`parser` is an argparse.ArgumentParser created
# outside this chunk, which also registers --num_layers/--hidden_size/--num_epochs).
parser.add_argument('--training_set',
                    type=str,
                    default='datasets/arithmetic/fixed_1e2/training.txt')
parser.add_argument('--testing_set',
                    type=str,
                    default='datasets/arithmetic/fixed_1e2/testing.txt')
parser.add_argument('--model_prefix', type=str, default='arithmetic_1e2_fixed')
parser.add_argument('--dataset_type',
                    type=str,
                    default='normal',
                    help='{normal | polish}')

args = parser.parse_args()

# Select the input representation. Fail fast on an unknown type instead of
# leaving `dataset` unbound and crashing later with a confusing NameError.
if args.dataset_type == 'normal':
    dataset = NormalDataset(args.training_set, args.testing_set)
elif args.dataset_type == 'polish':
    dataset = PolishDataset(args.training_set, args.testing_set)
else:
    raise ValueError('unknown --dataset_type: ' + repr(args.dataset_type)
                     + " (expected 'normal' or 'polish')")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    print('CUDA AVAILABLE')

input_size = dataset.vector_size
# Checkpoint path encodes the architecture hyper-parameters.
PATH = args.model_prefix + '_l_' + str(args.num_layers) + '_h_' + str(
    args.hidden_size) + '_ep_' + str(args.num_epochs)

# GatedGRU is the project-local recurrent model; scalar regression output.
model = GatedGRU(input_size, args.hidden_size, output_size=1)
model.to(device)

criterion = nn.MSELoss()

def function_scaling(x):
    """Scale a target value before training/evaluation.

    Currently the identity — targets are used unscaled. A symmetric log
    transform (``-log(-x+1)`` for negatives, ``log(x+1)`` otherwise) was
    tried previously and is intentionally disabled.
    """
    return x


# Fixed-format arithmetic dataset with 10^2 examples.
dataset_training = 'datasets/arithmetic/fixed_1e2/training.txt'
dataset_testing = 'datasets/arithmetic/fixed_1e2/testing.txt'

# Fraction of examples used to fit the decoder/probe; the rest are held out.
decoder_training_percent = 0.9

# Dataset is a project-local loader defined outside this chunk.
dataset = Dataset(dataset_training, dataset_testing)

# Architecture hyper-parameters — must match the checkpoint loaded below.
num_layers = 1
hidden_size = 100
num_epochs = 449
input_size = dataset.vector_size
PATH = 'models/arithmetic_1e2_fixed_l_' + str(num_layers) + '_h_' + str(
    hidden_size) + '_ep_' + str(num_epochs)

# Number of independent regression fits to run.
regression_trials = 1

# Restore the trained GatedGRU (project-local class) and put it in eval mode.
model = GatedGRU(dataset.vector_size, hidden_size, output_size=1)
model.load_state_dict(torch.load(PATH))
model.eval()

# NOTE(review): this literal is truncated — its contents continue outside
# this chunk.
temporal_hidden = {
from tqdm import tqdm

import matplotlib.pyplot as plt
from matplotlib.colors import Normalize

from sklearn.decomposition import PCA
from sklearn.linear_model import RidgeClassifier

from textwrap import wrap

from tqdm import tqdm

# Arithmetic dataset splits (10^2 examples); Dataset is a project-local loader.
dataset_training = 'datasets/arithmetic/1e2/training.txt'
dataset_testing = 'datasets/arithmetic/1e2/testing.txt'

dataset = Dataset(dataset_training,dataset_testing)

# Architecture hyper-parameters — must match the checkpoint loaded below.
num_layers = 1
hidden_size = 100
num_epochs = 5
input_size = dataset.vector_size
PATH = 'models/arithmetic_l_'+str(num_layers)+'_h_'+str(hidden_size)+'_ep_'+str(num_epochs)

# Restore the trained GatedGRU (project-local class) and freeze it.
model = GatedGRU(dataset.vector_size,hidden_size,output_size=1)
model.load_state_dict(torch.load(PATH))
model.eval()

# Detach the update-gate parameters for offline inspection (no gradients).
# NOTE(review): assumes GatedGRU exposes W_iu / W_hu / b_u as tensor
# attributes — confirm against the model definition.
wiu = model.W_iu.detach()
whu = model.W_hu.detach()
bu = model.b_u.detach()