# Flattened script fragment: scales both modalities to [0,1], builds two 5x5
# SOMs (audio and visual) with checkpoint dirs, trains them, memorizes one
# example per class on each map, then trains a Hebbian model linking the two
# maps and starts an evaluation.
# NOTE(review): the trailing hebbian_model.evaluate(a_xs, ... call is
# truncated in this chunk -- its remaining arguments live outside this view,
# so the statement is left untouched.
# NOTE(review): soma_path / somv_path / hebbian_path / n_classes and the
# a_xs/a_ys/v_ys inputs are defined outside this fragment -- confirm upstream.
v_xs = MinMaxScaler().fit_transform(v_xs) a_dim = len(a_xs[0]) v_dim = len(v_xs[0]) som_a = SOM(5, 5, a_dim, checkpoint_dir=soma_path, n_iterations=100, batch_size=4) som_v = SOM(5, 5, v_dim, checkpoint_dir=somv_path, n_iterations=100, batch_size=4) som_a.train(a_xs) som_v.train(v_xs) som_a.memorize_examples_by_class(a_xs, a_ys) som_v.memorize_examples_by_class(v_xs, v_ys) hebbian_model = HebbianModel(som_a, som_v, a_dim=a_dim, v_dim=v_dim, n_presentations=1, learning_rate=1, n_classes=n_classes, checkpoint_dir=hebbian_path) print('Training...') hebbian_model.train(a_xs, v_xs) print('Evaluating...') accuracy = hebbian_model.evaluate(a_xs,
# Flattened script fragment, truncated at BOTH edges: it opens mid-argument
# list (the n_iterations/tau/threshold/batch_size kwargs belong to a call that
# begins outside this view) and ends mid-call
# (hebbian_mmodel.evaluate(a_xs_train, v_xs_train, a_ys_train, ...).
# Between the truncations it builds a 20x30 visual SOM tagged with a
# per-iteration data label ('visual_<i+1>'), trains both SOMs with
# train/test splits, then trains a Hebbian model (n_presentations=10) and
# starts evaluating it on the training split.
# NOTE(review): 'hebbian_mmodel' (double m) is a consistent local typo, and
# the inline '# showSom(...)' comment would swallow the rest of this
# physical line if executed as-is -- artifacts of flattening; bytes kept
# unchanged because the statement boundaries cannot be safely reconstructed.
n_iterations=100, tau=0.1, threshold=0.6, batch_size=1) type_file = 'visual_' + str(i + 1) som_v = SOM(20, 30, v_dim, alpha=0.7, sigma=15, n_iterations=100, threshold=0.6, batch_size=1, data=type_file) som_a.train(a_xs_train, input_classes=a_ys_train, test_vects=a_xs_test, test_classes=a_ys_test) som_v.train(v_xs_train, input_classes=v_ys_train, test_vects=v_xs_test, test_classes=v_ys_test) # showSom(som_v, v_xs, v_ys, 1, 'Visual map') hebbian_mmodel = HebbianModel(som_a, som_v, a_dim, v_dim, n_presentations=10) hebbian_mmodel.train(a_xs_train, v_xs_train) accuracy = hebbian_mmodel.evaluate(a_xs_train, v_xs_train, a_ys_train,
"""Load 10-class audio features from CSV and display them on a 20x30 SOM.

The SOM is either restored from its checkpoint directory (LOAD=True, the
default) or trained from scratch on the loaded vectors.
"""
from utils.utils import load_data
from utils.utils import from_csv_with_filenames
from utils.constants import Constants
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# prefer 'import joblib' directly when this script is next touched.
from sklearn.externals import joblib
import os
import logging
import numpy as np

csv_path = os.path.join(Constants.DATA_FOLDER, '10classes', 'audio_data.csv')
# When True, restore a previously trained SOM instead of training a new one.
LOAD = True

if __name__ == '__main__':
    logging.info('Loading data')
    xs, ys, filenames = from_csv_with_filenames(csv_path)
    # Labels in the CSV are offset by 1000; shift them to 0-based class ids.
    ys = [int(y) - 1000 for y in ys]
    vect_size = len(xs[0])
    audio_som = SOM(20, 30, vect_size, n_iterations=100,
                    checkpoint_dir=os.path.join(Constants.DATA_FOLDER,
                                                '10classes', 'audio_model', ''))
    if not LOAD:
        # BUG FIX: the 'Training som' log message was attached to the
        # restore branch; it belongs with the actual training call.
        logging.info('Training som')
        audio_som.train(xs)
    else:
        audio_som.restore_trained()
    #audio_som.plot_som(xs, ys, plot_name='audio_som.png')
    showSom(audio_som, xs, ys, 1, 'Audio Map', filenames=filenames)
# Sanity-check experiment: run 20 independent trials of a tiny audio/visual
# Hebbian pipeline on a 4-class one-hot toy dataset and report mean accuracy.
acc = []
for i in range(20):
    n_classes = 4
    dataset = OneHotDataset(n_classes)
    # Both "modalities" deliberately share the same toy data.
    a_xs = dataset.x
    a_ys = dataset.y
    v_xs = dataset.x
    v_ys = dataset.y
    # scale audio data to 0-1 range
    a_xs = MinMaxScaler().fit_transform(a_xs)
    v_xs = MinMaxScaler().fit_transform(v_xs)
    a_dim = len(a_xs[0])
    v_dim = len(v_xs[0])
    som_a = SOM(5, 5, a_dim, n_iterations=100, batch_size=4)
    som_v = SOM(5, 5, v_dim, n_iterations=100, batch_size=4)
    # BUG FIX: som_a was trained with the visual labels (v_ys) and som_v was
    # handed the visual *data* (v_xs) as input_classes; each SOM now receives
    # its own label vector. (Masked here only because a_* and v_* alias the
    # same toy data.)
    som_a.train(a_xs, input_classes=a_ys)
    som_v.train(v_xs, input_classes=v_ys)
    som_a.memorize_examples_by_class(a_xs, a_ys)
    som_v.memorize_examples_by_class(v_xs, v_ys)
    hebbian_model = HebbianModel(som_a, som_v, a_dim=a_dim, v_dim=v_dim,
                                 n_presentations=1, learning_rate=1,
                                 n_classes=n_classes,
                                 checkpoint_dir=hebbian_path)
    print('Training...')
    hebbian_model.train(a_xs, v_xs)
    print('Evaluating...')
    accuracy = hebbian_model.evaluate(a_xs, v_xs, a_ys, v_ys, source='a',
                                      prediction_alg='regular')
    hebbian_model.make_plot(a_xs[0], v_xs[0], v_ys[0], v_xs, source='a')
    acc.append(accuracy)
    # 'n' here is n_presentations (fixed at 1), not the trial index.
    print('n={}, accuracy={}'.format(1, accuracy))
# NOTE(review): plotting audio examples on the visual SOM -- harmless while
# both modalities share the same data, but verify if they ever diverge.
som_v.plot_som(a_xs, a_ys)
print(sum(acc) / len(acc))
# Load the requested modality, optionally subsample, split into
# train/val/test, transform, and train the SOM with validation monitoring.
if args.data == 'audio':
    xs, ys, _ = from_csv_with_filenames(audio_data_path)
elif args.data == 'video':
    xs, ys = from_csv_visual_100classes(visual_data_path)
else:
    raise ValueError('--data argument not recognized')
dim = len(xs[0])
som = SOM(args.neurons1, args.neurons2, dim, n_iterations=args.epochs,
          alpha=args.alpha, tau=0.1, threshold=0.6, batch_size=args.batch,
          data=args.data, sigma=args.sigma, num_classes=args.classes,
          sigma_decay='constant')
ys = np.array(ys)
xs = np.array(xs)
if args.subsample:
    # Keep a stratified 40% of the data (test_size=0.6 is discarded).
    xs, _, ys, _ = train_test_split(xs, ys, test_size=0.6, stratify=ys,
                                    random_state=args.seed)
print('Training on {} examples.'.format(len(xs)))
# 80/20 train/test, then split the 80% in half -> 40% train / 40% val.
xs_train, xs_test, ys_train, ys_test = train_test_split(
    xs, ys, test_size=0.2, stratify=ys, random_state=args.seed)
xs_train, xs_val, ys_train, ys_val = train_test_split(
    xs_train, ys_train, test_size=0.5, stratify=ys_train,
    random_state=args.seed)
# BUG FIX: the transformed validation set was previously bound to xs_test,
# clobbering the held-out test split while som.train kept receiving the
# *untransformed* xs_val; bind the second return value to xs_val instead.
xs_train, xs_val = transform_data(xs_train, xs_val, rotation=args.rotation)
som.train(xs_train, input_classes=ys_train, test_vects=xs_val,
          test_classes=ys_val, logging=args.logging)
# Load visual feature vectors from a CSV-like file (one example per line:
# "path,feat1,feat2,..."), map them on a 20x30 SOM -- restored from its
# checkpoint when available, trained otherwise -- and display the map.
nameInputs = list()
with open(fInput, 'r') as inp:
    i = 0
    for line in inp:
        if len(line) > 2:  # skip blank / degenerate lines
            fields = line.split(',')  # split once instead of twice per line
            # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float is the documented drop-in replacement.
            inputs[i] = np.array(fields[1:]).astype(float)
            # NOTE(review): path component 6 is assumed to be the file name;
            # verify against the dataset's directory layout.
            nameInputs.append(fields[0].split('/')[6])
            i = i + 1
print(nameInputs[0])
#get the 20x30 SOM or train a new one (if the folder does not contain the model)
som = SOM(20, 30, lenExample,
          checkpoint_dir=os.path.join(Constants.DATA_FOLDER,
                                      'VisualModel10classes/'),
          n_iterations=20, sigma=4.0)
loaded = som.restore_trained()
if not loaded:
    logging.info('Training SOM')
    som.train(inputs)
# Keep only the class prefix of each file name (e.g. 'dog_001' -> 'dog').
for k in range(len(nameInputs)):
    nameInputs[k] = nameInputs[k].split('_')[0]
#shows the SOM
showSom(som, inputs, nameInputs, 1, 'Visual map')