res_size = 0
if bidirectional == "True":
    total_res_size = res_size * 2
else:
    total_res_size = res_size
a = float(args.leak_rate)  # leaking rate
res_sparsity = float(args.res_sparsity)
only_open_class = args.only_open_class
training_iterations = int(args.training_iterations)
save_path = args.save_path
embeddings = load_embeddings(embeddings_model)  # load the embeddings
f_sensekey2synset = cPickle.load(open(sensekey2synset, "rb"))  # get the mapping between sense keys and synset IDs
train_data, known_lemmas = read_data(train_data_path, f_sensekey2synset, only_open_class)  # read the training data
lemma2synset = get_lemma2syn(dictionary)
lemma2id, synset2id = get_lemma_synset_maps(lemma2synset, known_lemmas)
# with a softmax readout the network emits one score per synset;
# otherwise it regresses to a sense embedding of size embeddings_size
if softmax == "True":
    output_size = len(synset2id)
else:
    output_size = embeddings_size
input_size = embeddings_size * window_size
# generate the ESN reservoir
name_add = str(res_size) + '_' + str(res_sparsity) + '_' + str(a)  # suffix identifying this configuration
random.seed(42)
# initialize the output (readout) weights uniformly in [-0.5, 0.5)
Wout = (random.rand(output_size, total_res_size + input_size) - 0.5) * 1.0
if softmax == "True":
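
# --- Sketch (not from the original file): the "generate the ESN reservoir"
# step is cut off above. A minimal reservoir construction, following the
# common ESN recipe, draws a random input matrix Win and a sparse recurrent
# matrix W rescaled to a target spectral radius. Everything below is an
# illustrative assumption: the helper name, the default spectral_radius, and
# treating res_sparsity as the fraction of zeroed entries.
import numpy as np

def make_reservoir(res_size, input_size, res_sparsity, spectral_radius=0.9, seed=42):
    rng = np.random.RandomState(seed)
    # input weights uniform in [-0.5, 0.5); some recipes add an extra bias column
    Win = rng.rand(res_size, input_size) - 0.5
    W = rng.rand(res_size, res_size) - 0.5
    # zero out a res_sparsity fraction of the recurrent connections
    W[rng.rand(res_size, res_size) < res_sparsity] = 0.0
    # rescale W so its largest eigenvalue magnitude equals spectral_radius
    rho = max(abs(np.linalg.eig(W)[0]))
    W *= spectral_radius / rho
    return Win, W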
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import cv2
import datetime
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import pdb

from preprocess_data import read_data

'''
NOTE: the KTH dataset may have only 599 files; one duplicate video was added
to the folder that had 99, to bring every class to 100.
'''
X_tr = read_data()
X_train = np.array(X_tr)  # convert the frames read into an array
num_samples = len(X_train)  # 600

# each class has 100 videos
label = np.ones((num_samples,), dtype=int)
label[0:100] = 0
label[100:200] = 1
label[200:300] = 2
label[300:400] = 3
label[400:500] = 4
label[500:] = 5

y_train = label  # 600 num_samples
img_rows, img_cols, img_depth = 32, 32, 15
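
# --- Sketch (not part of the original script): the six slice assignments
# above can be expressed in one line, and a usual next step before training
# is one-hot encoding the labels and holding out a validation split. The
# nb_classes name, the 80/20 split, and random_state=4 are illustrative
# assumptions; the check assumes 600 samples, as the comments above state.
nb_classes = 6
assert (np.repeat(np.arange(nb_classes), 100) == label).all()  # compact equivalent
Y = np.eye(nb_classes, dtype=int)[y_train]  # one-hot labels, shape (600, 6)
X_tr_split, X_val, y_tr_split, y_val = train_test_split(
    X_train, Y, test_size=0.2, random_state=4, stratify=y_train)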
bidirectional = args.bidirectional
if use_reservoirs == "True":
    res_size = int(args.res_size)
else:
    res_size = 0
if bidirectional == "True":
    total_res_size = res_size * 2
else:
    total_res_size = res_size
only_open_class = args.only_open_class
save_path = args.save_path
embeddings = load_embeddings(embeddings_model)  # load the embeddings
f_sensekey2synset = cPickle.load(open(sensekey2synset, "rb"))  # get the mapping between sense keys and synset IDs
test_data = read_data(test_data_path, f_sensekey2synset, only_open_class)
inSize = embeddings_size * window_size
outSize = embeddings_size
# the saved model stores its parameters as consecutive pickled objects,
# so they must be read back in the order they were written
with open(save_path, "rb") as input_file:
    _ = cPickle.load(input_file)  # first object is not needed here
    resSparsity = cPickle.load(input_file)
    a = cPickle.load(input_file)
    Wout = cPickle.load(input_file)
    if use_reservoirs == "True":
        Win_fw = cPickle.load(input_file)
        Win_bw = cPickle.load(input_file)
        W_fw = cPickle.load(input_file)
        W_bw = cPickle.load(input_file)
        G_fw = cPickle.load(input_file)
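
# --- Sketch (not from the original file): once Wout, the input matrices and
# the recurrent matrices are unpickled, a leaky-integrator ESN updates its
# state per token as x = (1 - a) * x + a * tanh(Win u + W x). The helper below
# is an illustrative assumption (it takes Win of shape (res_size, inSize),
# without a bias column), not the saved model's exact interface.
import numpy as np

def run_reservoir(Win, W, inputs, a):
    """Collect one reservoir state per input vector (inputs: sequence of 1-D arrays)."""
    x = np.zeros(W.shape[0])
    states = []
    for u in inputs:
        x = (1.0 - a) * x + a * np.tanh(Win.dot(u) + W.dot(x))
        states.append(x.copy())
    return states

# For the bidirectional case, forward states would be concatenated with
# backward states computed over the reversed sequence (an assumption about
# this codebase's convention):
#   states_fw = run_reservoir(Win_fw, W_fw, seq, a)
#   states_bw = run_reservoir(Win_bw, W_bw, seq[::-1], a)[::-1]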
if bidirectional == "True":
    total_res_size = res_size * 2
else:
    total_res_size = res_size
only_open_class = args.only_open_class
save_path = args.save_path
f_syn2gloss = args.syn2gloss
if f_syn2gloss != "None":
    syn2gloss = pickle.load(open(f_syn2gloss, "rb"))
else:
    syn2gloss = None
error_log = args.error_log
embeddings = load_embeddings(embeddings_model)  # load the embeddings
f_sensekey2synset = cPickle.load(open(sensekey2synset, "rb"))  # get the mapping between sense keys and synset IDs
test_data, known_lemmas = read_data(test_data_path, f_sensekey2synset, only_open_class)
input_size = embeddings_size * window_size
output_size = embeddings_size
# locate the pickled model file in the save directory
# (if several *.cpickle files are present, the last one listed wins)
pickled_file = ""
for f in os.listdir(save_path):
    if f.endswith(".cpickle"):
        pickled_file = f
synset2id = None
# the parameters are read back in the order they were pickled
with open(os.path.join(save_path, pickled_file), "rb") as input_file:
    _ = cPickle.load(input_file)  # first object is not needed here
    resSparsity = cPickle.load(input_file)
    a = cPickle.load(input_file)
    if softmax == "True":
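
# --- Sketch (not from the original file): with the softmax readout, the
# network emits one score per synset ID, and disambiguation restricts the
# argmax to the candidate synsets of the target lemma. The pick_sense helper
# and its use of lemma2synset/synset2id are illustrative assumptions about
# this codebase's data structures.
def pick_sense(scores, lemma, lemma2synset, synset2id):
    """Return the candidate synset of `lemma` with the highest readout score."""
    candidates = lemma2synset.get(lemma, [])
    known = [s for s in candidates if s in synset2id]
    if not known:
        # back off to the first listed sense when no candidate was seen in training
        return candidates[0] if candidates else None
    return max(known, key=lambda s: scores[synset2id[s]])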