def reload(self): """ Load components values from disk. """ for name, param in self.components.items(): param_path = os.path.join(self.model_path, "%s.mat" % name) param_values = scipy.io.loadmat(param_path) if hasattr(param, 'params'): for p in param.params: set_values(p.name, p, param_values[p.name]) else: set_values(name, param, param_values[name])
def reload(self): """ Load components values from disk. """ print 'loading from ' + self.load_path for name, param in self.components.items(): # if 'final_layer' in name or 'trans' in name or 'word_lstm' in name: # continue param_path = os.path.join(self.load_path, "%s.mat" % name) param_values = scipy.io.loadmat(param_path) if hasattr(param, 'params'): for p in param.params: set_values(p.name, p, param_values[p.name]) else: set_values(name, param, param_values[name])
def fit(self, X, y, a, values=None):
    '''
    Fit the bandit on contextual data.

    Parameters:
        X (array): An m x n array of contexts
        y (array): A 1 x m array of binary payoffs
        a (array): A 1 x m array indicating the arm chosen for each context in X
        values (array): An array with the reward value for each arm, or None

    Updates:
        self.A_inv (dict): A dict of covariance matrices for each arm
        self.theta (dict): A dict of parameter vectors for each arm
        self.arms (array): An array of arm indices
    '''
    X = np.matrix(X)
    y = np.matrix(y)
    self.arms = np.unique(a)
    self.values = utils.set_values(values, len(self.arms))
    for arm in self.arms:
        ix = (a == arm).flatten()
        try:
            self.A_inv[arm], self.theta[arm] = utils.ridge_reg(X[ix, :], y[ix])
        except Exception, e:
            self.log.error('Arm %d is broken: %s.' % (arm, e))
            raise SystemExit(1)
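# NOTE: `utils.ridge_reg` is not defined above. A minimal sketch of the
# closed-form ridge regression it is assumed to perform for each arm (the
# standard update for a linear contextual bandit): A_inv = (X'X + l2*I)^-1 and
# theta = A_inv X'y. The regularization constant `l2` and the exact shapes
# returned are assumptions.
import numpy as np

def ridge_reg(X, y, l2=1.0):
    """Return the inverse design matrix and coefficient vector for one arm."""
    X = np.matrix(X)
    y = np.matrix(y).reshape(-1, 1)            # column vector of payoffs
    A = X.T * X + l2 * np.eye(X.shape[1])      # regularized design matrix
    A_inv = np.linalg.inv(A)
    theta = A_inv * X.T * y                    # ridge regression coefficients
    return A_inv, theta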
def fit(self, successes, trials, n_samples=1000, baseline=0.0, values=None, smoothing=1.0):
    '''
    Generate the weights for each arm based on bandit history.

    Parameters:
        successes (array): A 1 x n array with total successes for each arm
        trials (array): A 1 x n array with total trials for each arm
        n_samples (int): The number of samples to pull from each arm's
            distribution for Thompson Sampling
        baseline (float): The minimum weight to give each arm
        values (array): A 1 x n array with the reward value for each arm, or None
        smoothing (float): The constant factor by which to divide all trials and successes

    Updates:
        self.weights (array): A 1 x n array with normalized weights for each arm
    '''
    self.values = utils.set_values(values, len(trials))
    self.samples = utils.get_samples(trials, successes, n_samples, smoothing, self.values)
    self._raw_weights = utils.get_weights(self.samples)
    self.weights = utils.normalize_weights(self._raw_weights, baseline)
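# NOTE: the `utils` helpers called by fit() above are not included here. A
# minimal sketch of the Thompson Sampling steps they are assumed to implement:
# draw Beta posterior samples per arm (scaled by each arm's reward value),
# count how often each arm has the highest sample, then floor the weights at
# `baseline` and renormalize. The Beta parameterization and the way `smoothing`
# is applied are assumptions, not the library's actual code.
import numpy as np

def get_samples(trials, successes, n_samples, smoothing, values):
    """Return an n_samples x n_arms array of scaled Beta posterior draws."""
    trials = np.ravel(trials).astype(float) / smoothing
    successes = np.ravel(successes).astype(float) / smoothing
    alpha = successes + 1.0
    beta = (trials - successes) + 1.0
    samples = np.random.beta(alpha, beta, size=(n_samples, len(alpha)))
    return samples * np.ravel(values).astype(float)

def get_weights(samples):
    """Return the fraction of draws in which each arm had the highest sample."""
    winners = np.argmax(samples, axis=1)
    counts = np.bincount(winners, minlength=samples.shape[1])
    return counts.astype(float) / samples.shape[0]

def normalize_weights(weights, baseline):
    """Floor each weight at `baseline`, then renormalize to sum to one."""
    weights = np.maximum(np.ravel(weights).astype(float), baseline)
    return weights / weights.sum()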
def reload(self): """ Load components values from disk. """ for name, param in self.components.items(): param_path = os.path.join(self.model_path, "%s.mat" % name) #load word layer during build from pretrained embeddings file. if name=='word_layer': continue else: param_values = scipy.io.loadmat(param_path) if hasattr(param, 'params'): for p in param.params: set_values(p.name, p, param_values[p.name]) else: set_values(name, param, param_values[name])
def reload(self): """ Load components values from disk. """ for name, param in self.components.items(): if name == "word_layer" and self.parameters['pre_emb']!='': new_weights = self.components['word_layer'].embeddings.get_value() # load the trained matrix and mappings print "Getting training matrix and dictionary %s..." % os.path.abspath(self.model_path) trainedM = scipy.io.loadmat(os.path.join(os.path.abspath(self.model_path),"word_layer.mat"))['word_layer__embeddings'] with open(os.path.join(os.path.abspath(self.model_path),"mappings.pkl")) as voc_file: trainedV = cPickle.load(voc_file)['id_to_word'] print "Excerpt from trainedV is...",trainedV.items()[:5] trainedV = dict([(w,i) for i,w in trainedV.iteritems()]) # load the external matrix and mappings print "Getting external matrix and dictionary %s..." % os.path.abspath(self.parameters['pre_emb']) extM = np.load(os.path.abspath(self.parameters['pre_emb'])) extV = dict([(w,i) for i,w in enumerate(np.load(self.parameters['pre_voc']))]) # create a pretrained dictionary containing a mixture of the two matrices pretrained = {} for w in trainedV: pretrained[w.lower()] = np.array( [float(x) for x in trainedM[trainedV[w]]]).astype(np.float32) for w in extV: if w not in trainedV: pretrained[w.lower()] = np.array( [float(x) for x in extM[extV[w]]]).astype(np.float32) # Lookup table initialization for i in xrange(len(self.id_to_word)): word = self.id_to_word[i] if word in pretrained: new_weights[i] = pretrained[word] #c_found += 1 elif word.lower() in pretrained: new_weights[i] = pretrained[word.lower()] #c_lower += 1 self.components['word_layer'].embeddings.set_value(new_weights) else: param_path = os.path.join(self.model_path, "%s.mat" % name) param_values = scipy.io.loadmat(param_path) if hasattr(param, 'params'): for p in param.params: set_values(p.name, p, param_values[p.name]) else: set_values(name, param, param_values[name])
def reload(self): """ Load components values from disk. """ ignore_shape = False for name, param in self.components.items(): param_path = os.path.join(self.model_path, "%s.mat" % name) if not os.path.exists(param_path): print "param :", param, "is not saved before to be loaded. " continue param_values = scipy.io.loadmat(param_path) if 'word_layer' in name: ignore_shape = True if hasattr(param, 'params'): for p in param.params: set_values(p.name, p, param_values[p.name], ignore_size=ignore_shape) else: set_values(name, param, param_values[name], ignore_size=ignore_shape)
def reload(self, features=None):  # {{{
    """
    Load components values from disk.
    """
    featureLayerNameMap = [
        'pos_layer', 'lemma_layer', 'chunk_layer', 'dic_layer'
    ]
    for name, param in self.components.items():
        # When a feature is only attended to and is not part of the LSTM input,
        # we do not reload its parameters.
        if features is not None and name in featureLayerNameMap:
            featuresName = name[:name.find('_')]
            if features[featuresName]['attended'] == 1 and \
                    features[featuresName]['lstm-input'] == 0:
                continue
        param_path = os.path.join(self.model_path, "%s.mat" % name)
        param_values = scipy.io.loadmat(param_path)
        if hasattr(param, 'params'):
            for p in param.params:
                set_values(p.name, p, param_values[p.name])
        else:
            set_values(name, param, param_values[name])