def _build_hidden_layers(self, input, add_cost, Y, updates, external_grad=None):
    lin_output = tensor.dot(input, self.W_theano.T) + self.b_theano[None, :]
    if self.activation == 'tanh':
        output = tensor.tanh(lin_output)
    elif self.activation == 'softplus':
        output = tensor.nnet.softplus(lin_output)
    elif self.activation is None:
        output = lin_output
    else:
        raise ValueError('Unsupported activation function!')

    if self.regularization == 'L1':
        add_cost = add_cost - self.reg_weight * tensor.abs_(self.W_theano).sum()
    elif self.regularization == 'L2':
        add_cost = add_cost - self.reg_weight * (self.W_theano ** 2).sum()

    # Compute the cost function
    if self.layer_forward is None:
        if external_grad is None:
            cost = -((output - Y) ** 2).sum() / self.sigma2_theano[0] + add_cost
        else:
            cost = (external_grad * output).sum()
        Y_out = output
    else:
        cost, Y_out = self.layer_forward._build_hidden_layers(
            output, add_cost, Y, updates, external_grad=external_grad)

    # Accumulate parameter gradients into the shared gradient buffers
    W_grad = tensor.grad(cost, self.W_theano)
    b_grad = tensor.grad(cost, self.b_theano)
    updates.extend([(self.W_grad_theano, self.W_grad_theano + W_grad),
                    (self.b_grad_theano, self.b_grad_theano + b_grad)])
    return cost, Y_out
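# A minimal, self-contained sketch (hypothetical names) of the gradient-accumulation
# pattern used above: gradients are summed into shared buffers via an `updates` list,
# so several calls can accumulate before a parameter step is taken.
import numpy as np
import theano
import theano.tensor as tensor

W = theano.shared(np.ones((2, 2), dtype='float32'), name='W')
W_grad_acc = theano.shared(np.zeros((2, 2), dtype='float32'), name='W_grad_acc')
x = tensor.matrix('x')
cost = (tensor.dot(x, W) ** 2).sum()
updates = [(W_grad_acc, W_grad_acc + tensor.grad(cost, W))]
accumulate = theano.function([x], cost, updates=updates)
accumulate(np.ones((1, 2), dtype='float32'))  # W_grad_acc now holds one gradient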
def penalty(x):
    '''
    Sum of magnitudes of the differences between the elements of x and `val`:
    sum(|x - val|), i.e. sum(sqrt((x - val)**2)).
    '''
    # `val` is expected to be defined in the enclosing scope
    return T.sum(T.abs_(x - val * T.ones_like(x)))
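# A minimal usage sketch, assuming `val` is a module-level constant (here 1.0):
import numpy as np
import theano
import theano.tensor as T

val = 1.0
x = T.vector('x')
penalty_fn = theano.function([x], penalty(x))
print(penalty_fn(np.array([0., 2., 3.], dtype='float32')))  # |0-1| + |2-1| + |3-1| = 4.0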
def add_params_to_self(self, args, layer):
    # W for weights with corresponding hyperparameters, b for weights without hyperparameters.
    # params_lambda and params_weight must be ordered the same way
    # (corresponding [(weights, hyperparameters), ...]),
    # otherwise Hessian-vector products will not be properly aligned.
    if layer.W is not None:
        # regularized weights
        self.params_weight += [layer.W]
        if layer.b is not None:
            self.params_theta += [layer.W, layer.b]
        else:
            self.params_theta += [layer.W]
        # define new regularization term for a layer
        if layer.L2 is not None:
            #tempL2 = layer.L2 * T.sqr(layer.W)
            tempL2 = (10. ** layer.L2) * T.sqr(layer.W)  # regularization on logarithmic scale
            #print("regularization", layer.name, tempL2, tempL2.type)
            self.penalty += T.sum(tempL2)
            self.params_lambda += [layer.L2]
        if layer.L1 is not None:
            tempL1 = (10. ** layer.L1) * T.abs_(layer.W)  # Michael: use 10.**regularization constants
            self.penalty += T.sum(tempL1)
            self.params_lambda += [layer.L1]
        #if layer.initScale is not None:
        #    self.params_lambda += [layer.initScale]
    elif layer.b is not None:
        # all unregularized weights
        self.params_theta += [layer.b]
def my_activation(input):
    # Round the input to d decimal places, then apply the softsign x / (1 + |x|)
    d = 5
    input = input * 10. ** d
    input = T.round(input)
    x = input / 10. ** d
    abs_x = T.abs_(x)
    return x / (1. + abs_x)
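# A quick numeric check (sketch): the rounding step quantizes to 1e-5 before the
# softsign, so e.g. 1.0 maps to 0.5. Note that T.round has a zero gradient, so this
# activation blocks backpropagation through the quantization step.
import numpy as np
import theano
import theano.tensor as T

v = T.vector('v')
act = theano.function([v], my_activation(v))
print(act(np.array([1., -3.], dtype='float32')))  # ~[0.5, -0.75]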
def __init__(self, x, y, args):
    self.params_theta = []
    self.params_lambda = []
    self.params_weight = []
    if args.dataset == 'mnist':
        input_size = (None, 28 * 28)
    elif args.dataset == 'cifar10':
        input_size = (None, 3, 32 * 32)
    else:
        raise AssertionError('unsupported dataset: %s' % args.dataset)

    layers = [ll.InputLayer(input_size)]
    penalty = theano.shared(np.array(0.))
    for (k, num) in enumerate(args.MLPlayer):
        # the last layer should use softmax
        if k == len(args.MLPlayer) - 1:
            # layers.append(ll.DenseLayer(layers[-1], num, nonlinearity=nonlinearities.softmax))
            layers.append(DenseLayerWithReg(args, layers[-1], num_units=num,
                                            nonlinearity=nonlinearities.softmax))
        else:
            # layers.append(ll.DenseLayer(layers[-1], num))
            layers.append(DenseLayerWithReg(args, layers[-1], num_units=num))

        if layers[-1].W is not None:
            self.params_theta += [layers[-1].W, layers[-1].b]
            self.params_weight += [layers[-1].W]
            # define new regularization term for a layer
            if args.regL2 is True:
                tempL2 = layers[-1].L2 * T.sqr(layers[-1].W)  #Michael: use 10**regularization constants
                penalty += T.sum(tempL2)
                self.params_lambda += [layers[-1].L2]
            if args.regL1 is True:
                tempL1 = layers[-1].L1 * T.abs_(layers[-1].W)  #Michael: use 10**regularization constants
                penalty += T.sum(tempL1)
                self.params_lambda += [layers[-1].L1]

    self.layers = layers
    self.y = ll.get_output(layers[-1], x, deterministic=False)
    self.prediction = T.argmax(self.y, axis=1)
    self.penalty = penalty
    # self.penalty = penalty if penalty != 0. else T.constant(0.)
    print(self.params_lambda)
    # time.sleep(20)

    # cost function
    self.loss = T.mean(categorical_crossentropy(self.y, y))
    self.lossWithPenalty = T.add(self.loss, self.penalty)
    print("loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty))
def absolute_loss(predictions, targets):
    """Computes the element-wise absolute difference between two tensors.

    L(p, t) = abs(p - t)

    :param predictions: Theano tensor
        Predictions.
    :param targets: Theano tensor
        Targets.
    :return: Theano tensor
        An expression for the element-wise absolute difference.
    """
    return T.abs_(predictions - targets)
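# A minimal usage sketch: reduce the element-wise loss to a scalar for training.
import numpy as np
import theano
import theano.tensor as T

p, t = T.vectors('p', 't')
mean_abs_err = theano.function([p, t], absolute_loss(p, t).mean())
print(mean_abs_err(np.array([1., 2.], dtype='float32'),
                   np.array([0., 4.], dtype='float32')))  # (1 + 2) / 2 = 1.5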
def linear_loss(mx, Sx, target, Q, absolute=True, *args, **kwargs):
    '''
    Linear penalty function c(x) = Q.dot(|x - target|)
    '''
    if Sx is None:
        # deterministic case
        if mx.ndim == 1:
            mx = mx[None, :]
        delta = mx - target
        if absolute:
            delta = tt.abs_(delta)
        cost = delta.dot(Q)
        return cost
    else:
        # stochastic case (moment matching)
        delta = mx - target
        if absolute:
            delta = tt.abs_(delta)
        SxQ = Sx.dot(Q)
        m_cost = Q.T.dot(delta)
        s_cost = Q.T.dot(SxQ)
        return m_cost, s_cost
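# A minimal deterministic-case sketch: with Q a weight vector, the cost is the
# Q-weighted L1 distance of each row of mx from the target.
import numpy as np
import theano
import theano.tensor as tt

mx = tt.matrix('mx')
target = np.array([1., 1.], dtype='float32')
Q = np.array([1., 2.], dtype='float32')
cost_fn = theano.function([mx], linear_loss(mx, None, target, Q))
print(cost_fn(np.array([[0., 3.]], dtype='float32')))  # |0-1|*1 + |3-1|*2 = [5.]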
def add_params_to_self(self, args, layer):
    if layer.W is not None:
        self.params_theta += [layer.W, layer.b]  # Michael: weights and biases?
        self.params_weight += [layer.W]  # Michael: weights but not biases?
        # define new regularization term for a layer
        if args.regL2 is True:
            #tempL2 = layer.L2 * T.sqr(layer.W)
            tempL2 = (10. ** layer.L2) * T.sqr(layer.W)  #Michael: use 10.**regularization constants
            self.penalty += T.sum(tempL2)
            self.params_lambda += [layer.L2]
        if args.regL1 is True:
            #tempL1 = layer.L1 * layer.W
            tempL1 = (10. ** layer.L1) * T.abs_(layer.W)  #Michael: use 10.**regularization constants
            self.penalty += T.sum(tempL1)
            self.params_lambda += [layer.L1]
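# A small sketch of the log-scale reparameterization used above (hypothetical names):
# the hyperparameter stores log10 of the effective coefficient, so gradient-based
# hyperparameter optimization works on an unconstrained scale while the effective
# weight-decay strength 10**L2 stays positive.
import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.ones((3, 3), dtype='float32'), name='W')
L2 = theano.shared(np.float32(-2.0), name='L2_log10')  # effective coeff = 10**-2
penalty = T.sum((10. ** L2) * T.sqr(W))
grad_wrt_L2 = T.grad(penalty, L2)  # well-defined: d/dL2 10**L2 = ln(10) * 10**L2
print(theano.function([], [penalty, grad_wrt_L2])())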
def interpolate_bilinear(coords, inputs, dim, wrap=False):
    """
    interpolate_bilinear - the default interpolation kernel to be used with the
    spatial transformer. Differentiable w.r.t. both the indices and the input
    tensors being sampled.

    :param coords: shape (N, dim, width, height, ...)
    :param inputs: shape (N, width, height, ..., n_chan)
    :param dim: dimensionality of the data, e.g. 2 if inputs is a batch of images
    :param wrap: whether to wrap, or otherwise clip, during the interpolation
    :returns: the sampled result,
        shape (N, width, height, ..., n_chan), where width, height, ... come
        from the coords shape
    """
    inputs_shape = K.shape(inputs)
    maxes = K.cast(inputs_shape[1:-1] - 1, "float32")
    coords_float = upscale(coords, maxes, dim)
    # floored coordinates, time to build the surrounding points based on them
    if K.backend() == "tensorflow":
        import tensorflow as tf
        coords = tf.floor(coords_float)
    else:
        import theano.tensor as T
        coords = T.floor(coords_float)

    # construct the surrounding 2^dim coord sets which will all be used for interpolation
    # (e.g. corresponding to the 4 points in 2D that surround the point to be interpolated,
    # or to the 8 points in 3D, etc ...)
    surround_coord_sets = []
    surround_inputs = []
    for i in range(2 ** dim):
        bits = bitfield(i)
        bits = [0] * (dim - len(bits)) + bits
        offsets = K.variable(np.array(bits),
                             name="spatial_transform/bilinear_surround_offsets")
        offsets = K.reshape(offsets, shape=[1, -1] + [1] * dim)
        surround_coord_set = coords + offsets
        surround_coord_sets.append(surround_coord_set)
        # sample for each of the surrounding points before interpolating
        surround_input = sample(inputs, surround_coord_set, dim, wrapped=wrap)
        surround_inputs.append(surround_input)

    # Bilinear interpolation; this part of the kernel lets the gradients flow through
    # the coords as well as the inputs
    products = list()
    for coords_set, surround_input in zip(surround_coord_sets, surround_inputs):
        if K.backend() == "tensorflow":
            import tensorflow as tf
            # shape: (N, width, height, ...)
            product = tf.reduce_prod(1 - tf.abs(coords_set - coords_float), axis=1)
        else:
            import theano.tensor as T
            product = T.prod(1 - T.abs_(coords_set - coords_float), axis=1)
        # shape: (N, width, height, ..., n_channels)
        product = surround_input * K.expand_dims(product, -1)
        products.append(product)
    return sum(products)
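# `upscale`, `sample`, and `bitfield` are helpers assumed to exist elsewhere in the
# module. A minimal bitfield consistent with how it is used above (binary digits of
# i, left-padded to `dim` entries by the caller) might look like:
def bitfield(n):
    """Binary digits of n, most significant first, e.g. 5 -> [1, 0, 1]."""
    return [int(digit) for digit in bin(n)[2:]]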
def l1(params):
    return T.sum([T.sum(T.abs_(p)) for p in params.values()])
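# A minimal usage sketch over a dict of shared parameters:
import numpy as np
import theano
import theano.tensor as T

params = {'W': theano.shared(np.array([[1., -2.]], dtype='float32')),
          'b': theano.shared(np.array([3.], dtype='float32'))}
print(theano.function([], l1(params))())  # 1 + 2 + 3 = 6.0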
def get_l1_weight_decay(self, coeff):
    if isinstance(coeff, str):
        coeff = float(coeff)
    assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
    W, = self.transformer.get_params()
    return coeff * T.abs_(W).sum()
def main(args):
    #theano.optimizer='fast_compile'
    #theano.config.exception_verbosity='high'
    trial = int(args['trial'])
    pkl_name = 'vrnn_gmm_%d' % trial
    channel_name = 'valid_nll_upper_bound'

    data_path = args['data_path']
    save_path = args['save_path']  #+'/gmm/'+datetime.datetime.now().strftime("%y-%m-%d_%H-%M")
    flgMSE = int(args['flgMSE'])

    period = int(args['period'])
    n_steps = int(args['n_steps'])
    stride_train = int(args['stride_train'])
    stride_test = n_steps  # int(args['stride_test'])

    monitoring_freq = int(args['monitoring_freq'])
    epoch = int(args['epoch'])
    batch_size = int(args['batch_size'])
    x_dim = int(args['x_dim'])
    y_dim = int(args['y_dim'])
    flgAgg = int(args['flgAgg'])
    z_dim = int(args['z_dim'])
    rnn_dim = int(args['rnn_dim'])
    k = int(args['num_k'])  # a mixture of K Gaussian functions
    lr = float(args['lr'])
    debug = int(args['debug'])
    num_sequences_per_batch = int(args['numSequences'])  # based on appliance
    loadParam = args['loadAsKelly']
    target_inclusion_prob = float(args['target_inclusion_prob'])
    loadAsKelly = True
    if loadParam in ('N', 'n', 'no', 'NO', 'No'):
        loadAsKelly = False

    print "trial no. %d" % trial
    print "batch size %d" % batch_size
    print "learning rate %f" % lr
    print "saving pkl file '%s'" % pkl_name
    print "to the save path '%s'" % save_path

    q_z_dim = 100  #150
    p_z_dim = 60   #150
    p_x_dim = 20   #250
    x2s_dim = 40   #250
    z2s_dim = 40   #150
    target_dim = k  #x_dim #(x_dim-1)*k

    model = Model()
    Xtrain, ytrain, Xval, yval, reader = fetch_ukdale(
        data_path, windows, appliances, numApps=flgAgg, period=period,
        n_steps=n_steps, stride_train=stride_train, stride_test=stride_test,
        flgAggSumScaled=1, flgFilterZeros=1, isKelly=loadAsKelly,
        seq_per_batch=num_sequences_per_batch,
        target_inclusion_prob=target_inclusion_prob)

    instancesPlot = {0: [4, 20], 2: [5, 10]}  # for now use hard-coded instancesPlot for Kelly sampling
    if (not loadAsKelly):
        instancesPlot = reader.build_dict_instances_plot(listDates, batch_size, Xval.shape[0])

    train_data = UKdale(name='train', prep='normalize', cond=True,  # False
                        #path=data_path,
                        inputX=Xtrain, labels=ytrain)
    X_mean = train_data.X_mean
    X_std = train_data.X_std
    valid_data = UKdale(name='valid', prep='normalize', cond=True,  # False
                        #path=data_path,
                        X_mean=X_mean, X_std=X_std, inputX=Xval, labels=yval)

    init_W = InitCell('rand')
    init_U = InitCell('ortho')
    init_b = InitCell('zeros')
    init_b_sig = InitCell('const', mean=0.6)

    x, mask, y, y_mask = train_data.theano_vars()
    x.name = 'x_original'
    if debug:
        x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
        temp = np.ones((15, batch_size), dtype=np.float32)
        temp[:, -2:] = 0.
        mask.tag.test_value = temp

    x_1 = FullyConnectedLayer(name='x_1', parent=['x_t'], parent_dim=[x_dim],
                              nout=x2s_dim, unit='relu', init_W=init_W, init_b=init_b)
    z_1 = FullyConnectedLayer(name='z_1', parent=['z_t'], parent_dim=[z_dim],
                              nout=z2s_dim, unit='relu', init_W=init_W, init_b=init_b)
    rnn = LSTM(name='rnn', parent=['x_1', 'z_1'], parent_dim=[x2s_dim, z2s_dim],
               nout=rnn_dim, unit='tanh', init_W=init_W, init_U=init_U, init_b=init_b)
    '''
    dissag_pred = FullyConnectedLayer(name='disag_1', parent=['s_tm1'], parent_dim=[rnn_dim],
                                      nout=num_apps, unit='relu', init_W=init_W, init_b=init_b)
    '''
    phi_1 = FullyConnectedLayer(name='phi_1', parent=['x_1', 's_tm1'],
                                parent_dim=[x2s_dim, rnn_dim], nout=q_z_dim, unit='relu',
                                init_W=init_W, init_b=init_b)
    phi_mu = FullyConnectedLayer(name='phi_mu', parent=['phi_1'], parent_dim=[q_z_dim],
                                 nout=z_dim, unit='linear', init_W=init_W, init_b=init_b)
    phi_sig = FullyConnectedLayer(name='phi_sig', parent=['phi_1'], parent_dim=[q_z_dim],
                                  nout=z_dim, unit='softplus', cons=1e-4,
                                  init_W=init_W, init_b=init_b_sig)
    prior_1 = FullyConnectedLayer(name='prior_1', parent=['s_tm1'], parent_dim=[rnn_dim],
                                  nout=p_z_dim, unit='relu', init_W=init_W, init_b=init_b)
    prior_mu = FullyConnectedLayer(name='prior_mu', parent=['prior_1'], parent_dim=[p_z_dim],
                                   nout=z_dim, unit='linear', init_W=init_W, init_b=init_b)
    prior_sig = FullyConnectedLayer(name='prior_sig', parent=['prior_1'], parent_dim=[p_z_dim],
                                    nout=z_dim, unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)
    theta_1 = FullyConnectedLayer(name='theta_1', parent=['z_1', 's_tm1'],
                                  parent_dim=[z2s_dim, rnn_dim], nout=p_x_dim, unit='relu',
                                  init_W=init_W, init_b=init_b)
    theta_mu = FullyConnectedLayer(name='theta_mu', parent=['theta_1'], parent_dim=[p_x_dim],
                                   nout=target_dim, unit='linear', init_W=init_W, init_b=init_b)
    theta_sig = FullyConnectedLayer(name='theta_sig', parent=['theta_1'], parent_dim=[p_x_dim],
                                    nout=target_dim, unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)
    coeff = FullyConnectedLayer(name='coeff', parent=['theta_1'], parent_dim=[p_x_dim],
                                nout=k, unit='softmax', init_W=init_W, init_b=init_b)
    corr = FullyConnectedLayer(name='corr', parent=['theta_1'], parent_dim=[p_x_dim],
                               nout=k, unit='tanh', init_W=init_W, init_b=init_b)
    binary = FullyConnectedLayer(name='binary', parent=['theta_1'], parent_dim=[p_x_dim],
                                 nout=1, unit='sigmoid', init_W=init_W, init_b=init_b)

    nodes = [rnn, x_1, z_1,  #dissag_pred,
             phi_1, phi_mu, phi_sig, prior_1, prior_mu, prior_sig,
             theta_1, theta_mu, theta_sig, coeff]  #, corr, binary

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())
    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)
    x_1_temp = x_1.fprop([x], params)

    def inner_fn(x_t, s_tm1):
        phi_1_t = phi_1.fprop([x_t, s_tm1], params)
        phi_mu_t = phi_mu.fprop([phi_1_t], params)
        phi_sig_t = phi_sig.fprop([phi_1_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        theta_1_t = theta_1.fprop([z_1_t, s_tm1], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        coeff_t = coeff.fprop([theta_1_t], params)
        #corr_t = corr.fprop([theta_1_t], params)
        #binary_t = binary.fprop([theta_1_t], params)

        pred = GMM_sample(theta_mu_t, theta_sig_t, coeff_t)  #Gaussian_sample(theta_mu_t, theta_sig_t)
        s_t = rnn.fprop([[x_t, z_1_t], [s_tm1]], params)
        #y_pred = dissag_pred.fprop([s_t], params)
        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_t, z_1_t, \
            theta_1_t, theta_mu_t, theta_sig_t, coeff_t, pred  #, y_pred #corr_temp, binary_temp

    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp, z_t_temp, z_1_temp,
      theta_1_temp, theta_mu_temp, theta_sig_temp, coeff_temp, prediction), updates) = \
        theano.scan(fn=inner_fn, sequences=[x_1_temp],
                    outputs_info=[s_0, None, None, None, None, None, None,
                                  None, None, None, None, None])

    for k, v in updates.iteritems():
        k.default_update = v

    # prepend s_0: creates the additional leading time step for the state sequence
    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    '''
    theta_1_temp = theta_1.fprop([z_1_temp, s_temp], params)
    theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    coeff_temp = coeff.fprop([theta_1_temp], params)
    corr_temp = corr.fprop([theta_1_temp], params)
    binary_temp = binary.fprop([theta_1_temp], params)
    '''
    s_temp.name = 'h_1'  #gisse
    z_1_temp.name = 'z_1'  #gisse
    z_t_temp.name = 'z'
    theta_mu_temp.name = 'theta_mu_temp'
    theta_sig_temp.name = 'theta_sig_temp'
    coeff_temp.name = 'coeff'
    #corr_temp.name = 'corr'
    #binary_temp.name = 'binary'

    x_shape = x.shape  # defined before the branch: both branches below need it
    if (flgAgg == -1):
        prediction.name = 'x_reconstructed'
        mse = T.mean((prediction - x)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(prediction - x))
        mse.name = 'mse'
        pred_in = x.reshape((x_shape[0] * x_shape[1], -1))
    else:
        prediction.name = 'pred_' + str(flgAgg)  #[:,:,flgAgg].reshape((y.shape[0],y.shape[1],1)
        mse = T.mean((prediction - y)**2)  # as axis=None, this is calculated over all elements
        mae = T.mean(T.abs_(prediction - y))
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = y.reshape((y.shape[0] * y.shape[1], -1))

    kl_temp = KLGaussianGaussian(phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp)

    theta_mu_in = theta_mu_temp.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
    coeff_in = coeff_temp.reshape((x_shape[0] * x_shape[1], -1))
    #corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    #binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    recon = GMM(pred_in, theta_mu_in, theta_sig_in, coeff_in)
    # BiGMM(x_in, theta_mu_in, theta_sig_in, coeff_in, corr_in, binary_in)
    recon = recon.reshape((x_shape[0], x_shape[1]))
    recon.name = 'gmm_out'
    #recon = recon * mask
    recon_term = recon.sum(axis=0).mean()
    recon_term.name = 'recon_term'
    #kl_temp = kl_temp * mask
    kl_term = kl_temp.sum(axis=0).mean()
    kl_term.name = 'kl_term'

    nll_upper_bound = recon_term + kl_term  #+ mse
    if (flgMSE):
        nll_upper_bound = nll_upper_bound + mse
    nll_upper_bound.name = 'nll_upper_bound'

    max_x = x.max()
    mean_x = x.mean()
    min_x = x.min()
    max_x.name = 'max_x'
    mean_x.name = 'mean_x'
    min_x.name = 'min_x'

    max_theta_mu = theta_mu_in.max()
    mean_theta_mu = theta_mu_in.mean()
    min_theta_mu = theta_mu_in.min()
    max_theta_mu.name = 'max_theta_mu'
    mean_theta_mu.name = 'mean_theta_mu'
    min_theta_mu.name = 'min_theta_mu'

    max_theta_sig = theta_sig_in.max()
    mean_theta_sig = theta_sig_in.mean()
    min_theta_sig = theta_sig_in.min()
    max_theta_sig.name = 'max_theta_sig'
    mean_theta_sig.name = 'mean_theta_sig'
    min_theta_sig.name = 'min_theta_sig'

    coeff_max = coeff_in.max()
    coeff_min = coeff_in.min()
    coeff_mean_max = coeff_in.mean(axis=0).max()
    coeff_mean_min = coeff_in.mean(axis=0).min()
    coeff_max.name = 'coeff_max'
    coeff_min.name = 'coeff_min'
    coeff_mean_max.name = 'coeff_mean_max'
    coeff_mean_min.name = 'coeff_mean_min'

    max_phi_sig = phi_sig_temp.max()
    mean_phi_sig = phi_sig_temp.mean()
    min_phi_sig = phi_sig_temp.min()
    max_phi_sig.name = 'max_phi_sig'
    mean_phi_sig.name = 'mean_phi_sig'
    min_phi_sig.name = 'min_phi_sig'

    max_prior_sig = prior_sig_temp.max()
    mean_prior_sig = prior_sig_temp.mean()
    min_prior_sig = prior_sig_temp.min()
    max_prior_sig.name = 'max_prior_sig'
    mean_prior_sig.name = 'mean_prior_sig'
    min_prior_sig.name = 'min_prior_sig'

    model.inputs = [x, mask, y, y_mask]
    model.params = params
    model.nodes = nodes

    optimizer = Adam(lr=lr)

    extension = [
        GradientClipping(batch_size=batch_size),
        EpochCount(epoch),
        Monitoring(freq=monitoring_freq,
                   ddout=[nll_upper_bound, recon_term, kl_term, mse, mae,
                          theta_mu_temp, theta_sig_temp, z_t_temp, prediction,
                          #corr_temp, binary_temp,
                          s_temp, z_1_temp],
                   indexSep=5,
                   indexDDoutPlot=[(0, theta_mu_temp), (2, z_t_temp), (3, prediction)],
                   instancesPlot=instancesPlot,  #{0:[4,20],2:[5,10]},#, 80,150
                   data=[Iterator(valid_data, batch_size)],
                   savedFolder=save_path),
        Picklize(freq=monitoring_freq, path=save_path),
        EarlyStopping(freq=monitoring_freq, path=save_path, channel=channel_name),
        WeightNorm()
    ]

    lr_iterations = {0: lr, 15: (lr / 10), 70: (lr / 100)}

    mainloop = Training(name=pkl_name,
                        data=Iterator(train_data, batch_size),
                        model=model,
                        optimizer=optimizer,
                        cost=nll_upper_bound,
                        outputs=[nll_upper_bound],
                        extension=extension,
                        lr_iterations=lr_iterations)
    #mainloop.run()

    fLog = open(save_path + '/output.csv', 'w')
    fLog.write("log,kl,nll_upper_bound,mse,mae\n")
    for i, item in enumerate(mainloop.trainlog.monitor['nll_upper_bound']):
        a = mainloop.trainlog.monitor['recon_term'][i]
        b = mainloop.trainlog.monitor['kl_term'][i]
        c = mainloop.trainlog.monitor['nll_upper_bound'][i]
        d = mainloop.trainlog.monitor['mse'][i]
        e = mainloop.trainlog.monitor['mae'][i]
        fLog.write("{},{},{},{},{}\n".format(a, b, c, d, e))
def main(args):
    #theano.optimizer='fast_compile'
    #theano.config.exception_verbosity='high'
    trial = int(args['trial'])
    pkl_name = 'dp_dis1-sch_%d' % trial
    channel_name = 'mae'

    data_path = args['data_path']
    save_path = args['save_path']  #+'/gmm/'+datetime.datetime.now().strftime("%y-%m-%d_%H-%M")
    flgMSE = int(args['flgMSE'])

    period = int(args['period'])
    n_steps = int(args['n_steps'])
    stride_train = int(args['stride_train'])
    stride_test = n_steps  # int(args['stride_test'])

    monitoring_freq = int(args['monitoring_freq'])
    epoch = int(args['epoch'])
    batch_size = int(args['batch_size'])
    x_dim = int(args['x_dim'])
    y_dim = int(args['y_dim'])
    flgAgg = int(args['flgAgg'])
    z_dim = int(args['z_dim'])
    rnn_dim = int(args['rnn_dim'])
    k = int(args['num_k'])  # a mixture of K Gaussian functions
    lr = float(args['lr'])
    typeLoad = int(args['typeLoad'])
    debug = int(args['debug'])
    kSchedSamp = int(args['kSchedSamp'])

    print "trial no. %d" % trial
    print "batch size %d" % batch_size
    print "learning rate %f" % lr
    print "saving pkl file '%s'" % pkl_name
    print "to the save path '%s'" % save_path

    q_z_dim = 150
    p_z_dim = 150
    p_x_dim = 150  #250
    x2s_dim = 100  #250
    y2s_dim = 100
    z2s_dim = 100  #150
    target_dim = k  #x_dim #(x_dim-1)*k

    model = Model()
    Xtrain, ytrain, Xval, yval, Xtest, ytest, reader = fetch_dataport(
        data_path, windows, appliances, numApps=flgAgg, period=period,
        n_steps=n_steps, stride_train=stride_train, stride_test=stride_test,
        trainPer=0.6, valPer=0.2, testPer=0.2, typeLoad=typeLoad,
        flgAggSumScaled=1, flgFilterZeros=1)
    print(reader.stdTrain, reader.meanTrain)

    instancesPlot = {0: [4], 2: [10]}  # for now use hard-coded instancesPlot for Kelly sampling

    train_data = Dataport(name='train', prep='normalize', cond=True,  # False
                          #path=data_path,
                          inputX=Xtrain, labels=ytrain)
    X_mean = train_data.X_mean
    X_std = train_data.X_std
    valid_data = Dataport(name='valid', prep='normalize', cond=True,  # False
                          #path=data_path,
                          X_mean=X_mean, X_std=X_std, inputX=Xval, labels=yval)
    test_data = Dataport(name='valid', prep='normalize', cond=True,  # False
                         #path=data_path,
                         X_mean=X_mean, X_std=X_std, inputX=Xtest, labels=ytest)

    init_W = InitCell('rand')
    init_U = InitCell('ortho')
    init_b = InitCell('zeros')
    init_b_sig = InitCell('const', mean=0.6)

    x, mask, y, y_mask = train_data.theano_vars()
    scheduleSamplingMask = T.fvector('schedMask')
    x.name = 'x_original'
    if debug:
        x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
        temp = np.ones((15, batch_size), dtype=np.float32)
        temp[:, -2:] = 0.
        mask.tag.test_value = temp

    """
    print(mainloop)
    attrs = vars(mainloop)
    print ', '.join("%s: %s" % item for item in attrs.items())
    print(type(mainloop.model.nodes[1]))
    print(type(mainloop.model.params))
    """
    #for node in mainloop.model.nodes:
    #    print(node.name)
    '''
    for node in mainloop.model.nodes:
        print("Name:", node.name, "Parent:", node.parent, "Unit:", node.unit,
              "init_W:", node.init_W, "init_b:", node.init_b)
    for param in mainloop.model.params:
        print(type(param), param)
    '''
    """
    rnn = LSTM(name='rnn', parent=['x_1', 'z_1', 'y_1'], parent_dim=[x2s_dim, z2s_dim, y_dim],
               nout=rnn_dim, unit='tanh', init_W=mainloop.model.nodes[0].init_W,
               init_U=mainloop.model.nodes[0].init_U, init_b=mainloop.model.nodes[0].init_b)
    x_1 = FullyConnectedLayer(name='x_1', parent=['x_t'], parent_dim=[x_dim], nout=x2s_dim,
                              unit='relu', init_W=mainloop.model.nodes[1].init_W,
                              init_b=mainloop.model.nodes[1].init_b)
    y_1 = FullyConnectedLayer(name='y_1', parent=['y_t'], parent_dim=[y_dim], nout=y2s_dim,
                              unit='relu', init_W=mainloop.model.nodes[2].init_W,
                              init_b=mainloop.model.nodes[2].init_b)
    z_1 = FullyConnectedLayer(name='z_1', parent=['z_t'], parent_dim=[z_dim], nout=z2s_dim,
                              unit='relu', init_W=mainloop.model.nodes[3].init_W,
                              init_b=mainloop.model.nodes[3].init_b)
    phi_1 = FullyConnectedLayer(name='phi_1', parent=['x_1', 's_tm1', 'y_1'],
                                parent_dim=[x2s_dim, rnn_dim, y2s_dim], nout=q_z_dim, unit='relu',
                                init_W=mainloop.model.nodes[4].init_W, init_b=mainloop.model.nodes[4].init_b)
    phi_mu = FullyConnectedLayer(name='phi_mu', parent=['phi_1'], parent_dim=[q_z_dim], nout=z_dim,
                                 unit='linear', init_W=mainloop.model.nodes[5].init_W,
                                 init_b=mainloop.model.nodes[5].init_b)
    phi_sig = FullyConnectedLayer(name='phi_sig', parent=['phi_1'], parent_dim=[q_z_dim], nout=z_dim,
                                  unit='softplus', cons=1e-4, init_W=mainloop.model.nodes[6].init_W,
                                  init_b=mainloop.model.nodes[6].init_b)
    prior_1 = FullyConnectedLayer(name='prior_1', parent=['x_1', 's_tm1'], parent_dim=[x2s_dim, rnn_dim],
                                  nout=p_z_dim, unit='relu', init_W=mainloop.model.nodes[7].init_W,
                                  init_b=mainloop.model.nodes[7].init_b)
    prior_mu = FullyConnectedLayer(name='prior_mu', parent=['prior_1'], parent_dim=[p_z_dim], nout=z_dim,
                                   unit='linear', init_W=mainloop.model.nodes[8].init_W,
                                   init_b=mainloop.model.nodes[8].init_b)
    prior_sig = FullyConnectedLayer(name='prior_sig', parent=['prior_1'], parent_dim=[p_z_dim], nout=z_dim,
                                    unit='softplus', cons=1e-4, init_W=mainloop.model.nodes[9].init_W,
                                    init_b=mainloop.model.nodes[9].init_b)
    theta_1 = FullyConnectedLayer(name='theta_1', parent=['z_1', 's_tm1'], parent_dim=[z2s_dim, rnn_dim],
                                  nout=p_x_dim, unit='relu', init_W=mainloop.model.nodes[10].init_W,
                                  init_b=mainloop.model.nodes[10].init_b)
    theta_mu = FullyConnectedLayer(name='theta_mu', parent=['theta_1'], parent_dim=[p_x_dim],
                                   nout=target_dim, unit='linear', init_W=mainloop.model.nodes[11].init_W,
                                   init_b=mainloop.model.nodes[11].init_b)
    theta_sig = FullyConnectedLayer(name='theta_sig', parent=['theta_1'], parent_dim=[p_x_dim],
                                    nout=target_dim, unit='softplus', cons=1e-4,
                                    init_W=mainloop.model.nodes[12].init_W,
                                    init_b=mainloop.model.nodes[12].init_b)
    coeff = FullyConnectedLayer(name='coeff', parent=['theta_1'], parent_dim=[p_x_dim], nout=k,
                                unit='softmax', init_W=mainloop.model.nodes[13].init_W,
                                init_b=mainloop.model.nodes[13].init_b)
    """

    # pickle is from experiment gmmAE/18-05-30_16-07_app3
    fmodel = open('dp_dis1-sch_1.pkl', 'rb')
    mainloop = cPickle.load(fmodel)
    fmodel.close()

    rnn = mainloop.model.nodes[0]
    x_1 = mainloop.model.nodes[1]
    y_1 = mainloop.model.nodes[2]
    z_1 = mainloop.model.nodes[3]
    phi_1 = mainloop.model.nodes[4]
    phi_mu = mainloop.model.nodes[5]
    phi_sig = mainloop.model.nodes[6]
    prior_1 = mainloop.model.nodes[7]
    prior_mu = mainloop.model.nodes[8]
    prior_sig = mainloop.model.nodes[9]
    theta_1 = mainloop.model.nodes[10]
    theta_mu = mainloop.model.nodes[11]
    theta_sig = mainloop.model.nodes[12]
    coeff = mainloop.model.nodes[13]

    nodes = [rnn, x_1, y_1, z_1,  #dissag_pred,
             phi_1, phi_mu, phi_sig, prior_1, prior_mu, prior_sig,
             theta_1, theta_mu, theta_sig, coeff]  #, corr, binary

    params = mainloop.model.params

    s_0 = rnn.get_init_state(batch_size)
    x_1_temp = x_1.fprop([x], params)
    y_1_temp = y_1.fprop([y], params)

    def inner_fn_val(x_t, s_tm1):
        prior_1_t = prior_1.fprop([x_t, s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(prior_mu_t, prior_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        theta_1_t = theta_1.fprop([z_1_t, s_tm1], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        coeff_t = coeff.fprop([theta_1_t], params)

        pred_t = GMM_sample(theta_mu_t, theta_sig_t, coeff_t)  #Gaussian_sample(theta_mu_t, theta_sig_t)
        pred_1_t = y_1.fprop([pred_t], params)
        s_t = rnn.fprop([[x_t, z_1_t, pred_1_t], [s_tm1]], params)
        #y_pred = dissag_pred.fprop([s_t], params)
        return s_t, prior_mu_t, prior_sig_t, theta_mu_t, theta_sig_t, coeff_t, pred_t
        #, y_pred #corr_temp, binary_temp

    ((s_temp_val, prior_mu_temp_val, prior_sig_temp_val, theta_mu_temp_val,
      theta_sig_temp_val, coeff_temp_val, prediction_val), updates_val) = \
        theano.scan(fn=inner_fn_val, sequences=[x_1_temp],
                    outputs_info=[s_0, None, None, None, None, None, None])

    for k, v in updates_val.iteritems():
        k.default_update = v

    s_temp_val = concatenate([s_0[None, :, :], s_temp_val[:-1]], axis=0)

    def inner_fn_train(x_t, y_t, schedSampMask, s_tm1):
        from theano.ifelse import ifelse

        phi_1_t = phi_1.fprop([x_t, s_tm1, y_t], params)
        phi_mu_t = phi_mu.fprop([phi_1_t], params)
        phi_sig_t = phi_sig.fprop([phi_1_t], params)

        prior_1_t = prior_1.fprop([x_t, s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        theta_1_t = theta_1.fprop([z_1_t, s_tm1], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        coeff_t = coeff.fprop([theta_1_t], params)
        #corr_t = corr.fprop([theta_1_t], params)
        #binary_t = binary.fprop([theta_1_t], params)

        pred = GMM_sample(theta_mu_t, theta_sig_t, coeff_t)  #Gaussian_sample(theta_mu_t, theta_sig_t)
        # schedSampMask is symbolic inside scan, so the branch must be a symbolic
        # conditional (a plain Python `if` cannot branch on it per step)
        y_t_aux = y_1.fprop([pred], params)
        s_t = ifelse(T.eq(schedSampMask, 1),
                     rnn.fprop([[x_t, z_1_t, y_t], [s_tm1]], params),
                     rnn.fprop([[x_t, z_1_t, y_t_aux], [s_tm1]], params))
        #y_pred = dissag_pred.fprop([s_t], params)
        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, theta_mu_t, theta_sig_t, coeff_t, pred
        #, y_pred #corr_temp, binary_temp

    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp,
      theta_mu_temp, theta_sig_temp, coeff_temp, prediction), updates) = \
        theano.scan(fn=inner_fn_train, sequences=[x_1_temp, y_1_temp, scheduleSamplingMask],
                    outputs_info=[s_0, None, None, None, None, None, None, None, None])

    for k, v in updates.iteritems():
        k.default_update = v

    #s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)  # creates an additional leading dimension for s_0
    theta_mu_temp.name = 'theta_mu_temp'
    theta_sig_temp.name = 'theta_sig_temp'
    coeff_temp.name = 'coeff'

    x_shape = x.shape  # defined before the branch: both branches below need it
    if (flgAgg == -1):
        prediction.name = 'x_reconstructed'
        mse = T.mean((prediction - x)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(prediction - x))
        mse.name = 'mse'
        pred_in = x.reshape((x_shape[0] * x_shape[1], -1))
    else:
        prediction.name = 'pred_' + str(flgAgg)
        mse = T.mean((prediction - y)**2)  # as axis=None, this is calculated over all elements
        mae = T.mean(T.abs_(prediction - y))
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = y.reshape((y.shape[0] * y.shape[1], -1))

    kl_temp = KLGaussianGaussian(phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp)

    theta_mu_in = theta_mu_temp.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
    coeff_in = coeff_temp.reshape((x_shape[0] * x_shape[1], -1))
    #corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    #binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    recon = GMM(pred_in, theta_mu_in, theta_sig_in, coeff_in)
    # BiGMM(x_in, theta_mu_in, theta_sig_in, coeff_in, corr_in, binary_in)
    recon = recon.reshape((x_shape[0], x_shape[1]))
    recon.name = 'gmm_out'
    recon_term = recon.sum(axis=0).mean()
    recon_term.name = 'recon_term'
    kl_term = kl_temp.sum(axis=0).mean()
    kl_term.name = 'kl_term'

    nll_upper_bound = recon_term + kl_term  #+ mse
    if (flgMSE):
        nll_upper_bound = nll_upper_bound + mse
    nll_upper_bound.name = 'nll_upper_bound'

    ######################## TEST (GENERATION) TIME
    prediction_val.name = 'generated__' + str(flgAgg)
    mse_val = T.mean((prediction_val - y)**2)  # as axis=None, this is calculated over all elements
    mae_val = T.mean(T.abs_(prediction_val - y))
    #y_unNormalize = (y * reader.stdTrain) + reader.meanTrain  # accessing just a scalar when loading y_dim=1
    #prediction_valAux = (prediction_val * reader.stdTrain) + reader.meanTrain
    #mse_valUnNorm = T.mean((prediction_valAux - y_unNormalize)**2)
    #mae_valUnNorm = T.mean(T.abs_(prediction_valAux - y_unNormalize))
    mse_val.name = 'mse_val'
    mae_val.name = 'mae_val'
    pred_in_val = y.reshape((y.shape[0] * y.shape[1], -1))

    theta_mu_in_val = theta_mu_temp_val.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in_val = theta_sig_temp_val.reshape((x_shape[0] * x_shape[1], -1))
    coeff_in_val = coeff_temp_val.reshape((x_shape[0] * x_shape[1], -1))

    recon_val = GMM(pred_in_val, theta_mu_in_val, theta_sig_in_val, coeff_in_val)
    # BiGMM(x_in, theta_mu_in, theta_sig_in, coeff_in, corr_in, binary_in)
    recon_val = recon_val.reshape((x_shape[0], x_shape[1]))
    recon_val.name = 'gmm_out_val'
    recon_term_val = recon_val.sum(axis=0).mean()
    recon_term_val.name = 'recon_term_val'

    model.inputs = [x, mask, y, y_mask, scheduleSamplingMask]
    model.params = params
    model.nodes = nodes

    optimizer = Adam(lr=lr)
    header = "epoch,log,kl,nll_upper_bound,mse,mae\n"
    extension = [
        GradientClipping(batch_size=batch_size),
        EpochCount(epoch, save_path, header),
        Monitoring(freq=monitoring_freq,
                   ddout=[nll_upper_bound, recon_term, kl_term, mse, mae, prediction],
                   indexSep=5,
                   instancesPlot=instancesPlot,  #{0:[4,20],2:[5,10]},#, 80,150
                   data=[Iterator(valid_data, batch_size)],
                   savedFolder=save_path),
        Picklize(freq=monitoring_freq, path=save_path),
        EarlyStopping(freq=monitoring_freq, path=save_path, channel=channel_name),
        WeightNorm()
    ]

    lr_iterations = {0: lr, 75: (lr / 10), 150: (lr / 100)}

    """
    mainloop = Training(name=pkl_name, data=Iterator(train_data, batch_size), model=model,
                        optimizer=optimizer, cost=nll_upper_bound,
                        outputs=[recon_term, kl_term, nll_upper_bound, mse, mae],
                        n_steps=n_steps, extension=extension, lr_iterations=lr_iterations,
                        k_speedOfconvergence=kSchedSamp)
    """
    """
    mainloop.restore(data=Iterator(train_data, batch_size), cost=nll_upper_bound,
                     model=model, optimizer=mainloop.optimizer)
    """
    mainloop.restore(name=pkl_name,
                     data=Iterator(train_data, batch_size),
                     model=model,
                     optimizer=optimizer,
                     cost=nll_upper_bound,
                     outputs=[recon_term, kl_term, nll_upper_bound, mse, mae],
                     n_steps=n_steps,
                     extension=extension,
                     lr_iterations=lr_iterations,
                     k_speedOfconvergence=kSchedSamp)
    mainloop.run()

    data = Iterator(test_data, batch_size)
    test_fn = theano.function(inputs=[x, y],
                              #givens={x: Xtest},
                              #on_unused_input='ignore',
                              allow_input_downcast=True,
                              outputs=[prediction_val, recon_term_val, mse_val, mae_val],
                              updates=updates_val)

    testOutput = []
    numBatchTest = 0
    for batch in data:
        outputGeneration = test_fn(batch[0], batch[2])  #(20, 220, 1)
        testOutput.append(outputGeneration[1:])
        # outputGeneration[0].shape #(20, 220, 40)
        #if (numBatchTest < 5):
        '''
        plt.figure(1)
        plt.plot(np.transpose(outputGeneration[0], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_z_0-4".format(numBatchTest))
        plt.clf()

        plt.figure(2)
        plt.plot(np.transpose(outputGeneration[1], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_s_0-4".format(numBatchTest))
        plt.clf()

        plt.figure(3)
        plt.plot(np.transpose(outputGeneration[2], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_theta_0-4".format(numBatchTest))
        plt.clf()
        '''
        plt.figure(4)
        plt.plot(np.transpose(outputGeneration[0], [1, 0, 2])[4])
        plt.plot(np.transpose(batch[2], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_RealAndPred_0-4".format(numBatchTest))
        plt.clf()

        plt.figure(4)
        plt.plot(np.transpose(batch[0], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_Realagg_0-4".format(numBatchTest))
        plt.clf()
        numBatchTest += 1

    testOutput = np.asarray(testOutput)
    print(testOutput.shape)
    recon_test = testOutput[:, 0].mean()
    mse_test = testOutput[:, 1].mean()
    mae_test = testOutput[:, 2].mean()
    #mseUnNorm_test = testOutput[:, 3].mean()
    #maeUnNorm_test = testOutput[:, 4].mean()

    fLog = open(save_path + '/output.csv', 'w')
    fLog.write(str(lr_iterations) + "\n")
    fLog.write(str(windows) + "\n")
    fLog.write("logTest,mseTest,maeTest,mseTestUnNorm,maeTestUnNorm\n")
    fLog.write("{},{},{}\n".format(recon_test, mse_test, mae_test))
    fLog.write("q_z_dim,p_z_dim,p_x_dim,x2s_dim,y2s_dim,z2s_dim\n")
    fLog.write("{},{},{},{},{},{}\n".format(q_z_dim, p_z_dim, p_x_dim, x2s_dim, y2s_dim, z2s_dim))
    header = "epoch,log,kl,mse,mae\n"
    fLog.write(header)
    for i, item in enumerate(mainloop.trainlog.monitor['recon_term']):
        f = mainloop.trainlog.monitor['epoch'][i]
        a = mainloop.trainlog.monitor['recon_term'][i]
        b = mainloop.trainlog.monitor['kl_term'][i]
        d = mainloop.trainlog.monitor['mse'][i]
        e = mainloop.trainlog.monitor['mae'][i]
        fLog.write("{:d},{:.2f},{:.2f},{:.3f},{:.3f}\n".format(f, a, b, d, e))
def laplacian(b, mu=0.0):
    # the Laplacian distribution is only in the exponential family when mu = 0!
    uniform_samples = theano_rng.uniform(size=b.shape, dtype=theano.config.floatX)
    return mu - b * T.sgn(uniform_samples - 0.5) * T.log(1 - 2 * T.abs_(uniform_samples - 0.5))
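# A minimal sampling sketch (assuming `theano_rng` is a module-level RandomStreams):
# this is the standard inverse-CDF transform, so the samples should have mean ~mu
# and variance ~2*b**2.
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

theano_rng = RandomStreams(seed=42)
samples = laplacian(2.0 * T.ones(100000), mu=1.0)
draw = theano.function([], samples)()
print(np.mean(draw), np.var(draw))  # ~1.0 and ~8.0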
def main(args):
    #theano.optimizer='fast_compile'
    #theano.config.exception_verbosity='high'
    trial = int(args['trial'])
    pkl_name = 'dp_dis1-nosch_%d' % trial
    channel_name = 'mae'

    data_path = args['data_path']
    save_path = args['save_path']  #+'/gmm/'+datetime.datetime.now().strftime("%y-%m-%d_%H-%M")
    flgMSE = int(args['flgMSE'])

    period = int(args['period'])
    n_steps = int(args['n_steps'])
    stride_train = int(args['stride_train'])
    stride_test = n_steps  # int(args['stride_test'])

    monitoring_freq = int(args['monitoring_freq'])
    epoch = int(args['epoch'])
    batch_size = int(args['batch_size'])
    x_dim = int(args['x_dim'])
    y_dim = int(args['y_dim'])
    flgAgg = int(args['flgAgg'])
    z_dim = int(args['z_dim'])
    rnn_dim = int(args['rnn_dim'])
    k = int(args['num_k'])  # a mixture of K Gaussian functions
    lr = float(args['lr'])
    typeLoad = int(args['typeLoad'])
    debug = int(args['debug'])
    kSchedSamp = int(args['kSchedSamp'])
    typeActivFunc = args['typeActivFunc']

    print "trial no. %d" % trial
    print "batch size %d" % batch_size
    print "learning rate %f" % lr
    print "saving pkl file '%s'" % pkl_name
    print "to the save path '%s'" % save_path

    q_z_dim = 150
    p_z_dim = 150
    p_x_dim = 150  #250
    x2s_dim = 100  #250
    y2s_dim = 100
    z2s_dim = 100  #150
    target_dim = k  #x_dim #(x_dim-1)*k

    model = Model()
    Xtrain, ytrain, Xval, yval, Xtest, ytest, reader = fetch_dataport(
        data_path, windows, appliances, numApps=flgAgg, period=period,
        n_steps=n_steps, stride_train=stride_train, stride_test=stride_test,
        trainPer=0.6, valPer=0.2, testPer=0.2, typeLoad=typeLoad,
        flgAggSumScaled=1, flgFilterZeros=1)
    print(reader.stdTrain, reader.meanTrain)

    instancesPlot = {0: [4], 2: [5]}  # for now use hard-coded instancesPlot for Kelly sampling

    train_data = Dataport(name='train', prep='normalize', cond=True,  # False
                          #path=data_path,
                          inputX=Xtrain, labels=ytrain)
    X_mean = train_data.X_mean
    X_std = train_data.X_std
    valid_data = Dataport(name='valid', prep='normalize', cond=True,  # False
                          #path=data_path,
                          X_mean=X_mean, X_std=X_std, inputX=Xval, labels=yval)
    test_data = Dataport(name='valid', prep='normalize', cond=True,  # False
                         #path=data_path,
                         X_mean=X_mean, X_std=X_std, inputX=Xtest, labels=ytest)

    init_W = InitCell('rand')
    init_U = InitCell('ortho')
    init_b = InitCell('zeros')
    init_b_sig = InitCell('const', mean=0.6)

    x, mask, y, y_mask = train_data.theano_vars()
    scheduleSamplingMask = T.fvector('schedMask')
    x.name = 'x_original'
    if debug:
        x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
        temp = np.ones((15, batch_size), dtype=np.float32)
        temp[:, -2:] = 0.
        mask.tag.test_value = temp

    x_1 = FullyConnectedLayer(name='x_1', parent=['x_t'], parent_dim=[x_dim],
                              nout=x2s_dim, unit='relu', init_W=init_W, init_b=init_b)
    y_1 = FullyConnectedLayer(name='y_1', parent=['y_t'], parent_dim=[y_dim],
                              nout=y2s_dim, unit='relu', init_W=init_W, init_b=init_b)
    z_1 = FullyConnectedLayer(name='z_1', parent=['z_t'], parent_dim=[z_dim],
                              nout=z2s_dim, unit='relu', init_W=init_W, init_b=init_b)
    rnn = LSTM(name='rnn', parent=['x_1', 'z_1', 'y_1'],
               parent_dim=[x2s_dim, z2s_dim, y_dim], nout=rnn_dim, unit='tanh',
               init_W=init_W, init_U=init_U, init_b=init_b)
    phi_1 = FullyConnectedLayer(name='phi_1', parent=['x_1', 's_tm1', 'y_1'],
                                parent_dim=[x2s_dim, rnn_dim, y2s_dim], nout=q_z_dim,
                                unit='relu', init_W=init_W, init_b=init_b)
    phi_mu = FullyConnectedLayer(name='phi_mu', parent=['phi_1'], parent_dim=[q_z_dim],
                                 nout=z_dim, unit='linear', init_W=init_W, init_b=init_b)
    phi_sig = FullyConnectedLayer(name='phi_sig', parent=['phi_1'], parent_dim=[q_z_dim],
                                  nout=z_dim, unit='softplus', cons=1e-4,
                                  init_W=init_W, init_b=init_b_sig)
    prior_1 = FullyConnectedLayer(name='prior_1', parent=['x_1', 's_tm1'],
                                  parent_dim=[x2s_dim, rnn_dim], nout=p_z_dim, unit='relu',
                                  init_W=init_W, init_b=init_b)
    prior_mu = FullyConnectedLayer(name='prior_mu', parent=['prior_1'], parent_dim=[p_z_dim],
                                   nout=z_dim, unit='linear', init_W=init_W, init_b=init_b)
    prior_sig = FullyConnectedLayer(name='prior_sig', parent=['prior_1'], parent_dim=[p_z_dim],
                                    nout=z_dim, unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)
    theta_1 = FullyConnectedLayer(name='theta_1', parent=['z_1', 's_tm1'],
                                  parent_dim=[z2s_dim, rnn_dim], nout=p_x_dim, unit='relu',
                                  init_W=init_W, init_b=init_b)
    theta_mu = FullyConnectedLayer(name='theta_mu', parent=['theta_1'], parent_dim=[p_x_dim],
                                   nout=target_dim, unit=typeActivFunc,
                                   init_W=init_W, init_b=init_b)
    theta_sig = FullyConnectedLayer(name='theta_sig', parent=['theta_1'], parent_dim=[p_x_dim],
                                    nout=target_dim, unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)
    coeff = FullyConnectedLayer(name='coeff', parent=['theta_1'], parent_dim=[p_x_dim],
                                nout=k, unit='softmax', init_W=init_W, init_b=init_b)
    corr = FullyConnectedLayer(name='corr', parent=['theta_1'], parent_dim=[p_x_dim],
                               nout=k, unit='tanh', init_W=init_W, init_b=init_b)
    binary = FullyConnectedLayer(name='binary', parent=['theta_1'], parent_dim=[p_x_dim],
                                 nout=1, unit='sigmoid', init_W=init_W, init_b=init_b)

    nodes = [rnn, x_1, y_1, z_1,  #dissag_pred,
             phi_1, phi_mu, phi_sig, prior_1, prior_mu, prior_sig,
             theta_1, theta_mu, theta_sig, coeff]  #, corr, binary

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())
    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)
    x_1_temp = x_1.fprop([x], params)
    y_1_temp = y_1.fprop([y], params)

    def inner_fn_train(x_t, y_t, s_tm1):
        phi_1_t = phi_1.fprop([x_t, s_tm1, y_t], params)
        phi_mu_t = phi_mu.fprop([phi_1_t], params)
        phi_sig_t = phi_sig.fprop([phi_1_t], params)

        prior_1_t = prior_1.fprop([x_t, s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        theta_1_t = theta_1.fprop([z_1_t, s_tm1], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        coeff_t = coeff.fprop([theta_1_t], params)
        #corr_t = corr.fprop([theta_1_t], params)
        #binary_t = binary.fprop([theta_1_t], params)

        pred = GMM_sample(theta_mu_t, theta_sig_t, coeff_t)  #Gaussian_sample(theta_mu_t, theta_sig_t)
        s_t = rnn.fprop([[x_t, z_1_t, y_t], [s_tm1]], params)
        #y_pred = dissag_pred.fprop([s_t], params)
        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, \
            theta_mu_t, theta_sig_t, coeff_t, pred  #, y_pred #corr_temp, binary_temp

    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp,
      theta_mu_temp, theta_sig_temp, coeff_temp, prediction), updates) = \
        theano.scan(fn=inner_fn_train, sequences=[x_1_temp, y_1_temp],
                    outputs_info=[s_0, None, None, None, None, None, None, None, None])

    for k, v in updates.iteritems():
        k.default_update = v

    #s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)  # creates an additional leading dimension for s_0
    theta_mu_temp.name = 'theta_mu_temp'
    theta_sig_temp.name = 'theta_sig_temp'
    coeff_temp.name = 'coeff'

    x_shape = x.shape  # defined before the branch: both branches below need it
    if (flgAgg == -1):
        prediction.name = 'x_reconstructed'
        mse = T.mean((prediction - x)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(prediction - x))
        mse.name = 'mse'
        pred_in = x.reshape((x_shape[0] * x_shape[1], -1))
    else:
        prediction.name = 'pred_' + str(flgAgg)
        mse = T.mean((prediction - y)**2)  # as axis=None, this is calculated over all elements
        mae = T.mean(T.abs_(prediction - y))
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = y.reshape((y.shape[0] * y.shape[1], -1))

    kl_temp = KLGaussianGaussian(phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp)

    theta_mu_in = theta_mu_temp.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
    coeff_in = coeff_temp.reshape((x_shape[0] * x_shape[1], -1))
    #corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    #binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    recon = GMM(pred_in, theta_mu_in, theta_sig_in, coeff_in)
    # BiGMM(x_in, theta_mu_in, theta_sig_in, coeff_in, corr_in, binary_in)
    recon = recon.reshape((x_shape[0], x_shape[1]))
    recon.name = 'gmm_out'
    recon_term = recon.sum(axis=0).mean()
    recon_term.name = 'recon_term'
    kl_term = kl_temp.sum(axis=0).mean()
    kl_term.name = 'kl_term'

    nll_upper_bound = recon_term + kl_term  #+ mse
    if (flgMSE):
        nll_upper_bound = nll_upper_bound + mse
    nll_upper_bound.name = 'nll_upper_bound'

    model.inputs = [x, mask, y, y_mask, scheduleSamplingMask]
    model.params = params
    model.nodes = nodes

    optimizer = Adam(lr=lr)
    header = "epoch,log,kl,nll_upper_bound,mse,mae\n"
    extension = [
        GradientClipping(batch_size=batch_size),
        EpochCount(epoch, save_path, header),
        Monitoring(freq=monitoring_freq,
                   ddout=[nll_upper_bound, recon_term, kl_term, mse, mae,
                          theta_mu_temp, prediction],
                   indexSep=5,
                   instancesPlot=instancesPlot,  #{0:[4,20],2:[5,10]},#, 80,150
                   data=[Iterator(valid_data, batch_size)],
                   savedFolder=save_path),
        Picklize(freq=monitoring_freq, path=save_path),
        EarlyStopping(freq=monitoring_freq, path=save_path, channel=channel_name),
        WeightNorm()
    ]

    lr_iterations = {0: lr}

    mainloop = Training(name=pkl_name,
                        data=Iterator(train_data, batch_size),
                        model=model,
                        optimizer=optimizer,
                        cost=nll_upper_bound,
                        outputs=[recon_term, kl_term, nll_upper_bound, mse, mae],
                        n_steps=n_steps,
                        extension=extension,
                        lr_iterations=lr_iterations,
                        k_speedOfconvergence=kSchedSamp)
    mainloop.run()

    fLog = open(save_path + '/output.csv', 'w')
    fLog.write(str(lr_iterations) + "\n")
    fLog.write(str(windows) + "\n")
    fLog.write("q_z_dim,p_z_dim,p_x_dim,x2s_dim,y2s_dim,z2s_dim\n")
    fLog.write("{},{},{},{},{},{}\n".format(q_z_dim, p_z_dim, p_x_dim, x2s_dim, y2s_dim, z2s_dim))
    header = "epoch,log,kl,mse,mae\n"
    fLog.write(header)
    for i, item in enumerate(mainloop.trainlog.monitor['recon_term']):
        f = mainloop.trainlog.monitor['epoch'][i]
        a = mainloop.trainlog.monitor['recon_term'][i]
        b = mainloop.trainlog.monitor['kl_term'][i]
        d = mainloop.trainlog.monitor['mse'][i]
        e = mainloop.trainlog.monitor['mae'][i]
        fLog.write("{:d},{:.2f},{:.2f},{:.3f},{:.3f}\n".format(f, a, b, d, e))
def error_func(expected, predicted):
    return T.abs_(expected - predicted)
def cost_matrix(self, y, y_hat):
    cost = tensor.abs_(y - y_hat)
    return cost
def L1_regularization(weights):
    # `tpo` is expected to be a module-level settings dict
    w_error = 0
    for w in weights:
        w_error = w_error + tpo["reg_factor"] * T.mean(T.abs_(w))
    return w_error
def l1(param):
    return T.sum(T.abs_(param))
def fit(self, X, y, train_splits=[0.6, 0.8], eta=1e-4, lambda2=0, lambda1=0,
        mu=0.9, gamma=0.999, epsilon=1e-10, batch_sz=100, epochs=500, show_fig=False):
    N, D = X.shape
    K = len(set(y))
    Y = one_hot_encode(y)

    idx = np.random.permutation(N)
    X = X[idx, :].astype(np.float32)
    Y = Y[idx, :].astype(np.int32)
    y = y[idx].astype(np.int32)

    X_train = X[:int(N * train_splits[0]), :]
    Y_train = Y[:int(N * train_splits[0]), :]
    y_train = y[:int(N * train_splits[0])]
    X_cv = X[int(N * train_splits[0]):int(N * train_splits[1]), :]
    Y_cv = Y[int(N * train_splits[0]):int(N * train_splits[1]), :]
    y_cv = y[int(N * train_splits[0]):int(N * train_splits[1])]
    X_test = X[int(N * train_splits[1]):, :]
    Y_test = Y[int(N * train_splits[1]):, :]
    y_test = y[int(N * train_splits[1]):]
    N_train = len(X_train)
    N_cv = len(X_cv)
    N_test = len(X_test)

    X_tensor = T.matrix(name="X", dtype='float32')
    Y_tensor = T.matrix(name="Y", dtype='int32')
    y_tensor = T.vector(name="y", dtype='int32')

    # build the hidden layers and output weights before calling forward()
    self.hidden_layers = []
    layer_num = 0
    M1 = D
    for M2 in self.hidden_layer_sizes:
        self.hidden_layers.append(HiddenLayer(layer_num, M1, M2))
        layer_num += 1
        M1 = M2
    W = np.random.randn(M1, K) / np.sqrt(M1)
    b = np.random.randn(K)
    self.W = theano.shared(W.astype(np.float32), "W{}".format(layer_num))
    self.b = theano.shared(b.astype(np.float32), "b{}".format(layer_num))

    self.params = [self.W, self.b]
    for h in self.hidden_layers:
        self.params += h.params

    vparams = [theano.shared(np.zeros(p.get_value().shape).astype(np.float32))
               for p in self.params]
    Gparams = [theano.shared(np.ones(p.get_value().shape).astype(np.float32))
               for p in self.params]

    P = self.forward(X_tensor)
    L2_penalty = (lambda2 / 2) * T.sum([(p * p).sum() for p in self.params])
    L1_penalty = lambda1 * T.sum([T.abs_(p).sum() for p in self.params])
    objective = -T.sum(Y_tensor * T.log(P)) + L2_penalty + L1_penalty
    pred = self.predict(X_tensor)

    objective_op = theano.function(inputs=[X_tensor, Y_tensor], outputs=objective)
    predict_op = theano.function(inputs=[X_tensor], outputs=pred)

    # RMSProp cache update, then momentum velocity update, then parameter step
    updates = [
        (G, gamma * G + (1 - gamma) * T.grad(objective, p) * T.grad(objective, p))
        for p, G in zip(self.params, Gparams)
    ] + [
        (v, mu * v - (eta / T.sqrt(G + epsilon)) * T.grad(objective, p))
        for p, v, G in zip(self.params, vparams, Gparams)
    ] + [
        (p, p + mu * v - (eta / T.sqrt(G + epsilon)) * T.grad(objective, p))
        for p, v, G in zip(self.params, vparams, Gparams)
    ]
    train_op = theano.function(inputs=[X_tensor, Y_tensor], updates=updates)

    n_batches = N_train // batch_sz
    J_train = []
    J_cv = []
    J_test = []
    for epoch in range(epochs):
        idx = np.random.permutation(N_train)
        X_train = X_train[idx, :]
        Y_train = Y_train[idx, :]
        y_train = y_train[idx]
        for i in range(n_batches):
            X_batch = X_train[(i * batch_sz):((i + 1) * batch_sz), :]
            Y_batch = Y_train[(i * batch_sz):((i + 1) * batch_sz), :]
            train_op(X_batch, Y_batch)
            if i % 10 == 0:
                j_train = objective_op(X_train, Y_train)
                j_cv = objective_op(X_cv, Y_cv)
                j_test = objective_op(X_test, Y_test)
                acc = accuracy(y_train, predict_op(X_train))
                J_train.append(j_train / N_train)
                J_cv.append(j_cv / N_cv)
                J_test.append(j_test / N_test)
                print("epoch: {} of {} -- batch: {} of {} -- J train: {} -- training accuracy: {}".format(
                    epoch + 1, epochs, i + 1, n_batches, j_train, acc))

    if show_fig:
        plt.plot(J_train, label="Training Error")
        plt.plot(J_cv, label="Validation Error")
        plt.plot(J_test, label="Test Error")
        plt.legend()
        plt.xlabel("Training Epochs")
        plt.ylabel("Error")
        plt.title("Training Curve")
        plt.show()
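# `one_hot_encode` and `accuracy` are assumed helpers in this codebase; minimal
# versions consistent with how they are used above might be:
import numpy as np

def one_hot_encode(y):
    """Map integer labels of shape (N,) to a one-hot matrix of shape (N, K)."""
    K = len(set(y))
    out = np.zeros((len(y), K))
    out[np.arange(len(y)), y] = 1
    return out

def accuracy(targets, predictions):
    """Fraction of matching labels."""
    return np.mean(targets == predictions)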
def get_l1_cost(self):
    return T.sum(T.abs_(self.params[self.id + "full_w"]))
def main(args):
    trial = int(args['trial'])
    pkl_name = 'rnn_gauss_%d' % trial
    channel_name = 'valid_nll'

    data_path = args['data_path']
    save_path = args['save_path']
    flgMSE = int(args['flgMSE'])

    monitoring_freq = int(args['monitoring_freq'])
    epoch = int(args['epoch'])
    batch_size = int(args['batch_size'])
    x_dim = int(args['x_dim'])
    z_dim = int(args['z_dim'])
    y_dim = int(args['y_dim'])
    flgAgg = int(args['flgAgg'])
    rnn_dim = int(args['rnn_dim'])
    lr = float(args['lr'])
    debug = int(args['debug'])

    print "trial no. %d" % trial
    print "batch size %d" % batch_size
    print "learning rate %f" % lr
    print "saving pkl file '%s'" % pkl_name
    print "to the save path '%s'" % save_path

    x2s_dim = 340
    s2x_dim = 340
    # k, windows, appliances, period, n_steps, stride_train and stride_test are
    # expected to be module-level settings in this script
    target_dim = k  #x_dim - 1

    model = Model()
    train_data = UKdale(name='train', prep='normalize', cond=False, path=data_path,
                        windows=windows, appliances=appliances, numApps=flgAgg,
                        period=period, n_steps=n_steps,
                        stride_train=stride_train, stride_test=stride_test)
    X_mean = train_data.X_mean
    X_std = train_data.X_std
    valid_data = UKdale(name='valid', prep='normalize', cond=False, path=data_path,
                        X_mean=X_mean, X_std=X_std, windows=windows,
                        appliances=appliances, numApps=flgAgg, period=period,
                        n_steps=n_steps, stride_train=stride_train, stride_test=stride_test)

    init_W = InitCell('rand')
    init_U = InitCell('ortho')
    init_b = InitCell('zeros')
    init_b_sig = InitCell('const', mean=0.6)

    x, y = train_data.theano_vars()
    if debug:
        x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
        temp = np.ones((15, batch_size), dtype=np.float32)
        temp[:, -2:] = 0.
        mask.tag.test_value = temp  # NOTE: no `mask` variable is created in this script

    x_1 = FullyConnectedLayer(name='x_1', parent=['x_t'], parent_dim=[x_dim],
                              nout=x2s_dim, unit='relu', init_W=init_W, init_b=init_b)
    rnn = LSTM(name='rnn', parent=['x_1'], parent_dim=[x2s_dim], nout=rnn_dim,
               unit='tanh', init_W=init_W, init_U=init_U, init_b=init_b)
    theta_1 = FullyConnectedLayer(name='theta_1', parent=['s_tm1'], parent_dim=[rnn_dim],
                                  nout=s2x_dim, unit='relu', init_W=init_W, init_b=init_b)
    theta_mu = FullyConnectedLayer(name='theta_mu', parent=['theta_1'], parent_dim=[s2x_dim],
                                   nout=target_dim, unit='linear', init_W=init_W, init_b=init_b)
    theta_sig = FullyConnectedLayer(name='theta_sig', parent=['theta_1'], parent_dim=[s2x_dim],
                                    nout=target_dim, unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)
    corr = FullyConnectedLayer(name='corr', parent=['theta_1'], parent_dim=[s2x_dim],
                               nout=1, unit='tanh', init_W=init_W, init_b=init_b)
    binary = FullyConnectedLayer(name='binary', parent=['theta_1'], parent_dim=[s2x_dim],
                                 nout=1, unit='sigmoid', init_W=init_W, init_b=init_b)

    nodes = [rnn, x_1, theta_1, theta_mu, theta_sig]  #, corr, binary

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())
    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)
    x_1_temp = x_1.fprop([x], params)

    def inner_fn(x_t, s_tm1):
        s_t = rnn.fprop([[x_t], [s_tm1]], params)
        theta_1_t = theta_1.fprop([s_t], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        # (no mixture coefficients here: the output is a plain Gaussian, and no
        # `coeff` layer is constructed in this script)
        pred = Gaussian_sample(theta_mu_t, theta_sig_t)
        return s_t, theta_mu_t, theta_sig_t, pred

    ((s_temp, theta_mu_temp, theta_sig_temp, pred_temp), updates) = \
        theano.scan(fn=inner_fn, sequences=[x_1_temp],
                    outputs_info=[s_0, None, None, None])

    for k, v in updates.iteritems():
        k.default_update = v

    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    '''
    theta_1_temp = theta_1.fprop([s_temp], params)
    theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    corr_temp = corr.fprop([theta_1_temp], params)
    binary_temp = binary.fprop([theta_1_temp], params)
    '''
    x_shape = x.shape
    x_in = x.reshape((x_shape[0] * x_shape[1], -1))
    theta_mu_in = theta_mu_temp.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
    #corr_in = corr_temp.reshape((x_shape[0] * x_shape[1], -1))      # corr_temp and binary_temp
    #binary_in = binary_temp.reshape((x_shape[0] * x_shape[1], -1))  # only exist in the dead block above

    if (flgAgg == -1):
        pred_temp.name = 'x_reconstructed'
        mse = T.mean((pred_temp - x)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(pred_temp - x))
        mse.name = 'mse'
        pred_in = x.reshape((x_shape[0] * x_shape[1], -1))
    else:
        pred_temp = pred_temp.reshape((pred_temp.shape[0], pred_temp.shape[1]))
        pred_temp.name = 'pred_' + str(flgAgg)  #y[:,:,flgAgg].reshape((y.shape[0],y.shape[1],1))
        mse = T.mean((pred_temp - y.T)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(pred_temp - y.T))
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = y.reshape((x.shape[0] * x.shape[1], -1), ndim=2)

    recon = Gaussian(pred_in, theta_mu_in, theta_sig_in)
    recon = recon.reshape((x_shape[0], x_shape[1]))
    #recon = recon * mask
    recon_term = recon.sum(axis=0).mean()
    recon_term.name = 'nll'

    max_x = x.max()
    mean_x = x.mean()
    min_x = x.min()
    max_x.name = 'max_x'
    mean_x.name = 'mean_x'
    min_x.name = 'min_x'

    max_theta_mu = theta_mu_in.max()
    mean_theta_mu = theta_mu_in.mean()
    min_theta_mu = theta_mu_in.min()
    max_theta_mu.name = 'max_theta_mu'
    mean_theta_mu.name = 'mean_theta_mu'
    min_theta_mu.name = 'min_theta_mu'

    max_theta_sig = theta_sig_in.max()
    mean_theta_sig = theta_sig_in.mean()
    min_theta_sig = theta_sig_in.min()
    max_theta_sig.name = 'max_theta_sig'
    mean_theta_sig.name = 'mean_theta_sig'
    min_theta_sig.name = 'min_theta_sig'

    model.inputs = [x, y]
    model.params = params
    model.nodes = nodes

    optimizer = Adam(lr=lr)

    extension = [
        GradientClipping(batch_size=batch_size),
        EpochCount(epoch),
        Monitoring(freq=monitoring_freq,
                   ddout=[recon_term,
                          max_theta_sig, mean_theta_sig, min_theta_sig,
                          max_x, mean_x, min_x,
                          max_theta_mu, mean_theta_mu, min_theta_mu],
                   data=[Iterator(valid_data, batch_size)]),
        Picklize(freq=monitoring_freq, path=save_path),
        EarlyStopping(freq=monitoring_freq, path=save_path, channel=channel_name),
        WeightNorm()
    ]

    mainloop = Training(name=pkl_name,
                        data=Iterator(train_data, batch_size),
                        model=model,
                        optimizer=optimizer,
                        cost=recon_term,
                        outputs=[recon_term],
                        extension=extension)
    mainloop.run()

    fLog = open(save_path + '/output.csv', 'w')
    fLog.write("log,mse,mae\n")
    # NOTE: only the quantities in ddout above are monitored; the 'mse'/'mae'
    # keys used here require adding mse and mae to ddout
    for i, item in enumerate(mainloop.trainlog.monitor['nll']):
        a = mainloop.trainlog.monitor['nll'][i]
        d = mainloop.trainlog.monitor['mse'][i]
        e = mainloop.trainlog.monitor['mae'][i]
        fLog.write("{},{},{}\n".format(a, d, e))
def main(args):
    theano.config.optimizer = 'fast_compile'  # set via theano.config; assigning theano.optimizer has no effect
    theano.config.exception_verbosity = 'high'

    trial = int(args['trial'])
    pkl_name = 'vrnn_gauss_%d' % trial
    channel_name = 'valid_nll_upper_bound'

    data_path = args['data_path']
    save_path = args['save_path']
    period = int(args['period'])
    n_steps = int(args['n_steps'])
    stride_train = int(args['stride_train'])
    stride_test = int(args['stride_test'])

    monitoring_freq = int(args['monitoring_freq'])
    epoch = int(args['epoch'])
    batch_size = int(args['batch_size'])
    x_dim = int(args['x_dim'])
    z_dim = int(args['z_dim'])
    rnn_dim = int(args['rnn_dim'])
    flgAgg = int(args['flgAgg'])  # was missing, but is used below (numApps and the branch on -1)
    lr = float(args['lr'])
    debug = int(args['debug'])

    print "trial no. %d" % trial
    print "batch size %d" % batch_size
    print "learning rate %f" % lr
    print "saving pkl file '%s'" % pkl_name
    print "to the save path '%s'" % save_path

    q_z_dim = 150
    p_z_dim = 150
    p_x_dim = 250
    x2s_dim = 10  #250
    z2s_dim = 10  #150
    target_dim = x_dim  #(x_dim-1)

    model = Model()
    Xtrain, ytrain, Xval, yval = fetch_ukdale(data_path, windows, appliances,
                                              numApps=flgAgg, period=period,
                                              n_steps=n_steps,
                                              stride_train=stride_train,
                                              stride_test=stride_test)

    train_data = UKdale(name='train',
                        prep='normalize',
                        cond=True,  # False
                        #path=data_path,
                        inputX=Xtrain,
                        labels=ytrain)

    X_mean = train_data.X_mean
    X_std = train_data.X_std

    valid_data = UKdale(name='valid',
                        prep='normalize',
                        cond=True,  # False
                        #path=data_path,
                        X_mean=X_mean,
                        X_std=X_std,
                        inputX=Xval,
                        labels=yval)

    init_W = InitCell('rand')
    init_U = InitCell('ortho')
    init_b = InitCell('zeros')
    init_b_sig = InitCell('const', mean=0.6)

    x, y = train_data.theano_vars()

    if debug:
        x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
        # this script has no mask variable (theano_vars returns only x, y),
        # so the mask test value used by the masked variants is dropped here

    x_1 = FullyConnectedLayer(name='x_1',
                              parent=['x_t'],  #OrderedDict parent['x_t'] = x_dim
                              parent_dim=[x_dim],
                              nout=x2s_dim, unit='relu',
                              init_W=init_W, init_b=init_b)

    z_1 = FullyConnectedLayer(name='z_1', parent=['z_t'], parent_dim=[z_dim],
                              nout=z2s_dim, unit='relu',
                              init_W=init_W, init_b=init_b)

    rnn = LSTM(name='rnn', parent=['x_1', 'z_1'],
               parent_dim=[x2s_dim, z2s_dim],
               nout=rnn_dim, unit='tanh',
               init_W=init_W, init_U=init_U, init_b=init_b)

    phi_1 = FullyConnectedLayer(name='phi_1',  ## encoder
                                parent=['x_1', 's_tm1'],
                                parent_dim=[x2s_dim, rnn_dim],
                                nout=q_z_dim, unit='relu',
                                init_W=init_W, init_b=init_b)

    phi_mu = FullyConnectedLayer(name='phi_mu', parent=['phi_1'],
                                 parent_dim=[q_z_dim], nout=z_dim,
                                 unit='linear', init_W=init_W, init_b=init_b)

    phi_sig = FullyConnectedLayer(name='phi_sig', parent=['phi_1'],
                                  parent_dim=[q_z_dim], nout=z_dim,
                                  unit='softplus', cons=1e-4,
                                  init_W=init_W, init_b=init_b_sig)

    prior_1 = FullyConnectedLayer(name='prior_1', parent=['s_tm1'],
                                  parent_dim=[rnn_dim], nout=p_z_dim,
                                  unit='relu', init_W=init_W, init_b=init_b)

    prior_mu = FullyConnectedLayer(name='prior_mu', parent=['prior_1'],
                                   parent_dim=[p_z_dim], nout=z_dim,
                                   unit='linear', init_W=init_W, init_b=init_b)

    prior_sig = FullyConnectedLayer(name='prior_sig', parent=['prior_1'],
                                    parent_dim=[p_z_dim], nout=z_dim,
                                    unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)

    theta_1 = FullyConnectedLayer(name='theta_1',  ### decoder
                                  parent=['z_1', 's_tm1'],
                                  parent_dim=[z2s_dim, rnn_dim],
                                  nout=p_x_dim, unit='relu',
                                  init_W=init_W, init_b=init_b)

    theta_mu = FullyConnectedLayer(name='theta_mu', parent=['theta_1'],
                                   parent_dim=[p_x_dim], nout=target_dim,
                                   unit='linear', init_W=init_W, init_b=init_b)

    theta_sig = FullyConnectedLayer(name='theta_sig', parent=['theta_1'],
                                    parent_dim=[p_x_dim], nout=target_dim,
                                    unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)

    corr = FullyConnectedLayer(name='corr',  ## rho
                               parent=['theta_1'], parent_dim=[p_x_dim],
                               nout=1, unit='tanh',
                               init_W=init_W, init_b=init_b)

    binary = FullyConnectedLayer(name='binary', parent=['theta_1'],
                                 parent_dim=[p_x_dim], nout=1, unit='sigmoid',
                                 init_W=init_W, init_b=init_b)

    nodes = [rnn,
             x_1, z_1,
             phi_1, phi_mu, phi_sig,
             prior_1, prior_mu, prior_sig,
             theta_1, theta_mu, theta_sig]  #, corr, binary

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            # initialize the W matrices according to the dims of the parents
            params.update(node.initialize())
    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)

    x_1_temp = x_1.fprop([x], params)

    def inner_fn(x_t, s_tm1):
        phi_1_t = phi_1.fprop([x_t, s_tm1], params)
        phi_mu_t = phi_mu.fprop([phi_1_t], params)
        phi_sig_t = phi_sig.fprop([phi_1_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        theta_1_t = theta_1.fprop([z_1_t, s_tm1], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        pred = Gaussian_sample(theta_mu_t, theta_sig_t)

        s_t = rnn.fprop([[x_t, z_1_t], [s_tm1]], params)

        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_t, z_1_t, theta_1_t, theta_mu_t, theta_sig_t, pred

    # the tensor(s) to be looped over are passed through `sequences`;
    # outputs_info=None marks outputs whose previous value is not fed back to inner_fn
    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp,
      z_temp, z_1_temp, theta_1_temp, theta_mu_temp, theta_sig_temp,
      pred_temp), updates) = \
        theano.scan(fn=inner_fn,
                    sequences=[x_1_temp],
                    outputs_info=[s_0, None, None, None, None, None,
                                  None, None, None, None, None])
    '''
    The general order of function parameters to inner_fn:
    sequences (if any), prior result(s) (if needed), non-sequences (if any)
    '''
    for k, v in updates.iteritems():
        print("Update")
        k.default_update = v

    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    s_temp.name = 'h_1'  #gisse
    z_temp.name = 'z'
    z_1_temp.name = 'z_1'  #gisse
    #theta_1_temp = theta_1.fprop([z_1_temp, s_temp], params)
    #theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_mu_temp.name = 'theta_mu'
    #theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    theta_sig_temp.name = 'theta_sig'
    pred_temp.name = 'x_reconstructed'  # the scan output; 'x_pred_temp' was undefined
    #corr_temp = corr.fprop([theta_1_temp], params)
    #corr_temp.name = 'corr'
    #binary_temp = binary.fprop([theta_1_temp], params)
    #binary_temp.name = 'binary'

    x_shape = x.shape  # needed by both branches below, so defined before them

    if flgAgg == -1:
        mse = T.mean((pred_temp - x)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(pred_temp - x))  # T.abs_, not T.abs
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = x.reshape((x_shape[0] * x_shape[1], -1))
    else:
        pred_temp.name = 'pred_' + str(flgAgg)
        mse = T.mean((pred_temp - y[:, :, flgAgg].reshape(
            (y.shape[0], y.shape[1], 1)))**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(pred_temp -
                            y[:, :, flgAgg].reshape((y.shape[0], y.shape[1], 1))))
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = y[:, :, flgAgg].reshape((x.shape[0] * x.shape[1], -1), ndim=2)

    kl_temp = KLGaussianGaussian(phi_mu_temp, phi_sig_temp, prior_mu_temp,
                                 prior_sig_temp)

    #x_in = x.reshape((x_shape[0]*x_shape[1], -1))
    theta_mu_in = theta_mu_temp.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
    #corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    #binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    # second term of the loss function
    recon = Gaussian(pred_in, theta_mu_in, theta_sig_in)
    # BiGauss(x_in, theta_mu_in, theta_sig_in, corr_in, binary_in)
    recon = recon.reshape((x_shape[0], x_shape[1]))
    #recon = recon * mask
    recon_term = recon.sum(axis=0).mean()
    recon_term.name = 'recon_term'

    #kl_temp = kl_temp * mask
    kl_term = kl_temp.sum(axis=0).mean()
    kl_term.name = 'kl_term'

    nll_upper_bound = recon_term + kl_term
    nll_upper_bound.name = 'nll_upper_bound'

    max_x = x.max()
    mean_x = x.mean()
    min_x = x.min()
    max_x.name = 'max_x'
    mean_x.name = 'mean_x'
    min_x.name = 'min_x'

    max_theta_mu = theta_mu_in.max()
    mean_theta_mu = theta_mu_in.mean()
    min_theta_mu = theta_mu_in.min()
    max_theta_mu.name = 'max_theta_mu'
    mean_theta_mu.name = 'mean_theta_mu'
    min_theta_mu.name = 'min_theta_mu'

    max_theta_sig = theta_sig_in.max()
    mean_theta_sig = theta_sig_in.mean()
    min_theta_sig = theta_sig_in.min()
    max_theta_sig.name = 'max_theta_sig'
    mean_theta_sig.name = 'mean_theta_sig'
    min_theta_sig.name = 'min_theta_sig'

    max_phi_sig = phi_sig_temp.max()
    mean_phi_sig = phi_sig_temp.mean()
    min_phi_sig = phi_sig_temp.min()
    max_phi_sig.name = 'max_phi_sig'
    mean_phi_sig.name = 'mean_phi_sig'
    min_phi_sig.name = 'min_phi_sig'

    max_prior_sig = prior_sig_temp.max()
    mean_prior_sig = prior_sig_temp.mean()
    min_prior_sig = prior_sig_temp.min()
    max_prior_sig.name = 'max_prior_sig'
    mean_prior_sig.name = 'mean_prior_sig'
    min_prior_sig.name = 'min_prior_sig'

    prior_sig_output = prior_sig_temp
    prior_sig_output.name = 'prior_sig_o'
    phi_sig_output = phi_sig_temp
    phi_sig_output.name = 'phi_sig_o'

    model.inputs = [x, y]  # this script defines no mask variable
    model.params = params
    model.nodes = nodes

    optimizer = Adam(lr=lr)

    extension = [
        GradientClipping(batch_size=batch_size),
        EpochCount(epoch),
        Monitoring(
            freq=monitoring_freq,
            ddout=[
                nll_upper_bound, recon_term, kl_term, mse, mae,
                max_phi_sig, mean_phi_sig, min_phi_sig,
                max_prior_sig, mean_prior_sig, min_prior_sig,
                max_theta_sig, mean_theta_sig, min_theta_sig,
                max_x, mean_x, min_x,
                max_theta_mu, mean_theta_mu, min_theta_mu,  #0-17
                #binary_temp, corr_temp, theta_mu_temp, theta_sig_temp, #17-20
                s_temp, z_temp, z_1_temp, pred_temp
                #phi_sig_output, phi_sig_output
            ],  ## added in order to explore the distributions
            indexSep=22,
            indexDDoutPlot=[(0, theta_mu_temp), (2, z_temp), (3, pred_temp)],
            instancesPlot=[0, 150],  #, 80,150
            savedFolder=save_path,
            data=[Iterator(valid_data, batch_size)]),
        Picklize(freq=monitoring_freq, path=save_path),
        EarlyStopping(freq=monitoring_freq, path=save_path,
                      channel=channel_name),
        WeightNorm()
    ]

    mainloop = Training(name=pkl_name,
                        data=Iterator(train_data, batch_size),
                        model=model,
                        optimizer=optimizer,
                        cost=nll_upper_bound,
                        outputs=[nll_upper_bound],
                        extension=extension)
    mainloop.run()

    fLog = open(save_path + '/output.csv', 'w')
    fLog.write("log,kl,nll_upper_bound,mse,mae\n")
    for i, item in enumerate(mainloop.trainlog.monitor['nll_upper_bound']):
        a = mainloop.trainlog.monitor['recon_term'][i]
        b = mainloop.trainlog.monitor['kl_term'][i]
        c = mainloop.trainlog.monitor['nll_upper_bound'][i]
        d = mainloop.trainlog.monitor['mse'][i]
        e = mainloop.trainlog.monitor['mae'][i]
        fLog.write("{},{},{},{},{}\n".format(a, b, c, d, e))
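# --- Reference sketch (not from the original codebase) ----------------------
# KLGaussianGaussian above is assumed to be the closed-form KL divergence
# between the diagonal Gaussian posterior q = N(phi_mu, phi_sig^2) and the
# prior p = N(prior_mu, prior_sig^2), computed per timestep and summed over
# the latent axis; a minimal stand-in under that assumption:
import theano.tensor as T

def kl_gaussian_gaussian(mu_q, sig_q, mu_p, sig_p):
    # KL(q || p) = log(sig_p/sig_q) + (sig_q^2 + (mu_q - mu_p)^2) / (2 sig_p^2) - 1/2
    return T.sum(T.log(sig_p) - T.log(sig_q) +
                 (T.sqr(sig_q) + T.sqr(mu_q - mu_p)) / (2. * T.sqr(sig_p)) - 0.5,
                 axis=-1)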
def get_l1_weight_decay(self, coeff):
    if isinstance(coeff, str):
        coeff = float(coeff)
    assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
    # T.abs_ is Theano's elementwise absolute value; T.abs does not exist
    return coeff * (T.abs_(self.W1).sum() + T.abs_(self.W2).sum())
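# --- Usage sketch (illustrative values, not from the original codebase) -----
# A quick check that T.abs_ produces the intended L1 penalty on shared weights:
import numpy as np
import theano
import theano.tensor as T

W1 = theano.shared(np.array([[1., -2.], [3., -4.]]), name='W1')
W2 = theano.shared(np.array([[-0.5, 0.5]]), name='W2')
penalty = 1e-3 * (T.abs_(W1).sum() + T.abs_(W2).sum())
print(theano.function([], penalty)())  # 1e-3 * (10 + 1) = 0.011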
def main(args):
    #theano.config.optimizer = 'fast_compile'
    #theano.config.exception_verbosity = 'high'
    trial = int(args['trial'])
    pkl_name = 'vrnn_gmm_%d' % trial
    channel_name = 'nll_upper_bound'

    data_path = args['data_path']
    save_path = args['save_path']  #+'/gmm/'+datetime.datetime.now().strftime("%y-%m-%d_%H-%M")
    flgMSE = int(args['flgMSE'])
    genCase = int(args['genCase'])
    period = int(args['period'])
    n_steps = int(args['n_steps'])
    stride_train = int(args['stride_train'])
    stride_test = n_steps  #int(args['stride_test'])

    monitoring_freq = int(args['monitoring_freq'])
    epoch = int(args['epoch'])
    batch_size = int(args['batch_size'])
    x_dim = int(args['x_dim'])
    y_dim = int(args['y_dim'])
    flgAgg = int(args['flgAgg'])
    z_dim = int(args['z_dim'])
    rnn_dim = int(args['rnn_dim'])
    k = int(args['num_k'])  #a mixture of K Gaussian functions
    lr = float(args['lr'])
    debug = int(args['debug'])
    num_sequences_per_batch = int(args['numSequences'])  #based on appliance
    typeLoad = int(args['typeLoad'])
    target_inclusion_prob = float(args['target_inclusion_prob'])

    print "trial no. %d" % trial
    print "batch size %d" % batch_size
    print "learning rate %f" % lr
    print "saving pkl file '%s'" % pkl_name
    print "to the save path '%s'" % save_path

    q_z_dim = 150
    p_z_dim = 150
    p_x_dim = 200
    x2s_dim = 100
    y2s_dim = 100
    z2s_dim = 100
    target_dim = k  #x_dim #(x_dim-1)*k

    model = Model()
    Xtrain, ytrain, Xval, yval, Xtest, ytest, reader = fetch_ukdale(
        data_path, windows, appliances, numApps=flgAgg, period=period,
        n_steps=n_steps, stride_train=stride_train, stride_test=stride_test,
        typeLoad=typeLoad, flgAggSumScaled=1, flgFilterZeros=1,
        seq_per_batch=num_sequences_per_batch,
        target_inclusion_prob=target_inclusion_prob)

    # hard-coded instancesPlot for Kelly-style sampling
    instancesPlot = {0: [4, 20], 2: [5, 10]}
    if typeLoad == 0:
        instancesPlot = reader.build_dict_instances_plot(
            listDates, batch_size, Xval.shape[0])

    train_data = UKdale(name='train',
                        prep='normalize',
                        cond=True,  # False
                        #path=data_path,
                        inputX=Xtrain,
                        labels=ytrain)

    X_mean = train_data.X_mean
    X_std = train_data.X_std

    valid_data = UKdale(name='valid',
                        prep='normalize',
                        cond=True,  # False
                        #path=data_path,
                        X_mean=X_mean,
                        X_std=X_std,
                        inputX=Xval,
                        labels=yval)

    test_data = UKdale(name='test',  # was 'valid'; this set holds Xtest/ytest
                       prep='normalize',
                       cond=True,  # False
                       #path=data_path,
                       X_mean=X_mean,
                       X_std=X_std,
                       inputX=Xtest,
                       labels=ytest)

    init_W = InitCell('rand')
    init_U = InitCell('ortho')
    init_b = InitCell('zeros')
    init_b_sig = InitCell('const', mean=0.6)

    x, mask, y, y_mask = train_data.theano_vars()

    if genCase == 1:
        inputX = x[:-1, :]
        targetX = x[1:, :]
        n_steps = n_steps - 1
    else:
        inputX = x
        targetX = x
    x.name = 'x_original'

    if debug:
        x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
        temp = np.ones((15, batch_size), dtype=np.float32)
        temp[:, -2:] = 0.
        mask.tag.test_value = temp

    x_1 = FullyConnectedLayer(name='x_1', parent=['x_t'], parent_dim=[x_dim],
                              nout=x2s_dim, unit='relu',
                              init_W=init_W, init_b=init_b)

    y_1 = FullyConnectedLayer(name='y_1', parent=['y_t'], parent_dim=[y_dim],
                              nout=y2s_dim, unit='relu',
                              init_W=init_W, init_b=init_b)

    z_1 = FullyConnectedLayer(name='z_1', parent=['z_t'], parent_dim=[z_dim],
                              nout=z2s_dim, unit='relu',
                              init_W=init_W, init_b=init_b)

    rnn = LSTM(name='rnn', parent=['x_1', 'z_1', 'y_1'],
               # y_1 feeds the LSTM, so its parent dim is y2s_dim (was y_dim)
               parent_dim=[x2s_dim, z2s_dim, y2s_dim],
               nout=rnn_dim, unit='tanh',
               init_W=init_W, init_U=init_U, init_b=init_b)

    phi_1 = FullyConnectedLayer(name='phi_1', parent=['x_1', 's_tm1', 'y_1'],
                                parent_dim=[x2s_dim, rnn_dim, y2s_dim],
                                nout=q_z_dim, unit='relu',
                                init_W=init_W, init_b=init_b)

    phi_mu = FullyConnectedLayer(name='phi_mu', parent=['phi_1'],
                                 parent_dim=[q_z_dim], nout=z_dim,
                                 unit='linear', init_W=init_W, init_b=init_b)

    phi_sig = FullyConnectedLayer(name='phi_sig', parent=['phi_1'],
                                  parent_dim=[q_z_dim], nout=z_dim,
                                  unit='softplus', cons=1e-4,
                                  init_W=init_W, init_b=init_b_sig)

    prior_1 = FullyConnectedLayer(name='prior_1', parent=['s_tm1'],
                                  parent_dim=[rnn_dim], nout=p_z_dim,
                                  unit='relu', init_W=init_W, init_b=init_b)

    prior_mu = FullyConnectedLayer(name='prior_mu', parent=['prior_1'],
                                   parent_dim=[p_z_dim], nout=z_dim,
                                   unit='linear', init_W=init_W, init_b=init_b)

    prior_sig = FullyConnectedLayer(name='prior_sig', parent=['prior_1'],
                                    parent_dim=[p_z_dim], nout=z_dim,
                                    unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)

    theta_1 = FullyConnectedLayer(name='theta_1', parent=['z_1', 's_tm1'],
                                  parent_dim=[z2s_dim, rnn_dim],
                                  nout=p_x_dim, unit='relu',
                                  init_W=init_W, init_b=init_b)

    theta_mu = FullyConnectedLayer(name='theta_mu', parent=['theta_1'],
                                   parent_dim=[p_x_dim], nout=target_dim,
                                   unit='linear', init_W=init_W, init_b=init_b)

    theta_sig = FullyConnectedLayer(name='theta_sig', parent=['theta_1'],
                                    parent_dim=[p_x_dim], nout=target_dim,
                                    unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)

    coeff = FullyConnectedLayer(name='coeff', parent=['theta_1'],
                                parent_dim=[p_x_dim], nout=k, unit='softmax',
                                init_W=init_W, init_b=init_b)

    corr = FullyConnectedLayer(name='corr', parent=['theta_1'],
                               parent_dim=[p_x_dim], nout=k, unit='tanh',
                               init_W=init_W, init_b=init_b)

    binary = FullyConnectedLayer(name='binary', parent=['theta_1'],
                                 parent_dim=[p_x_dim], nout=1, unit='sigmoid',
                                 init_W=init_W, init_b=init_b)

    nodes = [rnn,
             x_1, y_1, z_1,  #dissag_pred,
             phi_1, phi_mu, phi_sig,
             prior_1, prior_mu, prior_sig,
             theta_1, theta_mu, theta_sig, coeff]  #, corr, binary

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())
    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)

    x_1_temp = x_1.fprop([x], params)
    y_1_temp = y_1.fprop([y], params)

    def inner_fn_val(x_t, s_tm1):
        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(prior_mu_t, prior_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        theta_1_t = theta_1.fprop([z_1_t, s_tm1], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        coeff_t = coeff.fprop([theta_1_t], params)

        pred_t = GMM_sample(theta_mu_t, theta_sig_t, coeff_t)  #Gaussian_sample(theta_mu_t, theta_sig_t)
        pred_1_t = y_1.fprop([pred_t], params)
        s_t = rnn.fprop([[x_t, z_1_t, pred_1_t], [s_tm1]], params)
        #y_pred = dissag_pred.fprop([s_t], params)

        return s_t, prior_mu_t, prior_sig_t, z_t, z_1_t, theta_1_t, theta_mu_t, theta_sig_t, coeff_t, pred_t  #, y_pred #corr_temp, binary_temp

    ((s_temp_val, prior_mu_temp_val, prior_sig_temp_val, z_t_temp_val,
      z_1_temp_val, theta_1_temp_val, theta_mu_temp_val, theta_sig_temp_val,
      coeff_temp_val, prediction_val), updates_val) = \
        theano.scan(fn=inner_fn_val,
                    sequences=[x_1_temp],
                    outputs_info=[s_0, None, None, None, None,
                                  None, None, None, None, None])

    # loop variables renamed so they do not clobber the mixture count k
    for key, value in updates_val.iteritems():
        key.default_update = value

    s_temp_val = concatenate([s_0[None, :, :], s_temp_val[:-1]], axis=0)

    def inner_fn_train(x_t, y_t, s_tm1):
        phi_1_t = phi_1.fprop([x_t, s_tm1, y_t], params)
        phi_mu_t = phi_mu.fprop([phi_1_t], params)
        phi_sig_t = phi_sig.fprop([phi_1_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        theta_1_t = theta_1.fprop([z_1_t, s_tm1], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        coeff_t = coeff.fprop([theta_1_t], params)
        #corr_t = corr.fprop([theta_1_t], params)
        #binary_t = binary.fprop([theta_1_t], params)

        pred = GMM_sample(theta_mu_t, theta_sig_t, coeff_t)  #Gaussian_sample(theta_mu_t, theta_sig_t)
        s_t = rnn.fprop([[x_t, z_1_t, y_t], [s_tm1]], params)
        #y_pred = dissag_pred.fprop([s_t], params)

        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_t, z_1_t, theta_1_t, theta_mu_t, theta_sig_t, coeff_t, pred  #, y_pred #corr_temp, binary_temp

    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp,
      z_t_temp, z_1_temp, theta_1_temp, theta_mu_temp, theta_sig_temp,
      coeff_temp, prediction), updates) = \
        theano.scan(fn=inner_fn_train,
                    sequences=[x_1_temp, y_1_temp],
                    outputs_info=[s_0, None, None, None, None, None,
                                  None, None, None, None, None, None])

    for key, value in updates.iteritems():
        key.default_update = value

    # prepend s_0 so s_temp holds the state that preceded each step
    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    s_temp.name = 'h_1'  #gisse
    z_1_temp.name = 'z_1'  #gisse
    z_t_temp.name = 'z'
    theta_mu_temp.name = 'theta_mu_temp'
    theta_sig_temp.name = 'theta_sig_temp'
    coeff_temp.name = 'coeff'

    x_shape = x.shape  # needed by both branches below, so defined before them

    if flgAgg == -1:
        prediction.name = 'x_reconstructed'
        mse = T.mean((prediction - x)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(prediction - x))  # T.abs_, not T.abs
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = x.reshape((x_shape[0] * x_shape[1], -1))
    else:
        prediction.name = 'pred_' + str(flgAgg)
        mse = T.mean((prediction - y)**2)  # with axis=None, computed over all elements
        mae = T.mean(T.abs_(prediction - y))
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = y.reshape((y.shape[0] * y.shape[1], -1))

    kl_temp = KLGaussianGaussian(phi_mu_temp, phi_sig_temp, prior_mu_temp,
                                 prior_sig_temp)

    theta_mu_in = theta_mu_temp.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
    coeff_in = coeff_temp.reshape((x_shape[0] * x_shape[1], -1))
    #corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    #binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    recon = GMM(pred_in, theta_mu_in, theta_sig_in, coeff_in)
    # BiGMM(x_in, theta_mu_in, theta_sig_in, coeff_in, corr_in, binary_in)
    recon = recon.reshape((x_shape[0], x_shape[1]))
    recon.name = 'gmm_out'

    recon_term = recon.sum(axis=0).mean()
    recon_term.name = 'recon_term'

    kl_term = kl_temp.sum(axis=0).mean()
    kl_term.name = 'kl_term'

    nll_upper_bound = recon_term + kl_term  #+ mse
    if flgMSE:
        nll_upper_bound = nll_upper_bound + mse
    nll_upper_bound.name = 'nll_upper_bound'
    ######################## TEST (GENERATION) TIME
    prediction_val.name = 'generated__' + str(flgAgg)
    mse_val = T.mean((prediction_val - y)**2)  # with axis=None, computed over all elements
    mae_val = T.mean(T.abs_(prediction_val - y))
    mse_val.name = 'mse_val'
    mae_val.name = 'mae_val'
    pred_in_val = y.reshape((y.shape[0] * y.shape[1], -1))

    theta_mu_in_val = theta_mu_temp_val.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in_val = theta_sig_temp_val.reshape((x_shape[0] * x_shape[1], -1))
    coeff_in_val = coeff_temp_val.reshape((x_shape[0] * x_shape[1], -1))

    recon_val = GMM(pred_in_val, theta_mu_in_val, theta_sig_in_val, coeff_in_val)
    # BiGMM(x_in, theta_mu_in, theta_sig_in, coeff_in, corr_in, binary_in)
    recon_val = recon_val.reshape((x_shape[0], x_shape[1]))
    recon_val.name = 'gmm_out_val'

    recon_term_val = recon_val.sum(axis=0).mean()
    recon_term_val.name = 'recon_term_val'

    model.inputs = [x, mask, y, y_mask]
    model.params = params
    model.nodes = nodes

    optimizer = Adam(lr=lr)

    header = "epoch,log,kl,nll_upper_bound,mse,mae\n"
    extension = [
        GradientClipping(batch_size=batch_size),
        EpochCount(epoch, save_path, header),
        Monitoring(freq=monitoring_freq,
                   ddout=[nll_upper_bound, recon_term, kl_term, mse, mae,
                          theta_mu_temp, prediction],
                   indexSep=5,
                   instancesPlot=instancesPlot,  #{0:[4,20],2:[5,10]},#, 80,150
                   data=[Iterator(valid_data, batch_size)],
                   savedFolder=save_path),
        Picklize(freq=monitoring_freq, path=save_path),
        EarlyStopping(freq=monitoring_freq, path=save_path,
                      channel=channel_name),
        WeightNorm()
    ]

    lr_iterations = {0: lr, 75: (lr / 10), 150: (lr / 100)}

    mainloop = Training(name=pkl_name,
                        data=Iterator(train_data, batch_size),
                        model=model,
                        optimizer=optimizer,
                        cost=nll_upper_bound,
                        outputs=[recon_term, kl_term, nll_upper_bound, mse, mae],
                        extension=extension,
                        lr_iterations=lr_iterations)
    mainloop.run()

    z_t_temp_val.name = 'z_temp_val'
    s_temp_val.name = 's_temp_val'
    theta_mu_temp_val.name = 'mu_temp'

    data = Iterator(test_data, batch_size)

    test_fn = theano.function(
        inputs=[x, y],
        #givens={x: Xtest},
        #on_unused_input='ignore',
        allow_input_downcast=True,
        outputs=[z_t_temp_val, s_temp_val, theta_mu_temp_val, prediction_val,
                 recon_term_val, mse_val, mae_val],
        updates=updates_val)

    testOutput = []
    numBatchTest = 0
    for batch in data:
        outputGeneration = test_fn(batch[0], batch[2])
        testOutput.append(outputGeneration[4:])  # recon_term_val, mse_val, mae_val
        '''
        plt.figure(1)
        plt.plot(np.transpose(outputGeneration[0], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_z_0-4".format(numBatchTest))
        plt.clf()

        plt.figure(2)
        plt.plot(np.transpose(outputGeneration[1], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_s_0-4".format(numBatchTest))
        plt.clf()

        plt.figure(3)
        plt.plot(np.transpose(outputGeneration[2], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_theta_0-4".format(numBatchTest))
        plt.clf()
        '''
        plt.figure(4)
        plt.plot(np.transpose(outputGeneration[3], [1, 0, 2])[4])
        plt.plot(np.transpose(batch[2], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_RealAndPred_0-4".format(numBatchTest))
        plt.clf()

        plt.figure(4)
        plt.plot(np.transpose(batch[0], [1, 0, 2])[4])
        plt.savefig(save_path + "/vrnn_dis_generated{}_Realagg_0-4".format(numBatchTest))
        plt.clf()
        numBatchTest += 1

    testOutput = np.asarray(testOutput)
    print(testOutput.shape)
    recon_test = testOutput[:, 0].mean()
    mse_test = testOutput[:, 1].mean()
    mae_test = testOutput[:, 2].mean()

    fLog = open(save_path + '/output.csv', 'w')
    fLog.write(str(lr_iterations) + "\n")
    fLog.write(str(windows) + "\n")
    fLog.write("logTest,mseTest,maeTest\n")
    fLog.write("{},{},{}\n".format(recon_test, mse_test, mae_test))
    fLog.write("q_z_dim,p_z_dim,p_x_dim,x2s_dim,y2s_dim,z2s_dim\n")
    fLog.write("{},{},{},{},{},{}\n".format(q_z_dim, p_z_dim, p_x_dim,
                                            x2s_dim, y2s_dim, z2s_dim))
    header = "epoch,log,kl,mse,mae\n"
    fLog.write(header)
    for i, item in enumerate(mainloop.trainlog.monitor['recon_term']):
        f = mainloop.trainlog.monitor['epoch'][i]
        a = mainloop.trainlog.monitor['recon_term'][i]
        b = mainloop.trainlog.monitor['kl_term'][i]
        d = mainloop.trainlog.monitor['mse'][i]
        e = mainloop.trainlog.monitor['mae'][i]
        fLog.write("{:d},{:.2f},{:.2f},{:.3f},{:.3f}\n".format(f, a, b, d, e))
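# --- Reference sketch (not from the original codebase) ----------------------
# `GMM(pred_in, theta_mu_in, theta_sig_in, coeff_in)` above is assumed to be
# the negative log-likelihood of a K-component Gaussian mixture for a scalar
# target: y is (N, 1), mu/sig/coeff are (N, K), and coeff rows sum to 1.
# A minimal stand-in using the log-sum-exp trick for numerical stability:
import numpy as np
import theano.tensor as T

def gmm_nll(y, mu, sig, coeff):
    # log of each weighted component density, shape (N, K)
    log_comp = (T.log(coeff) - 0.5 * T.sqr((y - mu) / sig) -
                T.log(sig) - 0.5 * T.log(2 * np.pi))
    m = log_comp.max(axis=1)  # (N,)
    return -(m + T.log(T.exp(log_comp - m[:, None]).sum(axis=1)))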
def main(args):
    theano.config.optimizer = 'fast_compile'  # set via theano.config; assigning theano.optimizer has no effect
    theano.config.exception_verbosity = 'high'

    trial = int(args['trial'])
    pkl_name = 'rnn_gmm_%d' % trial
    channel_name = 'valid_nll'

    data_path = args['data_path']
    save_path = args['save_path']
    flgMSE = int(args['flgMSE'])
    period = int(args['period'])
    n_steps = int(args['n_steps'])
    stride_train = int(args['stride_train'])
    stride_test = n_steps  # int(args['stride_test'])

    monitoring_freq = int(args['monitoring_freq'])
    epoch = int(args['epoch'])
    batch_size = int(args['batch_size'])
    x_dim = int(args['x_dim'])
    z_dim = int(args['z_dim'])
    y_dim = int(args['y_dim'])
    flgAgg = int(args['flgAgg'])
    rnn_dim = int(args['rnn_dim'])
    k = int(args['num_k'])
    lr = float(args['lr'])
    debug = int(args['debug'])

    print "trial no. %d" % trial
    print "batch size %d" % batch_size
    print "learning rate %f" % lr
    print "saving pkl file '%s'" % pkl_name
    print "to the save path '%s'" % save_path

    x2s_dim = 50  #300
    s2x_dim = 50  #300
    target_dim = k  #(x_dim-1)*k

    model = Model()
    Xtrain, ytrain, Xval, yval = fetch_ukdale(data_path, windows, appliances,
                                              numApps=flgAgg, period=period,
                                              n_steps=n_steps,
                                              stride_train=stride_train,
                                              stride_test=stride_test)
    print("Inside: ", Xtrain.shape, ytrain.shape, Xval.shape, yval.shape)

    train_data = UKdale(name='train',
                        prep='normalize',
                        cond=True,  # False
                        #path=data_path,
                        inputX=Xtrain,
                        labels=ytrain)

    X_mean = train_data.X_mean
    X_std = train_data.X_std

    valid_data = UKdale(name='valid',
                        prep='normalize',
                        cond=True,  # False
                        #path=data_path,
                        X_mean=X_mean,
                        X_std=X_std,
                        inputX=Xval,
                        labels=yval)

    init_W = InitCell('rand')
    init_U = InitCell('ortho')
    init_b = InitCell('zeros')
    init_b_sig = InitCell('const', mean=0.6)

    x, mask, y, y_mask = train_data.theano_vars()  #mask, y_mask
    '''
    if debug:
        x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
        temp = np.ones((15, batch_size), dtype=np.float32)
        temp[:, -2:] = 0.
        mask.tag.test_value = temp
    '''
    x_1 = FullyConnectedLayer(name='x_1', parent=['x_t'], parent_dim=[x_dim],
                              nout=x2s_dim, unit='relu',
                              init_W=init_W, init_b=init_b)

    rnn = LSTM(name='rnn', parent=['x_1'], parent_dim=[x2s_dim],
               nout=rnn_dim, unit='tanh',
               init_W=init_W, init_U=init_U, init_b=init_b)

    theta_1 = FullyConnectedLayer(name='theta_1', parent=['s_tm1'],
                                  parent_dim=[rnn_dim], nout=s2x_dim,
                                  unit='relu', init_W=init_W, init_b=init_b)

    theta_mu = FullyConnectedLayer(name='theta_mu', parent=['theta_1'],
                                   parent_dim=[s2x_dim], nout=target_dim,
                                   unit='linear', init_W=init_W, init_b=init_b)

    theta_sig = FullyConnectedLayer(name='theta_sig', parent=['theta_1'],
                                    parent_dim=[s2x_dim], nout=target_dim,
                                    unit='softplus', cons=1e-4,
                                    init_W=init_W, init_b=init_b_sig)

    coeff = FullyConnectedLayer(name='coeff', parent=['theta_1'],
                                parent_dim=[s2x_dim], nout=k, unit='softmax',
                                init_W=init_W, init_b=init_b)
    '''
    corr = FullyConnectedLayer(name='corr', parent=['theta_1'],
                               parent_dim=[s2x_dim], nout=k, unit='tanh',
                               init_W=init_W, init_b=init_b)

    binary = FullyConnectedLayer(name='binary', parent=['theta_1'],
                                 parent_dim=[s2x_dim], nout=1, unit='sigmoid',
                                 init_W=init_W, init_b=init_b)
    '''
    nodes = [rnn, x_1, theta_1, theta_mu, theta_sig, coeff]

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())
    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)

    x_1_temp = x_1.fprop([x], params)

    def inner_fn(x_t, s_tm1):
        s_t = rnn.fprop([[x_t], [s_tm1]], params)
        theta_1_t = theta_1.fprop([s_t], params)
        theta_mu_t = theta_mu.fprop([theta_1_t], params)
        theta_sig_t = theta_sig.fprop([theta_1_t], params)
        coeff_t = coeff.fprop([theta_1_t], params)
        pred = GMM_sample(theta_mu_t, theta_sig_t, coeff_t)
        return s_t, theta_mu_t, theta_sig_t, coeff_t, pred

    ((s_temp, theta_mu_temp, theta_sig_temp, coeff_temp, pred_temp),
     updates) = theano.scan(fn=inner_fn,
                            sequences=[x_1_temp],
                            outputs_info=[s_0, None, None, None, None])

    # loop variables renamed so they do not clobber the mixture count k
    for key, value in updates.iteritems():
        key.default_update = value

    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    '''
    theta_1_temp = theta_1.fprop([s_temp], params)
    theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    coeff_temp = coeff.fprop([theta_1_temp], params)
    corr_temp = corr.fprop([theta_1_temp], params)
    binary_temp = binary.fprop([theta_1_temp], params)
    '''
    x_shape = x.shape
    x_in = x.reshape((x_shape[0] * x_shape[1], -1))
    theta_mu_in = theta_mu_temp.reshape((x_shape[0] * x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
    coeff_in = coeff_temp.reshape((x_shape[0] * x_shape[1], -1))
    #corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    #binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    if flgAgg == -1:
        pred_temp.name = 'x_reconstructed'
        mse = T.mean((pred_temp - x)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(pred_temp - x))  # T.abs_, not T.abs
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = x.reshape((x_shape[0] * x_shape[1], -1))
    else:
        #pred_temp = pred_temp.reshape((pred_temp.shape[0], pred_temp.shape[1]))
        pred_temp.name = 'pred_' + str(flgAgg)
        #y[:,:,flgAgg].reshape((y.shape[0],y.shape[1],1))
        mse = T.mean((pred_temp - y)**2)  # CHECK RESHAPE with an assertion
        mae = T.mean(T.abs_(pred_temp - y))
        mse.name = 'mse'
        mae.name = 'mae'
        pred_in = y.reshape((y.shape[0] * y.shape[1], -1))

    recon = GMM(pred_in, theta_mu_in, theta_sig_in, coeff_in)  #, binary_in
    recon.name = 'recon'
    recon = recon.reshape((y.shape[0], y.shape[1]))
    #recon = recon * y_mask  #(200, 1000), (1000, 200)
    recon_term = recon.sum(axis=0).mean()
    recon_term.name = 'nll'

    max_x = x.max()
    mean_x = x.mean()
    min_x = x.min()
    max_x.name = 'max_x'
    mean_x.name = 'mean_x'
    min_x.name = 'min_x'

    max_theta_mu = theta_mu_in.max()
    mean_theta_mu = theta_mu_in.mean()
    min_theta_mu = theta_mu_in.min()
    max_theta_mu.name = 'max_theta_mu'
    mean_theta_mu.name = 'mean_theta_mu'
    min_theta_mu.name = 'min_theta_mu'
    '''
    max_theta_sig = theta_sig_in.max()
    mean_theta_sig = theta_sig_in.mean()
    min_theta_sig = theta_sig_in.min()
    max_theta_sig.name = 'max_theta_sig'
    mean_theta_sig.name = 'mean_theta_sig'
    min_theta_sig.name = 'min_theta_sig'

    coeff_max = coeff_in.max()
    coeff_min = coeff_in.min()
    coeff_mean_max = coeff_in.mean(axis=0).max()
    coeff_mean_min = coeff_in.mean(axis=0).min()
    coeff_max.name = 'coeff_max'
    coeff_min.name = 'coeff_min'
    coeff_mean_max.name = 'coeff_mean_max'
    coeff_mean_min.name = 'coeff_mean_min'
    '''
    model.inputs = [x, mask, y, y_mask]
    model.params = params
    model.nodes = nodes

    optimizer = Adam(lr=lr)

    extension = [
        GradientClipping(batch_size=batch_size),
        EpochCount(epoch),
        Monitoring(
            freq=monitoring_freq,
            ddout=[
                recon_term, mse, mae,
                #max_theta_sig, mean_theta_sig, min_theta_sig,
                max_x, mean_x, min_x,
                max_theta_mu, mean_theta_mu, min_theta_mu,
                #coeff_max, coeff_min, coeff_mean_max, coeff_mean_min,#16
                theta_mu_temp, theta_sig_temp, pred_temp, coeff_temp, s_temp
            ],
            indexSep=9,
            indexDDoutPlot=[(0, theta_mu_temp), (2, pred_temp)],
            instancesPlot=[10, 100],  #, 80,150
            savedFolder=save_path,
            data=[Iterator(valid_data, batch_size)]),
        Picklize(freq=monitoring_freq, path=save_path),
        EarlyStopping(freq=monitoring_freq, path=save_path,
                      channel=channel_name),
        WeightNorm()
    ]

    lr_iterations = {0: lr, 20: (lr / 10), 150: (lr / 100), 200: (lr / 1000)}

    mainloop = Training(name=pkl_name,
                        data=Iterator(train_data, batch_size),
                        model=model,
                        optimizer=optimizer,
                        cost=recon_term,
                        outputs=[recon_term],
                        extension=extension,
                        lr_iterations=lr_iterations)
    mainloop.run()

    fLog = open(save_path + '/output.csv', 'w')
    fLog.write("log,mse,mae\n")
    for i, item in enumerate(mainloop.trainlog.monitor['nll']):
        a = mainloop.trainlog.monitor['nll'][i]
        d = mainloop.trainlog.monitor['mse'][i]
        e = mainloop.trainlog.monitor['mae'][i]
        fLog.write("{},{},{}\n".format(a, d, e))
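# --- Reference sketch (not from the original codebase) ----------------------
# GMM_sample above is assumed to draw one value per row from the mixture:
# pick a component index from the softmax coefficients, then sample from that
# component's Gaussian. A stand-in under that assumption, with mu/sig/coeff
# of shape (N, K):
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

rng = RandomStreams(seed=123)

def gmm_sample(mu, sig, coeff):
    idx = T.argmax(rng.multinomial(pvals=coeff), axis=1)  # component per row
    rows = T.arange(mu.shape[0])
    mu_k = mu[rows, idx][:, None]
    sig_k = sig[rows, idx][:, None]
    return mu_k + sig_k * rng.normal(size=(mu.shape[0], 1))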
def get_l1_weight_decay(self, coeff):
    if isinstance(coeff, str):
        coeff = float(coeff)
    assert isinstance(coeff, float) or hasattr(coeff, "dtype")
    W, = self.transformer.get_params()
    # T.abs_ is Theano's elementwise absolute value; T.abs does not exist
    return coeff * T.abs_(W).sum()
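# --- Usage sketch (illustrative values, not from the original codebase) -----
# T.grad of the L1 term yields coeff * sign(W), i.e. a subgradient that is 0
# exactly at W == 0:
import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.array([[0.5, -1.5, 0.0]]), name='W')
l1 = 0.01 * T.abs_(W).sum()
g = T.grad(l1, W)
print(theano.function([], g)())  # [[ 0.01 -0.01  0.  ]]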