def train_and_get_result(_df, _dft, store_item_nbrs, model, total_features):
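	# Fit one regression model per (store_nbr, item_nbr) pair on its selected
	# features, predict on the test frame, clip negative predictions to zero,
	# and concatenate the per-pair results into a single DataFrame.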
	df = _df.copy()
	df_t = _dft.copy()
	RES = []
	total = 0
	for sno, ino in store_item_nbrs:
		if(sno == 35):
			continue
		res = pd.DataFrame()
		df1 = df[(df.store_nbr == sno) & (df.item_nbr == ino)]
		X_train, y_train = ut.get_train_data(df1)
		X_train = X_train.drop(['store_nbr', 'item_nbr'], axis=1)
		y_train = y_train[X_train.index.values]

		df2 = df_t[(df_t.store_nbr == sno) & (df_t.item_nbr == ino)]
		X_predict = ut.get_test_data(df2)
		res['date'] = X_predict['date']
		res['store_nbr'] = X_predict['store_nbr']
		res['item_nbr'] = X_predict['item_nbr']
		X_predict = X_predict.drop(['date', 'store_nbr', 'item_nbr'], axis=1)

		X_train = X_train[total_features[total].tolist()]
		X_predict = X_predict[total_features[total].tolist()]

		regr = ut.get_regression_model(model, len(X_train.values))
		regr.fit(ut.get_processed_X(X_train.values), y_train.values)
		res['log1p'] = np.maximum(regr.predict(ut.get_processed_X(X_predict.values)), 0.)
		RES.append(res)
		total += 1
	result = pd.concat(RES)
	return result
Example #2
def comprehensive_features_analyse(_df, store_item_nbrs):
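	# Rank features for each (store_nbr, item_nbr) pair and accumulate a ranking
	# weighted by the number of training rows with a non-zero target, then pickle
	# the averaged ranking.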
	df = _df.copy()
	plt.figure(figsize=(16, 26))
	X, y = ut.get_train_data(df)
	feature_list = X.columns.values[2:]
	total_weight = 0
	total_rank = pd.DataFrame()
	total_features = []
	total = 0
	for sno, ino in store_item_nbrs:
		if(sno == 35):
			continue
		X_1 = X[(X.store_nbr == sno) & (X.item_nbr == ino)]
		X_1 = X_1.drop(['store_nbr', 'item_nbr'], axis=1)
		y_1 = y[X_1.index.values]
		y_1 = y_1.values
		weight = y_1[y_1 > 0].shape[0]
		total_weight += weight
		features = feature_list
		rank, selection_feature = train_and_analyse(X_1, y_1, sno, ino)
		if len(total_rank) == 0:
			total_rank = rank * weight
		else:
			total_rank += rank * weight
		total_features.append(selection_feature)
		total += 1
		print('done', total)
	total_rank /= total_weight
	# total_rank.plot.barh(stacked=False)
	total_rank.to_pickle('Analyse/total_rank_time-specific')
	# plt.show()
	# plt.close()

	return total_features
def comprehensive_features_analyse(_df, store_item_nbrs):
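	# Variant of the analysis above that preprocesses the features with
	# ut.get_processed_X before ranking and pickles the result to 'total_rank'.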
	df = _df.copy()
	X, y = ut.get_train_data(df)
	feature_list = X.columns.values[2:]
	importance_value = np.zeros(len(feature_list))
	total_weight = 0
	total_rank = pd.DataFrame()
	total_features = []
	total = 0
	for sno, ino in store_item_nbrs:
		if(sno == 35):
			continue
		X_1 = X[(X.store_nbr == sno) & (X.item_nbr == ino)]
		X_1 = X_1.drop(['store_nbr', 'item_nbr'], axis=1)
		y_1 = y[X_1.index.values]
		X_1 = ut.get_processed_X(X_1.values)
		y_1 = y_1.values
		weight = y_1[y_1 > 0].shape[0]
		total_weight += weight
		features = feature_list
		rank, selection_feature = train_and_analyse(X_1, y_1, features)
		if len(total_rank) == 0:
			total_rank = rank * weight
		else:
			total_rank += rank * weight
		total_features.append(selection_feature)
		total += 1
		print('done', total)
	total_rank /= total_weight
	# total_rank.plot.barh(stacked=True)
	total_rank.to_pickle('total_rank')
	# plt.show()
	# plt.close()

	return total_features
Example #4
def f_regression_feature_analyse(_df, store_item_nbrs):
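	# Univariate F-test (f_regression) feature importances per (store_nbr, item_nbr)
	# pair, averaged over pairs weighted by the number of training rows.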
	df = _df.copy()
	plt.figure()
	X, y = ut.get_train_data(df)
	feature_list = X.columns.values[2:]
	importance_value = np.zeros(len(feature_list))
	total = 0
	for sno, ino in store_item_nbrs:
		if(sno == 35):
			continue
		X_1 = X[(X.store_nbr == sno) & (X.item_nbr == ino)]
		X_1 = X_1.drop(['store_nbr','item_nbr'], axis=1)
		y_1 = y[X_1.index.values]
		features = feature_list
		F, _ = f_regression(X_1.values, y_1.values)
		importance = get_importance(np.nan_to_num(F))
		print(importance)
		# to draw the figure for each (sno, ino) pair, uncomment the line below
		# draw_feature_importance(importance, features, sno, ino)
		importance_value += len(X_1.index) * np.array(importance)
		total = total + len(X_1.index)
		print(importance_value)

	importance_value = importance_value / total
	draw_total_average_importance(importance_value, feature_list)
def main(n_aggregation, dim_feature, n_epochs, batch_size, eps):
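    # Train a GraphNeuralNetwork with Adam on a BCE-with-logits loss and report
    # loss/accuracy on a random 50/50 train/validation split after each epoch.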
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    dataset = util.get_train_data('../../datasets')
    train_data, valid_data = util.random_split(dataset, train_ratio=0.5)
    print('train_size: %d, valid_size: %d' %
          (len(train_data), len(valid_data)))

    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0
            for graph, label in zip(graphs, labels):
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        valid_loss, valid_acc = test(model, valid_data, dim_feature)
        print(
            'epoch: %d, train_loss: %f, train_acc: %f, valid_loss: %f, valid_acc: %f'
            % (epoch, train_loss.avg, train_acc.avg, valid_loss, valid_acc))
Example #6
def train_and_get_test(_df, store_item_nbrs, model, total_features):
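	# For each (store_nbr, item_nbr) pair, hold out a random test split and report
	# the 10-fold cross-validated MSE on the remaining training split.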
	df = _df.copy()
	regrs = []
	tests = []
	total = 0
	score_total = []
	for sno, ino in store_item_nbrs:
		if(sno == 35):
			continue
		df1 = df[(df.store_nbr == sno) & (df.item_nbr == ino)]
		df_test, df_train = ut.get_random_test_and_train(df1)
		X_train, y_train = ut.get_train_data(df_train)
		X_train = X_train.drop(['store_nbr', 'item_nbr'], axis=1)
		y_train = y_train[X_train.index.values]

		X_train = X_train[ut.get_features()]
		
		regr = ut.get_regression_model(model, len(X_train))

		# regr.fit(ut.get_processed_X(X_train.values), y_train.values)
		scores = cross_val_score(regr, X_train.values, y_train.values, scoring="neg_mean_squared_error", cv=10)
		print('done, ', total)
		print(-np.mean(scores))
		score_total.append(-np.mean(scores))
		regrs.append(regr)
		tests.append(df_test)
		total += 1
	print('total_score: {}'.format(np.mean(score_total)))

	return regrs, tests
def train_and_get_test(_df, store_item_nbrs, model, total_features):
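	# Variant that fits one model per pair on its selected features and returns
	# the fitted models together with their held-out test frames.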
	df = _df.copy()
	regrs = []
	tests = []
	total = 0
	for sno, ino in store_item_nbrs:
		if(sno == 35):
			continue
		df1 = df[(df.store_nbr == sno) & (df.item_nbr == ino)]
		df_test, df_train = ut.get_random_test_and_train(df1)
		X_train, y_train = ut.get_train_data(df_train)
		X_train = X_train.drop(['store_nbr', 'item_nbr'], axis=1)
		y_train = y_train[X_train.index.values]

		X_train = X_train[total_features[total].tolist()]
		
		regr = ut.get_regression_model(model, len(X_train))

		regr.fit(ut.get_processed_X(X_train.values), y_train.values)
		print('done, ', total)
		regrs.append(regr)
		tests.append(df_test)
		total += 1

	return regrs, tests
# Fragment: assumed to belong to a unittest.TestCase subclass (class name is hypothetical).
import unittest

class TestGetTrainData(unittest.TestCase):

    def test_get_train_data(self):
        print('get_train_data')
        data_dir = '../../datasets'
        train_data = util.get_train_data(data_dir)

        self.assertEqual(len(train_data), 2000)
        for p in train_data:
            self.assertEqual(len(p), 2)
            self.assertEqual(len(p[0].shape), 2)
            self.assertEqual(p[0].shape[0], p[0].shape[1])
            self.assertTrue(p[1] in [0, 1])
Example #9
def main(n_aggregation, dim_feature, n_epochs, batch_size, eps, outputfile):
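    # Same training loop as above, then write a 0/1 prediction per test graph to
    # `outputfile`, one per line.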
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    # Training
    train_data = util.get_train_data('../../datasets')
    print('train_size: %d' % len(train_data))
    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0
            for graph, label in zip(graphs, labels):
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        print('epoch: %d, train_loss: %f, train_acc: %f' %
              (epoch, train_loss.avg, train_acc.avg))

    # Prediction
    test_data = util.get_test_data('../../datasets')
    with open(outputfile, 'w') as o:
        for graph in test_data:
            x = np.zeros([len(graph), dim_feature])
            x[:, 0] = 1
            logit = model(graph, x)
            pred = sigmoid(logit) > 0.5
            o.write(str(int(pred[0])) + '\n')
Example #10
def video_vae_encode():
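    # Run every frame through the trained VAE (encode then decode) and write a
    # video showing the original and reconstructed frames side by side.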
    # Settings.
    data, labels = get_train_data()
    model_dir = 'out_train/model@10000'
    img_height, img_width = 64, 64
    input_dim = [64, 64, 3]
    latent_dim = 100
    num_classes = 10
    batch_size = 32
    display_interval = 1024
    output_path = 'vae_video.mp4'
    # Model.
    vae_model = VAE(vae_face_conv, input_dim, latent_dim)
    vae_model.load_model(model_dir)
    # Encode model with video side-by-side.
    temp_hdf5 = h5py.File('temp.h5', 'w')
    temp_hdf5.create_dataset('data', [len(data)] + input_dim, 'u1')
    for i in range(0, len(data), batch_size):
        start, end = i, min(i + batch_size, len(data))
        X_batch = data[start:end] / 255.
        Z_batch = vae_model.encode(X_batch)
        decoded_outputs = vae_model.decode(Z_batch)
        temp_hdf5['data'][start:end] = np.array(
            [img_as_ubyte(frame) for frame in decoded_outputs])
        if i % display_interval == 0:
            print(i, len(data))
    vwriter = skvideo.io.FFmpegWriter(output_path,
                                      outputdict={
                                          '-vcodec': 'libx264',
                                          '-pix_fmt': 'yuv420p'
                                      })
    for i in range(len(data)):
        frame, decoded_frame = data[i], img_as_ubyte(temp_hdf5['data'][i])
        composite_frame = np.hstack([frame, decoded_frame])
        vwriter.writeFrame(composite_frame)
        if i % display_interval == 0:
            print(i, len(data))
    vwriter.close()
    temp_hdf5.close()
    os.remove('temp.h5')
Example #11
def test_and_get_res(regrs, tests, total_features):
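	# Evaluate each fitted model on its held-out test frame: accumulate per-pair
	# RMSE, total squared error and R^2, and flag pairs with R^2 < 0 or > 0.8.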
	rmse_total = 0
	se_total = 0
	num_items = 0
	num_test = 0
	total_R_square = 0

	for regr, df_test in zip(regrs, tests):
		X_test, y_test = ut.get_train_data(df_test)
		sno = set(X_test.store_nbr.values)
		ino = set(X_test.item_nbr.values)
		X_test = X_test.drop(['store_nbr','item_nbr'], axis=1)
		y_test = y_test[X_test.index.values]

		X_test = X_test[total_features[num_items].tolist()]

		prediction = regr.predict(ut.get_processed_X(X_test.values))
		prediction = np.maximum(prediction, 0.)
		rmse = np.sqrt(((y_test.values - prediction) ** 2).mean())
		se = ((y_test.values - prediction) ** 2).sum()

		r2 = r2_score(y_test.values, prediction)
		total_R_square += r2
		if r2 < 0.0 or r2 > 0.8:
			print(r2, 'sno: {}, ino: {}, features: {}'.format(sno, ino, total_features[num_items].tolist()))
		rmse_total += rmse
		se_total += se
		num_items += 1
		num_test += len(y_test.values)
	# sum of per-pair RMSE values
	print('rmse_test_total: ', rmse_total)
	# total squared error over all test points
	print('se_total: ', se_total)
	print('num_items: ', num_items, 'len_of_test: ', num_test)
	print('Average rmse: ', rmse_total / num_items)
	print('Average se: ', se_total / num_test)
	print('Average r-square: ', total_R_square/ num_items)
import cv2
import util
import numpy as np
import matplotlib.pyplot as plt

from sklearn.ensemble import RandomForestClassifier

train_imgs, train_labels = util.get_train_data()
test_imgs, test_labels = util.get_test_data()

random_forest_model = RandomForestClassifier()
random_forest_model.fit(train_imgs, train_labels)
random_forest_results = random_forest_model.predict(test_imgs)

util.evaluate(random_forest_results, test_labels)

test_imgs = test_imgs.reshape(-1, 28, 28)
np.random.shuffle(test_imgs)

for i in range(10):
    util.visualize(test_imgs[i], random_forest_model)
Example #13
        return np.mean(P == Y)

    def tile_data(self, X, tilesize=49):
        #divide MNIST images into 7x7px tiles
        print("Creating input image tiles")
        N, D = X.shape
        numTile = int(D / tilesize)
        xTile = np.zeros((N, numTile))
        for i in range(numTile):
            tiledata = X[:, i * tilesize:(i + 1) * tilesize]
            xTile[:, i] = np.mean(tiledata, axis=1)  # average pixel intensity within each tile
        return xTile


if __name__ == '__main__':
    X, Y = get_train_data()
    N = int(len(Y) / 4)
    X = X[:N]
    Y = Y[:N]  # keep a quarter of the data
    Ntrain = int(len(Y) * .8)  # use 80% of the data for training, 20% for validation
    Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
    Xvalid, Yvalid = X[Ntrain:], Y[Ntrain:]

    model = DecisionTree(max_depth=100)
    xTile = model.tile_data(Xtrain)
    t0 = datetime.now()
    model.fit(Xtrain, Ytrain)

    print("Training time ", datetime.now() - t0)

    t0 = datetime.now()
# Fragment: assumed tail of a helper that applies a previously fitted scaler (name is hypothetical).
def scale_features(data):
    return scaler.transform(data)

def print_scores(y_test, pred):
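    # Note: relies on the module-level `x_test` frame plus `util.country` and
    # `util.date`; converts sales back from USD before scoring with R^2 and SMAPE.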
    x_test['Sales_true'] = y_test
    x_test['Sales_pred'] = pred
    x_test['Country'] = util.country[x_test.index].copy()
    x_test['Date'] = util.date[x_test.index].copy()
    util.convert_from_usd(x_test, columns=['Sales_true', 'Sales_pred'])
    y_test = x_test['Sales_true']
    pred = x_test['Sales_pred']

    x_test.drop(['Sales_true', 'Sales_pred', 'Country', 'Date'], axis=1, inplace=True)
    print(r2_score(y_test, pred))
    print(util.SMAPE(y_test, pred))
    
train_data = util.get_train_data()
x = train_data.drop('Sales', axis=1)
y = train_data['Sales']
testx, test_merge = util.get_test_data()

x_train, x_test, y_train, y_test = util.get_train_test_data(train_data, test_size=0.20)

model1 = RandomForestRegressor(n_estimators=500,
                               max_depth=25)
#model1.fit(x_train, y_train)
model1.fit(x, y)

model2 = GradientBoostingRegressor(n_estimators=300, 
                                  max_depth = 15,
                                  max_features = 0.9,
                                  min_impurity_decrease = 0.5)
def main():
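    #Two conv-pool layers followed by a fully connected layer, trained with
    #RMSProp on softmax cross-entropy over mini-batches of 500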
    #Get train and test data
    XTrain, YTrain = get_train_data()
    YTrain_ind = y2indicator(YTrain)
    XTrain = reshape(XTrain)

    XTest, YTest = get_test_data()
    YTest_ind = y2indicator(YTest)
    XTest = reshape(XTest)

    N, K = YTrain_ind.shape
    lr = np.float32(0.001)
    mu = np.float32(0.99)
    reg = np.float32(0.01)
    poolsz = (2, 2)
    M = 100
    batch_sz = 500
    no_batches = int(N / batch_sz)

    #Initial random weights
    W1_shape = (5, 5, 3, 20)
    W1_init = init_filter(W1_shape, poolsz)
    b1_init = np.zeros([W1_shape[3]])

    W2_shape = (5, 5, 20, 50)  # input channels must match W1's 20 output filters
    W2_init = init_filter(W2_shape, poolsz)
    b2_init = np.zeros([W2_shape[3]])

    W3_init = np.random.randn(W2_shape[3] * 8 * 8,
                              M) / np.sqrt(W2_shape[3] * 8 * 8 + M)
    b3_init = np.zeros([M])

    W4_init = np.random.randn(M, K) / np.sqrt(M + K)
    b4_init = np.zeros([K])

    #Tensorflow variables
    X = tf.placeholder(name='X', dtype='float32', shape=(batch_sz, 32, 32, 3))
    Y = tf.placeholder(name='Y', dtype='float32', shape=(batch_sz, K))
    W1 = tf.Variable(W1_init.astype(np.float32), name='W1')
    b1 = tf.Variable(b1_init.astype(np.float32), name='b1')
    W2 = tf.Variable(W2_init.astype(np.float32), name='W2')
    b2 = tf.Variable(b2_init.astype(np.float32), name='b2')
    W3 = tf.Variable(W3_init.astype(np.float32), name='W3')
    b3 = tf.Variable(b3_init.astype(np.float32), name='b3')
    W4 = tf.Variable(W4_init.astype(np.float32), name='W4')
    b4 = tf.Variable(b4_init.astype(np.float32), name='b4')

    #Forward prop
    Z1 = convpool(X, W1, b1)
    Z2 = convpool(Z1, W2, b2)
    Z2_shape = Z2.get_shape().as_list()
    Z2_flat = tf.reshape(Z2, [Z2_shape[0], np.prod(Z2_shape[1:])])
    Z3 = tf.nn.relu(tf.matmul(Z2_flat, W3) + b3)
    pY = tf.matmul(Z3, W4) + b4

    #Cost and prediction
    cost = tf.reduce_sum(
        tf.nn.softmax_cross_entropy_with_logits(logits=pY, labels=Y))

    #Train function
    train = tf.train.RMSPropOptimizer(lr, decay=0.99,
                                      momentum=mu).minimize(cost)

    #Get prediction
    pred = tf.argmax(pY, axis=1)

    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        for i in range(100):
            for n in range(no_batches):
                #get current batches
                XBatch = XTrain[n * batch_sz:(n * batch_sz + batch_sz), :]
                YBatch_ind = YTrain_ind[n * batch_sz:(n * batch_sz +
                                                      batch_sz), :]
                #Forward prop
                session.run(train, feed_dict={X: XBatch, Y: YBatch_ind})

                if (n % 200 == 0):
                    YBatch = YTrain[n * batch_sz:(n * batch_sz + batch_sz)]
                    c = session.run(cost, feed_dict={X: XBatch, Y: YBatch_ind})
                    P = session.run(pred, feed_dict={X: XBatch})
                    er = error_rate(P, YBatch)
                    print("Iteration: ", i, "Cost: ", c, "Error rate: ", er)
Example #16
def main():
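	#Theano version: two conv-pool layers plus a fully connected layer, trained by
	#batch gradient ascent on the regularized log-likelihood with momentum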
	#Get train and test data
	XTrain, YTrain = get_train_data()
	YTrain_ind = y2indicator(YTrain)
	XTrain = reshape(XTrain)
	XTest, YTest = get_test_data()
	YTest_ind = y2indicator(YTest)
	XTest = reshape(XTest)

	N,K = YTrain_ind.shape
	M=100
	lr = np.float32(0.000001)
	reg = np.float32(0.01)
	mu = np.float32(0.99)
	poolsize = (2,2)
	batch_sz = 500
	no_batches = int(N/batch_sz)

	#Initial random weight values
	W1_shape = (20, 3, 5, 5)
	W1_init = init_filter(W1_shape, poolsize)
	b1_init = np.zeros([W1_shape[0]])

	W2_shape = (50, 20, 5, 5)
	W2_init = init_filter(W2_shape, poolsize)
	b2_init = np.zeros([W2_shape[0]])

	W3_init = np.random.randn(W2_shape[0]*5*5, M)/np.sqrt(W2_shape[0]*5*5 + M)
	b3_init = np.zeros([M])

	W4_init = np.random.randn(M,K)/np.sqrt(M+K)
	b4_init = np.zeros([K])
	
	#Create theano variables
	X = T.tensor4('X', dtype='float32')			#inputs
	Y = T.matrix('Y')
	W1 = theano.shared(W1_init.astype(np.float32), 'W1')		#Weights
	b1 = theano.shared(b1_init.astype(np.float32), 'b1')
	W2 = theano.shared(W2_init.astype(np.float32), 'W2')
	b2 = theano.shared(b2_init.astype(np.float32), 'b2')
	W3 = theano.shared(W3_init.astype(np.float32), 'W3')
	b3 = theano.shared(b3_init.astype(np.float32), 'b3')
	W4 = theano.shared(W4_init.astype(np.float32), 'W4')
	b4 = theano.shared(b4_init.astype(np.float32), 'b4')

	dW1 = theano.shared(np.zeros(W1_init.shape, dtype=np.float32))	#Momentum variables
	db1 = theano.shared(np.zeros(b1_init.shape, dtype=np.float32))
	dW2 = theano.shared(np.zeros(W2_init.shape, dtype=np.float32))
	db2 = theano.shared(np.zeros(b2_init.shape, dtype=np.float32))
	dW3 = theano.shared(np.zeros(W3_init.shape, dtype=np.float32))
	db3 = theano.shared(np.zeros(b3_init.shape, dtype=np.float32))
	dW4 = theano.shared(np.zeros(W4_init.shape, dtype=np.float32))
	db4 = theano.shared(np.zeros(b4_init.shape, dtype=np.float32))

	#Forward prop equations
	Z1 = convpool(X, W1, b1)			#2 Conv-pool layer
	Z2 = convpool(Z1, W2, b2)
	Z3 = relu(Z2.flatten(ndim=2).dot(W3) + b3)		#Fully connected NN
	P = T.nnet.softmax(Z3.dot(W4) + b4)

	#Cost and prediction equations
	params = (W1, b1, W2, b2, W3, b3, W4, b4)
	reg_cost = reg*np.sum([(param*param).sum() for param in params])
	cost = (Y * T.log(P)).sum() - reg_cost	#maximize log-likelihood minus the L2 penalty (updates below are gradient ascent)
	pred = T.argmax(P, axis=1)

	#Update Weights
	W1_update = W1 + mu*dW1 + lr*T.grad(cost, W1)
	b1_update = b1 + mu*db1 + lr*T.grad(cost,b1)
	W2_update = W2 + mu*dW2 + lr*T.grad(cost, W2)
	b2_update = b2 + mu*db2 + lr*T.grad(cost,b2)
	W3_update = W3 + mu*dW3 + lr*T.grad(cost, W3)
	b3_update = b3 + mu*db3 + lr*T.grad(cost,b3)
	W4_update = W4 + mu*dW4 + lr*T.grad(cost, W4)
	b4_update = b4 + mu*db4 + lr*T.grad(cost,b4)

	#Gradient updates for momentum
	dW1_update = mu*dW1 + lr*T.grad(cost, W1)
	db1_update = mu*db1 + lr*T.grad(cost, b1)
	dW2_update = mu*dW2 + lr*T.grad(cost, W2)
	db2_update = mu*db2 + lr*T.grad(cost, b2)
	dW3_update = mu*dW3 + lr*T.grad(cost, W3)
	db3_update = mu*db3 + lr*T.grad(cost, b3)
	dW4_update = mu*dW4 + lr*T.grad(cost, W4)
	db4_update = mu*db4 + lr*T.grad(cost, b4)

	#Train function
	train = theano.function(
		inputs=[X,Y],
		updates=[ (W1, W1_update),
			(b1, b1_update),
			(W2, W2_update),
			(b2, b2_update),
			(W3, W3_update),
			(b3, b3_update),
			(W4, W4_update),
			(b4, b4_update),
			(dW1, dW1_update),
			(db1, db1_update),
			(dW2, dW2_update),
			(db2, db2_update),
			(dW3, dW3_update),
			(db3, db3_update),
			(dW4, dW4_update),
			(db4, db4_update),
		 ])

	#Get cost and prediction function
	get_res = theano.function(
		inputs=[X,Y],
		outputs=[cost,pred])

	#Run batch gradient descent
	costs = []
	for i in range(400):
		for n in range(no_batches):
			#get current batches
			XBatch = XTrain[n*batch_sz:(n*batch_sz + batch_sz), :]
			YBatch_ind = YTrain_ind[n*batch_sz:(n*batch_sz + batch_sz), :]
			#Forward prop
			train(XBatch, YBatch_ind)

			if(n%200 == 0):
				#YBatch = YTrain[n*batch_sz:(n*batch_sz + batch_sz)]
				c, P = get_res(XTest, YTest_ind)
				er = error_rate(P, YTest)	
				print("Iteration: ", i, "Cost: ", c, "Error rate: ", er)
Example #17
def main():
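    # Parse CLI options, train the chosen architecture on the dataset in data_dir,
    # and save the trained model to save_dir.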
    # Command line arguments
    parser = argparse.ArgumentParser(description='Train a nn model')
    parser.add_argument('data_dir',
                        type=str,
                        nargs='?',
                        help='Path of the image dataset',
                        default="./flowers")
    parser.add_argument('--save_dir',
                        help='Directory to save checkpoints',
                        type=str)
    parser.add_argument(
        '--arch',
        help='Default is alexnet, choose from alexnet, densenet121, or vgg16',
        type=str)
    parser.add_argument('--learning_rate', help='Learning rate', type=float)
    parser.add_argument('--hidden_units', help='Hidden units', type=int)
    parser.add_argument('--epochs', help='Epochs', type=int)
    parser.add_argument('--gpu',
                        action='store_true',
                        help='Use GPU for inference if GPU is available')
    parser.add_argument('--dropout',
                        dest="dropout",
                        action="store",
                        default=0.5)

    args, _ = parser.parse_known_args()

    data_dir = args.data_dir

    save_dir = './'
    if args.save_dir:
        save_dir = args.save_dir

    arch = 'alexnet'
    if args.arch:
        arch = args.arch

    learning_rate = 0.01
    if args.learning_rate:
        learning_rate = args.learning_rate

    hidden_units = 120
    if args.hidden_units:
        hidden_units = args.hidden_units

    epochs = 3
    if args.epochs:
        epochs = args.epochs

    cuda = False
    if args.gpu:
        if torch.cuda.is_available():
            cuda = True
        else:
            print("GPU flag was set but no GPU is available in this machine.")

    dropout = args.dropout

    # Load the dataset
    trainloader, validloader, testloader = util.get_dataloaders(data_dir)

    model, optimizer, criterion = util.nn_setup(arch, dropout, hidden_units,
                                                learning_rate, cuda)

    util.train_network(model, optimizer, criterion, epochs, 40, trainloader,
                       validloader, cuda)
    print("Trainning model done.")

    train_data = util.get_train_data(data_dir)
    util.save_model(model, arch, train_data, optimizer, save_dir, hidden_units,
                    dropout, learning_rate, epochs)
    print("Trainned model saved.")
Example #18
import math
import time

import numpy as np
import tensorflow as tf

import util
from svd import Svd

# All training data
data = util.get_train_data(lambda df: df[df['rating'] != -1])
# Number of samples
size = len(data['user_id'])
# Number of epochs
epoch = 5
# Samples per batch
batch_size = 1000

user_batch = tf.placeholder(tf.int32, shape=[None], name='user_id')
item_batch = tf.placeholder(tf.int32, shape=[None], name='item_id')
rate_batch = tf.placeholder(tf.float32, shape=[None], name="rating")

svd = Svd(32, size, size)

infer, regularizer = svd.model(user_batch, item_batch)

global_step = tf.train.get_or_create_global_step()

cost, train_op = svd.optimization(infer, regularizer, rate_batch)

if __name__ == '__main__':