import os
import time

import numpy as np
import scipy as sp
import scipy.sparse
import tensorflow as tf

import util

# sigmoid, be_positive, pair_dataset_sto_generator,
# pair_dataset_sto_sparse_generator and process_image are expected to be
# provided elsewhere in this repo.


def sgd_lr_4sps(data, label, sample, param):
    """SGD logistic regression on pairwise features, sparse-input variant.

    Only pairs drawn from the anchor indices in `sample` are used.
    """
    start = time.time()
    L = len(sample)
    num_samples = L * (L - 1) / 2        # number of unordered pairs
    num_features = data.shape[1] * 2     # a pair is two rows concatenated
    print "#Samples:%d, #Features:%d" % (num_samples, num_features)
    weights = np.random.random(num_features)
    alpha = param['alpha']   # learning rate
    iters = param['iters']   # passes over the pair stream
    gamma = param['gamma']   # L2 regularization strength
    for it in xrange(iters):
        print "iter%d " % it
        ic = 0
        for pi, py in pair_dataset_sto_sparse_generator(data, label, sample):
            # pi is a sparse row of binary features, so the dot product
            # w . pi reduces to summing the weights at its nonzero indices.
            output = sigmoid(np.sum(weights[pi.indices]))
            err = output - py
            weights = weights - alpha * (
                pi.toarray().reshape(num_features) * err + gamma * weights)
            util.view_bar(ic, num_samples)
            ic += 1
    print "train complete! took %.2fs" % (time.time() - start)
    return weights
def sgd_lr(data, label, param):
    """SGD logistic regression over all pairwise feature vectors (dense)."""
    start = time.time()
    num_samples = data.shape[0] * (data.shape[0] - 1) / 2
    num_features = data.shape[1] * 2
    print "#Samples:%d, #Features:%d" % (num_samples, num_features)
    weights = np.random.random(num_features)
    alpha = param['alpha']
    iters = param['iters']
    gamma = param['gamma']
    for it in xrange(iters):
        print "iter%d " % it
        ic = 0
        for pi, py in pair_dataset_sto_generator(data, label):
            output = sigmoid(np.dot(pi, weights))
            err = output - py
            # Gradient step on the logistic loss with an L2 penalty.
            weights = weights - alpha * (pi * err + gamma * weights)
            util.view_bar(ic, num_samples)
            ic += 1
    print "train complete! took %.2fs" % (time.time() - start)
    return weights
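# The two helpers below are defined elsewhere in the repo. These are only
# illustrative sketches of their assumed behavior, inferred from how the
# trainers above use them; they are not the repo's actual implementations.


def sigmoid(x):
    # Standard logistic function, assumed from its use on the pair logit.
    return 1.0 / (1.0 + np.exp(-x))


def pair_dataset_sto_generator(data, label):
    # Hypothetical sketch: stream unordered row pairs in random order,
    # yielding the two rows concatenated (length 2n, matching num_features
    # in sgd_lr) and a binary preference target. Note this materializes all
    # O(n^2) pairs up front, so it is only a sketch for small data.
    num = data.shape[0]
    pairs = [(i, j) for i in xrange(num) for j in xrange(i + 1, num)]
    np.random.shuffle(pairs)
    for i, j in pairs:
        pi = np.concatenate([data[i], data[j]])
        py = 1 if label[i] > label[j] else 0
        yield pi, py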
def pwlr_predict4sps_offline(train_data, train_label, sample, weights):
    """Sparse pairwise prediction for the training rows NOT in `sample`:
    compare each held-out row against every sampled row and average the
    votes."""
    start = time.time()
    print "Prediction Begin!"
    L = len(sample)
    test_inx = list(set(xrange(train_data.shape[0])) - set(sample))
    result = np.zeros(len(test_inx))
    num_test = len(test_inx)
    ic = 0
    for i in xrange(num_test):
        # Flatten each sparse row to a plain list so that `+` below
        # concatenates the two rows into one 1 x 2n feature vector
        # (matching the 2n-dimensional weights) instead of stacking them
        # into a 2 x n matrix whose indices all fall in [0, n).
        test_instance = train_data.getrow(test_inx[i]).toarray().ravel().tolist()
        for j in xrange(L):
            train_instance = train_data.getrow(sample[j]).toarray().ravel().tolist()
            sample_instance = sp.sparse.csr_matrix(test_instance + train_instance)
            result[i] += (1 + be_positive(
                np.sum(weights[sample_instance.indices]))) * train_label[sample[j]]
            util.view_bar(ic, num_test * L)
            ic += 1
    print "prediction complete! took %.2fs" % (time.time() - start)
    return result / L
def pwlr_predict4sps_online(train_data, train_label, sample, test_data,
                            weights):
    """Sparse pairwise prediction for unseen test rows: compare each test
    row against every sampled training row and average the votes."""
    start = time.time()
    print "Prediction Begin!"
    L = len(sample)
    result = np.zeros(test_data.shape[0])
    num_test = test_data.shape[0]
    ic = 0
    for i in xrange(num_test):
        # As in the offline variant, flatten each sparse row to a plain
        # list so that `+` concatenates the two rows into one 1 x 2n
        # feature vector rather than stacking them into a 2 x n matrix.
        test_instance = test_data.getrow(i).toarray().ravel().tolist()
        for j in xrange(L):
            train_instance = train_data.getrow(sample[j]).toarray().ravel().tolist()
            sample_instance = sp.sparse.csr_matrix(test_instance + train_instance)
            result[i] += (1 + be_positive(
                np.sum(weights[sample_instance.indices]))) * train_label[sample[j]]
            util.view_bar(ic, num_test * L)
            ic += 1
    print "prediction complete! took %.2fs" % (time.time() - start)
    return result / L
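def _demo_sparse_pipeline():
    # Hypothetical end-to-end example, not part of the original repo: the
    # toy data, sample size, and hyperparameter values are illustrative
    # placeholders, and it runs only once pair_dataset_sto_sparse_generator
    # is importable.
    rng = np.random.RandomState(0)
    X = sp.sparse.csr_matrix(rng.randint(0, 2, size=(30, 6)))
    y = rng.randint(0, 2, size=30)
    sample = list(rng.choice(30, 8, replace=False))
    param = {'alpha': 0.01, 'iters': 2, 'gamma': 0.001}
    w = sgd_lr_4sps(X, y, sample, param)
    print pwlr_predict4sps_offline(X, y, sample, w)
    print pwlr_predict4sps_online(X, y, sample, X[:3], w)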
def pwlr_predict(train_data, train_label, test_data, weights):
    """Dense pairwise prediction: compare each test instance against every
    training instance and average the votes."""
    start = time.time()
    print "Prediction Begin!"
    L = len(train_data)
    result = np.zeros(len(test_data))
    num_test = len(test_data)
    ic = 0
    for i in xrange(num_test):
        test_instance = test_data[i]
        for j in xrange(L):
            train_instance = train_data[j]
            # Instances are assumed to be Python lists here, so `+`
            # concatenates them into one pairwise vector of length 2n.
            sample_instance = test_instance + train_instance
            result[i] += (1 + be_positive(
                np.dot(sample_instance, weights))) * train_label[j]
            util.view_bar(ic, num_test * L)
            ic += 1
    print "prediction complete! took %.2fs" % (time.time() - start)
    return result / L
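# be_positive is defined elsewhere in the repo; this is only an
# illustrative assumption about its behavior, inferred from the
# (1 + be_positive(...)) voting pattern in the predictors above.


def be_positive(x):
    # Assumed: map the pair logit to a +/-1 vote, so the factor
    # (1 + be_positive(x)) is 2 when the model prefers the test instance
    # and 0 otherwise; the accumulated result is then divided by L.
    return 1 if x > 0 else -1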
def train(length, width, image, check):
    """Train the CNN built by process_image on a single image, resuming
    from a checkpoint under 'path/' when one exists."""
    x = tf.placeholder(tf.float32, [1, width, length, NUM_CHANNELS],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 4], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATRION_RATE)
    y = process_image(x, False, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Maintain shadow (moving-average) copies of the trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = data loss + the L2 terms collected by the regularizer.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, TRAINING_STEPS,
        LEARNING_RATE_DECAY, staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Group the gradient step and the moving-average update into one op.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        if os.path.exists('path/'):
            saver.restore(sess, 'path/train.ckpt')
        else:
            tf.global_variables_initializer().run()
        for step in range(TRAINING_STEPS):
            # Fetch the global step under its own name rather than
            # rebinding `step`, which would shadow the loop variable.
            _, loss_value, global_step_value = sess.run(
                [train_op, loss, global_step],
                feed_dict={x: image, y_: check})
            util.view_bar("processing step of ", global_step_value, 100)
        saver.save(sess, 'path/train.ckpt')
        return sess.run(y, feed_dict={x: image})
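# The hyperparameter constants and process_image used by train() live
# elsewhere in the repo. The values below are illustrative placeholders
# only (not the repo's settings), shown so the expected names and a sample
# invocation are visible in one place:
#
#     NUM_CHANNELS = 3
#     MOVING_AVERAGE_DECAY = 0.99
#     REGULARIZATRION_RATE = 0.0001
#     LEARNING_RATE_BASE = 0.8
#     LEARNING_RATE_DECAY = 0.99
#     TRAINING_STEPS = 100
#
#     image = np.random.random((1, 64, 64, NUM_CHANNELS))
#     check = np.zeros((1, 4)); check[0, 2] = 1.0   # one-hot class 2
#     logits = train(64, 64, image, check)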