Example #1
    def test(self):
        test_data = load_test()
        start_time = time.time()
        # open the output CSV file
        out = csv.writer(open("test.csv", "w"), delimiter=',')
        # write the CSV header row
        out.writerow(['image_name', 'Type_1', 'Type_2', 'Type_3'])
        print test_data.images_count[0]
        for i in range(int(test_data.images_count[0])):
            print "testing:", i
            # fetch one test sample and its file name
            x_in, name = test_data.get_sample(0, i)
            #set feed_dict
            feed_dict_train = {self.x: [x_in]}
            #run session
            output = self.sess.run([self.y_pred], feed_dict=feed_dict_train)

            # append the prediction row to the CSV
            probs = output[0][0].tolist()
            probs[:0] = [name[len('./data/test/'):]]
            print probs
            out.writerow(probs)
        end_time = time.time()
        time_dif = end_time - start_time
        print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
Example #2
argparser = argparse.ArgumentParser()
argparser.add_argument("-p", "--predictions", type=str, help='frame-level predictions for dev/test sets')
args = argparser.parse_args()

print args.predictions
predictions = pickle.load(open(args.predictions))
print predictions.keys()

dev_predictions, test_predictions = predictions['val'], predictions['test']

for split in ['dev', 'test']:

  # read model predictions
  frame_predictions = dev_predictions if split == 'dev' else test_predictions
  # read neuron ids of each frame
  _, one_hot_label, neuron_ids = load_dev() if split == 'dev' else load_test()
  print 'split=', split
  print 'len(frame_predictions) = ', len(frame_predictions)
  print 'len(neuron_ids) = ', len(neuron_ids)
  assert(len(frame_predictions) == len(neuron_ids))

  # group frame predictions of the same neuron to make a neuron-level prediction
  prev_neuron_id = ''
  neuron_prediction = {}
  neuron_gold = {}
  current_neuron_frames = []
  correct_predictions, all_predictions = 0.0, 0.0
  confusion_matrix = defaultdict(float)
  for i in xrange(len(frame_predictions)):
    if i >= len(neuron_ids): break
    frame_prediction = frame_predictions[i]
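The grouping loop is cut off here. A minimal sketch of how the frame-level predictions could be collapsed into one prediction per neuron, assuming frames of the same neuron are contiguous and using a majority vote (the original may aggregate differently, e.g. by averaging probabilities):

from collections import Counter

for frame_prediction, neuron_id in zip(frame_predictions, neuron_ids):
  if neuron_id != prev_neuron_id and current_neuron_frames:
    # flush the finished neuron with its most common frame label
    neuron_prediction[prev_neuron_id] = Counter(current_neuron_frames).most_common(1)[0][0]
    current_neuron_frames = []
  current_neuron_frames.append(frame_prediction)
  prev_neuron_id = neuron_id
if current_neuron_frames:  # flush the last neuron
  neuron_prediction[prev_neuron_id] = Counter(current_neuron_frames).most_common(1)[0][0]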
Example #3
import load_data
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# tf.reset_default_graph()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#######################################################################################################

CIFAR = load_data.load_all()
data = CIFAR['data']
label = CIFAR['label']
label = np.asarray(label)

CIFAR = load_data.load_test()
test_data = CIFAR['data']
test_label = CIFAR['label']
test_label = np.asarray(test_label)

#####################################################################################################

num_training = 40000
num_validation = 10000
num_test = 10000

logs_path = '/home/naman/Repositories/CIFAR-10-Recognition/Tensorflow/examples/5'

#######################################################################################################

data = np.reshape(data, (num_training + num_validation, 32, 32, 3))
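The fragment stops after the reshape. A minimal sketch of splitting the 50,000 reshaped CIFAR images into the training and validation sets sized above (a contiguous split is an assumption):

train_data, train_label = data[:num_training], label[:num_training]
val_data = data[num_training:num_training + num_validation]
val_label = label[num_training:num_training + num_validation]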
Example #4
                       type=str,
                       help='frame-level predictions for dev/test sets')
args = argparser.parse_args()

print args.predictions
predictions = pickle.load(open(args.predictions))
print predictions.keys()

dev_predictions, test_predictions = predictions['val'], predictions['test']

for split in ['dev', 'test']:

    # read model predictions
    frame_predictions = dev_predictions if split == 'dev' else test_predictions
    # read neuron ids of each frame
    _, one_hot_label, neuron_ids = load_dev() if split == 'dev' else load_test()
    print 'split=', split
    print 'len(frame_predictions) = ', len(frame_predictions)
    print 'len(neuron_ids) = ', len(neuron_ids)
    assert (len(frame_predictions) == len(neuron_ids))

    # group frame predictions of the same neuron to make a neuron-level prediction
    prev_neuron_id = ''
    neuron_prediction = {}
    neuron_gold = {}
    current_neuron_frames = []
    correct_predictions, all_predictions = 0.0, 0.0
    confusion_matrix = defaultdict(float)
    for i in xrange(len(frame_predictions)):
        if i >= len(neuron_ids): break
        frame_prediction = frame_predictions[i]
Example #5
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import load_data as loader
from svm import SVMclassifier
import matplotlib.pyplot as plt
import numpy as np

plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

data, label = loader.load_all()
test_data, test_label = loader.load_test()

results = {}
best_val = -1
best_svm = None
learning_rates = [5e-6]  # [1e-6, 5e-6, 1e-5, 7e-5, 3e-4, 6e-4, 1e-3]
regularization_strengths = [1000]  # [0, 1, 10, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e-1, 1e-2, 1e-3]
batch_sizes = [600]  # [32, 64, 128, 150, 300, 600, 1000]
for lr in learning_rates:
    for rs in regularization_strengths:
        for bs in batch_sizes:
            model = SVMclassifier()
            model.add_data(data[:48000], label[:48000], data[48000:],
                           label[48000:], 10)
            model.InitializePars()
            model.set_lr(lr)
            model.set_reg(rs)
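The grid-search loop is truncated after set_reg. A minimal sketch of how the loop body could finish, filling the results/best_val/best_svm variables initialized above; set_bs, train and validate are assumed SVMclassifier methods, mirroring the setters already shown:

            model.set_bs(bs)            # assumed setter, like set_lr/set_reg
            model.train()               # assumed training entry point
            val_acc = model.validate()  # assumed validation-accuracy helper
            results[(lr, rs, bs)] = val_acc
            if val_acc > best_val:      # keep the best configuration seen so far
                best_val = val_acc
                best_svm = model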
Example #6
import sys
import cPickle as pickle
from load_data import load_dev, load_test
import numpy as np

prefix = sys.argv[1]
details = int(sys.argv[2])
(X_test, Y_test, _) = load_test()

meta_dict = pickle.load(open(prefix + '.meta'))

accuracy = meta_dict['accuracy']
gold_classes = np.argmax(Y_test, axis=1)

print accuracy
if details:
    pred_classes = meta_dict['pred_classes']
    length = meta_dict['length']
    correct = meta_dict['correct']

    for l in correct:
        print("label index {} # of predictions {} accuracy {:10.4f}".format(
            l, length[l], correct[l] * 1.0 / length[l]))
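gold_classes is computed above but never used in the shown fragment. A minimal sketch of how it could be checked against the stored predictions (assuming pred_classes holds argmax class indices, like gold_classes):

pred_classes = meta_dict['pred_classes']
print(np.mean(np.asarray(pred_classes) == gold_classes))  # recomputed accuracy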
Example #7
p = parser.parse_args()

def sample_training(label_split, n):
	sample_x = []
	sample_y = []
	for label in label_split:
		indexes = np.random.randint(0, len(label_split[label]), n)
		for i in indexes:
			x,y = label_split[label][i]
			sample_x += [x]
			sample_y += [y]

	return np.stack(sample_x, axis = 0).reshape((len(label_split)*n,sample_x[0].shape[0],1)), np.stack(sample_y, axis = 0)

(X_train, Y_train, _) = load_dev()
(X_test, Y_test, _) = load_test()

X_train = np.asarray(X_train, dtype = theano.config.floatX).reshape((X_train.shape[0],X_train.shape[1],1))
Y_train = np.asarray(Y_train, dtype = bool)

label_split = defaultdict(list)
for i,label in enumerate(np.argmax(Y_train, axis = 1)):
	label_split[label] += [(X_train[i], Y_train[i])]

X_test = np.asarray(X_test, dtype = theano.config.floatX).reshape((X_test.shape[0],X_test.shape[1],1))
Y_test = np.asarray(Y_test, dtype = bool)

Ydim = Y_train.shape[1]
Xdim = X_train.shape[1]

print('Build model...')
Example #8
import load_data
import glob
import numpy as np
from settings import IMG_ROWS, IMG_COLS, BATCH_SIZE
from models import load_keras_model

test_data, test_id = load_data.load_test(IMG_ROWS, IMG_COLS)
test_data = test_data.reshape(test_data.shape[0], 1, IMG_ROWS, IMG_COLS)

model = load_keras_model('./data/convnet_keras.json',
                         './data/convnet_keras.h5')

test_pred = model.predict(test_data, batch_size=BATCH_SIZE)

np.savez('./output/submission_data', test_pred, test_id)

# TEST SEVERAL MODELS
# structs = glob.glob('./data/*.json')
# weights = glob.glob('./data/*.h5')
# predictions = []
# for s, w in zip(structs, weights):
#     model = load_keras_model(s, w)
#     test_pred = model.predict(test_data, batch_size=BATCH_SIZE)
#     predictions.append(test_pred)
# test_pred = np.mean(predictions, axis=0)
Example #9
            #print ('Sampled in',next_index)
            #print ('next_index',np.shape(next_chars),next_chars)
            #get the sampled char
            next_char = indices_char[next_index]

            generated += next_char
            sentence = sentence[1:] + next_char
            #print ('**** sentence **** at No.',i,sentence)
            sys.stdout.write(next_char)
            sys.stdout.flush()
        print()
    '''
   


X_test, y_test, true_ids = load_data.load_test(chars,1)

for index, x in enumerate(X_test):
    # print('shape of x', np.shape(x))
    seq = x
    prediction_result = []
    print('Inputs,', true_ids[index])
    # print('At test sample %s, Shape of testing x %s' % (index, np.shape(x)))
    for i in range(5):

        new_x = np.zeros((1, maxlen, len(chars)))
        for t, id in enumerate(seq):
            # print('input', id)  # input o
            new_x[0, t, id] = 1.
        # new_x = np.expand_dims(x, axis=0)

Example #10
beam = 1
samples = 100
if __name__ == '__main__':
    mmm, encoder = seq2seq(latent_dim, dim, vocabs, pad)
    encoder.load_weights('seq2seq_cp_weights.h5', by_name=True)

    decoder = decoder_model(latent_dim, pad)
    decoder.load_weights('seq2seq_cp_weights.h5', by_name=True)

    attention = attention_inference(latent_dim, max_sentence)
    # attention.load_weights('seq2seq_cp_weights.h5', by_name=True)

    # print(encoder.summary(), decoder.summary())
    x_test, test_answer, index_id, id_index = load_test()
    x_test = x_test.reshape(samples * 80, dim)
    x_test = (x_test - x_test.mean(axis=0)) / (x_test.std(axis=0) + 0.001)
    x_test = x_test.reshape(samples, 80, dim)

    output_filename = 'beam_output_sentences.txt'
    ff = open(output_filename, 'w')
    f = open('MLDS_hw2_data/testing_id.txt', 'r')

    for id in f.readlines():
        id = id.strip()
        i = id_index[id]
        test_output = decode_sequence(x_test[i:i + 1], encoder, attention,
                                      decoder)
        test_output = trim(test_output)
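The loop is truncated after trim. A minimal sketch of how each decoded caption could be written to the output file opened above (the "id,caption" line format is an assumption):

        ff.write(id + ',' + test_output + '\n')  # assumed submission format
    ff.close()
    f.close()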
Example #11
    "max_depth": [2, 4, 6],
    "n_estimators": [50, 100, 200]
},
                      verbose=1)
reg_cv.fit(x_train, y_train)

reg = xgb.XGBRegressor(**reg_cv.best_params_)
reg.fit(x_train, y_train)

# save the model
import pickle

pickle.dump(reg, open("reg_model.pkl", "wb"))

# load the test set
test = load_test()

for feat in fac:
    test[feat] = pd.factorize(test[feat], sort=True)[0]

test = np.array(test)
# XGBRegressor (sklearn API) predicts from an array directly; no DMatrix needed

# apply the model to the test set
pred_test = reg.predict(test)
submission = pd.DataFrame({"id": test_id, "unit_sales": pd.Series(pred_test)})
submission.to_csv("sub4.csv", index=False)

#feature importance
import matplotlib.pyplot as plt
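The fragment ends at the pyplot import. A minimal sketch of the feature-importance plot it presumably leads into, using xgboost's built-in plotting helper on the fitted model:

ax = xgb.plot_importance(reg)  # per-feature importance scores as a bar chart
plt.tight_layout()
plt.savefig("feature_importance.png")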