Example 1
def main():
    z = image_load()
    model = models.model2(batch_size=1)
    model.load_state_dict(torch.load("optim_weights.pth"))
    model.eval()  # switch to inference mode (disables dropout/batch-norm updates)
    #    test(model)
    feat = torch.tensor(z.get_segment()).float()
    softmax = nn.Softmax(dim=1)
    number = ''
    probab = 1
    target_no = '1234567890'
    for i in feat:
        if len(argv) > 1:  # display each segment when a CLI argument is given
            plt.imshow(i)
            plt.show()
        output = softmax(model(i.view(1, 1, *i.shape)))
        prob, idx = output.topk(1)  # highest class probability and its index
        probab *= prob.item()
        number += str(idx.item())

    print(number)
    k = 0
    for i, j in zip(number, target_no):
        if i == j:
            k += 1
    print("Accuracy", k)
    print("probability ", probab)
Example 2
    def __init__(self, window: 'window', video_source=0, replay=False):
        self.window = window
        self.window.title(settings.get_config("window_title"))
        self.replay = replay

        if self.replay:
            # Load previous data instead of from camera.
            self.data_gen = loadDataset.dataset_generator(
                './dataset/imgs/scissor_frames',
                './dataset/csvs/scissor.csv',
                repeat=True)
            self.inputDimension = [480, 640, 3]
        else:
            # open video source (by default this will try to open the computer webcam)
            self.video_source = video_source
            self.vid = MyVideoCapture(self.video_source)
            self.inputDimension = [self.vid.height, self.vid.width, 3]

        image_size = settings.get_config("image_input_size")
        self.maxListSize = settings.get_config("image_history_length")
        # independent placeholder frames (avoids aliasing one shared array)
        self.imgList = [np.zeros(image_size) for _ in range(self.maxListSize)]

        # Create a canvas that can fit the above video source size
        self.canvas = Canvas(window,
                             height=self.inputDimension[0],
                             width=self.inputDimension[1])
        self.canvas.pack()
        self.photo = None
        self.strVar = StringVar(value="None")
        self.lblClassification = Label(window,
                                       textvariable=self.strVar,
                                       font=("Helvetica", 16))
        self.lblClassification.pack(anchor=CENTER, expand=True)

        # Set up frame counters and limiters. No more than (fps) frames per second.
        # Also set up label to display current frame rate.
        self.fps = settings.get_config("max_fps")
        self.fps_counter = window_utils.SimpleFPSCounter()
        self.fps_limiter = window_utils.SimpleFPSLimiter(fps=self.fps)
        self.fps_value = StringVar()
        self.fps_label = Label(window,
                               textvariable=self.fps_value,
                               font=("Helvetica", 16))
        self.fps_label.pack(anchor=CENTER, expand=True)

        # Initialize Tensorflow Models
        tf.reset_default_graph()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.session = tf.Session(config=config)
        self.model1 = models.model1(image_size, self.maxListSize)
        self.model2 = models.model2(image_size)
        saver = tf.train.Saver()
        # restore the saved weights; build the path with os.path.join for portability
        saver.restore(
            self.session,
            os.path.join(os.getcwd(), "savedmodels", "both", "models.ckpt"))

        # _main_loop() will "recursively" call itself at most (fps) times per second.
        self._main_loop()
        self.window.mainloop()
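Example 2 is written against the TensorFlow 1.x graph API (reset_default_graph, ConfigProto, Session). Under TensorFlow 2.x the same session setup is still reachable through the compat layer; a minimal sketch, assuming the rest of the file can run with v2 behavior disabled:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory on demand, not all at once
session = tf.Session(config=config)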
Example 3
def menu():
    print("************IBM Stock Price Predictor**************")
    print()

    choice = input("""
                1: Model 1 - Random Forest Regressor
                2: Model 2 - Long Short-Term Memory Neural Network
                Please enter your choice: """)

    if choice == "1":
        model1(X, Y)
    elif choice == "2":
        model2(X, Y)
    else:
        print("You must only select either 1 or 2 \nPlease try again")
        sys.exit(1)
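sys.exit(1) aborts the whole program on a single bad keystroke. A hypothetical variant that re-prompts instead (same globals X, Y and model functions as above):

def menu_with_retry():
    while True:
        choice = input("Please enter your choice (1 or 2): ")
        if choice == "1":
            return model1(X, Y)
        if choice == "2":
            return model2(X, Y)
        print("You must select either 1 or 2. Please try again.")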
Example 4
def initialize_model(model_num):
    if model_num == 1:
        return models.model1(input_size, num_classes)
    elif model_num == 2:
        return models.model2(input_size, num_classes)
    elif model_num == 3:
        return models.model3(input_size, num_classes)
    elif model_num == 4:
        return models.model4(input_size, num_classes)
    elif model_num == 5:
        return models.model5(input_size, num_classes)
    elif model_num == 6:
        return models.model6(input_size, num_classes)
    elif model_num == 7:
        return models.model7(input_size, num_classes)
Example 5
def initialize_model(model_num, vocab_size, embed_size):
    if model_num == 1:
        return models.model1(vocab_size, embed_size)
    elif model_num == 2:
        return models.model2(vocab_size, embed_size)
    elif model_num == 3:
        return models.model3(vocab_size, embed_size)
    elif model_num == 4:
        return models.model4(vocab_size, embed_size)
    elif model_num == 5:
        return models.model5(vocab_size, embed_size)
    elif model_num == 6:
        return models.model6(vocab_size, embed_size)
    elif model_num == 7:
        return models.model7(vocab_size, embed_size)
Example 6
def initialize_model(model_num):
    if model_num == 1:
        return models.model1()
    elif model_num == 2:
        return models.model2()
    elif model_num == 3:
        return models.model3()
    elif model_num == 4:
        return models.model4()
    elif model_num == 5:
        return models.model5()
    elif model_num == 6:
        return models.model6()
    elif model_num == 7:
        return models.model7()
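Examples 4-6 repeat the same seven-way elif dispatch with different constructor arguments, and each silently returns None for an out-of-range model_num. A table-driven sketch covering all three variants that fails loudly instead (assuming models exposes model1 through model7, as above):

def initialize_model(model_num, *args):
    # look the constructor up by name and forward whatever arguments it takes
    if not 1 <= model_num <= 7:
        raise ValueError('unknown model number: {}'.format(model_num))
    return getattr(models, 'model{}'.format(model_num))(*args)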
Example 7
    def __init__(self):
        self.model = models.model2()
        transform = transforms.Compose([
            transforms.RandomCrop((28, 28)),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,)),
        ])

        dataset = datasets.MNIST('MNIST_data/', train=True, download=True,
                                 transform=transform)
        len_d = len(dataset)
        num_data = list(range(len_d))
        np.random.shuffle(num_data)
        # 60/20/20 split: train / validation / held-out test indices
        split = int(len_d * 0.6)
        split_end = int(len_d * 0.8)
        subset_train = torch.utils.data.SubsetRandomSampler(num_data[:split])
        subset_valid = torch.utils.data.SubsetRandomSampler(num_data[split:split_end])
        self.trainloader = torch.utils.data.DataLoader(dataset, batch_size=64, sampler=subset_train)
        self.validloader = torch.utils.data.DataLoader(dataset, batch_size=64, sampler=subset_valid)
        self.len_v = len(self.validloader)
        self.len_t = len(self.trainloader)
        with open("test_idx", "wb") as f:
            pickle.dump(num_data[split_end:], f)
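The pickled indices preserve the held-out 20% for later testing. A hypothetical counterpart at evaluation time, reading them back into a sampler:

with open("test_idx", "rb") as f:
    test_idx = pickle.load(f)
testloader = torch.utils.data.DataLoader(
    dataset, batch_size=64,
    sampler=torch.utils.data.SubsetRandomSampler(test_idx))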
Example 8
z = np.zeros((n,m1))
d = np.array(([0], [-0.75], [-0.5])) # alternative specific variable coefficients

trainingSets = 100
errors = np.empty([trainingSets, 2]) 
for i in range(trainingSets):
    # sample each price independently and uniformly at random from the interval [1, 10]
    pblue = random.uniform(1, 10)
    pred = random.uniform(1, 10)
    w = np.array(([0], [pblue], [pred]))
    # ground truth model
    results, prob, utilities = simulate(n, z, w, b, d, a, sample)
    # fit model 1
    model1Estimate, beta = model1(results, n, pblue, pred)
    # fit model 2
    model2Estimate = model2(results, n, pblue, pred)
    #print(model1Estimate)
    #print(model2Estimate)
    # compute choice shares: exp(V)/(1+exp(V)), the binary-logit probability
    trueShare = math.exp(utilities[0,1])/(1 + math.exp(utilities[0,1]))
    model1Share = math.exp(float(model1Estimate[1]))/(1 + math.exp(float(model1Estimate[1])))
    model2Share = math.exp(float(model2Estimate[1]))/(1 + math.exp(float(model2Estimate[1])))
    # compute errors
    errors.itemset((i, 0), math.fabs(trueShare-model1Share)/trueShare)
    errors.itemset((i, 1), math.fabs(trueShare-model2Share)/trueShare)

# plot histogram
plotHistogram(errors[:, 0], errors[:, 1],
              label1=r'Model 1: $V_j = \beta p_j$',
              label2=r'Model 2: $V_j = \delta_j p_j$',
              title='Histogram of relative errors',
              xlabel='Value', ylabel='Frequency')
plotHistogramII([errors[:, 0], errors[:, 1]],
                label=[r'Model 1: $V_j = \beta p_j$', r'Model 2: $V_j = \delta_j p_j$'],
                title='Histogram of relative errors',
                xlabel='Value', ylabel='Frequency', bins=20)
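The exp(V)/(1+exp(V)) expressions above are the logistic (sigmoid) function, and math.exp overflows for large utilities. A sketch of the numerically stable equivalent, assuming SciPy is available:

from scipy.special import expit  # expit(u) == exp(u)/(1+exp(u)), overflow-safe
trueShare = expit(utilities[0, 1])
model1Share = expit(float(model1Estimate[1]))
model2Share = expit(float(model2Estimate[1]))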

Example 9
nb_words = min(max_nb_words, len(word_index))
embedding_matrix = pickle.load(open('glovemat840B.300d.pickle', 'rb'))
print('Null word embeddings: %d' %
      np.sum(np.sum(embedding_matrix, axis=1) == 0))

q1_data = pad_sequences(q1_seqs, maxlen=max_seq_length)
q2_data = pad_sequences(q2_seqs, maxlen=max_seq_length)
tq1_data = pad_sequences(tq1_seqs, maxlen=max_seq_length)
tq2_data = pad_sequences(tq2_seqs, maxlen=max_seq_length)
labels = np.asarray(df['is_duplicate'], dtype=int)
print('Q1 shape: ', q1_data.shape)
print('Q2 shape: ', q2_data.shape)
print('Labels shape: ', labels.shape)

#m1=model1(embedding_matrix, max_seq_length, nb_words)
m2 = model2(embedding_matrix, max_seq_length, nb_words)

loss = 'binary_crossentropy'
optimizer = 'adam'
metrics = ['accuracy']
#m1.compile(loss=loss, optimizer=optimizer, metrics=metrics)
m2.compile(loss=loss, optimizer=optimizer, metrics=metrics)
#m1.load_weights("4-0.373832924005.h5")
m2.load_weights(
    "W8-{'val_loss': [0.38170239355064683], 'val_acc': [0.8256449578405709], 'loss': [0.30116606968005971], 'acc': [0.86464061825938154]}.h5"
)
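# Test-time augmentation: average predictions over both question orderings
# so the final score is symmetric in (q1, q2).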
preds = m2.predict([tq1_data, tq2_data], verbose=True)
preds += m2.predict([tq2_data, tq1_data], verbose=True)
preds /= 2

#dfp=pd.read_csv("s5.csv")
Example 10
    # KTF.set_session(get_session())
    # usage: python train.py input_params.json
    _, params_file = argv
    with open(params_file, 'r') as fp:
        params = json.load(fp)
    print(params)
    nb_epoch = params['nb_epoch']
    initial_epoch = params['initial_epoch']

    base_lr = params['base_lr']
    print('starting training for {} epochs with initial epoch number: {}'.format(
        nb_epoch, initial_epoch))
    batch_size = params['batch_size']
    assert K.image_dim_ordering() == 'tf'
    weights_path = params['weights_path']

    model = model2(weights_path=weights_path)
    adagrad = Adagrad(lr=base_lr)
    loss_weights = params['loss_weights']
    loss = {}
    metrics = {}
    attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
             'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor',
             'Repetition', 'Symmetry']

    # every attribute head, plus the overall score, is trained with MSE
    for attr in attrs:
        loss[attr] = 'mean_squared_error'
        metrics[attr] = 'mean_squared_error'

    loss['score'] = 'mean_squared_error'
    metrics['score'] = 'mean_squared_error'

    model.compile(loss=loss, optimizer=adagrad, metrics=metrics,
                  loss_weights=loss_weights)
Example 11
import random
import sys
import pickle
import glob
import copy

import numpy as np
from keras.callbacks import LambdaCallback
from keras.optimizers import SGD, RMSprop, Adam

from models import model, model2

encoder, autoencoder = model2()
buff = None

pkl_path = 'dataset/c_to_i.pkl'
data_path = 'dataset/corpus.distinct.txt'


def callbacks(epoch, logs):
    global buff
    buff = copy.copy(logs)
    print("epoch", epoch)
    print("logs", logs)


def train():
    with open(pkl_path, 'rb') as f:
        c_i = pickle.load(f)

    x, y = [], []
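train() is truncated here. A hypothetical continuation showing how the callbacks() logger above would be attached through the LambdaCallback imported at the top (optimizer, loss, and epoch count are assumptions):

    # wire the (epoch, logs) logger into Keras training
    log_cb = LambdaCallback(on_epoch_end=callbacks)
    autoencoder.compile(optimizer=Adam(), loss='binary_crossentropy')
    autoencoder.fit(np.array(x), np.array(y), epochs=10, callbacks=[log_cb])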
Example 12
better_schools_index = list(
    map(distances.index,
        heapq.nsmallest(10, distances)))  # indices of the 10 candidate universities closest to the predicted one
better_schools = []  # the ten nearest candidates, collected as better_schools
for better_school_index in better_schools_index:
    better_schools.append(schools[better_school_index])

print('better_schools:', better_schools)

###################################################################################
'''
Step 3: for these 10 universities, weigh their latitude/longitude and ranking factors and compute a score for each.
'''

mymodel2 = model2().double()
scores = []  # scores of the better_schools
for school in better_schools:
    school_student_distance = np.sqrt((school[1] - X_np[0])**2 +
                                      (school[2] - X_np[1])**2)
    school_info = [school_student_distance, school[3]]
    school_info_model = torch.from_numpy(np.array(school_info))
    scores.append(mymodel2(school_info_model))
print('scores:', scores)

###################################################################################
'''
Step 4: pick the five lowest-scoring universities and implement the "reach / match / safety" tiers.
'''

# scores now holds the score of each better_school
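A minimal sketch of Step 4 under the same assumptions (each entry of scores is a 0-d tensor, and heapq is already imported above):

score_values = [float(s) for s in scores]
# the five lowest-scoring schools, best (lowest) first
best_five = heapq.nsmallest(5, zip(score_values, better_schools), key=lambda t: t[0])
for rank, (score, school) in enumerate(best_five, start=1):
    print(rank, school, score)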
Example 13
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


for bs in batch_size:
    # Dataset Loader (Input Pipeline)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=bs,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=bs,
                                              shuffle=False)

    for lr in learning_rate:
#        model = utils.initialize_model(model_num)
        model = models.model2()

        if torch.cuda.is_available():
            print('GPU detected - Enabling Cuda!')
            model = model.cuda()
        else:
            print('No GPU detected!')

        # Calculate number of model parameters
        print('Model: {}, number of parameters = {}'.format(model.name, sum(p.numel() for p in model.parameters())))

        run_name = "{}, lr={}, bs={}".format(model.name, lr, bs)
        file_path = os.path.join(save_dir, run_name + '.pkl')

        criterion = model.loss
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
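Example 13 stops after wiring up the loss and optimizer. A minimal sketch of the training loop that would follow, assuming model.loss takes the usual (outputs, targets) arguments:

        for images, labels in train_loader:
            if torch.cuda.is_available():
                images, labels = images.cuda(), labels.cuda()
            optimizer.zero_grad()              # clear gradients from the previous step
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()                    # backpropagate
            optimizer.step()                   # update parameters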
Example 14
    elif mode == 'val':
        _file = '../data/testing/val.pkl'
        gt_file = 'imgListValidationRegression_.csv'
    else:
        _file = '../data/testing/test.pkl'
        gt_file = 'imgListTestNewRegression_.csv'

    assert exists(_file)
    data = joblib.load(_file)
    groundTruth = pd.read_csv(gt_file, header=0, delimiter=',')
    n = groundTruth.shape[0]
    predAtt = pd.DataFrame(index=groundTruth.index, columns=groundTruth.columns)
    x = data[0]
    y_true = data[1]

    model = model2(weights_path=weights_file)
    y_predict = model.predict(x, batch_size=batch_size, verbose=1)

    attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
             'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor', 'Repetition', 'Symmetry', 'score']
    for i, attr in enumerate(attrs):
        attr_true = y_true[attr]
        attr_predict = y_predict[i]
        rho, p_value = spr(attr_true, attr_predict)  # Spearman rank correlation
        error = mse(attr_true, attr_predict)
        print("for {} the spr correlation: {} with p value {} and error value: {}"
              .format(attr, rho, p_value, error))

        attr_predict = pd.Series(y_predict[i].reshape(n))
        predAtt[attr] = attr_predict.values

    predAtt['ImageFile'] = groundTruth['ImageFile']
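The original snippet ends here; a hypothetical final step would persist the per-image predictions, for example (the file name is an assumption):

    predAtt.to_csv('predicted_attributes.csv', index=False)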