Example #1
    def __init__(self, window: 'window', video_source=0, replay=False):
        self.window = window
        self.window.title(settings.get_config("window_title"))
        self.replay = replay

        if self.replay:
            # Load previous data instead of from camera.
            self.data_gen = loadDataset.dataset_generator(
                './dataset/imgs/scissor_frames',
                './dataset/csvs/scissor.csv',
                repeat=True)
            self.inputDimension = [480, 640, 3]
        else:
            # open video source (by default this will try to open the computer webcam)
            self.video_source = video_source
            self.vid = MyVideoCapture(self.video_source)
            self.inputDimension = [self.vid.height, self.vid.width, 3]

        image_size = settings.get_config("image_input_size")
        self.maxListSize = settings.get_config("image_history_length")
        self.imgList = [np.zeros(image_size)] * self.maxListSize

        # Create a canvas that can fit the above video source size
        self.canvas = Canvas(window,
                             height=self.inputDimension[0],
                             width=self.inputDimension[1])
        self.canvas.pack()
        self.photo = None
        self.strVar = StringVar(value="None")
        self.lblClassification = Label(window,
                                       textvariable=self.strVar,
                                       font=("Helvetica", 16))
        self.lblClassification.pack(anchor=CENTER, expand=True)

        # Set up frame counters and limiters. No more than (fps) frames per second.
        # Also set up label to display current frame rate.
        self.fps = settings.get_config("max_fps")
        self.fps_counter = window_utils.SimpleFPSCounter()
        self.fps_limiter = window_utils.SimpleFPSLimiter(fps=self.fps)
        self.fps_value = StringVar()
        self.fps_label = Label(window,
                               textvariable=self.fps_value,
                               font=("Helvetica", 16))
        self.fps_label.pack(anchor=CENTER, expand=True)

        # Initialize Tensorflow Models
        tf.reset_default_graph()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.session = tf.Session(config=config)
        self.model1 = models.model1(image_size, self.maxListSize)
        self.model2 = models.model2(image_size)
        saver = tf.train.Saver()
        saver.restore(
            self.session,
            os.path.join(os.getcwd(), "savedmodels", "both", "models.ckpt"))

        # _main_loop() will "recursively" call itself at most (fps) times per second.
        self._main_loop()
        self.window.mainloop()
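A minimal usage sketch for this constructor, assuming the enclosing class is named App (the class name is an assumption; everything else follows the snippet above). Since __init__ starts the Tk main loop itself, instantiation blocks until the window is closed:

from tkinter import Tk

if __name__ == "__main__":
    root = Tk()
    # Live webcam mode; pass replay=True to step through the recorded
    # scissor dataset instead of capturing frames.
    App(root, video_source=0, replay=False)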
Example #2
def menu():
    print("************IBM Stock Price Predictor**************")
    print()

    choice = input("""
                1: Model 1 - Random Forest Regressor
                2: Model 2 - Long Short-Term Memory Neural Network
                Please enter your choice: """)

    if choice == "1":
        model1(X, Y)
    elif choice == "2":
        model2(X, Y)
    else:
        print("You must only select either 1 or 2 \nPlease try again")
        sys.exit(1)
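Because the menu tells the user to try again but then exits on any invalid entry, a loop-based variant is a natural alternative; a sketch, reusing model1, model2, X, and Y from the example above:

def menu():
    # Re-prompt until a valid choice is entered instead of exiting.
    print("************IBM Stock Price Predictor**************\n")
    while True:
        choice = input("""
                1: Model 1 - Random Forest Regressor
                2: Model 2 - Long Short-Term Memory Neural Network
                Please enter your choice: """).strip()
        if choice == "1":
            return model1(X, Y)
        elif choice == "2":
            return model2(X, Y)
        print("You must select either 1 or 2. Please try again.")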
Example #3
def initialize_model(model_num):
    if model_num == 1:
        return models.model1(input_size, num_classes)
    elif model_num == 2:
        return models.model2(input_size, num_classes)
    elif model_num == 3:
        return models.model3(input_size, num_classes)
    elif model_num == 4:
        return models.model4(input_size, num_classes)
    elif model_num == 5:
        return models.model5(input_size, num_classes)
    elif model_num == 6:
        return models.model6(input_size, num_classes)
    elif model_num == 7:
        return models.model7(input_size, num_classes)
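Examples #3 through #5 repeat the same if/elif ladder with different constructor arguments; a dictionary-based dispatch is a common refactor. A sketch against the signature used here, which also raises on an unknown model number instead of silently returning None (the two variants below can be rewritten the same way):

def initialize_model(model_num):
    # Map model numbers to constructors instead of chaining if/elif.
    constructors = {
        1: models.model1, 2: models.model2, 3: models.model3,
        4: models.model4, 5: models.model5, 6: models.model6,
        7: models.model7,
    }
    try:
        return constructors[model_num](input_size, num_classes)
    except KeyError:
        raise ValueError("Unknown model number: %d" % model_num)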
Example #4
def initialize_model(model_num, vocab_size, embed_size):
    if model_num == 1:
        return models.model1(vocab_size, embed_size)
    elif model_num == 2:
        return models.model2(vocab_size, embed_size)
    elif model_num == 3:
        return models.model3(vocab_size, embed_size)
    elif model_num == 4:
        return models.model4(vocab_size, embed_size)
    elif model_num == 5:
        return models.model5(vocab_size, embed_size)
    elif model_num == 6:
        return models.model6(vocab_size, embed_size)
    elif model_num == 7:
        return models.model7(vocab_size, embed_size)
Example #5
def initialize_model(model_num):
    if model_num == 1:
        return models.model1()
    elif model_num == 2:
        return models.model2()
    elif model_num == 3:
        return models.model3()
    elif model_num == 4:
        return models.model4()
    elif model_num == 5:
        return models.model5()
    elif model_num == 6:
        return models.model6()
    elif model_num == 7:
        return models.model7()
Example #6
a = np.reshape(np.array(([0, 3.584, 1.99])), (1, n)) # intercepts
b = np.zeros((1,m1))
z = np.zeros((n,m1))
d = np.array(([0], [-0.75], [-0.5])) # alternative specific variable coefficients

trainingSets = 100
errors = np.empty([trainingSets, 2]) 
for i in range(trainingSets):
    # sample each price independently and uniformly at random from the interval [1, 10]
    pblue = random.uniform(1, 10)
    pred = random.uniform(1, 10)
    w = np.array(([0], [pblue], [pred]))
    # ground truth model
    results, prob, utilities = simulate(n, z, w, b, d, a, sample)
    # fit model 1
    model1Estimate, beta = model1(results, n, pblue, pred)
    # fit model 2
    model2Estimate = model2(results, n, pblue, pred)
    #print(model1Estimate)
    #print(model2Estimate)
    # compute sales
    trueShare = math.exp(utilities[0,1])/(1 + math.exp(utilities[0,1]))
    model1Share = math.exp(float(model1Estimate[1]))/(1 + math.exp(float(model1Estimate[1])))
    model2Share = math.exp(float(model2Estimate[1]))/(1 + math.exp(float(model2Estimate[1])))
    # compute errors
    errors.itemset((i, 0), math.fabs(trueShare-model1Share)/trueShare)
    errors.itemset((i, 1), math.fabs(trueShare-model2Share)/trueShare)

# plot histogram
plotHistogram(errors[:, 0], errors[:, 1],
              label1=r'Model 1: $V_j = \beta p_j$',
              label2=r'Model 2: $V_j = \delta_j p_j$',
              title='Histogram of relative errors',
              xlabel='Value', ylabel='Frequency')
plotHistogramII([errors[:, 0], errors[:, 1]],
                label=[r'Model 1: $V_j = \beta p_j$',
                       r'Model 2: $V_j = \delta_j p_j$'],
                title='Histogram of relative errors',
                xlabel='Value', ylabel='Frequency', bins=20)
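The plotting helpers are project-specific and not shown; a minimal matplotlib sketch of what plotHistogramII might look like, assuming it overlays one histogram per error series (the signature is taken from the call above, the body is an assumption):

import matplotlib.pyplot as plt

def plotHistogramII(data, label, title, xlabel, ylabel, bins=20):
    # Overlay one histogram per error series, sharing the same bin edges.
    plt.figure()
    plt.hist(data, bins=bins, label=label)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()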
Example #7
print(Xvalid[-1])
print(len(Xtest))
print(Xtest[-1])

print(ytrain)
print(len(ytrain))
print(yvalid)
print(len(yvalid))
#print(ytest)
print(len(ytest))

# define vocabulary size (largest integer value)
vocab_size = len(tokenizer.word_index) + 1

# define model
model = models.model1(vocab_size, max_length)
epochs = 20

log_dir = "logs/"
model_name = "model_1"
fit_dir = (log_dir + "fit/" + model_name + "_" +
           datetime.datetime.now().strftime("%Y%m%d-%H%M"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=fit_dir,
                                                      histogram_freq=1)

# compile network
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# fit network, logging to TensorBoard via the callback defined above
model.fit(Xtrain, ytrain, epochs=epochs, validation_data=(Xvalid, yvalid),
          verbose=2, callbacks=[tensorboard_callback])
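With the callback attached, the training curves can be inspected afterwards by pointing TensorBoard at the log directory: tensorboard --logdir logs/fit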
Example #8
schools = rows[1:]
for college in schools:
    college[1] = float(college[1])
    college[2] = float(college[2])
    college[3] = float(college[3])

# X is the current student's data (longitude, latitude, rank); demo values
X_np = np.array([0.4, 0.2, 0.1])  # np.ndarray format
X = torch.from_numpy(X_np)  # torch.Tensor format

###################################################################################
'''
Step 1: output the predicted college values
'''

mymodel1 = model1().double()
predict_school = mymodel1(X)
print('predict_school:', predict_school)

###################################################################################
'''
Step 2: from all the colleges, pick the 10 closest to the one predicted in Step 1
'''

# Use the prediction from Step 1 to narrow down the candidate colleges below

distances = []  # stores the distance between each candidate college and the predicted one
for school in schools:
    school = torch.from_numpy(np.array(school[1:4]))
    distances.append(torch.dist(school, predict_school))
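The example ends after filling the distances list; a sketch of the top-10 selection that Step 2 calls for, using torch.topk (variable names follow the snippet; the assumption that column 0 of schools holds the college name is mine):

# Pick the 10 candidate colleges closest to the predicted one.
dist_tensor = torch.stack(distances)
top10 = torch.topk(dist_tensor, k=10, largest=False)
for rank, idx in enumerate(top10.indices.tolist(), start=1):
    # schools[idx][0] is assumed to be the college-name column.
    print(rank, schools[idx][0], float(dist_tensor[idx]))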
Example #9
def LPV_results(subjectID, pla, fileName, sizeReduction):
    """
    Trains LPV model on K0,K1,K3 and predicts K2 for a given model
    INPUT:
      subjectID -- int
      sizeReduction -- int
    """
    pd.options.mode.chained_assignment = None
    dw = dataloader.load_P00X(subjectID)

    bi = 50  # number of bootstrap iterations for the RRV statistic

    rrv_all = []
    rrv_split = []

    # Build the baseline (trial 0), training (trials 0-2, trimmed by sizeReduction), and validation (trial 3) sets
    k_base = copy.deepcopy(
        dw.kinematics.query('TrialID == 0')).reset_index(drop=True)
    k_tr = copy.deepcopy(
        dw.kinematics.query('TrialID == [0,1,2]')).reset_index(drop=True)
    k_tr = pd.concat([
        k_tr[k_tr['TrialID'] == 0][:-sizeReduction],
        k_tr[k_tr['TrialID'] == 1][:-sizeReduction],
        k_tr[k_tr['TrialID'] == 2][:-sizeReduction]
    ],
                     ignore_index=True)
    k_val = copy.deepcopy(
        dw.kinematics.query('TrialID == [3]')).reset_index(drop=True)

    #phaser phase estimate
    k_base = find_phase(k_base)
    k_tr = find_phase(k_tr)
    k_val = find_phase(k_val)
    k_base = k_base.drop(columns=['pelvis_tx', 'pelvis_ty', 'pelvis_tz'])
    k_tr = k_tr.drop(columns=['pelvis_tx', 'pelvis_ty', 'pelvis_tz'])
    k_val = k_val.drop(columns=['pelvis_tx', 'pelvis_ty', 'pelvis_tz'])
    #model input
    Z_base, phi_base, t_base, l_base = models.amp_input_final(k_base)
    Z_tr, phi_tr, t_tr, l_tr = models.amp_input_final(k_tr)
    Z_val, phi_val, t_val, l_val = models.amp_input_final(k_val)
    #Removing the AFO columns in the training set
    I_afo = [
        l_tr.index('AFO_L'),
        l_tr.index('AFO_R'),
        l_tr.index('d_AFO_L'),
        l_tr.index('d_AFO_R')
    ]
    I = np.array(range(np.shape(Z_tr)[1]))
    Z_base = Z_base[:, np.delete(I, I_afo)]
    ###
    # Z_tr = Z_tr[:,np.delete(I,I_afo)]
    # Z_val = Z_val[:,np.delete(I,I_afo)]
    # l_tr = [l_tr[i] for i in np.delete(I,I_afo)]
    # l_val = [l_val[i] for i in np.delete(I,I_afo)]
    ###
    #sample the torques
    Z_tr, phi_tr, l_tr = models.sample_torque(Z_tr, phi_tr, l_tr, pla)
    Z_val, phi_val, l_val = models.sample_torque(Z_val, phi_val, l_val, pla)
    # Fit the Fourier-series (fs) baseline model
    f_base = models.fs_model(Z_base, Z_val, phi_base, phi_val, function=True)

    #Remove the phase mean
    Z_tr[:, np.delete(I, I_afo)] -= np.array([f_base(phi) for phi in phi_tr])
    Z_val[:, np.delete(I, I_afo)] -= np.array([f_base(phi) for phi in phi_val])
    # Z_tr -= np.array([f_base(phi) for phi in phi_tr])
    # Z_val -= np.array([f_base(phi) for phi in phi_val])
    #studentize data
    mean = np.mean(Z_tr, axis=0)
    std = np.std(Z_tr, axis=0)
    Z_tr = (Z_tr - mean) / std
    Z_val = (Z_val - mean) / std

    Y_t, X_t, I_t = util.set_mapper(phi_tr, Z_tr, pla)
    Y_v, X_v, I_v = util.set_mapper(phi_val, Z_val, pla)

    print('Finding Coefficients')
    C = models.model1(Y_t, X_t, phi_tr[I_t])
    print('Creating Fourier Series')
    fs = models.model1_function(C)
    m, n = np.shape(X_v)
    Yh = np.zeros((m, n))
    print('Predicting')
    A = fs(phi_val[I_v])
    for i in range(m):
        Yh[i, :] = np.dot(util.affenize(X_v[i, :][np.newaxis, :]), A[:, :, i])

    #Save RRV statistic
    file_name = 'P' + '%03d' % (subjectID) + '_LPV_' + fileName + '_' + str(
        28800 - sizeReduction) + '.pckl'
    print(
        'Average error predicting from initial condition with the average model: '
        + str(np.mean(np.abs(Yh - Y_v))))
    RRV = util.bootstrap_rrv(Yh, Y_v, bi)
    Nstates = int(len(l_val) / 2)
    results = pd.DataFrame({"RRV": np.mean(RRV[:, :Nstates], axis=0)},
                           index=l_val[:Nstates])
    print(results)
    results.to_pickle(os.path.join('results', file_name))
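A hypothetical invocation, for illustration only; pla is project-specific and assumed to be defined elsewhere. Note that sizeReduction must be positive, because the [:-sizeReduction] slices above would otherwise yield empty training trials:

# Hypothetical usage; "pla" is assumed to come from the surrounding project.
LPV_results(subjectID=1, pla=pla, fileName='baseline', sizeReduction=100)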