Example #1
0
def run_method_2():
    """Replicate the distributed optimization method outlined in the Google Doc.

    Runs T outer rounds; each round fans out to K fresh machines that perform
    the modified DSVRG update from the current/previous iterates and the last
    averaged gradient, then averages the machines' returned iterates and
    gradient estimates element-wise.

    Returns:
        (ws, gs): the iterate history (seeded with two zero vectors) and the
        averaged-gradient history (seeded with one zero vector).
    """
    source = data_source.DataSource()
    T = 100  # number of outer rounds
    K = 10   # machines spawned per round

    # Assumes a 10-dimensional parameter vector -- TODO confirm against
    # data_source.DataSource's feature dimension.
    zero_vec = np.zeros(10)
    ws = [zero_vec, zero_vec]  # two seeds: first round needs w_cur and w_prev
    gs = [zero_vec]
    for _ in range(T):
        w_cur = ws[-1]
        w_prev = ws[-2]
        g_prev = gs[-1]

        # Generate K random machines to perform the modified DSVRG updates.
        inner_results = [
            machine.Machine(source, data_count=100).execute_modified_DSVRG(
                w_cur, w_prev, g_prev) for _ in range(K)
        ]
        # BUG FIX: without axis=0, np.mean over a list of vectors averages
        # ALL entries into one scalar; axis=0 keeps the element-wise average
        # so the iterate and gradient stay vectors.
        w_next = np.mean([w for (w, _) in inner_results], axis=0)
        g_cur = np.mean([g for (_, g) in inner_results], axis=0)

        ws.append(w_next)
        gs.append(g_cur)
    # Return the trajectories instead of a bare `pass`; original returned
    # None, so this is backward-compatible for callers ignoring the result.
    return ws, gs
Example #2
0
	def loadData(self):
		"""Load records from the data source and print each via a callback.

		NOTE(review): Python 2 code (print statement).
		"""
		r = data.DataSource()
		# Callback invoked per loaded record; its parameter shadows the outer
		# `r` (the DataSource) -- presumably intentional, verify against caller.
		def update(r):
			print r
		# Request only the id/date/videoSid columns, filtered to one video id.
		r.loadData(update, const.COLUMN_ID, const.COLUMN_DATE, const.COLUMN_VIDEOSID, videoSid = 'tvn8opw0c3pq')
Example #3
0
File: play.py | Project: Ngoguey42/NetPagle
                               lw=2) for cen in centroids
    ])
    tags = ['x', 'y'] + [title for (_, title) in hm.images]
    show_many_images(
        imgs,
        tags,
        patchess,
    )


# TensorFlow 1.x session setup: allocate GPU memory on demand instead of
# grabbing the entire device up front.
c = tf.ConfigProto()
c.gpu_options.allow_growth = True
sess = tf.Session(config=c)
# Route all Keras backend ops through this session.
keras.backend.tensorflow_backend.set_session(sess)

# Data source rooted at PREFIX; model weights path comes from the first
# command-line argument.
ds = data_source.DataSource(PREFIX)
m = model.Model(os.path.join(PREFIX, sys.argv[1]), ds)

# print(m.eval_accuracies())
for i, name in enumerate(TEST_NAMES):
    # exit()
    # exit()
    # for name, x, y, i in zip(ds.names, ds.xtrain, ds.ytrain, range(100000)):
    # n = ds.names
    # np.random.shuffle(n)
    # for i, name in enumerate(n):
    # for i, name in enumerate([
    # 	'17-10-24-23-10-39_blue-thunderbluff-courtyard-scroll0_marilyn',
    # 	'17-10-24-23-28-52_blue-darnassus-auctionhouse-scroll0_gina',
    # 	'17-10-24-23-36-13_blue-darnassus-temple-scroll0_kelly-occlusion',
    # 	'17-10-28-21-24-48_red-stonetalon-sunrock-scroll10_anna',
Example #4
0
        self.model.fit(train_data, label_data)

    def predict(self, test_data, test_label):
        """Evaluate the fitted model on held-out data, printing metrics.

        Prints the predicted class probabilities, a classification report,
        the ROC AUC score, and the model coefficients.

        Args:
            test_data: feature matrix accepted by the underlying model.
            test_label: ground-truth labels aligned with test_data.
        """
        expected = test_label
        predicted = self.model.predict(test_data)
        predicted_prob = self.model.predict_proba(test_data)
        # print-as-function works identically under Python 2 for a single
        # argument, matching the style already used below.
        print("predicted_prob")
        print(predicted_prob)
        print("===========================")
        print(metrics.classification_report(expected, predicted))
        #print(metrics.confusion_matrix(expected, predicted))
        # BUG FIX: ROC AUC must be computed from a continuous score, not the
        # thresholded class predictions; use the positive-class probability
        # column that was already computed above.
        print(metrics.roc_auc_score(expected, predicted_prob[:, 1]))
        print(self.model.coef_[0])


if __name__ == "__main__":
    # Train and evaluate the model on the offline stage-1 training CSV.
    # NOTE(review): Python 2 code (print statements).
    file_name = "ccf_offline_stage1_train.csv"
    data = data_source.DataSource()
    # Presumably returns (feature matrix, label vector) -- verify against
    # data_source.load_normalize_data.
    (features, labels) = data.load_normalize_data(file_name)
    print features
    print labels
    #sys.exit()
    # NOTE(review): sklearn.cross_validation is the legacy pre-0.18 module;
    # modern scikit-learn exposes the same train_test_split signature under
    # sklearn.model_selection. 70/30 train/test split.
    (X_train, X_test, y_train,
     y_test) = cross_validation.train_test_split(features,
                                                 labels,
                                                 test_size=0.3)

    model = Model()
    model.train(X_train, y_train)
    model.predict(X_test, y_test)