Example #1
def plot_labeled_indexes(self, X_transformed, Y, W, labeledIndexes, title="labeled indexes"):
    # Plot 1: labeled indexes
    if self.args["can_plot"]:
        vertex_opt = plt.vertexplotOpt(Y=Y, mode="discrete", size=7, labeledIndexes=labeledIndexes)
        plt.plotGraph(X_transformed, plot_dim=2, W=W, vertex_opt=vertex_opt, edge_width=1,
                      interactive=False, title=title)
Example #2
def experiment_LDST(self, X_transformed, Y, W, labeledIndexes, mu=99.0, tuning_iter=2, plot=True, Y_noisy=None):
    if Y_noisy is None:
        Y_noisy = Y

    # Run LDST on the (possibly noisy) labels, then take the argmax class per vertex
    classif_LDST, Y_tuned, l_tuned = self.LDST(W=W, Y=Y_noisy, mu=mu, labeledIndexes=labeledIndexes,
                                               tuning_iter=tuning_iter)
    classif_LDST = np.argmax(classif_LDST, axis=1)

    if self.args["can_plot"] and plot:
        # Plot the tuned labels
        vertex_opt = plt.vertexplotOpt(Y=Y_tuned, mode="discrete", size=7, labeledIndexes=l_tuned)
        plt.plotGraph(X_transformed, plot_dim=2, W=W, vertex_opt=vertex_opt, edge_width=1,
                      interactive=False, title="LDST: Tuned Y")
        # Plot the LDST result
        vertex_opt = plt.vertexplotOpt(Y=classif_LDST, mode="discrete", size=7)
        plt.plotGraph(X_transformed, plot_dim=2, W=W, vertex_opt=vertex_opt, edge_width=1,
                      interactive=False, title="LDST result")

    acc = gutils.accuracy(classif_LDST, Y)
    return acc
Example #3
def experiment_LP(self, X_transformed, Y, W, labeledIndexes, plot=True, Y_noisy=None):
    if Y_noisy is None:
        Y_noisy = Y
    # Propagate the (possibly noisy) labels over the graph, take the argmax class per vertex
    classif_LP = np.argmax(self.LP(W=W, Y=Y_noisy, labeledIndexes=labeledIndexes), axis=1)

    if self.args["can_plot"] and plot:
        vertex_opt = plt.vertexplotOpt(Y=classif_LP, mode="discrete", size=7)
        plt.plotGraph(X_transformed, plot_dim=2, W=W, vertex_opt=vertex_opt, edge_width=1,
                      interactive=False, title="LP result")

    acc = gutils.accuracy(classif_LP, Y)
    return acc
Example #4
def experiment_RF(self, X, X_transformed, Y, W, labeledIndexes, plot=True, Y_noisy=None):
    if Y_noisy is None:
        Y_noisy = Y

    # Supervised RF baseline, fit on the labeled subset only
    # (assumes: from sklearn.ensemble import RandomForestClassifier)
    rf = RandomForestClassifier(n_estimators=100).fit(X[labeledIndexes, :], Y_noisy[labeledIndexes])
    rf_pred = np.array(rf.predict(X))
    if self.args["can_plot"] and plot:
        vertex_opt = plt.vertexplotOpt(Y=rf_pred, mode="discrete", size=7)
        plt.plotGraph(X_transformed, plot_dim=2, W=W, vertex_opt=vertex_opt, edge_width=1,
                      interactive=False, title="RF result")
    acc = gutils.accuracy(rf_pred, Y)
    return acc
Example #5
def question_1():
    # Specify hyper-parameters
    agent = Agent()
    environment = Environment()
    rlglue = RLGlue(environment, agent)

    # First experiment: 50 runs of 200 episodes each
    num_episodes = 200
    num_runs = 50
    max_eps_steps = 100000

    steps = np.zeros([num_runs, num_episodes])

    for r in range(num_runs):
        print("run number : ", r)
        rlglue.rl_init()
        for e in range(num_episodes):
            rlglue.rl_episode(max_eps_steps)
            steps[r, e] = rlglue.num_ep_steps()
    np.save('steps', steps)
    plotGraph()

    # Second experiment: a single run of 1000 episodes
    del agent, environment, rlglue
    agent = Agent()
    environment = Environment()
    rlglue = RLGlue(environment, agent)

    num_episodes = 1000
    num_runs = 1
    max_eps_steps = 100000

    steps = np.zeros([num_runs, num_episodes])

    for r in range(num_runs):
        print("run number : ", r)
        rlglue.rl_init()
        for e in range(num_episodes):
            print("Episode number: " + str(e))
            rlglue.rl_episode(max_eps_steps)
            steps[r, e] = rlglue.num_ep_steps()
    #np.save('steps', steps)
    #plotGraph()
    rlglue.rl_agent_message("plot3DGraph")
Example #6
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument("-a", dest="a", help="Coefficient 'a' of the elliptic curve: y^2 = x^3 + ax + b")
    parser.add_argument("-b", dest="b", help="Coefficient 'b' of the elliptic curve: y^2 = x^3 + ax + b")
    parser.add_argument("-x1", dest="x1", help="x-coordinate of the first point")
    parser.add_argument("-y1", dest="y1", help="y-coordinate of the first point")
    parser.add_argument("-x2", dest="x2", help="x-coordinate of the second point")
    parser.add_argument("-y2", dest="y2", help="y-coordinate of the second point")

    args = parser.parse_args()

    # Convert the string inputs to exact fractions
    a = f.Fraction(args.a)
    b = f.Fraction(args.b)
    x1 = f.Fraction(args.x1)
    y1 = f.Fraction(args.y1)
    x2 = f.Fraction(args.x2)
    y2 = f.Fraction(args.y2)
    
    # Check whether both points lie on the elliptic curve
    checkFlag = p.checkPointsOnCurve(x1, y1, x2, y2, a, b)
    if checkFlag:
        # Compare (x1,y1) and (x2,y2): if the points differ the chord slope is
        # used, otherwise the tangent (differential) slope is used
        p.compareCoordinates(x1, y1, x2, y2, a)

        # Find x3 by substituting m, x1, x2 into the equation
        x3 = p.getNewXValue(x1, y1, x2, y2)

        # Find y3 by substituting m, x3, x1, y1 into the equation
        y3 = p.getNewYValue(x1, y1, x2, y2, x3)

        # Print x3 and y3
        print("Value of x3 is:", x3)
        print("Value of y3 is:", y3)

        # Plot the graph
        p.plotGraph(x1, y1, x2, y2, x3, y3, a, b)
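
        # For reference, the affine addition formulas these helpers presumably implement:
        #   m  = (y2 - y1) / (x2 - x1)        (distinct points: chord slope)
        #   m  = (3*x1**2 + a) / (2*y1)       (equal points: tangent slope)
        #   x3 = m**2 - x1 - x2
        #   y3 = m*(x1 - x3) - y1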
Example #7
def main():
    data=[]
    pts=[]
    # q = myutils.genRandomPoint(lowerBnd, upperBnd)
    q = myutils.Point(500,500,0)
    
    # ****************
    # all points randomly generated
    # ****************
    # jobs = (N+jobSize-1)/jobSize
    # for x in xrange(0,jobs):
    #     ptSet = []
    #     for y in xrange(0,jobSize):
    #         p = myutils.genRandomPoint(lowerBnd, upperBnd)
    #         ptSet.append(p)
    #         pts.append(p)
    #         # myutils.dispPoint(rand)
    #     ptSet.append(q)
    #     data.append(ptSet)


    # ****************
    # 3 almost circular clusters with random points
    # ****************
    jobs = (N+jobSize-1)/jobSize
    for x in xrange(0,jobs):
        ptSet = []
        for y in xrange(0,5*jobSize/12):
            p = myutils.genRandomPtCircle(10000, myutils.Point(10,10,-1), 1)
            ptSet.append(p)
            pts.append(p)
        for y in xrange(0,5*jobSize/12):
            p = myutils.genRandomPtCircle(10000, myutils.Point(-15000,-15000,-1), 2)
            ptSet.append(p)
            pts.append(p)
        for y in xrange(0,jobSize/6):
            p = myutils.genRandomPtCircle(1000, myutils.Point(100,100,-1), 0)
            ptSet.append(p)
            pts.append(p)
        ptSet.append(q)
        data.append(ptSet)
    datasource = dict(enumerate(data))
    print "Done"

    s = mincemeat.Server()
    s.datasource = datasource
    s.mapfn = mapfn
    s.reducefn = reducefn
    results = s.run_server(password="******")

    freq = []
    for x in xrange(0,C):
        freq.append(0)

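    # Majority vote: each returned point carries its class label at index 2;
    # q is assigned the most frequent class among them (kNN-style, presumably
    # over the nearest points selected by the map/reduce step)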
    for pt in results[0]:
        myutils.dispPoint(pt[0])
        freq[pt[0][2]] = freq[pt[0][2]]+1

    predClass = freq.index(max(freq))
    print predClass

    plot.plotGraph(pts,myutils.Point(q[0],q[1],predClass))
Example #8
# For example, node 'x' is placed at grid position (0, 0)
position = {
    'x': [0, 0],
    'y': [2, 0],
    'z1': [1, 1],
    'z2': [0, 1],
    'z3': [0, 2],
    'z4': [2, 2],
    'z5': [2, 1],
    'z6': [1, 0]
}
# Build the list of nodes and define the graph
nodeList = [X, Y, Z1, Z2, Z3, Z4, Z5, Z6]
G = graph('G', nodeList, DAG=nodeout, pos=position)
# Plot the graph
plot.plotGraph(G)
# Plot all possible paths between the two nodes
plot.allPathPlot(G, X, Y)
# Determine whether there is a backdoor path between the two variables
BackDoor = bk.backdoor(G, X, [Z1, Z2], Y, Plot=True)
# Determine whether there is a frontdoor path between the two variables
FrontDoor = front_door.frontdoor(G, X, [Z6], Y, PlotAll=True)
Example #9
def go(_):

    ds_dict = {}
    nxt_dict = {}
    it_dict = {}
    bs_dict = {
        "labeled": FLAGS.batch_size,
        "unlabeled": FLAGS.ul_batch_size,
        "test": FLAGS.ul_batch_size
    }
    size_dict = {"labeled": FLAGS.num_labeled}
    writer_dict = {}
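    # ds_dict / it_dict / nxt_dict will hold the tf.data datasets, their
    # iterators, and the matching get_next() ops, keyed by split name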

    example_shape = None

    g1 = tf.Graph()

    with g1.as_default():

        with tf.device("/gpu:0"):
            with tf.variable_scope("Vars", reuse=tf.AUTO_REUSE):
                print("Reading dataset...")
                if FLAGS.dataset == "cifar10":
                    cifar_dict = load_whole_dataset(os.path.join(
                        FLAGS.dataset_dir, "train.tfrecords"),
                                                    get_images=False,
                                                    get_zcas=True,
                                                    get_labels=True,
                                                    get_emb=False,
                                                    ds_size=50000)
                    train_X = cifar_dict.pop("zca")
                    train_Y = cifar_dict.pop("label")
                    size_dict["X_shape"] = list(train_X[0, :].shape)
                    size_dict["Y_shape"] = list(train_Y[0, :].shape)

                    cifar_dict = load_whole_dataset(os.path.join(
                        FLAGS.dataset_dir, "test.tfrecords"),
                                                    get_images=False,
                                                    get_zcas=True,
                                                    get_labels=True,
                                                    get_emb=False,
                                                    ds_size=10000)
                    test_X = cifar_dict.pop("zca")
                    test_Y = cifar_dict.pop("label")
                    ds_dict["test"] = tf.data.Dataset.from_tensor_slices({
                        "X":
                        test_X,
                        "Y":
                        test_Y
                    })
                    size_dict["test"] = test_X.shape[0]
                elif FLAGS.dataset == "svhn":
                    svhn_dict = load_whole_dataset(os.path.join(
                        FLAGS.dataset_dir, "train.tfrecords"),
                                                   get_images=True,
                                                   get_zcas=False,
                                                   get_labels=True,
                                                   get_emb=False,
                                                   ds_size=73257)
                    train_X = svhn_dict.pop("image")
                    train_Y = svhn_dict.pop("label")

                    svhn_dict = load_whole_dataset(os.path.join(
                        FLAGS.dataset_dir, "test.tfrecords"),
                                                   get_images=True,
                                                   get_zcas=False,
                                                   get_labels=True,
                                                   get_emb=False,
                                                   ds_size=26032)
                    test_X = svhn_dict.pop("image")
                    test_Y = svhn_dict.pop("label")
                    ds_dict["test"] = tf.data.Dataset.from_tensor_slices({
                        "X":
                        test_X,
                        "Y":
                        test_Y
                    })
                    size_dict["test"] = test_X.shape[0]

                if FLAGS.dataset == "cifar10" or FLAGS.dataset == "svhn":

                    #Form labeled and unlabeled datasets
                    perm = np.random.RandomState(
                        seed=FLAGS.label_seed).permutation(
                            np.arange(train_X.shape[0]))
                    train_X_l = train_X[perm[0:FLAGS.num_labeled], :]
                    train_Y_l = train_Y[perm[0:FLAGS.num_labeled], :]
                    train_X_ul = train_X[perm[FLAGS.num_labeled:], :]
                    train_Y_ul = train_Y[perm[FLAGS.num_labeled:], :]
                    del train_X

                    init_conf_vals = gutils.init_matrix(
                        np.argmax(train_Y, axis=1), perm[0:FLAGS.num_labeled])
                    CACHE_F = tf.get_variable(name="cache_f",
                                              initializer=tf.constant(
                                                  init_conf_vals,
                                                  dtype=tf.float32),
                                              trainable=True)
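                    # CACHE_F caches the current label-confidence matrix F;
                    # gutils.init_matrix presumably seeds one-hot rows at the labeled indexes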

                    size_dict["X_shape"] = list(train_X_l[0, :].shape)
                    size_dict["Y_shape"] = list(init_conf_vals[0, :].shape)
                    print(train_Y.shape)

                    #Create variables for initial labels
                    PRIOR_Y_l = tf.Variable(
                        init_conf_vals[perm[0:FLAGS.num_labeled], :],
                        name="prior_yl")

                    #Update dicts
                    ds_dict["labeled"] = tf.data.Dataset.from_tensor_slices({
                        "X":
                        train_X_l,
                        "Y":
                        train_Y_l,
                        "ID":
                        np.reshape(perm[0:FLAGS.num_labeled], [-1, 1])
                    })
                    ds_dict["unlabeled"] = tf.data.Dataset.from_tensor_slices({
                        "X":
                        train_X_ul,
                        "Y":
                        train_Y_ul,
                        "ID":
                        np.reshape(perm[FLAGS.num_labeled:], [-1, 1])
                    })
                    size_dict["unlabeled"] = train_X_ul.shape[0]
                    print("Reading dataset...Done!")

                    #Load affmat
                    print("Loading Affmat...")
                    K, AFF = load_whole_affmat(
                        FLAGS.affmat_path,
                        ds_size=50000 if FLAGS.dataset == "cifar10" else 73257)
                    K = K[:, 0:FLAGS.affmat_k, :]
                    AFF = AFF[:, 0:FLAGS.affmat_k]
                    K = tf.constant(K, dtype=tf.int64)
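                    #Convert kNN distances to Gaussian (RBF) affinities: exp(-d^2 / (2*sigma^2))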
                    AFF = tf.exp(-np.square(AFF) /
                                 (2 * FLAGS.affmat_sigma * FLAGS.affmat_sigma))
                    AFF = tf.get_variable(name="Affinity_matrix",
                                          initializer=tf.cast(
                                              AFF, dtype=tf.float32))
                    print("Loading Affmat...Done!")
                else:
                    #Extract info for toy_dict
                    toy_dict = toy.getTFDataset(FLAGS.dataset,
                                                FLAGS.num_labeled,
                                                FLAGS.label_seed)
                    df_x = toy_dict.pop(
                        "df_x"
                    )  #Used to create Affinity Mat and infer num_unlabeled.
                    df_y_l = toy_dict.pop("df_y_l")  #Used to create var
                    df_y = toy_dict.pop("df_y")  #Used to create var

                    init_conf_vals = toy_dict.pop(
                        "INIT")  #Used to infer Y shape
                    ds_dict = {
                        "labeled": toy_dict["labeled"],
                        "unlabeled": toy_dict["unlabeled"]
                    }

                    #Create variable for initial labels and F cache
                    PRIOR_Y_l = tf.Variable(df_y_l, name="prior_yl")
                    CACHE_F = tf.get_variable(name="cache_f",
                                              initializer=init_conf_vals,
                                              trainable=True)

                    #Update dicts
                    size_dict["X_shape"] = list(df_x[0, :].shape)
                    size_dict["Y_shape"] = list(init_conf_vals[0, :].shape)
                    size_dict["unlabeled"] = df_x.shape[0] - FLAGS.num_labeled
                    print("Reading dataset...Done!")

                    #Load affmat
                    print("Loading Affmat...")
                    W = AffMatGenerator(dist_func="gaussian", mask_func="knn",
                                        k=FLAGS.affmat_k,
                                        sigma=FLAGS.affmat_sigma).generateAffMat(df_x)
                    #Convert K to [:,0:K,2] array, i.e. 2D array of [i,j] pairs
                    K = np.zeros((W.shape[0], FLAGS.affmat_k, 2),
                                 dtype=np.int64)
                    for i in np.arange(W.shape[0]):
                        K[i, :, 0] = i
                    K[:, :, 1] = np.argsort(-W, axis=1)[:, :FLAGS.affmat_k]
                    #Create corresponding [:,0:K] tensor containing the distances
                    AFF = np.zeros((W.shape[0], FLAGS.affmat_k))
                    for i in np.arange(W.shape[0]):
                        for j in np.arange(FLAGS.affmat_k):
                            AFF[i, j] = W[K[i, j, 0], K[i, j, 1]]
                            assert (AFF[i, j] > 0)

                    AFF = tf.get_variable(name="Affinity_matrix",
                                          initializer=tf.cast(
                                              AFF, dtype=tf.float32))

                    print("Loading Affmat...Done!")

                #Create D variable: per-vertex degree (row sums of the affinity matrix)
                D = tf.get_variable(name="D", dtype=tf.float32,
                                    initializer=tf.math.reduce_sum(
                                        tf.get_variable("Affinity_matrix"), axis=1))

                size_dict["train"] = size_dict["labeled"] + size_dict["unlabeled"]

                #Make the datasets shuffle,repeat,batch
                ds_dict["train_eval"] = ds_dict["labeled"].concatenate(
                    ds_dict["unlabeled"]).batch(FLAGS.ul_batch_size)
                ds_dict["unlabeled_eval"] = ds_dict["unlabeled"].batch(
                    FLAGS.batch_size)
                ds_dict["labeled_eval"] = ds_dict["labeled"].batch(
                    FLAGS.ul_batch_size)
                ds_dict["unlabeled"] = ds_dict["unlabeled"].shuffle(
                    1000).repeat().batch(FLAGS.ul_batch_size)
                ds_dict["labeled"] = ds_dict["labeled"].shuffle(
                    1000).repeat().batch(FLAGS.batch_size)
                if "test" in ds_dict.keys():
                    ds_dict["test_eval"] = ds_dict["test"].batch(
                        FLAGS.batch_size)
                    ds_dict["test"] = ds_dict["test"].shuffle(
                        size_dict["test"]).repeat().batch(FLAGS.ul_batch_size)

                #Create variable for initial unlabeled
                TRAIN_Y_UL = tf.Variable(np.zeros(
                    (size_dict["unlabeled"], size_dict["Y_shape"][0])),
                                         name="y_ul")

                for key, value in ds_dict.items():
                    it_dict[key] = value.make_initializable_iterator()
                    nxt_dict[key] = it_dict[key].get_next()

        with tf.device("/gpu:0"):

            with tf.variable_scope("Vars", reuse=tf.AUTO_REUSE):
                #Define config
                tfconfig = tf.ConfigProto(allow_soft_placement=True)
                tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.7

                with tf.Session(config=tfconfig) as sess:

                    print("Setting placeholders...")
                    #PLACEHOLDERS
                    LGC_ALPHA = tf.placeholder_with_default(
                        tf.cast(0.00001, tf.float32),
                        shape=[],
                        name="lgc_alpha")

                    lr = tf.placeholder_with_default(tf.cast(
                        FLAGS.learning_rate, tf.float32),
                                                     shape=[],
                                                     name="learning_rate")
                    mom = tf.placeholder_with_default(tf.cast(
                        FLAGS.mom1, tf.float32),
                                                      shape=[],
                                                      name="momentum")
                    X_l = tf.placeholder(dtype=tf.float32,
                                         shape=[None] + size_dict["X_shape"],
                                         name="placeholder/X_l")
                    Y_l = tf.placeholder(dtype=tf.float32,
                                         shape=[None] + size_dict["Y_shape"],
                                         name="placeholder/Y_l")
                    X_u = tf.placeholder(dtype=tf.float32,
                                         shape=[None] + size_dict["X_shape"],
                                         name="placeholder/X_ul")
                    Y_u = tf.placeholder(dtype=tf.float32,
                                         shape=[None] + size_dict["Y_shape"],
                                         name="placeholder/Y_ul")
                    ID_l = tf.placeholder(dtype=tf.int64,
                                          shape=[None],
                                          name="placeholder/ID_l")
                    ID_u = tf.placeholder(dtype=tf.int64,
                                          shape=[None],
                                          name="placeholder/ID_u")

                    print("Setting placeholders...Done!")

                    print("Setting writers...")
                    #Create FileWriter
                    if not FLAGS.log_dir:
                        writer_dict["train"] = None
                        writer_dict["labeled"] = None
                        writer_dict["unlabeled"] = None
                        writer_dict["test"] = None
                    else:
                        writer_dict["train"] = tf.summary.FileWriter(
                            FLAGS.log_dir + "/train")
                        writer_dict["labeled"] = tf.summary.FileWriter(
                            FLAGS.log_dir + "/labeled")
                        writer_dict["unlabeled"] = tf.summary.FileWriter(
                            FLAGS.log_dir + "/unlabeled")
                        writer_dict["test"] = tf.summary.FileWriter(
                            FLAGS.log_dir + "/test")
                    print("Setting writers...Done!")

                    print("Setting training graph...")
                    #Build training_graph
                    loss, train_op, cache_op, _, extra = build_training_graph(
                        is_training=True,
                        X_l=X_l,
                        Y_l=Y_l,
                        X_u=X_u,
                        ID_l=ID_l,
                        ID_u=ID_u,
                        K=K,
                        lr=lr,
                        mom=mom,
                        lgc_alpha=LGC_ALPHA)

                    print("Setting training graph...Done!")

                    print("Setting test graph...")
                    # Build eval graph
                    eval_loss, _, _, eval_logit, _ = build_training_graph(
                        is_training=False,
                        X_l=X_l,
                        Y_l=Y_l,
                        X_u=X_u,
                        ID_l=ID_l,
                        ID_u=ID_u,
                        K=K,
                        lr=lr,
                        mom=mom,
                        lgc_alpha=LGC_ALPHA)

                    print("Setting test graph... Done!")
                    print((tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)))
                    print((tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS)))

                    #####################################################################################
                    mon_sess = sess

                    for var in tf.global_variables():
                        print("Initializing Variable {}: shape {}".format(
                            var.name, var.shape), flush=True)
                        mon_sess.run(var.initializer)
                        print("Initializing Variable {}: shape {}...Done!".format(
                            var.name, var.shape))
                    '''
                    saver = tf.train.Saver()
                    save_path = saver.save(sess, FLAGS.train_dir)
                    print("Model saved in path: %s" % save_path)
                    '''

                    for ep in range(FLAGS.num_epochs):
                        print("EPOCH:{}".format(ep))

                        #Adjust decay if necessary
                        if ep < FLAGS.epoch_decay_start:
                            feed_dict = {
                                lr: FLAGS.learning_rate,
                                mom: FLAGS.mom1,
                                LGC_ALPHA: FLAGS.lgc_alpha
                            }
                            print("MOMENTUM:{}, lgc_alpha:{}".format(
                                FLAGS.mom1, FLAGS.lgc_alpha))
                        else:
                            #Decay the learning rate linearly after epoch_decay_start
                            decayed_lr = ((FLAGS.num_epochs - ep) /
                                          float(FLAGS.num_epochs -
                                                FLAGS.epoch_decay_start)
                                          ) * FLAGS.learning_rate
                            feed_dict = {
                                lr: decayed_lr,
                                mom: FLAGS.mom1,
                                LGC_ALPHA: FLAGS.lgc_alpha
                            }

                        #Initialize loss,time and iterator
                        start = time.time()
                        mon_sess.run([
                            it_dict["labeled"].initializer,
                            it_dict["unlabeled"].initializer
                        ])
                        losses_dict = {}
                        for k in loss.keys():
                            losses_dict[k] = 0.0
                        #Run training examples
                        for i in range(FLAGS.num_iter_per_epoch):
                            nxt_l, nxt_u = mon_sess.run(
                                [nxt_dict["labeled"], nxt_dict["unlabeled"]])
                            feed_dict[X_l] = nxt_l["X"]
                            feed_dict[Y_l] = nxt_l["Y"]

                            feed_dict[ID_l] = np.reshape(nxt_l["ID"], [-1])

                            feed_dict[X_u] = nxt_u["X"]
                            #feed_dict[Y_u] = nxt_u["Y"]
                            feed_dict[ID_u] = np.reshape(nxt_u["ID"], [-1])
                            if ep < FLAGS.epoch_decay_start:
                                _, batch_loss = mon_sess.run(
                                    [train_op, loss], feed_dict=feed_dict)

                            else:
                                _, _, batch_loss = mon_sess.run(
                                    [train_op, cache_op, loss],
                                    feed_dict=feed_dict)
                            for k, v in batch_loss.items():
                                losses_dict[k] += v

                        #Print elapsed time, get global step
                        end = time.time()
                        current_global_step = tf.train.get_global_step().eval(
                            mon_sess)

                        #Get mean of losses
                        for k in losses_dict:
                            losses_dict[k] /= FLAGS.num_iter_per_epoch

                        #Add Summary (epoch means)
                        summary = tf.Summary()
                        for k, v in losses_dict.items():
                            summary.value.add(tag=k, simple_value=v)
                        if writer_dict["train"] is not None:
                            writer_dict["train"].add_summary(
                                summary, current_global_step)

                        print("Epoch:", ep, "; LGC loss",
                              losses_dict["lgc_loss"], "; LGC_sup_loss",
                              losses_dict["lgc_supervised_loss"],
                              "; LGC_unsup_loss",
                              losses_dict["lgc_unsupervised_loss"],
                              "; sup_acc", losses_dict["mean_acc"],
                              "; elapsed_time:", end - start)
                        ''' EVAL Procedure '''
                        if (
                                ep + 1
                        ) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs:

                            def run_eval(KEY, is_writing=True):
                                sum_loss = 0
                                start = time.time()
                                mon_sess.run(it_dict[KEY +
                                                     "_eval"].initializer)
                                pred_Y = None
                                actual_Y = None
                                IDs = None
                                #Run eval examples
                                while True:
                                    try:
                                        nxt = mon_sess.run(nxt_dict[KEY +
                                                                    "_eval"])
                                        feed_dict[X_l] = nxt["X"]
                                        feed_dict[Y_l] = nxt["Y"]
                                        feed_dict[ID_l] = np.reshape(
                                            nxt["ID"], [-1])
                                        mean_acc, logit_l = mon_sess.run(
                                            [
                                                eval_loss["mean_acc"],
                                                eval_logit
                                            ],
                                            feed_dict=feed_dict)
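                                        # Weight batch accuracy by batch size so the
                                        # final division gives dataset-level accuracy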
                                        sum_loss += mean_acc * nxt["X"].shape[0]
                                        if pred_Y is None:
                                            pred_Y = logit_l
                                            actual_Y = nxt["Y"]
                                            IDs = nxt["ID"]
                                        else:
                                            pred_Y = np.concatenate(
                                                [pred_Y, logit_l])
                                            actual_Y = np.concatenate(
                                                [actual_Y, nxt["Y"]])
                                            IDs = np.concatenate(
                                                [IDs, nxt["ID"]])

                                    except tf.errors.OutOfRangeError:
                                        break
                                #Print elapsed time, get global step
                                end = time.time()
                                current_global_step = tf.train.get_global_step(
                                ).eval(mon_sess)

                                #Add Summary
                                summary = tf.Summary()
                                summary.value.add(tag="acc",
                                                  simple_value=sum_loss /
                                                  size_dict[KEY])
                                if is_writing and writer_dict[KEY] is not None:
                                    writer_dict[KEY].add_summary(
                                        summary, current_global_step)
                                print("Eval {}: {} accuracy ".format(
                                    KEY, sum_loss / size_dict[KEY]))

                                #Sort pred w.r.t ids
                                IDs = np.reshape(IDs, [-1]).tolist()
                                return (pred_Y, actual_Y, IDs)

                            eval("labeled")
                            eval("unlabeled")
                            #if "test" in ds_dict.keys():
                            #    eval("test")

                            if not FLAGS.dataset in ["svhn", "cifar10"]:
                                pred_Y, actual_Y, pred_ids = eval("train")

                                pred_Y[pred_ids, :] = pred_Y
                                actual_Y[pred_ids, :] = actual_Y

                                pred_Y = np.argmax(mon_sess.run(CACHE_F),
                                                   axis=1)
                                actual_Y = np.argmax(actual_Y, axis=1)

                                labeledIndexes = np.zeros([pred_Y.shape[0]],
                                                          dtype=bool)
                                labeledIndexes[
                                    pred_ids[0:FLAGS.num_labeled]] = True

                                if (ep + 1) == FLAGS.eval_freq:
                                    vertex_opt = sslplot.vertexplotOpt(
                                        pred_Y, size=5,
                                        labeledIndexes=labeledIndexes)
                                    sslplot.plotGraph(
                                        df_x,
                                        W,
                                        vertex_opt,
                                        online=False,
                                        interactive=False,
                                        title="NN pred - labeled",
                                        plot_filename="0.png")
                                vertex_opt = sslplot.vertexplotOpt(
                                    pred_Y, size=5,
                                    labeledIndexes=np.logical_not(labeledIndexes))
                                sslplot.plotGraph(
                                    df_x,
                                    W,
                                    vertex_opt,
                                    online=False,
                                    interactive=False,
                                    title="NN pred - unlabeled",
                                    plot_filename=str(current_global_step) +
                                    ".png")
Example #10
def plot_true_classif(self, X_transformed, Y, W):
    # Plot 2: true classification
    if self.args["can_plot"]:
        vertex_opt = plt.vertexplotOpt(Y=Y, mode="discrete", size=7)
        plt.plotGraph(X_transformed, plot_dim=2, W=W, vertex_opt=vertex_opt, edge_width=1,
                      interactive=False, title="True classes")
Example #11
def result():
    error = None
    if request.method == "POST":
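        # Pull the logged-in customer's record out of the Fidor session payload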
        customersAccount = session['fidor_customer']
        customerDetails = customersAccount['data'][0]
        customerInformation = customerDetails['customers'][0]

        tickerCode = request.form['stockSymbol']

        # Refer to plot.py for the graph-plotting function, which uses two APIs:
        # Plotly draws the candlestick chart from Alpha Vantage data.
        # The except clause ignores errors that occur after every plotted graph.
        try:
            plotGraph(tickerCode)
        except Exception:
            pass

        url = "https://financialmodelingprep.com/api/company/profile/"+ tickerCode +""

        payload = ""
        headers = {
            'cache-control': "no-cache",
            'Postman-Token': "e0003f95-e8c6-4cea-8142-99336827454d"
        }

        response = requests.request("GET", url, data=payload, headers=headers)

        # The API returns JSON wrapped in <pre> tags at the front and back;
        # strip them before parsing the body as JSON
        stockData = json.loads(response.text[5:-5])

        # To get all relevant stock data
        name = stockData[tickerCode]["companyName"]
        latestStockPrices = stockData[tickerCode]["Price"]
        beta = stockData[tickerCode]["Beta"]
        avgVolume = stockData[tickerCode]["VolAvg"]
        changePerc = stockData[tickerCode]["ChangesPerc"]
        exchange = stockData[tickerCode]["exchange"]
        industry = stockData[tickerCode]["industry"]
        sector = stockData[tickerCode]["sector"]
        website = stockData[tickerCode]["website"]
        ceo = stockData[tickerCode]["CEO"]
        desc = stockData[tickerCode]["description"]

        # Used to change colour of card in HTML based on positive/negative changes
        if "+" in changePerc:
            positive = True
        else:
            positive = False

        # Used to get News from NEWSAPI 
        news_url = "https://newsapi.org/v2/everything"

        today = datetime.now().strftime('%Y-%m-%d')
        yesterday = (datetime.now()- timedelta(days=1)).strftime('%Y-%m-%d')

        news_querystring = {"q": tickerCode, "from": yesterday, "to": today, "sortBy": "popularity", "apiKey": "******"}

        news_payload = ""
        news_headers = {
            'cache-control': "no-cache",
            'Postman-Token': "07bfdeb7-256c-4b75-9672-9b5a776b42d4"
        }

        news_response = requests.request("GET", news_url, data=news_payload, headers=news_headers, params=news_querystring)
        newsJSON = json.loads(news_response.text)
        news = newsJSON['articles']

        source = list()
        title = list()
        description = list()
        date = list()
        url = list()

        # Append each article's fields to the corresponding lists
        try:
            for x in news:
                source.append(x['source']['name']) 
                title.append(x['title'])
                description.append(x['description'])
                date.append(x['publishedAt'][0:10])
                url.append(x['url'])
        except Exception:
            pass

        return render_template('equity.html', tCode=tickerCode, sName=name, sPrice=latestStockPrices,
            sBeta=beta, sVolume=avgVolume, sChangeP=changePerc, sPositive=positive,
            sExchange=exchange, sIndustry=industry, sSector=sector, sWebsite=website, sCEO=ceo,
            sDesc=desc, fFirstName=customerInformation["first_name"], fLastName=customerInformation["last_name"],
            eBalance=(customerDetails['balance'] / 100), eNewsSource=source, eNewsTitle=title, eNewsDesc=description,
            eNewsDate=date, eNewsURL=url)