Example #1
def Training_click(orgPath, batch_size, epochs, feature=None, valPath=None):
    orgLRN_Path = orgPath[0]
    orgTrg_Path = orgPath[1]

    #---------------
    # Type conversion (the GUI may pass these fields as strings)
    #---------------
    if not isinstance(batch_size, int):
        batch_size = int(batch_size)
    if not isinstance(epochs, int):
        epochs = int(epochs)

    # Load the training data
    x = dataLoad(orgLRN_Path, float)
    t = dataLoad(orgTrg_Path, float)

    # Validation data
    if valPath is not None:
        valRLN_Path = valPath[0]
        valTrg_Path = valPath[1]

        x_val = dataLoad(valRLN_Path, float)
        t_val = dataLoad(valTrg_Path, float)

        validation = (x_val, t_val)
    else:
        validation = None

    nn(x, t, batch_size, epochs, feature, validation)
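A minimal usage sketch; the file paths and hyperparameter values here are hypothetical, and dataLoad and nn are the module's own helpers:

# Hypothetical invocation; string-valued batch_size/epochs are coerced inside.
Training_click(
    orgPath=("data/train_input.csv", "data/train_target.csv"),
    batch_size="32",
    epochs="100",
    valPath=("data/val_input.csv", "data/val_target.csv"),
)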
Example #2
    def __init__(self):

        # load network configuration
        self.conf_ = conf()
        self.conf_.start()

        # get train data
        pre_proc = pre_processor()
        train_x, train_y, val_x, val_y = pre_proc.get_data()

        self.nn = nn(self.conf_)

        # Build Computation Graph
        self.nn.define_tensors()
        self.nn.forward_prop()
        self.nn.compute_loss()
        self.nn.compute_acc()
        self.nn.back_prop()

        # initialize tensors
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())

        # start training
        self.start(train_x, train_y, val_x, val_y)
Example #3
def run_ml(k=1):
    # relies on module-level `data`, `labels`, and a `res` result accumulator
    total = len(data)
    num_test = int(total * 0.3)

    test_data = []
    test_labels = []

    train_data = data[:]
    train_labels = labels[:]

    n = total
    for i in range(num_test):
        index = int(random.random() * n)
        n -= 1
        test_data.append(train_data.pop(index))
        test_labels.append(train_labels.pop(index))
    # train
    import nn
    classifier_nn = nn.nn(train_data, train_labels)
    import knn
    classifier_knn = knn.knn(train_data, train_labels)
    # test
    for i in range(len(test_data)):
        d = test_data[i]
        res.append(
            [classifier_nn.test(d),
             classifier_knn.test(d, k), test_labels[i]])
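The holdout logic above depends on module-level state; a self-contained sketch of the same 70/30 random split, with stand-in data, behaves like this:

import random

data = [[i, i + 1] for i in range(10)]   # stand-in feature rows
labels = [i % 2 for i in range(10)]      # stand-in labels

train_data, train_labels = data[:], labels[:]
test_data, test_labels = [], []

n = len(data)
for _ in range(int(len(data) * 0.3)):
    index = int(random.random() * n)     # uniform index into the shrinking pool
    n -= 1
    test_data.append(train_data.pop(index))
    test_labels.append(train_labels.pop(index))

assert len(test_data) == int(len(data) * 0.3)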
Example #4
def nn_analysis(statsqueue, imagennqueue, mailqueue, nnpath, nnareas_crop,
                nnareas_targetarea, detection_crop, detection_targetarea):

    excludeobjects = []
    excludeobjects.append("bench")

    yy = nn(nnpath['frozeninferencegraph'], nnpath['frozeninferenceconfig'],
            nnpath['labelmap'], 0.4, 0.4, excludeobjects)

    last_time = time.time()
    isInteresting = 0

    while True:

        #if imagennqueue.qsize() > 200:
        #    print("Warning imagennqueue : " + str(imagennqueue.qsize()))

        datann = imagennqueue.get()
        if datann is not None:
            imagetime = datann[0]
            image = datann[1]
            imagenn = image.copy()

            elapsed_time = imagetime - last_time

            if (elapsed_time > 5):
                isInteresting = 0

            if nnareas_crop > 0:
                # e.g. [680:1335, 600:1600]
                imagenn = imagenn[nnareas_targetarea[1]:nnareas_targetarea[3],
                                  nnareas_targetarea[0]:nnareas_targetarea[2]]

            imagenn_small = imutils.resize(imagenn,
                                           width=min(800, imagenn.shape[1]))
            #objectsdetected = yy.run(imagenn)
            objectsdetected = yy.run(imagenn_small)

            interesting = ('person', 'car', 'bicycle', 'motorcycle', 'bus',
                           'truck')
            for obj in objectsdetected:
                if obj[0] in interesting:
                    isInteresting = 1

            if (elapsed_time > 1) and (isInteresting == 1):
                if detection_crop > 0:
                    # e.g. [680:1335, 320:1935]
                    image = image[detection_targetarea[1]:detection_targetarea[3],
                                  detection_targetarea[0]:detection_targetarea[2]]

                now = datetime.datetime.now()
                ret, tmpimg = cv2.imencode(".jpg", np.asarray(image))
                mailqueue.put([tmpimg, now.strftime('%Y-%m-%d-%H-%M-%S')])

                last_time = imagetime
        else:
            # a None item is the shutdown sentinel
            break

    print("Exiting process nn")
Example #6
def train_nn(selected_efps):
    # Find the "first" EFP that is most similar to the NN(LL) predictions
    # Train a simple NN with this first choice
    X_train, y_train = data_grabber(selected_efps=selected_efps,
                                    split="train",
                                    normalize=True)
    X_test, y_test = data_grabber(selected_efps=selected_efps,
                                  split="test",
                                  normalize=True)
    X_val, y_val = data_grabber(selected_efps=selected_efps,
                                split="valid",
                                normalize=True)

    # Try different network designs according to the number of hidden layers and units
    # (`model_dir`, `layers`, `nodes`, and `ix` come from the enclosing module)
    model_file = f"{model_dir}/model_l_{layers}_n_{nodes}.h5"
    model_file_name = model_file.split("/")[-1]
    model = nn(
        X_train=X_train,
        y_train=y_train,
        X_val=X_val,
        y_val=y_val,
        epochs=1000,
        batch_size=32,
        layers=layers,
        nodes=nodes,
        ix=ix,
        model_file=model_file,
        verbose=0,
    )
    test_pred = np.hstack(model.predict(X_test))
    auc_val = roc_auc_score(y_test, test_pred)
    print(f"    AUC: {auc_val:.4}")
Example #7
def plotTSP():

    # assumes module-level: import matplotlib.pyplot as plt, import nn
    with open("./data/a281.tsp", "r") as fo:
        data = fo.readlines()
    X, Y = [], []
    paths = nn.nn()
    for line in data:
        X.append(float(line.split()[1]))
        Y.append(float(line.split()[2]))

    a_scale = max(X) / 100.0
    new_data = []
    for i in range(len(paths)):
        new_data.append([X[paths[i]], Y[paths[i]]])
    print(new_data[-1][0])
    print(new_data[-1][1])
    #print new_data[0][0]-new_data

    #plt.arrow(new_data[-1][0],new_data[-1][1], (new_data[0][0] - new_data[-1][0]), (new_data[0][1] - new_data[-1][1]), head_width = a_scale,color ='g', length_includes_head=True)

    for i in range(len(new_data) - 1):

        plt.arrow(new_data[i][0],
                  new_data[i][1], (new_data[i + 1][0] - new_data[i][0]),
                  (new_data[i + 1][1] - new_data[i][1]),
                  head_width=a_scale,
                  color='g',
                  length_includes_head=True)

    plt.xlim(min(X) * 1.1, max(X) * 1.1)
    plt.ylim(min(Y) * 1.1, max(Y) * 1.1)
    plt.show()
Example #8
	def __init__(self):
		
		# load network configuration
		self.conf_ = conf()
		self.conf_.start()

		# get test data
		pre_proc = pre_processor()
		test_x, test_y = pre_proc.get_data(testing=True)

		self.nn = nn(self.conf_)
		self.sess = tf.Session()

		# Build Computation Graph
		meta_filepath = "../models/sess-" + str(self.conf_.epochs) + ".meta"
		ckpt_filepath = "../models/sess-" + str(self.conf_.epochs)
		self.nn.restore_tensors(self.sess, meta_filepath, ckpt_filepath)
		self.nn.forward_prop(testing=True)
		self.nn.compute_loss()
		self.nn.compute_acc()

		# Initialize local variables only (not global variables)
		self.sess.run(tf.local_variables_initializer())

		# start testing
		self.start(test_x, test_y)
Example #9
def lvq_3(prots):
    prots_3 = prots[:]
    for r in range(repetitions):
        for x in dataset:
            closest_prototypes = nn(x, 2, prots_3)
            m = closest_prototypes[0]['elem']
            n = closest_prototypes[1]['elem']

            m_class = closest_prototypes[0]['class']
            n_class = closest_prototypes[1]['class']
            x_class = x[-1]  # the class label sits in the last slot

            if m_class == x_class:
                same_class, other_class = m, n
            elif n_class == x_class:
                same_class, other_class = n, m
            else:
                same_class = False

            if (window_rule(x, m, n) and same_class):
                if (m_class != n_class):
                    movement(same_class, x, True)
                    movement(other_class, x, False)
                else:
                    movement(same_class, x, True, e=e)
                    movement(other_class, x, True, e=e)

    print "LVQ 3 RESULTS:"
    return knn(k, prots_3, evaluation)
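The `movement` helper used by lvq_3 here (and by lvq_1 further down) is not shown; a plausible definition, assuming the standard LVQ attract/repel update with the class label kept in the prototype's last slot:

def movement(proto, x, toward, e=1.0, lr=0.1):
    # Pull `proto` toward x when `toward` is truthy, push it away otherwise;
    # `e` is the dampened rate LVQ3 applies to same-class pairs (an assumption).
    step = lr * e
    for i in range(len(proto) - 1):  # skip the trailing class label
        delta = step * (x[i] - proto[i])
        proto[i] += delta if toward else -delta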
Example #10
	def __init__(self, num_chars, compression_vector_size):
		super(cclstm, self).__init__()
		self.autoencoder = nn.nn([num_chars, 1000, 250, 100, 250, 1000, num_chars])
		self.charlstm = lstm.lstm([self.autoencoder.layers[3], compression_vector_size,
		                           self.autoencoder.layers[3]], softmax=False)
		self.wordlstm = lstm.lstm([compression_vector_size,
		                           compression_vector_size + compression_vector_size // 2,
		                           compression_vector_size], softmax=False)
		self.sentlstm = lstm.lstm([self.wordlstm.layers[1],
		                           self.wordlstm.layers[1] + self.wordlstm.layers[1] // 2,
		                           self.wordlstm.layers[1]], softmax=False)
		self.responder = lstm.lstm([self.sentlstm.layers[1],
		                            self.sentlstm.layers[1] + self.sentlstm.layers[1] // 2,
		                            self.sentlstm.layers[1]], softmax=False)
		self.compression_vector_size = compression_vector_size
		self.response_vectors = []
		self.response_lookup = []
Example #11
def test():  
    db = NFdb.NFdb("config/db.conf")
    n = XMPPNotify("config/xmpp.conf")
    contact = "*****@*****.**"

    solve = nn("config/nn.conf")

    res = solve.rmseProbeCustomerKNN()
    msg = "RMSE for Customer KNN = %f" % (res)
    print(msg)
    n.notify(contact, msg)
Example #12
	def generate_summary(self, title, para):
		# builds a summary by passing every sentence through the net
		# and keeping the ones the net marks as important
		summary = ""
		sentences = para.split(".")
		for line in sentences:
			weights = self.tp.generate_weights(para, title, line)
			net = nn.nn()
			net.load_net()
			net.update_weight(weights)
			if net.check_imp_feature() == 1:  # is this sentence worth including?
				summary += line
				if summary and summary[-1] != '.':
					summary += '.'
		return summary
Example #13
def train_nn(ix):
    # `pass_path`, `model_path`, `selected_efps`, `include_hl`, `include_pt`,
    # `truth_guided`, and `path` are module-level settings
    pred_file = pass_path / "test_pred.feather"
    layers = 3
    nodes = 50
    batch_size = 256

    # Find the "first" EFP that is most similar to the NN(LL) predictions
    # Train a simple NN with this first choice
    X, y = get_data(selected_efps=selected_efps,
                    include_hl=include_hl,
                    include_pt=include_pt)

    X_train, X_val, y_train, y_val = train_test_split(X,
                                                      y,
                                                      test_size=0.2,
                                                      random_state=42)

    X_val, X_test, y_val, y_test = train_test_split(X_val,
                                                    y_val,
                                                    test_size=0.5,
                                                    random_state=42)

    # Try different network designs according to the number of hidden layers and units
    model_file = model_path / f"model_l_{layers}_n_{nodes}_bs_{batch_size}.h5"

    model = nn(
        X_train=X_train,
        y_train=y_train,
        X_val=X_val,
        y_val=y_val,
        epochs=3000,
        batch_size=batch_size,
        layers=layers,
        nodes=nodes,
        model_file=model_file,
        verbose=2,
    )

    predictions = np.hstack(model.predict(X))
    auc_val = roc_auc_score(y_test, np.hstack(model.predict(X_test)))
    print(f"test-set AUC={auc_val:.4}")

    if truth_guided:
        ll = y.copy()
    else:
        ll = np.concatenate(
            np.load(path.parent / "data" / "raw" / "ll_predictions.npy"))
    test_df = pd.DataFrame({"hl": predictions, "y": y, "ll": ll})
    test_df.to_feather(pred_file)
    return auc_val
Example #14
def nn_run(data_flag, iter_var_idxs):
    """
	Run a supervised learning classification for a single specs file.

	Data is read from a specifications file in the data_dir/specs/ 
	folder, with proper formatting given in read_specs_file.py. The
	specs file indicates the full range of the iterated variable; this
	script only produces output from one of those indices, so multiple
	runs can be performed in parallel.
	"""

    # Aggregate all run specifications from the specs file; instantiate model
    list_dict = read_specs_file(data_flag)
    vars_to_pass = compile_all_run_vars(list_dict, iter_var_idxs)
    obj = nn(**vars_to_pass)

    # Need this to save tensor flow objects on iterations
    obj.data_flag = data_flag

    # Set the signals and free energy, depending if adaptive or not.
    if 'run_type' in list(list_dict['run_specs'].keys()):
        val = list_dict['run_specs']['run_type']
        if val[0] == 'nn':
            obj.init_nn_frontend()
        elif val[0] == 'nn_adapted':
            obj.init_nn_frontend_adapted()
        else:
            print('`%s` run type not accepted for '
                  'supervised learning calculation' % val[0])
            quit()
    else:
        print('No learning calculation run type specified, proceeding with '
              'unadapted learning calculation')
        obj.init_nn_frontend()

    # Set the network variables, learning algorithm
    obj.set_AL_MB_connectome()
    obj.set_ORN_response_array()
    obj.set_PN_response_array()
    obj.init_tf()

    # Train and test performance
    obj.set_tf_class_labels()
    obj.train_and_test_tf()

    # Delete tensorflow variables to allow saving
    obj.del_tf_vars()
    dump_objects(obj, iter_var_idxs, data_flag)

    return obj
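A hypothetical invocation; the specs name and index below are made up, and read_specs_file resolves them from data_dir/specs/:

# Runs one grid point of the iterated variable from the specs file.
obj = nn_run('example_specs', iter_var_idxs=[0])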
Example #15
def bootstrap_runner(run_name):
    selected_efps = pd.read_csv(
        path / "results" / run_name / "selected_efps.csv"
    )
    selected_efps = selected_efps.efp.tolist()[1:]
    X, y = grab_and_mix_data(selected_efps)

    # `n_splits`, `epochs`, `batch_size`, `layers`, `nodes`, and `bs_model_dir`
    # are module-level settings
    rs = ShuffleSplit(n_splits=n_splits, random_state=0, test_size=0.15)
    rs.get_n_splits(X)
    straps = []
    aucs = []
    bs_count = 0
    for train_index, test_index in rs.split(X):
        X_train = X[train_index]
        y_train = y[train_index]
        X_val = X[test_index]
        y_val = y[test_index]
        model_file = f"{bs_model_dir}/bs-{bs_count}.h5"
        if not os.path.isfile(model_file):
            model = nn(
                X_train=X_train,
                y_train=y_train,
                X_val=X_val,
                y_val=y_val,
                epochs=epochs,
                batch_size=batch_size,
                layers=layers,
                nodes=nodes,
                model_file=model_file,
                verbose=0,
            )
        else:
            model = tf.keras.models.load_model(model_file)

        auc_val = roc_auc_score(y_val, np.hstack(model.predict(X_val)))
        # print(f"    test-set AUC: {auc_val:.5}")
        straps.append(bs_count)
        aucs.append(auc_val)
        results = pd.DataFrame({"bs": straps, "auc": aucs})
        results.to_csv(path / "results" / run_name / "bootstrap_results.csv")
        bs_count += 1
        auc_mean = np.average(aucs)
        auc_std = np.std(aucs)
        print(f"AUC = {auc_mean:.5f} +/- {auc_std:.5f}")
Example #16
def lvq_1(prots):
    for r in range(repetitions):
        for x in dataset:

            closest_prototype = nn(x, 1, prots)[0]
            closest_class = closest_prototype['class']
            x_class = x[-1]  # the class label sits in the last slot

            if (closest_class == x_class):
                movement(closest_prototype['elem'], x, True)
            else:
                movement(closest_prototype['elem'], x, False)

    print "LVQ 1 RESULTS:"
    return {'results': knn(k, prots, evaluation), 'prots': prots}
Example #17
    def __init__(self, use_batchnorm=True):

        self.num_sgd_updates = 3000

        # create neural network:
        self.car1 = moutaincar_dpg.mountaincar_dpg()
        self.car1.loaddata('dpg_mountain_car_iter250')
        self.car1.plot_policy(mode='deterministic')

        obs_low = self.car1.env.observation_space.low
        obs_high = self.car1.env.observation_space.high

        if use_batchnorm:
            self.nn1 = nn_batchnorm(input_low=obs_low, input_high=obs_high, car=self.car1)
        else:
            self.nn1 = nn(input_low=obs_low, input_high=obs_high, car=self.car1)

        self.nn1.main()
Example #18
def anneal():

    # assumes module-level: import pickle, random, nn; eval_state, get_state,
    # and probability are defined alongside
    with open("adj_mat", "rb") as fo:
        adj_mat = pickle.load(fo)
    init = nn.nn()
    cost = eval_state(init, adj_mat)
    T = 50000
    alpha = 0.99
    while T > 1e-29:
        newstate = get_state(init)
        p = random.random()
        old = eval_state(init, adj_mat)
        new = eval_state(newstate, adj_mat)
        if p < probability(old, new, T):
            init = newstate
            cost = min(cost, new)
        T *= alpha
    print(cost)
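The acceptance rule `probability` is not shown above; a plausible Metropolis-style definition for this minimisation (an assumption, not the original code):

import math

def probability(old_cost, new_cost, T):
    if new_cost < old_cost:
        return 1.0  # always accept an improvement
    # accept a worse state with odds that shrink as T cools
    return math.exp((old_cost - new_cost) / T)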
Example #19
    def __init__(self, state_shape, actions, output_path):
        self.alpha = 1
        self.gamma = 0.99
        self.random_action_alpha = 1
        self.random_action_alpha_cap = 1
        self.ra_range_begin = 0.05
        self.ra_range_end = 0.99
        self.total_actions = 0
        self.lam = 0.9
        self.history_size = 100000
        self.batch_size = 256

        self.actions = actions
        self.history = history.history(self.history_size)

        output_path += '/run.%d' % (time.time())
        self.summary_writer = tf.summary.FileWriter(output_path)

        self.main = nn.nn("main", state_shape[0], actions, self.summary_writer)
Example #20
def test(test_image, A, norm=2, k=80):
    global cA
    global avg_image
    global E
    global Y
    global ppt
    if cA is None:
        tic = time()
        cA, avg_image = center_data(A)
        cA = cA.T
        E = hqpb(cA, k)  # High-Quality Pseudo-basis
        Y = np.dot(E.T, cA)
        toc = time()
        ppt = toc - tic
        print("pre-processing time = ", (toc - tic) * 1000, ' ms')
    c_test_image = np.array([np.array(test_image - avg_image)]).T
    test_pr = np.dot(E.T, c_test_image)
    return nn(test_pr.T, Y.T, norm)
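`center_data` is shared by this and the eigenfaces variants below; a sketch of what it plausibly does (an assumption: one flattened image per row, mean image subtracted):

import numpy as np

def center_data(A):
    A = np.asarray(A, dtype=float)   # one flattened image per row
    avg_image = A.mean(axis=0)
    return A - avg_image, avg_image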
Example #21
def eigenfaces_reduced(test_image, A, norm=2, k=60):
    global cA
    global avg_image
    global E
    global Y
    global ppt
    # pre processing stage
    if cA is None:
        tic = time()
        cA, avg_image = center_data(reduce(A))
        cA = cA.T
        E = eigenvalues(cA, k)
        Y = np.dot(E.T, cA)
        toc = time()
        ppt = toc - tic
        print("pre-processing time = ", (toc - tic) * 1000, ' ms')
    # testing stage
    c_test_image = np.array([np.array(test_image) - avg_image]).T
    test_pr = np.dot(E.T, c_test_image)
    return nn(test_pr.T, Y.T, norm, reduced=True)
Example #22
def train_nn(efp):
    layers = 3
    nodes = 100
    batch_size = 512

    # Find the "first" EFP that is most similar to the NN(LL) predictions
    # Train a simple NN with this first choice
    X, y = data_grabber(efp)

    X_train, X_val, y_train, y_val = train_test_split(X,
                                                      y,
                                                      test_size=0.2,
                                                      random_state=42)

    X_val, X_test, y_val, y_test = train_test_split(X_val,
                                                    y_val,
                                                    test_size=0.5,
                                                    random_state=42)

    # Try different network designs according to the number of hidden layers and units
    # (`path` is a module-level pathlib.Path)
    model_file = path / "6HL_1EFP_models" / f"{efp}.h5"

    model = nn(
        X_train=X_train,
        y_train=y_train,
        X_val=X_val,
        y_val=y_val,
        epochs=200,
        batch_size=batch_size,
        layers=layers,
        nodes=nodes,
        model_file=model_file,
        verbose=0,
    )

    predictions = np.hstack(model.predict(X))
    auc_val = roc_auc_score(y_test, np.hstack(model.predict(X_test)))
    print(f"test-set AUC={auc_val:.4}")
    return auc_val
Example #23
def eigenfaces(test_image, A, norm, k=60):
    global cA
    global avg_image
    global E
    global Y
    global ppt
    # pre processing stage
    if cA is None:
        tic = time()
        cA, avg_image = center_data(A)
        cA = cA.T
        E = eigenvalues(cA, k)  # High-Quality Pseudo-basis
        Y = np.dot(E.T, cA)
        toc = time()
        ppt = toc - tic
        print("pre-processing time = ", (toc - tic) * 1000, ' ms')
    # testing stage
    c_test_image = np.array([np.array(test_image) - avg_image]).T
    test_pr = np.dot(E.T, c_test_image)
    p = nn(test_pr.T, Y.T, norm)
    return p
Example #24
    def __init__(self, puzzleSize, epsilon, alpha, gamma):

        # exploration factor between 0-1 (chance of taking a random action)
        self.epsilon = epsilon
        # learning rate between 0-1 (0 means never update Q-values, 1 means discard old value)
        self.alpha = alpha
        # discount factor between 0-1 (higher means the algorithm looks farther into the future;
        # at 1, infinite rewards become possible -> don't go to 1)
        self.gamma = gamma

        self.puzzleSize = puzzleSize
        self.actionsSize = puzzleSize**2
        self.actions = range(4)
        self.inputSize = self.actionsSize**2

        # create one nn per action:
        self.networks = {}
        for action in self.actions:
            net = nn.nn(puzzleSize=self.puzzleSize, alpha=self.alpha)
            self.networks[action] = copy.copy(net)

        if puzzleSize == 2:
            self.batchMaxSize = 50
            #self.moveBatchMaxSize = 10
            # self.learningSteps = 20
            # self.learnSize = 10
        if puzzleSize == 3:
            self.batchMaxSize = 10002  # how many [state,action,reward,newstate] tuples to remember
            #self.moveBatchMaxSize = 50 # how many tuples to remember where a change in the boardstate happened
            # self.learningSteps = 200  # after how many actions should a batch be learned
            # self.learnSize = 20  # how many of those tuples to randomly choose when learning

        self.age = 0
        self.batch = deque(maxlen=self.batchMaxSize)
        # self.batchSize = 0
        self.mem_count = 0
Example #25
# Normalise data
normalised_data = (data - np.mean(data)) / np.std(np.array(data), 0)

# Split data
train_x, test_x, train_y, test_y = train_test_split(normalised_data,
                                                    labels,
                                                    train_size=0.6,
                                                    test_size=0.4,
                                                    random_state=1)
bin_train_y = binarize_labels(train_y)
bin_test_y = binarize_labels(test_y)

if __name__ == '__main__':
    # Initialise
    nnet = nn(layers=[4, 10, 3], sigmoids=[logistic, softmax], random_seed=12)

    # Train
    ncores = get_num_cores()
    nn_params = {
        'learning_rate': 1,
        'influence_of_inertia': 0.1,
        'size_minibatch': 90,
        'epochs': 10,
        'error_func': mse,
        'verbose': True,
        'n_jobs_data_parallelisation': 1
    }

    nnet.fit(x=train_x, y=bin_train_y, **nn_params)
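One detail worth flagging in the normalisation above: np.mean(data) averages over every entry, while np.std(..., 0) is per-column. If per-feature centring was intended, a column-wise version (an alternative, not the original's behaviour) would be:

normalised_data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)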
Example #26
import nn.nn as nn
from sklearn import datasets

# Dataset: two interleaved half-moons (sklearn make_moons)

x, y = datasets.make_moons(n_samples=1000, noise=0.1, random_state=0)

# Network structure
net = nn([2, 5, 5, 3, 2], ['relu', 'relu', 'relu', 'softmax'],
         'CrossEntropyLoss',
         inp=x,
         target=y)
# net.load()

bs = 10
lr = 1e-2
iterations = len(y) // bs

# Training
for e in range(5000):
    Loss = 0
    for i in range(iterations):
        start = i * bs
        net.zero_grad()
        out, loss = net(x[start:start + bs], y[start:start + bs])
        Loss += loss
        net.adam(10 * lr)
    if e % 5 == 0:
        net.viz(epoch=e, loss=Loss / iterations)
        net.save()
        # net.viz(rows=2, cols=2)
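A quick accuracy check can reuse the same forward call (hedged: this assumes the net's first return value holds per-class scores, which the argmax below relies on):

import numpy as np

out, _ = net(x, y)
acc = (np.argmax(out, axis=1) == y).mean()
print(f"train accuracy: {acc:.3f}")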
Example #27
import game2
import othello
import ntuplesystematic as nts
import time
import random
import numpy
import nn

populationsize = 10
goodpopulationsize = 5
generations = 5
parent = []
child = [0] * populationsize

for i in range(populationsize):
	playermaxx = nn.nn()
	for j in range(200):
		game2.play(othello.game(),
		           game2.player(lambda x: playermaxx.play_move(x, 0.3)),
		           game2.player(lambda x: playermaxx.play_move(x, 0.3)), False)
		playermaxx.reset()
	parent.append(playermaxx)


# nTuplesSystematicObject (an n-tuple player from the nts module) is assumed
# to be constructed earlier
for z in range(generations):
	win = []
	for i in range(populationsize):
		winsfori=0
		for j in range(100):
			winner = game2.play(othello.game(), game2.player_epsilon(lambda x: parent[i].play_move(x)),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), False)
			if winner == 1:
				winsfori += 1
			winner = game2.play(othello.game(),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), game2.player_epsilon(lambda x: parent[i].play_move(x)), False)
Example #28
	def train(self, title, para, summary):  # training
		weights = self.tp.generate_weights(para, title, summary)
		net = nn.nn()
		net.load_net()
		net.update_weight(weights)
		net.save_net()
Example #29
def nn_results(hdf5, experiment, code_size_1, code_size_2, code_size_3):

    exp_storage = hdf5["experiments"]['cc200_whole']
    experiment = "cc200_whole"
    print(exp_storage)

    n_classes = 2
    results = []
    columns = ['']   # per-fold columns: test_pid, y_pred, y_true
    outputs = []     # raw network outputs per fold

    for fold in exp_storage:

        experiment_cv = format_config("{experiment}_{fold}", {
            "experiment": experiment,
            "fold": fold,
        })
        print("experiment_cv")
        print(fold)

        X_train, y_train, \
        X_valid, y_valid, \
        X_test, y_test, test_pid = load_fold(hdf5["patients"], exp_storage, fold)

        columns.append(test_pid)
        print("X_train")
        print(X_train.shape)

        y_test = np.array([to_softmax(n_classes, y) for y in y_test])

        ae1_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-1.ckpt",
            {"experiment": experiment_cv})
        ae2_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-2.ckpt",
            {"experiment": experiment_cv})
        ae3_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_autoencoder-3.ckpt",
            {"experiment": experiment_cv})
        nn_model_path = format_config(
            "./data/cc200_tichu_2500_1250_625/{experiment}_mlp.ckpt",
            {"experiment": experiment_cv})

        try:
            model = nn(X_test.shape[1], n_classes, [
                {"size": 2500, "actv": tf.nn.tanh},
                {"size": 1250, "actv": tf.nn.tanh},
                {"size": 625, "actv": tf.nn.tanh},
            ])

            init = tf.global_variables_initializer()
            with tf.Session() as sess:
                sess.run(init)

                saver = tf.train.Saver(model["params"])
                print(nn_model_path)
                saver.restore(sess, nn_model_path)

                # run the test set through the restored MLP with dropout disabled
                output = sess.run(model["output"],
                                  feed_dict={
                                      model["input"]: X_test,
                                      model["dropouts"][0]: 1.0,
                                      model["dropouts"][1]: 1.0,
                                      model["dropouts"][2]: 1.0,
                                  })

                np.set_printoptions(suppress=True)

                y_score = output[:, 1]
                print("y_score")
                print(y_score)

                y_pred = np.argmax(output, axis=1)
                print("y_pred")
                print(y_pred)

                # keep four decimals of the raw outputs
                output = np.round(output, 4)

                outputs.append(output)
                columns.append(y_pred)

                print("-------------------------------------")

                y_true = np.argmax(y_test, axis=1)
                columns.append(y_true)
                print("y_true")
                print(y_true)

                auc_score = roc_auc_score(y_true, y_score)
                print(auc_score)

                [[TN, FP], [FN, TP]] = confusion_matrix(
                    y_true, y_pred, labels=[0, 1]).astype(float)
                accuracy = (TP + TN) / (TP + TN + FP + FN)
                print(TP)
                print(TN)
                print(FP)
                print(FN)
                specificity = TN / (FP + TN)
                precision = TP / (TP + FP)
                sensitivity = recall = TP / (TP + FN)
                fscore = 2 * TP / (2 * TP + FP + FN)

                results.append([
                    accuracy, precision, recall, fscore, sensitivity,
                    specificity, auc_score
                ])
        finally:
            reset()

    workbook = xlwt.Workbook(encoding='utf-8')
    booksheet = workbook.add_sheet('Sheet 1', cell_overwrite_ok=True)

    print(outputs)

    # each collected sequence becomes one spreadsheet column
    for i, col in enumerate(columns):
        for j, value in enumerate(col):
            booksheet.write(j, i, value)
    # workbook.save('./data/dos_tichu_2500_1250_625_xlst.xls')

    return [experiment] + np.mean(results, axis=0).tolist()
Example #30
import nn.nn as nn
import numpy as np
# XOR
###############################################################
net = nn([2, 3, 2], ['relu', 'softmax'], 'CrossEntropyLoss')
###############################################################

# Data generation
# Data generation: a 40x40 grid of points in [-40, 40]^2
xs = np.linspace(-40, 40, 40)
ys = np.linspace(-40, 40, 40)
x = np.array([[xi, yi] for xi in xs for yi in ys])

y = []
for X in x:
    xi, yi = X
    if (xi > 0 and yi > 0) or (xi < 0 and yi < 0):
        y.append(1)
    else:
        y.append(0)
y = np.array(y)

index = np.arange(len(y))
np.random.shuffle(index)
x = x[index]
y = y[index]

# Params
bs = 10
lr = 1e-3
iterations = len(y) // bs
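The snippet stops at the hyper-parameters; a training loop in the same style as the make_moons example above would look like this (hedged: it reuses the zero_grad/call/adam interface shown there):

for e in range(100):
    Loss = 0
    for i in range(iterations):
        start = i * bs
        net.zero_grad()
        out, loss = net(x[start:start + bs], y[start:start + bs])
        Loss += loss
        net.adam(lr)
    if e % 10 == 0:
        print(f"epoch {e}: mean loss {Loss / iterations:.4f}")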
Example #31
    def __init__(self,
                 gamma=0.99,
                 N_0=50.,
                 random_init_theta=False,
                 environment='MountainCarContinuous-v0',
                 algorithm='dpg1',
                 ):

        self.algorithm = algorithm
        self.env = gym.make(environment)
        self.select_env = environment

        self.action_limits = (self.env.action_space.low, self.env.action_space.high)
        print('action limits', self.action_limits)
        actionmean = (self.action_limits[0] + self.action_limits[1]) / 2

#        self.num_actions = self.env.action_space.n
#        self.prob_distrib = np.zeros(self.num_actions)
        self.statedim = self.env.observation_space.shape[0]


        # lengths of all the played episodes
        self.episode_lengths = []
        # lengths of episodes run with target policy
        self.test_lengths = []

        # tile parameters
        self.tile_resolution = 10.0
        self.overlap = False
        if self.overlap:
            self.num_tile_features = int(pow(self.tile_resolution, self.statedim) * 2)
        else:
            self.num_tile_features = int(pow(self.tile_resolution, self.statedim))


        self.gamma = gamma  # discount factor, typically just below 1

        self.N_0 = N_0

        ######################### dpg params ###############


        if random_init_theta:
            # random initialization
            self.theta = np.ones(self.num_tile_features) * actionmean \
                + np.random.randn(self.num_tile_features) * 0.1
        else:
            # initialization with "no" actions (corresponds to action = 1)
            self.theta = np.ones(self.num_tile_features) * actionmean

        # for the value function estimation:
        self.v = np.zeros(self.num_tile_features)  # weights for value function estimator

        self.w = np.zeros(self.num_tile_features)  # weights for q-function estimator

        self.sigma_b = 1  # standard deviation for the behavior policy

        self.alpha_theta = 1e-3
        self.alpha_w = 1e-2
        self.alpha_v = 1e-2

        print('N_0', self.N_0)
        print('using environment', environment)
        print('tile resolution', self.tile_resolution)
        print('gamma', self.gamma)

        # create neural network:
        self.nn1 = nn()
        self.nn1.main()
Example #32
def train(d, l):
	import nn
	classifier_nn = nn.nn(d, l)
	import knn
	classifier_knn = knn.knn(d, l)
	return classifier_nn, classifier_knn
Example #33
def train(d, l, w, th):
	classifier_nn = nn.nn(d, l, w, th)
	classifier_knn = knn.knn(d, l)
	return classifier_nn, classifier_knn
Example #35

# x = numpy.asarray(all_values, dtype=float)
# y = numpy.asarray(all_classes, dtype=int)

# rfe
# n_features, support, ranking, grid_scores, estimator = rfe(x,
#                                                            y)
# print(n_features, support, ranking, grid_scores, estimator)

# f = open('log.log', 'a')
# f.write(str(n_features))
# f.write(str(support))
# f.write(str(ranking))
# f.write(str(grid_scores))
# f.write(str(estimator))
# f.close()

# nn
result, history = nn(train_values_rfe, train_classes_binary, test_values_rfe, test_classes_binary)
print(result)
print(max(history.history['val_acc']))

# for perc in [100]:
# for perc in [100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 15, 10, 5]:
    # print()
    # for deg in range(6):
    # deg = 1
    # print('test accuracy with deg ', deg, ', perc: ', perc, ' - ', ova(train_values_rfe, train_classes, test_values_rfe,
    #                                                                        test_classes, class_desc, deg=deg, perc=perc))
Example #36
import matplotlib.pyplot as plt

import numpy as np

import neuron
import layer
import nn

jk = [1, 2, 3, 4, 34, 54]
jk.pop(4)

k = nn.nn()

k.add(1, 1)
k.add(5)
k.add(3)
k.add(1)

x = np.array([[.1, .2, .3]]).T
y = np.array([[.2, .4, .6]]).T

input_data = np.random.rand(500, 3)
k.forward(x)

k.backprop(x, y)

for _ in range(1000):
    k.backprop(x, y)

f = np.array([[0.3, 0.2, 0.5, 0.1, 0.1], [0.2, 0.4, 0.6, 0.1, 0.1],
              [0.5, 0.2, 0.3, 0.1, 0.1], [0.6, 0.4, 0.2, 0.1, 0.2]]).T
Example #37
len_train_X = len(train_X)

# Validation Dataset
val_X = pd.read_csv(ROOT_PATH + "Val_COH_Dataset.csv", header=None)
val_Y = pd.read_csv(ROOT_PATH + "Val_COH_Label.csv", header=None)

# Testing Dataset
test_X = pd.read_csv(ROOT_PATH + "Test_COH_Dataset.csv", header=None)
test_Y = pd.read_csv(ROOT_PATH + "Test_COH_Label.csv", header=None)

#######################
# 2. Build & Train NN #
#######################

# Initialize the Custom Class
NN = nn()

# Initialize Weight Matrix
W1, W2 = NN.initialize_weights()

# Accuracy_top_1 and Accuracy_top_5 record the top-1 and top-5 accuracies; E records the loss.
Train_Accuracy_top_1, Val_Accuracy_top_1, Train_Accuracy_top_5, Val_Accuracy_top_5, E = [], [], [], [], []

############ Hyper Parameter ############
Epoch = 100
lr_1, lr_2 = 0.01, 0.01
Scale = 100.0
############ Hyper Parameter ############

tic = time.time()
for epoch in range(Epoch):
Example #38
c = int(input("Please input the number of layers -> "))
l = float(input("Please input the learning rate -> "))
count = int(input("Please input the number of training iterations -> "))

length = len(x)
if len(a) != length:
    print("Match the number of values and answers.")

x = np.array(x)
a = np.array(a)

f = []
for i in range(c):
    f.append(nn(length, l))
    f.append(nn(length, l))

mse = MSE()

for i in range(count):
    for j in range(c):
        if j == 0:
            tmp = f[j].forward(x)
        else:
            tmp = f[j].forward(tmp)

    loss = mse.forward(tmp, a).mean()
    print(loss)
Example #39
from metric import printErrorMetrics

from rf import rf
from rr import rr
from nn import nn
from lr import lr


if __name__ == '__main__':
    extract_dir = sys.argv[1]
    fnum = int(sys.argv[2])

    """ datasets and labels's size is fnum """
    datasets, labels = GetAllData(extract_dir, fnum, 'bfs', total_vertex_num=4900578, L=500000)
    # datasets, labels = GetAllData(extract_dir, fnum, 'bfs', total_vertex_num=65608366, L=10000000)

    """ ridge regression """
    sr, sl = rr(datasets, labels, fnum)

    """ neural network """
    sr, sl = nn(datasets, labels, fnum)
    
    """ liner regression """
    sr, sl = lr(datasets, labels, fnum)

    """ random forest """
    sr, sl = rf(datasets, labels, fnum)

    """ draw picture """
    sample_draw(sr, sl)