Example #1
def train(inputs, outputs, size, participants, victors, generations, threshold, cRate, mRate, printFile=False):
    '''Create and start training the NN via evolution strategy. Selection, crossover, mutation, evaluation.''' 
    global hero
    global OrigAnswers
    OrigAnswers = copy.deepcopy(outputs)
    EvaluationNN = GA.create_net(inputs, outputs)
    population = init_pop(EvaluationNN, inputs, outputs, size)
    # Test each citizen and determine initial fitness
    GA.evaluate(EvaluationNN, population, inputs, outputs)

    if printFile: f = open('ES.csv', 'w')
    gen = 0
    children = []
    # loop until a hero is found or we've reached max generations
    while gen <= generations and hero == 0:
        # Select our parents using tournament selection
        parents = GA.tournament(population, participants, victors)
        # Have our parents mate (Crossover)
        children = GA.mate(parents, cRate)
        # Have the children experience the world (Mutate)
        for child in children:
            mutate(child, mRate)
        # Test each child's fitness
        GA.evaluate(EvaluationNN, children, inputs, outputs)
        children = GA.tournament(children, participants, victors)
        population = sorted(population + children,
                            key=itemgetter(-1))[:-victors]
        if GA.heroFound(population, threshold):
            break
        else:
            print("Training: {:2.2%}".format(
                population[0][-1]), "{:2.2%}     ".format(gen / generations), end="\r")
            if printFile: f.write('%f,' % population[0][-1])
            if printFile: f.write('\n')
        gen += 1
    if printFile: f.close()
    if hero == 0:
        gen -= 1
        hero = sorted(population, key=itemgetter(-1))[0]
    EvaluationNN.SetNNWeights(hero[:-1])  # Load hero into NN, prep for usage.

    # Evaluate the hero on the inputs and outputs
    print('Generations: %d' % gen, ' ' * 20)
    print("Error Relative: {:2.5%}".format(NN.calcRelativeError(EvaluationNN, inputs, OrigAnswers)))
    print("Least Squares: %d" % NN.calcLeastSquaresError(EvaluationNN, inputs, OrigAnswers))
    print("Loss Squared: %d" % NN.calcLossSquared(EvaluationNN, inputs, OrigAnswers))
    #for x in inputs:
    #    EvaluationNN.SetStartingNodesValues(x)
    #    EvaluationNN.CalculateNNOutputs()
    #    print(x, EvaluationNN.GetNNResults(), EvaluationNN.GetNNResultsInt(), OrigAnswers[inputs.index(x)])
    print()

    return EvaluationNN
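
A minimal usage sketch for the trainer above, assuming it lives in a module named ES next to the project's GA and NN modules; the XOR-style data and every parameter value here are illustrative assumptions, not part of the original file:

import ES

inputs = [[0, 0], [1, 0], [0, 1], [1, 1]]   # XOR patterns (assumed topology-compatible)
outputs = [[0], [1], [1], [0]]
net = ES.train(inputs, outputs, size=50, participants=4, victors=2,
               generations=500, threshold=0.05, cRate=0.8, mRate=0.1)
net.SetStartingNodesValues(inputs[0])        # query the trained network once
net.CalculateNNOutputs()
print(net.GetNNResults())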
Example #2
def test_ModelLayer():
    for typ in ModelTypes:
        for i in range(4):
            cl = i + 1 if typ == "SL2" and i > 0 else 0
            NNx = NN.clsNN(1, i, 1, Type=typ)
            assert len(NNx.model) == 1                     # only one model (one output)
            assert len(NNx.model[0].layers) == i + 2 + cl  # i hidden layers + input and output layers (+ cl extra for SL2)
Example #3
def xQModelPredict(self, Xraw, rund=None):
    Xx = []
    Ax = []
    Q = []
    la = len(self.actions)
    # Extend X to X+Actions
    for state in Xraw:
        Q.append([0] * la)  # one slot per action, rather than a hardcoded 4
        for j in range(len(self.actions)):
            if isinstance(Xraw[0][-1], int):
                Xx.append(state[:-1])
            else:
                Xx.append(state)
            Ax.append(j)
    X = self._RetXActAs01(Xx, Ax)
    # Predict
    Q01np = NN.ModelPredict(self.QModel[0], X)
    # Reduce output back to one row per state
    Q01 = [float(Q01np[i][0]) for i in range(len(Q01np))]
    for i in range(len(Q01)):
        if rund is None:
            Q[i // la][i % la] = Q01[i]
        else:
            Q[i // la][i % la] = round(Q01[i], rund)
    return Q
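
The Q[i // la][i % la] assignment above is just a flat-to-grid reshape of one prediction per (state, action) pair back into one row per state. A self-contained sketch of that mapping with made-up numbers:

la = 4                            # number of actions
flat = [0.1, 0.2, 0.3, 0.4,       # predictions for state 0
        0.5, 0.6, 0.7, 0.8]       # predictions for state 1
Q = [[0.0] * la for _ in range(len(flat) // la)]
for i, q in enumerate(flat):
    Q[i // la][i % la] = q
print(Q)  # [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]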
Example #4
def start_thread(inp, activation, out_activ, outp, learn, thresh, mmntm,
                 logger):
    global count
    training_inputs = []
    training_data = []
    count += 1
    print(out_activ)
    testNN = NN.main(inp, activation, out_activ, outp, learn, thresh, mmntm)
    print("DONE TRAINING")
    for i in inp:
        for j in i:
            training_inputs.append(random.randint(
                0, 4))  #create random inputs for testing
        training_data.append(training_inputs)
        training_inputs = []
    logger.info("ACTIVATION SET: ")
    logger.info(activation)
    logger.info("OUTPUT ACTIVATION: %s" % out_activ)
    logger.info("TESTING INPUT: ")
    logger.info(training_data)
    logger.info("OUTPUT: ")
    for x in training_data:
        testNN.SetStartingNodesValues(x)
        testNN.CalculateNNOutputs()
        logger.info(str(x))
        logger.info(testNN.GetNNResults())
        logger.info("RB OUTPUT: %s" % rb_test.rb_test(x))
Example #6
def verify_frame(pred, detc, le):
    global frame, NAME, available, cache, rect, correct
    while True:
        if cache:
            state, shape, rect = facial_landmarks.test_frame(frame, pred, detc)
            if state:
                #FIND THE NAME
                # _, embeddings = facial_landmarks.get_ratios(shape, frame)
                embeddings = face_recognition.get_embeddings(
                    frame, rect, model)

                name, percentage = NN.predict_input_from_video(embeddings,
                                                               le,
                                                               second=False)
                percentage = float("{:.2f}".format(percentage))
                NAME = str(name) + " " + str(percentage) + "%"
                # if name != "Bill_Clinton":
                #     print (name)
                #     correct = False
                # else:
                #     print ("Correct")
                #     correct = True
                available = True
            else:
                available = False
        cache = False
Example #7
    def __init__(self, alpha, decay, n_panels, buffer_size, path=''):

        # Initialize model
        self.model = NN.NN(n_panels)
        self.n_panels = n_panels

        # Load model
        if len(path) != 0:
            self.load(path)

        # Initialize loss criterion, and optimizer
        self.criterion = nn.MSELoss()
        self.optimizer_forward = torch.optim.Adam(self.model.parameters(),
                                                  lr=alpha)
        self.optimizer_backward = torch.optim.Adam(self.model.parameters(),
                                                   lr=alpha)
        self.lr_scheduler_forward = torch.optim.lr_scheduler.ExponentialLR(
            optimizer=self.optimizer_forward, gamma=decay)
        self.lr_scheduler_backward = torch.optim.lr_scheduler.ExponentialLR(
            optimizer=self.optimizer_backward, gamma=decay)

        # Initialize frame buffer
        self.buffer_size = buffer_size
        self.panels_forward_buffer = []
        self.performance_forward_buffer = []
        self.panels_backward_buffer = []
        self.performance_backward_buffer = []
Example #8
def Alex_features_MNIST(bulk_size):
    """Pre-processing MNIST data to make them consistent with AlexNet, and
    then extract the features as the output of the 7'th layer
    """
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    imagenet_mean = np.array([104., 117., 124.], dtype=np.float32)

    # we cannot give the whole training data at once because of OOM
    # forming the division indices
    train_size = mnist.train.images.shape[0]
    divisions = np.append(np.arange(0, train_size, bulk_size), train_size)

    train_features = np.zeros((4096, train_size))
    for t in range(len(divisions) - 1):
        inds = np.arange(divisions[t], divisions[t + 1])
        Alexified_images = np.zeros((len(inds), 227, 227, 3))
        for i in range(len(inds)):
            img = np.reshape(mnist.train.images[inds[i], :], (28, 28))
            img = cv2.resize(img.astype(np.float32), (227, 227))
            img = img.reshape((227, 227, 1))
            img = np.repeat(img, 3, axis=2)
            img -= imagenet_mean
            Alexified_images[i, :, :, :] = img.reshape((1, 227, 227, 3))

        train_features[:, inds] = NN.AlexNet_features(Alexified_images).T
        print("%d / %d" % (t, len(divisions) - 1))

    print("Extracting test features...")
    test_size = mnist.test.images.shape[0]
    divisions = np.append(np.arange(0, test_size, bulk_size), test_size)

    test_features = np.zeros((4096, test_size))
    for t in range(len(divisions) - 1):
        inds = np.arange(divisions[t], divisions[t + 1])
        Alexified_images = np.zeros((len(inds), 227, 227, 3))
        for i in range(len(inds)):
            img = np.reshape(mnist.test.images[inds[i], :], (28, 28))
            img = cv2.resize(img.astype(np.float32), (227, 227))
            img = img.reshape((227, 227, 1))
            img = np.repeat(img, 3, axis=2)
            img -= imagenet_mean
            Alexified_images[i, :, :, :] = img.reshape((1, 227, 227, 3))

        test_features[:, inds] = NN.AlexNet_features(Alexified_images).T
        print("%d / %d" % (t, len(divisions) - 1))

    return train_features, test_features
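
The per-image preprocessing inside both loops is identical; isolated, it looks like the sketch below (assuming cv2 is available and a flat 784-float MNIST row; the random row is a stand-in for real data):

import numpy as np
import cv2

imagenet_mean = np.array([104., 117., 124.], dtype=np.float32)
row = np.random.rand(784).astype(np.float32)             # stand-in for one MNIST row
img = cv2.resize(row.reshape((28, 28)), (227, 227))      # upscale to AlexNet input size
img = np.repeat(img.reshape((227, 227, 1)), 3, axis=2)   # grayscale -> 3 channels
img -= imagenet_mean                                     # channel-wise mean subtraction
assert img.shape == (227, 227, 3)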
Example #9
def NeuralNetworkTest(pca_option):

    import NN

    NN.NeuralNetworkSimulation(
        NN.nn, processing.linear_pca, processing.overall_training_data, pca_option)

    processing.final_validation = np.array(processing.final_validation)

    FV_features = []
    FV_labels = []

    FV_features, FV_labels = processing.createFeatures_Labels(
        processing.final_validation)

    FV_features_data = None
    FV_labels_data = None

    FV_features_data, FV_labels_data = processing.convertToDataFrame(
        FV_features, FV_labels, processing.column_titles)

    global NN_final_predictions
    if pca_option in ('yes', 'both'):

        transformed_FV = processing.linear_pca.transform(FV_features_data)

        final_predictions = NN.nn.predict(transformed_FV)
        NN_final_predictions = final_predictions

        accuracy = metrics.accuracy_score(final_predictions, FV_labels)
        precision = metrics.precision_score(
            FV_labels, final_predictions, average='micro')
        recall = metrics.recall_score(
            FV_labels, final_predictions, average='micro')

        print('NEURAL NETWORK MODEL FINAL TEST DATA ACCURACY: ', 100 * accuracy)
        print('NEURAL NETWORK MODEL FINAL TEST DATA PRECISION: ', 100 * precision)
        print('NEURAL NETWORK MODEL FINAL TEST DATA RECALL: ', 100 * recall)
        print()

        return accuracy, precision, recall

    else:

        final_predictions = NN.nn.predict(FV_features_data)
        NN_final_predictions = final_predictions

        accuracy = metrics.accuracy_score(final_predictions, FV_labels)
        precision = metrics.precision_score(
            FV_labels, final_predictions, average='micro')
        recall = metrics.recall_score(
            FV_labels, final_predictions, average='micro')

        print('NEURAL NETWORK MODEL FINAL TEST DATA ACCURACY: ', 100 * accuracy)
        print('NEURAL NETWORK MODEL FINAL TEST DATA PRECISION: ', 100 * precision)
        print('NEURAL NETWORK MODEL FINAL TEST DATA RECALL: ', 100 * recall)
        print()

        return accuracy, precision, recall
Example #10
def trainAndSaveNN(train_dl, test_dl, model):
    '''
    Train the Neural Network and save the trained model

    Parameters
    ----------
    train_dl : Training data
    test_dl : Test data
    model : Neural Network model

    Returns
    -------
    None.

    '''
    nn.train_model(train_dl, test_dl, model)
    torch.save(model.state_dict(), 'trainedNN.pt')
Example #11
def split_train(ds, lab, test_ratio=0.3, regression=False, model=None):
    n_samples = len(lab)  # avoid shadowing the builtin all()
    splitter = split.TTSplit(n_samples, 'portion', test=test_ratio)
    tr_idx, te_idx = splitter.split()
    xtr, ytr, xte, yte = ds[tr_idx], lab[tr_idx], ds[te_idx], lab[te_idx]

    if model is None:
        model = NN.FcRegRDKit() if regression else NN.FcRDKit()
    print('start training')
    if regression:
        nn = active.TorchRegressionFold(xtr, ytr, xte, yte, model, 'active', path,
                                        ['percent_of_unlabel', 1],
                                        measure='distance', distance='linear')
    else:
        nn = active.TorchFold(xtr, ytr, xte, yte, model, 'active', path,
                              ['percent_of_unlabel', 1])
    nn.train()
    print('finish training')
Example #12
def test_ModelOut():
    for typ in ModelTypes:
        for i in range(1, 4):
            NNx = NN.clsNN(1, 1, i, Type=typ)
            if typ == "STD":
                assert len(NNx.model) == 1
            else:
                assert len(NNx.model) == i
Example #13
def test_forward(self):
    input_ = np.random.random((5, 3))
    nn = NN.NN(loss_func=loss_fs.MeanSquaredLoss())
    nn.add_layer(
        layers.DenseLayer(n_neurons=5, activation_func=af.Sigmoid()))
    nn.add_layer(
        layers.DenseLayer(n_neurons=7, activation_func=af.Sigmoid()))
    rv = nn.forward(input_)
    assert rv.shape == (5, 7)
Example #14
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--df', default=df_path, metavar='df', help='Dataframe path.')
    parser.add_argument('--metrics', default=True, help='Turn on metrics.')
    args = parser.parse_args()

    df = pd.read_csv(args.df, header=None)
    X = df.loc[:, df.columns != 1].to_numpy()
    Y = df[1].to_numpy()
    Y = np.where((Y == 'M'), 1, 0)
    X = NN.feature_scale(X, mode='standart')
    split_index = round(X.shape[0] * 0.8) + 1
    X_train, X_test = X[:split_index], X[split_index:]
    Y_train, Y_test = Y[:split_index], Y[split_index:]
    nn = NN.NN()
    nn.load_weights()
    nn.predict(X_test)
    if args.metrics:
        nn.calc_metrics(X_test, Y_test)
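
NN.feature_scale(X, mode='standart') is presumably column-wise standardization; a plain-NumPy equivalent (an assumption about the project's implementation, not its actual code):

import numpy as np

def standardize(X):
    # zero mean, unit variance per feature column
    return (X - X.mean(axis=0)) / X.std(axis=0)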
Example #15
def neural_network_with_batch(X_c,Y_c,hidden_layer_list,activation_list,learning_rate,l2_norm,keep_prob=1,max_iter=1000,batch_size=None,\
                              beta1=0.9,beta2=0.999,seed=10,iter_to_print=100,optimization='gd',epsilon=1e-8,verbose=False):
    m = X_c.shape[1]
    nx = X_c.shape[0]
    ny = Y_c.shape[0]
    if batch_size is None:
        batch_size = m
    epochs = list()
    tb = int(math.ceil(float(m) / batch_size))
    for i in range(tb):
        epochs.append((X_c[:, i * batch_size:(i + 1) * batch_size],
                       Y_c[:, i * batch_size:(i + 1) * batch_size]))
    epochs = tuple(epochs)
    np.random.seed(seed)
    if isinstance(keep_prob, (int, float)):
        keep_prob = np.repeat(float(keep_prob), len(hidden_layer_list) + 1)
    keep_prob[len(keep_prob) - 1] = 1
    assert (len(keep_prob) == len(hidden_layer_list) + 1)
    parameter, momentum, RMS = initialize_parameter_rms(
        nx, ny, activation_list, hidden_layer_list)
    cost_series = list()
    for i in range(max_iter):
        for epoch in epochs:
            (X, Y) = epoch
            m = X.shape[1]
            AL, linear_cache = nn.forward_propagation_(X, hidden_layer_list,
                                                       parameter, keep_prob,
                                                       activation_list)
            cost = nn.compute_cost(AL, Y, parameter, l2_norm)
            if (math.isnan(cost)):
                print("Cost went nuts")
                break

            # grads = nn.backward_propagation_(Y, AL, linear_cache, keep_prob)
            grads = nn.backward_propagation_both(Y, AL, linear_cache,
                                                 keep_prob)
            if (optimization == 'gd'):
                parameter = nn.update_parameter(parameter, grads,
                                                learning_rate, l2_norm, m)
            elif (optimization == 'gdm'):
                parameter, momentum = update_parameter_momentum(
                    parameter, grads, momentum, learning_rate, l2_norm, m,
                    beta1, i + 1)
            elif (optimization == 'adam'):
                parameter, momentum, RMS = update_parameter_adam(
                    parameter, grads, momentum, RMS, learning_rate, l2_norm, m,
                    beta1, beta2, i + 1, epsilon)
        # evaluate on the full data with dropout disabled (keep_prob of all ones);
        # the original call omitted the keep_prob argument
        AL_c, _ = nn.forward_propagation_(X_c, hidden_layer_list, parameter,
                                          np.ones(len(hidden_layer_list) + 1),
                                          activation_list)
        cost_t = nn.compute_cost(AL_c, Y_c, parameter, l2_norm)
        cost_series.append(cost_t)
        if (verbose and i % iter_to_print == 0):
            print("Iteration: %s Cost=%.5f" % (i, cost_t))
    cache = (parameter, hidden_layer_list, activation_list)
    AL, _ = nn.forward_propagation_(X_c, hidden_layer_list, parameter,
                                    np.ones(len(hidden_layer_list) + 1),
                                    activation_list)
    return AL, cache, cost_series
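
The epoch construction at the top of the function slices the column-major data matrix into contiguous mini-batches, with a possibly smaller final batch. The same logic in isolation:

import math
import numpy as np

X = np.arange(20).reshape(2, 10)   # 2 features x 10 samples (samples are columns)
batch_size = 4
tb = int(math.ceil(float(X.shape[1]) / batch_size))
batches = [X[:, i * batch_size:(i + 1) * batch_size] for i in range(tb)]
print([b.shape for b in batches])  # [(2, 4), (2, 4), (2, 2)]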
Example #16
def test_backward(self):
    input_ = np.random.random((5, 3))
    output_grad = np.random.random((5, 7))
    nn = NN.NN(loss_func=loss_fs.MeanSquaredLoss())
    nn.add_layer(
        layers.DenseLayer(n_neurons=4, activation_func=af.Sigmoid()))
    nn.add_layer(
        layers.DenseLayer(n_neurons=7, activation_func=af.Sigmoid()))
    nn.forward(input_)
    nn.backward(output_grad)
Example #17
def parse_input(game_style, view):
    splitter = game_style.split(" ")
    # map names to constructors so only the two selected AIs get built
    switcher = {
        "solo": lambda: Solo_Ai(view),
        "minmax": AI.MINMAX,
        "mcts": AI.MCTS,
        "rnd": AI.VariousRnd,
        "NN": NN.BetaOne
    }
    return switcher[splitter[0]](), switcher[splitter[1]]()
Example #18
def parse_input(game_style):
    splitter = game_style.split(" ")
    # map names to constructors so only the two selected AIs get built
    switcher = {
        "solo": AI.SOLO,
        "minmax": AI.MINMAX,
        "mcts": AI.MCTS,
        "NN": NN.BetaOne,
        "File": Network_from_file
    }
    return switcher[splitter[0]](), switcher[splitter[1]]()
Example #19
def build_network(shape, individual):
    ''' Builds a neural net of the input shape using the individual's array of weights. '''
    nn = NN.Network(shape)
    counter = 0
    for layer in nn.weights:
        for node in layer:
            for i in range(len(node)):
                node[i] = individual[counter]
                counter += 1
    return nn
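
The triple loop above is a flat-vector-to-nested-structure copy. A self-contained version over plain lists, with no NN.Network dependency:

weights = [[[0.0, 0.0], [0.0, 0.0]],   # layer 1: 2 nodes x 2 weights each
           [[0.0, 0.0, 0.0]]]          # layer 2: 1 node x 3 weights
individual = [1, 2, 3, 4, 5, 6, 7]
counter = 0
for layer in weights:
    for node in layer:
        for i in range(len(node)):
            node[i] = individual[counter]
            counter += 1
print(weights)  # [[[1, 2], [3, 4]], [[5, 6, 7]]]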
Example #20
def main_loop(pred, detc):
    global frame, NAME, available, cache, rect, correct
    #video path
    path_to_vid = '../video/bill_bush.mp4'
    video_capture = cv2.VideoCapture(path_to_vid)
    size = (int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    still = True
    frame_num = 0
    _, _, le = NN.prepare_data()
    verification_thread = threading.Thread(target=verify_frame,
                                           args=(pred, detc, le),
                                           daemon=True)
    verification_thread.start()
    out = cv2.VideoWriter('../video/bill_bush_2.avi',
                          cv2.VideoWriter_fourcc(*'DIVX'), 30.0, size)
    while still:
        # Capture frame-by-frame
        still, frame_clear = video_capture.read()
        #if new, update frame
        if not cache:
            frame = frame_clear
            cache = True

        #DRAW ON THE FRAME
        if available:
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            if correct:
                cv2.rectangle(frame_clear, (x, y), (x + w, y + h), (0, 255, 0),
                              2)
                cv2.putText(frame_clear, NAME, (x - 10, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            else:
                cv2.rectangle(frame_clear, (x, y), (x + w, y + h), (0, 0, 255),
                              2)
                cv2.putText(frame_clear, NAME, (x - 10, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        # cv2.imshow('Video', frame_clear)

        out.write(frame_clear)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        print(frame_num)
        frame_num += 1
        # time.sleep(0.001)
    # When everything is done, release the capture
    video_capture.release()
    out.release()
    cv2.destroyAllWindows()


# pred, detc = facial_landmarks.load_pred_detec()
# main_loop(pred, detc)
Example #21
    def oneColumn(self, name_column):
        """
        Fill the NaN for one column
        :param df: dataframe
        :param name_column: column name to fill up
        :return: Nothing. Modify self.df_ori
        """

        df = self.df_ori[self.df_ori[name_column].notnull()]
        df = df.fillna(0)
        inter = self.extractXY_values(df, name_column)
        X = inter[0]
        Y = inter[1]
        columns_entry = inter[2]
        ##        print(columns_entry)

        # Extract important coefficients. Just ridge regression
        columns_entry = self.importantCoeff(X, Y, 12, columns_entry, alpha=12)

        x_values = df.loc[:, columns_entry].values
        y_values = df.loc[:, [name_column]].values

        # Normalisation. MinMaxScaler may not be appropriate
        mmsx = MinMaxScaler().fit(x_values)
        X = mmsx.transform(x_values)
        mmsy = MinMaxScaler().fit(y_values)
        Y = mmsy.transform(y_values)

        # handle data missing values
        df_cor = self.df_ori[self.df_ori[name_column].isnull()]
        len_df_cor = len(df_cor)
        len_df_ori = len(self.df_ori)
        ##        print(len_df_cor/len_df_ori)
        if len_df_cor != 0 and len_df_cor / len_df_ori < 0.2:
            df_cor = df_cor.fillna(0)
            x_cor = df_cor.loc[:, columns_entry]
            y_cor = df_cor.loc[:, [name_column]]
            x_cor_values = x_cor.values
            y_cor_values = y_cor.values
            X_cor = mmsx.transform(x_cor_values)
            Y_cor = mmsy.transform(y_cor_values)

            # assign the mean value to the NaN targets, to avoid bias. Only for RBM or Autoencoder
            Y_cor = self.meanValueToNaN(Y, Y_cor)

            # Model learning
            nn = NN.nn(X.shape[1], [10], learning_rate=0.01)
            nn.train(X, Y, X_cor, batch_size=40, n_epoches=15)

            # inverse scaler
            corrected_column = mmsy.inverse_transform(nn.test)

            # correct columns in original dataframe
            self.fillDfOneColumn(df_cor, corrected_column, name_column)
Example #22
def __init__(self, n_networks):
    """
    The PSO object takes an input n_networks, the number of neural networks
    to be initialised.
    networks: a list storing the initialised networks
    global_best_value: initialised as infinity
    global_best_position: gets its shape from the Neural Network's getParams function
    global_best_yHat: initialised as floating point 0. Useful for future plotting of graphs.
    """
    self.neurons = int(
        input("Inform the number of neurons in hidden layer of NN: "))
    self.n_networks = n_networks
    self.networks = [
        NN.NeuralNetwork(NN.x, NN.y, self.neurons)
        for _ in range(self.n_networks)
    ]
    self.global_best_value = float("inf")
    # getParams is described as a function above, so call it before taking .shape
    self.global_best_position = NN.NeuralNetwork(
        NN.x, NN.y, self.neurons).getParams().shape
    self.global_best_yHat = 0
Example #23
def file_check(self):
    """Open file explorer, get file path and load net"""
    filename = tk.filedialog.askopenfilename(
        title="Seleccioni l'arxiu",
        filetypes=(("pickled files", "*.pkl"), ("all files", "*.*")))
    try:
        self.net = NN.load_net(filename)
        print("Successful Load")
        self.root.destroy()
    except Exception as e:
        print("Failed:", e)
Example #24
def eval_MultimgAL(expr, method_name, img_paths, start_ind=0, save_dir=[]):

    m = len(expr.train_paths[0]) - 1
    patch_shape = expr.pars['patch_shape'][:2] + \
        (m*expr.pars['patch_shape'][2],)

    model = NN.create_model(expr.pars['model_name'], expr.pars['dropout_rate'],
                            expr.nclass, expr.pars['learning_rate'],
                            expr.pars['grad_layers'],
                            expr.pars['train_layers'],
                            expr.pars['optimizer_name'], patch_shape)
    model.add_assign_ops()

    method_path = os.path.join(expr.root_dir, method_name)

    Qs = get_queries(expr, method_name)
    qnum = len(Qs)
    imgnum = len(img_paths)

    save_dir = os.path.join(method_path, 'test_scores.txt')

    if start_ind > 0:
        scores = np.loadtxt(save_dir)
    else:
        scores = np.zeros((imgnum, qnum))

    with tf.Session() as sess:
        model.initialize_graph(sess)
        sess.graph.finalize()

        for i in range(start_ind, qnum):
            weights_path = os.path.join(method_path,
                                        'curr_weights_%d.h5' % (i + 1))
            print('Loading weights %s' % weights_path)
            model.perform_assign_ops(weights_path, sess)

            for j in range(imgnum):
                # grid-samples from the j-th image
                expr.test_paths = img_paths[j:j + 1]
                stats_arr = np.zeros((1, 2 * m))
                mask, _ = nrrd.read(expr.test_paths[0][-1])
                for t in range(m):
                    img, _ = nrrd.read(expr.test_paths[0][t])
                    stats_arr[0, 2 * t:2 * (t + 1)] = np.array([
                        np.mean(img[~np.isnan(mask)]),
                        np.std(img[~np.isnan(mask)])
                    ])
                expr.test_stats = stats_arr

                scores[j, i], test_preds = expr.test_eval(model, sess)
                np.savetxt(save_dir, scores)

                print(j, end=',')
            print()
Example #25
def test_eval(self):
    input_ = np.random.random((5, 3))
    true = np.random.random((5, 4))
    nn = NN.NN(loss_func=loss_fs.MeanSquaredLoss())
    nn.add_layer(
        layers.DenseLayer(n_neurons=8, activation_func=af.Sigmoid()))
    nn.add_layer(
        layers.DenseLayer(n_neurons=4, activation_func=af.Sigmoid()))
    loss = nn.eval(input_, true)
    assert loss > 0
    assert list(nn.parameters) != []
    assert list(nn.parameter_gradients) != []
Example #26
def finetune_multimg(expr, model, sess, all_padded_imgs, training_inds):

    s = len(training_inds)
    img_ind_sizes = [len(training_inds[i]) for i in range(s)]
    n = np.sum(img_ind_sizes)
    m = len(all_padded_imgs[0]) - 1
    b = expr.pars['b']
    d3 = expr.pars['patch_shape'][2]

    for t in range(expr.pars['epochs']):
        batch_inds = NN.gen_batch_inds(n, b)

        for i in range(len(batch_inds)):
            """ Preparing the Patches/Labels """

            # batch indices are global indices,
            # extract local indices for each image
            local_inds = patch_utils.global2local_inds(batch_inds[i],
                                                       img_ind_sizes)
            # local indices --> image (voxel) indices
            img_inds = [
                np.array(training_inds[j])[local_inds[j]] for j in range(s)
            ]

            b_patches, b_labels = patch_utils.get_patches_multimg(
                all_padded_imgs, img_inds, expr.pars['patch_shape'],
                expr.train_stats)

            # stitching patches and labels
            b_patches = [
                b_patches[j] for j in range(len(img_inds))
                if len(img_inds[j]) > 0
            ]
            b_patches = np.concatenate(b_patches, axis=0)
            b_labels = [
                b_labels[j] for j in range(len(img_inds))
                if len(img_inds[j]) > 0
            ]
            b_labels = np.concatenate(b_labels)

            # converting to one-hot vectors
            hot_labels = np.zeros((2, len(b_labels)))
            hot_labels[0, b_labels == 0] = 1
            hot_labels[1, b_labels == 1] = 1
            """ Doing an Optimization Iteration """
            # finally we are ready to take
            # optimization step
            sess.run(model.train_step,
                     feed_dict={
                         model.x: b_patches,
                         model.y_: hot_labels,
                         model.keep_prob: model.dropout_rate
                     })
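
The one-hot conversion used above (and again in Example #28) builds a 2 x n label matrix from binary labels; in isolation:

import numpy as np

b_labels = np.array([0, 1, 1, 0, 1])
hot_labels = np.zeros((2, len(b_labels)))
hot_labels[0, b_labels == 0] = 1   # row 0 marks class-0 samples
hot_labels[1, b_labels == 1] = 1   # row 1 marks class-1 samples
print(hot_labels)                  # each column sums to 1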
Example #27
def NN_result_preview(second=False,
                      image_num=None,
                      blur=False,
                      pred=None,
                      detc=None,
                      pos_fals=False):
    print("Load labels")
    if second:
        data = auxilary.read_csv(fileName='../csv_files/embedded_2.csv')
        D = 22
    else:
        data = auxilary.read_csv(fileName='../csv_files/embedded.csv')
        D = 128
    N = len(data)

    data_inputs = (data.iloc[:, :D])
    inputs = np.zeros([N, D])
    inputs = np.array(data_inputs)

    labels = np.zeros([N, 1])
    labels = np.array(data.iloc[:, D])

    if not second:
        embeddings, face_name, human_file_path = face_recognition.face_recognition(
            dataset_path="../dataset/main_data/*/*",
            preview=True,
            image_num=image_num,
            blur=blur)
    else:
        embeddings, face_name, human_file_path = facial_landmarks.test_preview(
            blur=blur,
            dataset_path="../dataset/main_data/*/*",
            pred=pred,
            detc=detc)

    # identicals, similars = NN_results(embeddings, inputs, labels)
    identicals, similars = NN.predict_input(embeddings, second=second)

    idc_paths, idc_names, sim_paths, sim_names, others_paths, others_names = \
        trim_NN_outputs (labels, face_name, identicals, similars, human_file_path, pos_fals = pos_fals)

    show_tests.buttons(identicalls=idc_paths,
                       id_titles=idc_names,
                       similars=sim_paths,
                       sim_titles=sim_names,
                       left_overs=others_paths,
                       left_titles=others_names,
                       orig_image_path=human_file_path,
                       orig_title=face_name,
                       title1="MATCHING",
                       title2="SIMILARS",
                       title3="OTHERS")
Example #28
def finetune(model, sess, expr, padded_imgs, mask, train_inds):
    """Fine-tuning a given model for
    a number of epochs; written mainly
    to be used within querying iterations

    This function basically does same thin
    as `PW_NN.PW_train_epoch_MultiModal`,
    but is little handier and more brief:
    it only uses indices of a single imagel,
    there is no option for saving variables
    in tensorboard, and also the images are
    given as input arguments, hence no need
    to load them separately here
    """

    n = len(train_inds)
    m = len(padded_imgs)
    b = expr.pars['b']
    patch_shape = expr.pars['patch_shape']
    stats = expr.pars['stats']

    for t in range(expr.pars['epochs']):
        # batch-ify the data
        batch_inds = NN.gen_batch_inds(n, b)

        for i in range(len(batch_inds)):
            img_inds = train_inds[batch_inds[i]]
            patches, labels = patch_utils.\
                              get_patches(
                                  padded_imgs,
                                  img_inds,
                                  patch_shape,
                                  True,
                                  mask)
            # one-hot vector for labels
            hot_labels = np.zeros((2, len(labels)))
            hot_labels[0, labels == 0] = 1
            hot_labels[1, labels == 1] = 1

            # normalizing the patches
            for j in range(m):
                patches[:, :, :,
                        j] = (patches[:, :, :, j] - stats[j][0]) / stats[j][1]

            # perform this iteration
            # batch gradient step
            sess.run(model.train_step,
                     feed_dict={
                         model.x: patches,
                         model.y_: hot_labels,
                         model.keep_prob: model.dropout_rate
                     })
Example #29
def XOR():
    print("\nRunning XOR Neural Network:")

    case_0 = [[[0, 0]], [0]]
    case_1 = [[[1, 0]], [1]]
    case_2 = [[[0, 1]], [1]]
    case_3 = [[[1, 1]], [0]]
    case_base = [case_0, case_1, case_2, case_3]

    nn = NN.NeuralNetwork(layer_sizes=[2, 2, 1], learning_rate=0.1)
    nn.Train(10000, case_base, print_interval=100000)
    nn.Test(add_bias=False)
    nn.PlotError()
Example #30
File: Trainer.py  Project: bauhaus93/nn
def __init__(self, nn, trainingSet, initialLearningRate,
             learningModification, learnModFrequency, inputCount,
             outputCount):
    self.nn = nn
    self.trainingSet = NN.PrepareTrainingSet(nn.GetDimensions()[1],
                                             trainingSet)
    self.learningRate = float(initialLearningRate)
    self.learningModification = learningModification
    self.learnModFrequency = learnModFrequency
    self.inputCount = inputCount
    self.outputCount = outputCount
    self.cycles = 0
    self.errors = []
Example #31
def convertNN(neuralNet, formatString):
    assert formatString.count("-") == (getLabel(neuralNet)[1:]).count(
        "-"), "I can't add/remove a whole layer yet"
    l = formatString.split("-")
    nbHiddenLayer = 0
    nbNeuronPerHiddenLayer = 0
    if len(l) >= 3:
        nbHiddenLayer = len(l) - 2
        nbNeuronPerHiddenLayer = int(l[1])
        compareTo = l[1]
        for i in l[1:-1]:
            assert i == compareTo, "I can't have hidden layers of different sizes"

    targetNbNeuronsLayerWithoutBias = [int(s) for s in formatString.split("-")]
    actualNbNeuronsLayerWithoutBias = [
        int(s) for s in (getLabel(neuralNet)[1:]).split("-")
    ]
    outWeights = copy.deepcopy(neuralNet.weights)
    for k in range(len(neuralNet.weights)):
        diffEntree = targetNbNeuronsLayerWithoutBias[
            k] - actualNbNeuronsLayerWithoutBias[k]
        if diffEntree < 0:  # shrink the input side of the matrix
            lastLigne = copy.deepcopy(neuralNet.weights[k][-1, :])
            outWeights[k] = copy.deepcopy(
                neuralNet.weights[k][:targetNbNeuronsLayerWithoutBias[k] + 1])
            outWeights[k][-1, :] = lastLigne
        elif diffEntree > 0:  # grow the input side of the matrix
            lastLigne = copy.deepcopy(neuralNet.weights[k][-1, :])
            for p in range(len(neuralNet.weights[k][-1, :])):
                neuralNet.weights[k][-1, p] = 0
            outWeights[k] = numpy.pad(
                neuralNet.weights[k], ((0, diffEntree), (0, 0)),
                mode='constant'
            )  # rows of zeros were appended, but we want to keep the last row's values (the bias)
            (outWeights[k])[-1, :] = lastLigne
        else:
            outWeights[k] = copy.deepcopy(neuralNet.weights[k])

        diffSortie = targetNbNeuronsLayerWithoutBias[
            k + 1] - actualNbNeuronsLayerWithoutBias[k + 1]
        if diffSortie < 0:  # shrink the output side of the matrix
            outWeights[k] = copy.deepcopy(
                outWeights[k][:, :targetNbNeuronsLayerWithoutBias[k + 1]])
        elif diffSortie > 0:  # grow the output side of the matrix
            outWeights[k] = numpy.pad(outWeights[k], ((0, 0), (0, diffSortie)),
                                      mode='constant')

    out = NN.NeuralNetwork(outWeights[0].shape[0] - 1, nbHiddenLayer,
                           nbNeuronPerHiddenLayer, outWeights[-1].shape[1])
    out.setWeight(copy.deepcopy(outWeights))
    return out
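
The growth branches above rely on numpy.pad with zero constant padding while preserving the bias row. A minimal sketch of that trick on a toy weight matrix:

import numpy

W = numpy.array([[1., 2.],
                 [3., 4.],
                 [9., 9.]])                           # last row plays the bias role
bias = W[-1, :].copy()
W[-1, :] = 0                                          # zero the old bias slot
W2 = numpy.pad(W, ((0, 2), (0, 0)), mode='constant')  # two new zero rows at the bottom
W2[-1, :] = bias                                      # bias becomes the new last row
print(W2)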
Example #32
def main():
    genre_type.find_genre()  # create the genre types column, iterating through each row
    content_rating.find_content_type()  # create the content rating type column, iterating through each row
    readCSV.read_data()  # create feature columns by iterating over rows in the CSV
    normalize.normalize_data(readCSV.numberCritics, readCSV.duration,
                             readCSV.directorFBLikes, readCSV.actor3FBLikes,
                             readCSV.actor2FBLikes, readCSV.actor1FBLikes,
                             readCSV.numberVotedUsers, readCSV.castFBLikes,
                             readCSV.numberUserReviews, readCSV.budget,
                             readCSV.imdbScore, readCSV.movieFBLikes)
    Dataset.get_dataset(
        readCSV.numberCritics, readCSV.duration, readCSV.directorFBLikes,
        readCSV.actor3FBLikes, readCSV.actor2FBLikes, readCSV.actor1FBLikes,
        readCSV.numberVotedUsers, readCSV.castFBLikes,
        readCSV.numberUserReviews, readCSV.budget, readCSV.imdbScore,
        readCSV.movieFBLikes, genre_type.action_type,
        genre_type.adventure_type, genre_type.fantasy_type,
        genre_type.scifi_type, genre_type.thriller_type,
        genre_type.comedy_type, genre_type.family_type, genre_type.horror_type,
        genre_type.war_type, genre_type.animation_type,
        genre_type.western_type, genre_type.romance_type,
        genre_type.musical_type, genre_type.documentary_type,
        genre_type.drama_type, genre_type.history_type,
        genre_type.biography_type, genre_type.mystery_type,
        genre_type.crime_type, content_rating.general_type,
        content_rating.parental_type, content_rating.parentStrong_type,
        content_rating.restrict_type, content_rating.adults_type)
    train = Dataset.Data[:3539]  #training data as 70%
    test = Dataset.Data[3539:]  #test data as 30%
    trainLabel = readCSV.gross[:3539]
    testLabel = readCSV.gross[3539:]
    applySVM()
    applyNB()
    applyLogRegression(train, test, trainLabel, testLabel)
    applyLinearRegression(train, test, trainLabel, testLabel)
    NN.apply_NN(Dataset.Data, readCSV.gross)
Example #33
def evaluate(NNWorking, population, inputs, outputs):
    '''Tests each citizen in the population against a NN topology with inputs
    and outputs to generate a cumulative fitness measurement, which should be
    minimized'''
    for citizen in population:
        citizen[-1] = 0
        NNWorking.SetNNWeights(citizen[:-1])  # Load weights into the NN
        for i in range(len(inputs)):
            NNWorking.SetStartingNodesValues(inputs[i])  # Load inputs into NN
            NNWorking.CalculateNNOutputs()  # Run the NN once
            # Calculate the fitness value and let the citizen track it
            # for j in range(len(NN.GetNNResults())):
            #    citizen[-1] += ((outputs[i][j] - NN.GetNNResults()[j]))**2
            citizen[-1] += NN.calcRelativeError(NNWorking,
                                                inputs, outputs) / len(inputs)
Example #34
def start_thread(inp, activation, out_activ, outp, learn, thresh, mmntm, logger):
    global count
    training_inputs = []
    training_data = []
    count += 1
    
    testNN = NN.main(inp, activation, out_activ, outp, learn, thresh, mmntm)
    print ("DONE TRAINING")
    for i in inp:
        for j in i:
            training_inputs.append(random.randint(0,4)) #create random inputs for testing
        training_data.append(training_inputs)
        training_inputs = []
    for x in training_data:
        testNN.SetStartingNodesValues(x)
        testNN.CalculateNNOutputs()
        logger.info(str(x))
        logger.info(testNN.GetNNResults())
Example #35
File: main_NN.py  Project: Glasssix/Notes
#################################################  
# Title  : Male or Female?
# Method : Back-Propagation Neural Networks
# Author : yang xiaolong  
#################################################  


from NN import *
from autonorm import *
import time

startTime = time.time()
# create a network with two input, one hidden, and one output nodes
n = NN(2, 1, 1)
opts = {'iterations': 50, 'learning rate': 0.25, 'momentum factor': 0.1} 
def loadData():
    te, te_x, te_y = [], [], []
    fileIn = open('data.txt')
    for line in fileIn.readlines():
        lineArr = line.strip().split()
        te_x.append([float(lineArr[0]), float(lineArr[1])])
        te_y.append([float(lineArr[2])])
        te.append([[float(lineArr[0]), float(lineArr[1])], [float(lineArr[2])]])
    return te_x, te_y, te

tex, tey, te = loadData()
tex1 = AutoNorm(tex)
tey1 = tey
te1 = []
for i in range(len(tex1)):
    te1.append([tex1[i], tey1[i]])
Example #36
def train(inputs, outputs, size, generations, threshold, cRate, mRate, printFile=False):
    """The train method creates a neural netwrok from the sets of 
    inputs and outputs. A population vector of size, is initialized 
    with ranodm weight vectors associated with the weights between 
    nodes in the neural network and will be the values being trained.
    Generations is the max number of generations allowed while 
    threshold is the accuracy needed. cRate and mRate are the 
    crossover and mutation rates respectively."""
    global hero
    global OrigAnswers

    OrigAnswers = copy.deepcopy(outputs)
    # set up NN
    EvaluationNN = GA.create_net(inputs, outputs)

    # initialize population of size as random weights of NN
    population = GA.generatePopulation(EvaluationNN, inputs, outputs, size)

    if printFile:
        f = open("DE.csv", "w")
    gen = 0
    trialV = []
    offspringV = []

    # evaluate the entire population
    GA.evaluate(EvaluationNN, population, inputs, outputs)

    # loop until a hero is found or we've reached max generations
    while gen <= generations and hero == 0:
        for i in range(size):
            # mutate with DE/x/1/bin
            trialV = mutate(population, i, mRate)
            # perform binomial crossover
            offspringV = crossover(population[i], trialV, cRate)
            # evaluation of offspring
            GA.evaluate(EvaluationNN, [offspringV], inputs, outputs)
            # selection of better vector
            if population[i][-1] > offspringV[-1]:
                population[i] = offspringV
        population = sorted(population, key=itemgetter(-1))
        # check for hero in population
        if GA.heroFound(population, threshold):
            break
        else:
            print("Training: {:2.2%}".format(population[0][-1]), "{:2.2%}     ".format(gen / generations), end="\r")
            if printFile:
                f.write("%f," % population[0][-1])
            if printFile:
                f.write("\n")
        gen += 1
    # return best hero if max generations is met and hero hasn't been selected.
    # hero = sorted(population, key=itemgetter(-1))[0]  # default to best in
    # population if no hero steps forward
    if printFile:
        f.close()
    if hero == 0:
        gen -= 1
        hero = sorted(population, key=itemgetter(-1))[0]
    EvaluationNN.SetNNWeights(hero[:-1])  # Load hero into NN, prep for usage.

    # Evaluate the hero on the inputs and outputs
    print("Generations: %d" % gen, " " * 20)
    print("Error Relative: {:2.5%}".format(NN.calcRelativeError(EvaluationNN, inputs, OrigAnswers)))
    print("Least Squares: %d" % NN.calcLeastSquaresError(EvaluationNN, inputs, OrigAnswers))
    print("Loss Squared: %d" % NN.calcLossSquared(EvaluationNN, inputs, OrigAnswers))
    # for x in inputs:
    #    EvaluationNN.SetStartingNodesValues(x)
    #    EvaluationNN.CalculateNNOutputs()
    #    print(x, EvaluationNN.GetNNResults(), EvaluationNN.GetNNResultsInt(), OrigAnswers[inputs.index(x)])
    print()

    return EvaluationNN
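
For reference, a generic DE binomial crossover (the "bin" in DE/x/1/bin) under its textbook definition; this is an assumption about what the project's crossover() does, not its actual code:

import random

def binomial_crossover(target, trial, cRate):
    # take each gene from the trial vector with probability cRate,
    # forcing at least one trial gene via j_rand
    j_rand = random.randrange(len(target))
    return [trial[j] if (random.random() < cRate or j == j_rand) else target[j]
            for j in range(len(target))]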
Example #37
trainingdatafile = open('newdata.txt', 'r')

inputdata = readRecords(trainingdatafile)
datasize = len(inputdata)

# Randomly splits the file in 80-20 split for test and train data
trainingbatch, testbatch = train_test_split(inputdata, test_size=0.2)

# Values for the no of hidden, input and output neurons
numHiddenCells = 64
numInputCells = 9
numOutputCells = len(inputdata[0]) - numInputCells


nn = NN.neuralNet(numInputCells, numHiddenCells, numOutputCells)

errTrain = nn.train(trainingbatch)
errPred = 0

for test in testbatch:
	output = nn.predict(test[:numInputCells])
	# print output, np.argmax(test[numInputCells:]) + 1
	if output != np.argmax(test[numInputCells:]) + 1:
		errPred += 1

print errTrain, len(trainingbatch)
print errPred, len(testbatch)


Example #38
                #interneuron
                NN.Matrix([5,1],[5,1],sigmaR=sigmaR),
                NN.Addition([5,1],sigmaR=sigmaR),
                NN.ComponentwiseFunction(),
                # interneuron 2
                NN.Matrix([5,1],[5,1],sigmaR=sigmaR),
                NN.Addition([5,1],sigmaR=sigmaR),
                NN.ComponentwiseFunction(),
                # output
                #Matrix([5,1],[2,1],sigmaR=sigmaR),
                #Addition([2,1],sigmaR=sigmaR) 
                NN.Matrix([5,1],[3,1],sigmaR=sigmaR),
                NN.Addition([3,1],sigmaR=sigmaR) 
                ])
# a nice simple interface
nn = NN.makeStandardNeuralNet(inputDim=2,outputDim=3,interDim=20,nInter=5,sigmaR=sigmaR)
# simple training set in 2D
n = 100
x = np.zeros([2,1,n])
z = np.zeros([2,1,n])
for i in range(n):
    off = np.random.rand() > 0.5
    x[:,:,i] = np.random.randn(2,1) + off*3
    z[0,:,i] = float(off)
    z[1,:,i] = 1.0 - float(off)
    
n = 200
x = np.zeros([2,1,n])
z = np.zeros([3,1,n])
for i in range(n):
    category = np.random.randint(3)
Example #39
    hAx.imshow(I,interpolation='none')
    #raise Exception
    plt.pause(0.01)
    I.shape=I.shape+(1,)
    X[:,:,:,:,i] = I.astype(np.double)/255.0
    # gender
    if f.find('-f')>0:
        Z[0,0,i] = 1.0
        Z[1,0,i] = 0.0
    else:
        Z[0,0,i] = 0.0
        Z[1,0,i] = 1.0

    
nCopies = 4
nn = NN.makeStandardConvolutionalNeuralNet(inputShape=I.shape,outputDim=2,nCopies=nCopies,sigmaR=10.0)
nn.setTrainingData(X,Z)
layersToDraw = [l for l in nn if type(l) == type(NN.ComponentwiseFunction())]
nPlots = len(layersToDraw)
hFig.clf()

hFig2 = plt.figure()


nIter = 10000
nPerDraw = 1
nRepeats = nIter // nPerDraw  # integer division so range() below gets an int
epsilon = 0.0001
epsilon = 0.00001
L = 1.0e10
for repeat in range(nRepeats):    
Example #40
if __name__ == '__main__':
    print "Part 1: Loading Data\n"

    X, y = utils.loadData(conf.FILE_X, conf.FILE_Y)

    print "Part 2: Loading Parameters\n"

    W1, W2 = utils.loadParams(conf.FILE_W1, conf.FILE_W2)
    # Unroll parameters
    W = np.hstack((W1.flatten(0), W2.flatten(0)))
    W = W.reshape((len(W), 1))

    print "Part 3: Compute Cost(Feedforward)\n"

    LEARN_RATE = 0
    J, _ = NN.nnCostFunction(W, conf.INPUT_LAYER_SIZE, conf.HIDDEN_LAYER_SIZE,
                             conf.NUM_LABELS, X, y, LEARN_RATE)
    print ("Cost at parameters (loaded from w1.txt and w2.txt): %f"
           "\n(this value should be about 0.287629)\n") % J

    print "Part 4: Implement Regularization\n"

    LEARN_RATE = 1
    J, _ = NN.nnCostFunction(W, conf.INPUT_LAYER_SIZE, conf.HIDDEN_LAYER_SIZE,
                             conf.NUM_LABELS, X, y, LEARN_RATE)
    print ("Cost at parameters (loaded from w1.txt and w2.txt): %f"
           "\n(this value should be about 0.383770)\n") % J

    print "Part 5: Sigmoid Gradient\n"

    g = Sigmoid.dy_dz(Sigmoid.y(np.array([-1, -0.5, 0, 0.5, 1])))
    print "Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:", g
Example #41
def func(x,*args):
    """evaluate function L=sum_{samples}[E(pmd)-E(ref)]^2.
    
    This will be called from scipy.optimize.fmin_cg().
    The 1st argument x should be 1-D array of variables.
    """
    global _valmin
    
    t0= time.time()
    #.....write parameters to in.params.????? file
    dir= args[0]

    if fmethod in ('test','TEST','check_grad') or \
       not potential in ('linreg','NN'):
        #.....store original file
        os.system('cp '+dir+'/'+parfile+' '+dir+'/'+parfile+'.tmp')
        write_params(dir+'/'+parfile,x)
        #.....run smd in all sample directories
        os.chdir(dir)
        #print os.getcwd(),dir
        if runmode in ('serial','Serial','SERIAL','sequential','single'):
            os.system('./serial_run_smd.sh '+parfile)
        elif runmode in ('parallel','Parallel','PARALLEL'):
            os.system('python ./parallel_run_smd.py '+parfile)
        else:
            print "{0:*>20}: no such run_mode !!!".format(' Error', runmode)
            exit()
        os.chdir(cwd)
        #.....restore original file
        os.system('cp '+dir+'/'+parfile+' '+dir+'/'+parfile+'.current')
        os.system('cp '+dir+'/'+parfile+'.tmp'+' '+dir+'/'+parfile)
        #.....gather smd results
        ergs,frcs=gather_smd_data(dir)
    elif potential in ('linreg'):
        #.....calc ergs and frcs from bases data and x (variables)
        read_bases(dir)
        ergs,frcs=calc_ef_from_bases(x,*args)
    elif potential in ('NN'):
        #.....now it is possible to compute only from bases
        ergs,frcs= NN.calc_ef_from_bases(x,*args)

    #.....calc function value of L
    val= eval_L(ergs,frcs,ergrefs,frcrefs,samples)
    #.....output temporal results
    output_energy_relation(ergs,ergrefs,samples,sample_dirs, \
                               fname='out.erg.pmd-vs-dft.tmp')
    output_force_relation(frcs,frcrefs,samples,sample_dirs, \
                              fname='out.frc.pmd-vs-dft.tmp')

    print
    print ' L value=',val

    if penalty in ('ridge','Ridge','RIDGE') and potential in ('linreg'):
        p= 0.0
        lx= len(x)
        for n in range(lx):
            p += math.sqrt(x[n]**2)
        print ' penalty value=',p*pweight
        val += p*pweight
        print ' total L value=',val

    elif penalty in ('lasso','LASSO') and potential in ('linreg'):
        p= 0.0
        lx= len(x)
        for n in range(lx):
            p += abs(x[n])
        print ' penalty value=',p*pweight
        val += p*pweight
        print ' total L value=',val
    sys.stdout.flush()

    #.....if L value is minimum ever, store this parameter file
    if val < _valmin:
        _valmin= val
        if potential in ('linreg','NN'):
            write_params(dir+'/'+parfile+'.min',x)
        else:
            os.system('cp '+dir+'/'+parfile+'.current' \
                          +' '+dir+'/'+parfile+'.min')
        
    print ' ===> time func: {0:12.3f} sec'.format(time.time()-t0) \
          +', {0:12.3f} sec'.format(time.time()-_init_time)
    return val
Example #42
    sample_dirs.sort()
    if nsmpl != len(sample_dirs):
        print '{0:*>20}: num_samples in in.fitpot is wrong.'.format(' Error')
        exit()
    read_pos()

    #.....initial data
    gather_ref_data(maindir)
    #.....read bases data if needed
    if potential in ('linreg') and not fmethod in ('test','TEST'):
        read_bases(maindir)
        if regularize:
            vars= scale_vars(vars,bmax)
    elif potential in ('NN') and not fmethod in ('test','TEST'):
        NN.init(maindir,params,sample_dirs,samples,nprcs,fmatch \
                ,ergrefs,frcrefs,fmethod,parfile,runmode,rcut,pranges \
                ,vranges)

    #.....1st call of func
    func(vars,maindir)
    if potential in ('linreg') and not fmethod in ('test','TEST'):
        ergs,frcs= calc_ef_from_bases(vars,maindir)
    elif potential in ('NN') and not fmethod in ('test','TEST'):
        ergs,frcs= NN.calc_ef_from_bases(vars)
    else:
        ergs,frcs= gather_smd_data(maindir)

    if fmethod in ('test','TEST') and potential in ('NN'):
        NN.init(maindir,params,sample_dirs,samples,nprcs,fmatch \
                ,ergrefs,frcrefs,fmethod,parfile,runmode \
                ,rcut,pranges,vranges)
Example #43
def train(inputs, outputs, size, participants, victors,
          generations, threshold, cRate, mRate, printFile=False):
    '''The train method takes in a set of inputs and outputs which will be
    compared against a hardcoded NN topology. The size, participants, and
    victors are with regard to tournament selection and elitism selection
    techniques. Generations is the max number of generations allowed while
    threshold is the accuracy needed. cRate and mRate are the rate of
    crossover and rate of mutation respectively. '''
    global hero
    global OrigAnswers
    EvaluationNN = create_net(inputs, outputs)
    population = generatePopulation(EvaluationNN, inputs, outputs, size)
    # Test each citizen and determine initial fitness
    evaluate(EvaluationNN, population, inputs, outputs)
    if printFile: f = open('GA.csv', 'w')
    gen = 0
    children = []
    # loop until a hero is found or we've reached max generations
    while gen <= generations and hero == 0:
        # Select our parents using tournament selection
        parents = tournament(population, participants, victors)
        # Have our parents mate (Crossover)
        children = mate(parents, cRate)
        # Have the children experience the world (Mutate)
        for child in children:
            mutate(child, mRate)
        # Test each child's fitness
        evaluate(EvaluationNN, children, inputs, outputs)
        # We were too prolific, thus children must fight to the death via draft
        # call. Make participants len(children) to have all of them fight.
        # This might not be a good idea, as late generation counts result in not
        # keeping the children.
        children = tournament(children, participants, victors)
        # purging of the population is determined by elitism inverted on fitness
        # level (cowardice is the greater number).
        # Take a number of children equal to the number of tournament victors and
        # reintroduce them to the population
        population = sorted(population + children,
                            key=itemgetter(-1))[:-victors]
        # Determine if a child is a hero (<threshold) and if so, return child
        if heroFound(population, threshold):
            break
        else:
            print("Training: {:2.2%}".format(
                population[0][-1]), "{:2.2%}     ".format(gen / generations), end="\r")
            if printFile: f.write('%f,' % population[0][-1])
            if printFile: f.write('\n')
        gen += 1
    # return best hero if max generations is met and hero hasn't been selected.
    if printFile: f.close()
    if hero == 0:
        gen -= 1
        hero = sorted(population, key=itemgetter(-1))[0]
    EvaluationNN.SetNNWeights(hero[:-1])  # Load hero into NN, prep for usage.

    # Evaluate the hero on the inputs and outputs
    print('Generations: %d' % gen, ' ' * 20)
    print("Error Relative: {:2.5%}".format(NN.calcRelativeError(EvaluationNN, inputs, OrigAnswers)))
    print("Least Squares: %d" % NN.calcLeastSquaresError(EvaluationNN, inputs, OrigAnswers))
    print("Loss Squared: %d" % NN.calcLossSquared(EvaluationNN, inputs, OrigAnswers))
    #for x in inputs:
    #    EvaluationNN.SetStartingNodesValues(x)
    #    EvaluationNN.CalculateNNOutputs()
    #    print(x, EvaluationNN.GetNNResults(), EvaluationNN.GetNNResultsInt(), OrigAnswers[inputs.index(x)])
    print()

    return EvaluationNN
Example #44
def costFunc(p):
    return NN.nnCostFunction(p, conf.INPUT_LAYER_SIZE,
                             conf.HIDDEN_LAYER_SIZE, conf.NUM_LABELS,
                             X, y, conf.PART8_LEARN_RATE)
Example #45
import weather
import NN as nn 
get_pm5_prediction = nn.setup()
# SAMPLE usage, Delete before using it as library
def predictPollution(precipitation_prob, relative_humidity, temp, wind_direction, wind_speed):
    O3PredictedVal = 5.03704930e+01 + (precipitation_prob * 9.66895471e-02) + (relative_humidity * -2.99780572e-03) + (temp * -2.26017118e-01) + (wind_direction * -8.96663780e-03) + (wind_speed * 9.98339351e+00)
    PM25PredictedVal = 1.36006991e+01 + (temp * -9.32461073e-02) + (wind_direction * -3.35510810e-04) + (wind_speed * -7.50369156e-01)
    nnPrediction = get_pm5_prediction(TMP=temp, WDIR=wind_direction, WSPD=wind_speed)
    # 3.6 is the average
    if abs(nnPrediction - PM25PredictedVal) > 7.2:
        if abs(nnPrediction - 3.6) > abs(PM25PredictedVal - 3.6):
            return O3PredictedVal, PM25PredictedVal
        else:
            return O3PredictedVal, nnPrediction
    return O3PredictedVal, (nnPrediction + PM25PredictedVal) / 2


def pollutionAPi(lat, lon, offset):
    return predictPollution(*weather.get_weather(lat, lon, offset))

# offset -> [0, 2]: 0 means now, 1 means one hour from now, and 2 means two hours from now
print(pollutionAPi(51.0123, 0.3, 0))
Example #46
    return tmp

'''
Convert neural network data to game situation.
'''
def NN2board(li):
    tmp = ["X" if x==1 else ("O" if x==-1 else " ") for x in li]
    tmp = np.array(tmp)
    tmp.shape = (7,6)
    tmp = tmp.T
    return tmp

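NN2board decodes a flat 42-entry vector into a 6 x 7 Connect-Four grid (the assignment reshapes to 7 x 6, then transposes). A quick usage sketch, assuming numpy is imported as np as in the function body:

cells = [1, -1, 0] * 14      # 42 encoded cells
board = NN2board(cells)      # rows of "X" / "O" / " "
print(board.shape)           # (6, 7)
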
################################################################

try:
    nn = NN.openNN()
    print "NN successfully opened"

except Exception:
    print "not able to open NN"
    print "create new one"

    inp = []
    out = []

    fobj = open("database.txt")
    counter=0

    for line in fobj:
        #print line.rstrip()
        #displayStr(line.rstrip())
Example #47
def train_test():
    global cRate, mRate, threshold, generations, size, participants, victors, inFile, algo, dataset, resultsFile
    inputs = []
    outputs = []
    evolve()
    
    resultsFile.write("DATASET: " + dataset + "\n")
    #resultsFile.write("ALGORITHM | Generations | Size | Participants | Victors | mRate | cRate | Threshold \n")
    #resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " +
    #          str(size) + "  |     " + str(participants) + "       |    " + str(victors) + 
    #          "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")

    dataIn = dataHandler()
    inputs = dataIn[0]
    outputs = dataIn[1]
    testInput = []
    testOutput = []
    learnrate = 0.3
    momentum = 0.5
    # Need 20% of inputs for testing
    for i in range((int(len(inputs)*0.8)+1), len(inputs)):
        x = random.choice(inputs)
        testInput.append(x)
        testOutput.append(outputs[inputs.index(x)])
        del outputs[inputs.index(x)]
        del inputs[inputs.index(x)]
    resultsFile.write("\nTest inputs: \n")
    for i in range(len(testInput)):
        resultsFile.write("%s " % testInput[i])
    resultsFile.write("\nTest expected outputs: \n")
    for i in range(len(testOutput)):
        resultsFile.write("%s " % testOutput[i])
    # Which algorithm gets chosen to run
    if algo in 'G':
        print("DOING GA TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | Size | Participants | Victors | mRate | cRate | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " + str(size) + "  |     " + str(participants) + "       |    " + str(victors) + "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")
        testNN = GA.train(inputs, outputs, size, participants, victors, generations, threshold, cRate, mRate)
    elif algo in 'E':
        print("DOING ES TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | Size | Participants | Victors | mRate | cRate | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " + str(size) + "  |     " + str(participants) + "       |    " + str(victors) + "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")
        testNN = ES.train(inputs, outputs, size, participants, victors, generations, threshold, cRate, mRate)
    elif algo in 'D':
        print("DOING DE TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | Size | mRate | cRate | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " +  str(size) + "    |  " + str(mRate) + "  |  " + str(cRate) + "  |   " + str(threshold) + "     \n")
        testNN = DE.train(inputs, outputs, size, generations, threshold, cRate, mRate)
    elif algo in 'B':
        print("DOING BP TRAINING...")
        resultsFile.write("\nALGORITHM | Generations | learnrate | momentum | Threshold \n")
        resultsFile.write("   " + str(algo) + "      |     " + str(generations) + "      |  " + str(learnrate) + "  |  " + str(momentum) + "  |   " + str(threshold) + "     \n")
        testNN = NN.main(inputs, [['S','S','S'], ['S','S']], ['S'], outputs, generations, learnrate, threshold, momentum)
    else:
        print("Unrecognized algorithm!")
        sys.exit()
    # Print test input/expected output - could be made prettier in a table
    # Start testing testNN
    for x in testInput:
        resultsFile.write("\nSet starting node vals\n")
        resultsFile.write("%s \n" % testNN.SetStartingNodesValues(x))
        testNN.CalculateNNOutputs()
        resultsFile.write("\nTest Input: " + str(x) + "\n")
        resultsFile.write("\nTest results: %s\n" % testNN.GetNNResults())
    resultsFile.write("\nRelative Error: {:2.2%} \n".format(NN.calcRelativeError(testNN, testInput, testOutput)))
    resultsFile.write("\nLeast Squares Error: %s \n" % NN.calcLeastSquaresError(testNN, testInput, testOutput))
    resultsFile.write("\nLoss Squared Error: %s \n" % NN.calcLossSquared(testNN, testInput, testOutput))
    resultsFile.write("\nPercent Misidentified: {:2.2%} \n".format(NN.calcPercentIncorrect(testNN, testInput, testOutput)))
    resultsFile.close()
Example #48
  print('Attempting to open {}.'.format(filename))
  im = readImage(filename)
  if (max(im.size) > 600):
    im = im.resize((int(600*(float(im.size[0])/max(im.size))), int(600*(float(im.size[1])/max(im.size)))))
  print('Opened.')

  print('Attempting to stipple...')
  cellSize = 2

  # Create a stippled version of the image; limit 6000 px.
  lst = stipple(im, cellSize, 0)
  while (len(lst) > 6000):
    cellSize += 1
    lst = stipple(im, cellSize, 0)
  lst = stipple(im, cellSize, 8)
  print('There are {} points.'.format(len(lst)))
  print('Stippled!')

  print('Attempting TSP with naive NN...')
  lst = NN.tsp(lst)

  print('Now converting to list of segments...')
  segSet = createSegSet(lst)

  ## Let's make sure all of our segments share a point...
  print('Correcting any overlaps...')
  drawSegSet(segSet, im.size, 'start.jpg')
  segSet = correct(segSet, im)
  drawSegSet(segSet, im.size, 'end.jpg')
  print('Done.')