Example #1
def read_landsat():
    array = []
    # Each line holds space-separated features followed by the class label.
    with open("database/landsat.txt", "r") as f:
        for line in f.read().splitlines():
            lista = line.split(" ")
            classe = lista.pop()  # last column is the class label
            parameters = [float(i) for i in lista]
            array.append(Sample(parameters, classe))
    return Dataset("landsat", array)
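
A minimal sketch of the Sample and Dataset classes these readers rely on (their definitions are not shown on this page); attribute names other than parameters and samples are assumptions:

# Hypothetical stand-ins: the real project may define richer classes.
class Sample:
    def __init__(self, parameters, classification):
        self.parameters = parameters          # list of float features
        self.classification = classification  # class label


class Dataset:
    def __init__(self, name, samples):
        self.name = name        # dataset name, e.g. "landsat"
        self.samples = samples  # list of Sample objects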
Example #2
def read_database(name, index):
    array = []
    # Generic reader: comma-separated features with the class label at `index`.
    with open("database/" + name + ".txt", "r") as f:
        for line in f.read().splitlines():
            lista = line.split(",")
            classe = lista.pop(index)  # remove the class column
            parameters = [float(i) for i in lista]
            array.append(Sample(parameters, classe))
    return Dataset(name, array)
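
A hypothetical call to the generic reader above, assuming a comma-separated file database/iris.txt whose class label sits in the fifth column:

# Hypothetical usage: the dataset name and label index are only illustrative.
iris = read_database("iris", 4)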
Example #3
def read_yeast():
    array = []
    with open("database/yeast.data", "r") as f:
        for line in f.read().splitlines():
            lista = line.split()
            classe = lista.pop()  # last column is the class label
            lista.pop(0)          # drop the sequence-name column
            parameters = [float(i) for i in lista]
            array.append(Sample(parameters, classe))
    return Dataset("yeast", array)
Example #4
def read_xls_database(name):
    df = pd.read_excel("database/" + name + ".xls")
    array = []
    # DataFrame.as_matrix() was removed from pandas; to_numpy() replaces it.
    nparray = df[1:].to_numpy()
    for infos in nparray:
        classe = infos[-1]  # last column is the class label
        parameters = [float(i) for i in infos[:-1]]
        array.append(Sample(parameters, classe))
    return Dataset(name, array)
Example #5
def cluster_centers_pcm(self, numero_de_centros, vizinhos):
    # Build the feature matrix with samples as columns, as skfuzzy's cmeans expects.
    parameters = np.transpose(
        np.array([k.parameters for k in self.initial_dataset.samples]))
    cntr, u, u0, d, jm, p, fpc = fuzzy.cluster.cmeans(
        parameters, numero_de_centros, 2.0, 0.005, 2000)
    new_samples = []
    # Keep the `vizinhos` samples closest to each cluster center.
    for k in cntr:
        amostras = self.get_k1_closest(k, vizinhos)
        for sample_pega in amostras:
            new_samples.append(sample_pega)
    self.clustered_dataset = Dataset("Clustered", new_samples)
Example #6
# Add the cost and accuracy to the summary
tf.summary.scalar('loss', cost)
tf.summary.scalar('accuracy', accuracy)

# Merge all summaries together
merged_summary = tf.summary.merge_all()

path = "../dqn"  # The path to save our model to.

num_epochs = 100
batch_size = 32

saver = tf.train.Saver()

with tf.Session() as sess:
    dset = Dataset()
    image_count = dset.getNumExamples()
    # Initialize all variables
    sess.run(tf.global_variables_initializer())
    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(path)
    saver.restore(sess, ckpt.model_checkpoint_path)
    # Loop over the number of epochs
    for epoch in range(num_epochs):

        start_time = time.time()
        train_accuracy = 0

        for batch in range(image_count // batch_size):
            # Run the optimizer using this batch of training data.
            dataBatch = dset.next_batch(batch_size)
Example #7
def split_train_test(dataset, i):
    # Hold fold i out as the test set and train on the remaining folds.
    test = dataset[i]
    train_dataset = Dataset(
        "Treino", array_of_samples_to_db([dataset[:i] + dataset[i + 1:]]))
    test_dataset = Dataset("Teste", test)
    return test_dataset, train_dataset
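
A hypothetical leave-one-fold-out loop built on split_train_test, assuming dataset is a list of sample folds:

# Hypothetical cross-validation driver; training and evaluation code is omitted.
for i in range(len(dataset)):
    test_dataset, train_dataset = split_train_test(dataset, i)
    # ... train on train_dataset, evaluate on test_dataset ...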
Example #8
    else:
        print("Loading configuration from file %s" % inputfile)
        print()

        inputParser = InputParser(inputfile)
        dimensions = inputParser.get_dimensions()
        clusters = inputParser.get_clusters()
        export_name = inputParser.get_export_name()

        print("Dimensions = %s" % dimensions)
        print("Cluster count = %s" % len(clusters))
        for cluster in clusters:
            print(" - %s" % cluster)
        print()

        dataset = Dataset(dimensions, clusters, export_name)
        print("Generating random values for clusters...")
        dataset.generate_values()
        print("Balancing clusters... (this may take a while)")
        dataset.balance_clusters()
        print()

        filewriter = FileWriter(dataset)
        print("Writing SOMToolbox files...")
        filewriter.export_for_somtoolbox()

        if 1 <= dimensions <= 2 and not quiet_mode:
            print()
            print("Input has <= 2 dimensions - showing plot")
            plotter = Plotter(dataset)
            plotter.plot()
Example #9
                # using teacher forcing
                dec_input = tf.expand_dims(targ[:, t], 1)

        batch_loss = (loss / int(targ.shape[1]))

        variables = encoder.trainable_variables + decoder.trainable_variables

        gradients = tape.gradient(loss, variables)

        optimizer.apply_gradients(zip(gradients, variables))

        return batch_loss

    EPOCHS = 5
    steps_per_epoch = 5
    dataset = Dataset()

    for epoch in range(EPOCHS):
        start = time.time()

        enc_hidden = encoder.initialize_hidden_state()
        enc_cell = encoder.initialize_cell_state()
        total_loss = 0

        for (batch, (inp, targ)) in enumerate(dataset(steps_per_epoch)):
            batch_loss = train_step(inp, targ, enc_hidden, enc_cell)
            total_loss += batch_loss

            if batch % 1 == 0:
                print('Epoch {} Batch {} Loss {:.4f}'.format(
                    epoch + 1, batch, batch_loss.numpy()))