Code example #1
def get_act_map(x, y, batch_size, sess, model, w_plus):
    """
    Computes the class activation maps
    :param x: input images of size (#imgs, args.img_h, args.img_w, 1)
    :param y: corresponding labels of size (#imgs, args.n_cls)
    :param batch_size: batch size
    :param sess: Tensorflow session
    :param model: Full network model
    :param w_plus: weights for the positive class (to be used in the loss function)
    :return: array of class activation maps, (#imgs, 64, args.n_cls)
    """
    model.is_train = False
    x, y = randomize(x, y)
    step_count = int(len(x) / batch_size)
    all_features = np.zeros((0, 64, args.n_cls))
    for step in range(step_count):
        start = step * batch_size
        end = (step + 1) * batch_size
        x_batch, y_batch = get_next_batch(x, y, start, end)
        feed_dict_batch = {
            model.x: x_batch,
            model.y: y_batch,
            model.w_plus: w_plus
        }
        features = sess.run(model.get_cls_act_map, feed_dict=feed_dict_batch)
        all_features = np.concatenate((all_features, features), axis=0)
    return all_features
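Note: the snippets on this page call small data helpers such as randomize and get_next_batch from each project's local utils module, whose source is not shown here. A minimal sketch of what these two helpers presumably look like, inferred from how they are called (an assumed implementation, not the actual utils code):

import numpy as np


def randomize(x, y):
    """Shuffle x and y in unison along the first axis (assumes numpy arrays)."""
    permutation = np.random.permutation(len(x))
    return x[permutation], y[permutation]


def get_next_batch(x, y, start, end):
    """Slice one mini-batch out of the (already shuffled) arrays."""
    return x[start:end], y[start:end]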
Code example #2
def train(in_file):
    xvals, yvals = utils.load_data(in_file)
    xvals, yvals = utils.randomize(xvals, yvals)
    network = build_network()
    model = tflearn.DNN(network)
    model.fit(xvals, yvals, n_epoch=200, validation_set=0.2)
    model.save('circle.tflearn')
Code example #3
def train(in_file):
    xvals, yvals = utils.load_hot(in_file)
    xvals, yvals = utils.randomize(xvals, yvals)

    network = build_network()
    model = tflearn.DNN(network)
    model.fit(xvals, yvals, n_epoch=400, validation_set=0.2, show_metric=True)
    model.save('complicated.tflearn')
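build_network() is defined elsewhere in these projects and is not shown. A minimal tflearn sketch of such a network factory, purely illustrative (the input shape and layer sizes are assumptions, not the projects' actual architectures):

import tflearn


def build_network():
    # Hypothetical two-class classifier; the real build_network() is not shown.
    net = tflearn.input_data(shape=[None, 2])
    net = tflearn.fully_connected(net, 32, activation='relu')
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net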
Code example #4
    def __init__(self, sizex, sizey, sizez, pointsLen, stokLen, stokR):
        self.vertex = [sizex, sizey, sizez]

        self.stoks = [
            Stok(
                randomize(sizex, sizey, sizez, self.is_normal_positioned_stok),
                stokR) for _ in range(0, stokLen)
        ]

        self.points = [
            Point(randomize(sizex, sizey, sizez, self.is_normal_positioned))
            for _ in range(0, pointsLen)
        ]
        print(self.stoks)
        print(self.points)

        self.stok_pos = [sizex / 2, sizey / 2, sizez / 2]
        self.R = stokR
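In this project, randomize is a different helper from the array-shuffling one above: it receives box dimensions plus a placement predicate and returns a single 3-D position. A rough, heavily hedged sketch (rejection sampling and the predicate's argument are assumptions; the real helper is not shown):

import random


def randomize(sizex, sizey, sizez, predicate):
    # Hypothetical: draw uniform positions inside the box until the
    # predicate (assumed to take the candidate position) accepts one.
    while True:
        pos = (random.uniform(0, sizex),
               random.uniform(0, sizey),
               random.uniform(0, sizez))
        if predicate(pos):
            return pos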
Code example #5
File: tester_icf.py Project: mataevs/persondetector
def train(classifier_out_name, noInitialFeatures, noWantedFeatures, noEstimators):
    posImages = utils.getFullImages(
        "/home/mataevs/ptz/INRIAPerson/train/pos",
        "/home/mataevs/ptz/positive")

    posImages = utils.randomize(posImages)

    negImages = utils.getFullImages(
        "/home/mataevs/ptz/INRIAPerson/train/neg",
        "/home/mataevs/ptz/negative")
    negImages = utils.randomize(negImages)

    print(len(posImages))
    print(len(negImages))

    c = Classifier()
    print "Starting training"
    c.train(posImages, negImages, initialFeatures=noInitialFeatures, wantedFeatures=noWantedFeatures, noEstimators=noEstimators)
    print "Finished training"
    c.saveClassifier(classifier_out_name)
Code example #6
def compute_features(x,
                     y,
                     batch_size,
                     sess,
                     model,
                     w_plus,
                     after_pooling=True):
    """
    Feeds the data to the network and extracts the features from the last layers
    :param x: input images of size (#imgs, args.img_h, args.img_w, 1)
    :param y: corresponding labels of size (#imgs, args.n_cls)
    :param batch_size: batch size
    :param sess: Tensorflow session
    :param model: Full network model
    :param w_plus: weights for the positive class (to be used in the loss function)
    :param after_pooling: boolean.
    If True, computes the features after the final global average pooling layer (#imgs, 2048)
    Else, computes the features before the pooling, (#imgs, 8, 8, 2048)
    :return: array of extracted features, shaped as described above
    """
    model.is_train = False
    x, y = randomize(x, y)
    step_count = int(len(x) / batch_size)
    # Pick the tensor to fetch and the feature shape once, instead of
    # duplicating the batching loop in both branches
    if after_pooling:
        features_op, all_features = model.get_features, np.zeros((0, 2048))
    else:
        features_op, all_features = model.get_vol_features, np.zeros(
            (0, 8, 8, 2048))
    for step in range(step_count):
        start = step * batch_size
        end = (step + 1) * batch_size
        x_batch, y_batch = get_next_batch(x, y, start, end)
        feed_dict_batch = {
            model.x: x_batch,
            model.y: y_batch,
            model.w_plus: w_plus
        }
        features = sess.run(features_op, feed_dict=feed_dict_batch)
        all_features = np.concatenate((all_features, features), axis=0)
    return all_features
Code example #7
def train(classifier_out_name, noInitialFeatures, noWantedFeatures,
          noEstimators):
    posImages = utils.getFullImages("/home/mataevs/ptz/INRIAPerson/train/pos",
                                    "/home/mataevs/ptz/positive")

    posImages = utils.randomize(posImages)

    negImages = utils.getFullImages("/home/mataevs/ptz/INRIAPerson/train/neg",
                                    "/home/mataevs/ptz/negative")
    negImages = utils.randomize(negImages)

    print(len(posImages))
    print(len(negImages))

    c = Classifier()
    print "Starting training"
    c.train(posImages,
            negImages,
            initialFeatures=noInitialFeatures,
            wantedFeatures=noWantedFeatures,
            noEstimators=noEstimators)
    print "Finished training"
    c.saveClassifier(classifier_out_name)
Code example #8
def is_stok(self, point: Point):
    if dist(point.x, point.y, point.z, *self.stok_pos) < self.R:
        self.accelerator.append(point.counter)
        self.pDrawer.append(point.counter)
        if len(self.pDrawer) >= 5:
            self.pDrawer.change_p(self.pDrawer.p + 0.1)
        if self.pDrawer.p >= 0.89:
            self.pDrawer.draw()
            exit(0)
        point.set_stok(True)
        pos = randomize(*self.vertex, self.is_normal_positioned)
        point.x = pos[0]
        point.y = pos[1]
        point.z = pos[2]
    else:
        point.set_stok(False)
Code example #9
import numpy as np
import matplotlib.pyplot as plt
# boston_housing and Sequential are used below; the original file's import
# path is not shown, the tensorflow.keras packaging is assumed here
from tensorflow.keras.datasets import boston_housing
from tensorflow.keras.models import Sequential
from utils import randomize

# Hyper-parameters
EPOCHS = 500
NUM_HIDDEN_UNITS = 64
LEARNING_RATE = 0.001
BATCH_SIZE = 32

# Load the Boston Housing Prices dataset
(X_train, y_train), (X_test, y_test) = boston_housing.load_data()
num_features = X_train.shape[1]

# Shuffle the training set
X_train, y_train = randomize(X_train, y_train)

print("Train data size -> input: {}, output: {}".format(
    X_train.shape, y_train.shape))
print("Test data size: -> input: {}, output: {}".format(
    X_test.shape, y_test.shape))

# Normalize features
# Test data is *not* used when calculating the mean and std
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std

# Build the model
model = Sequential()
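The snippet breaks off right after the empty Sequential model is created. A plausible continuation for this regression setup, reusing the hyper-parameters defined above (the layer layout and optimizer choice are assumptions):

from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop

# Hypothetical continuation; the original model definition is cut off here.
model.add(Dense(NUM_HIDDEN_UNITS, activation='relu',
                input_shape=(num_features,)))
model.add(Dense(NUM_HIDDEN_UNITS, activation='relu'))
model.add(Dense(1))  # single output: the predicted house price
model.compile(optimizer=RMSprop(LEARNING_RATE), loss='mse', metrics=['mae'])
model.fit(X_train, y_train, epochs=EPOCHS, batch_size=BATCH_SIZE,
          validation_split=0.2, verbose=0)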
Code example #10
File: main.py Project: royanshul/easy-tensorflow
# The snippet starts mid-statement; `output_logits` is a guessed name for
# the network output being compared against the labels
correct_prediction = tf.equal(tf.argmax(output_logits, 1),
                              tf.argmax(y, 1),
                              name='correct_pred')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                          name='accuracy')

# Creating the op for initializing all variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    global_step = 0
    # Number of training iterations in each epoch
    num_tr_iter = int(len(y_train) / batch_size)
    for epoch in range(epochs):
        print('Training epoch: {}'.format(epoch + 1))
        x_train, y_train = randomize(x_train, y_train)
        for iteration in range(num_tr_iter):
            global_step += 1
            start = iteration * batch_size
            end = (iteration + 1) * batch_size
            x_batch, y_batch = get_next_batch(x_train, y_train, start, end)
            x_batch = x_batch.reshape((batch_size, timesteps, num_input))
            # Run optimization op (backprop)
            feed_dict_batch = {x: x_batch, y: y_batch}
            sess.run(optimizer, feed_dict=feed_dict_batch)

            if iteration % display_freq == 0:
                # Calculate and display the batch loss and accuracy
                loss_batch, acc_batch = sess.run([loss, accuracy],
                                                 feed_dict=feed_dict_batch)
Code example #11
File: __init__.py Project: faridsaud/ML-Snippets
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from utils import randomize, draw_learning_curves

data = pd.read_csv('data.csv')
X = np.array(data[['x1', 'x2']])
y = np.array(data['y'])

# Fix random seed
np.random.seed(55)


# TODO: Uncomment one of the three classifiers, and hit "Test Run"
# to see the learning curve. Use these to answer the quiz below.

### Logistic Regression
estimator = LogisticRegression()

### Gradient Boosting
#estimator = GradientBoostingClassifier()

### Support Vector Machine
#estimator = SVC(kernel='rbf', gamma=1000)


X2, y2 = randomize(X, y)

draw_learning_curves(X, y, X2, y2, estimator, 10)
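draw_learning_curves is another unshown utils helper. A sketch of how such a function is typically built on sklearn's learning_curve (the plotting details, and the assumption that the shuffled copy (X2, y2) is the data actually scored, are guesses):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve


def draw_learning_curves(X, y, X2, y2, estimator, num_trainings):
    # Hypothetical: score the estimator on increasingly large subsets
    # of the shuffled data and plot train vs. cross-validation scores
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X2, y2, train_sizes=np.linspace(0.1, 1.0, num_trainings))
    plt.plot(train_sizes, train_scores.mean(axis=1), label='Training score')
    plt.plot(train_sizes, test_scores.mean(axis=1), label='CV score')
    plt.xlabel('Training set size')
    plt.ylabel('Score')
    plt.legend()
    plt.show()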
Code example #12
File: network.py Project: rsarai/MLP_Prediction
def train(network,
          train_base,
          test_base,
          val_base,
          batch_size=20,
          num_epochs_without_change=100,
          printing=True):
    start_time = datetime.datetime.now()
    best_loss = np.inf
    Stop = False
    h = 0
    num_error_starvation = 0
    lenBase = len(train_base[0])
    maxLen = math.ceil(lenBase / float(batch_size))
    labels = ["Por neuronio", "MSE", "MAPE"]
    oldLabels = [[], [], []]
    oldValLabels = [[], [], []]
    oldTrainLabels = [[], [], []]

    directory = path_graphs
    if not os.path.exists(directory):
        os.makedirs(directory)

    while (not Stop):

        epoch_start = datetime.datetime.now()

        train_elems = train_base[0]
        train_labels = train_base[1]

        # randomize is assumed to shuffle both lists in place here
        # (its return value is unused)
        randomize(train_elems, train_labels)

        loss = [0, 0]

        for x in range(0, maxLen):
            if (printing):
                updateBar(x, maxLen, loss)
            x_batch = train_elems[batch_size * x:batch_size * (x + 1)]
            y_batch = train_labels[batch_size * x:batch_size * (x + 1)]

            train_loss = network.train_on_batch(x_batch, y_batch)
            for index in range(0, len(train_loss) - 1):
                loss[index] += train_loss[index + 1]
        if (printing):
            print()
        results_train = network.predict(train_elems)
        tLoss = calculateErrorPerNeuron(results_train, train_labels)
        for index in range(0, len(tLoss)):
            oldTrainLabels[index].append(tLoss[index])

        test_loss = test_network(test_base, network)
        for i in range(0, len(test_loss)):
            oldLabels[i].append(test_loss[i])

        val_loss = test_network(val_base, network)
        for i in range(0, len(val_loss)):
            oldValLabels[i].append(val_loss[i])

        if (printing):
            print("Treino -> %d  Epoch" % (h + 1))
        newError = trysave(val_loss, network, h, best_loss, printing)
        if (newError == best_loss):
            num_error_starvation += 1
        else:
            best_loss = newError
            num_error_starvation = 0

        elapsed_time = datetime.datetime.now() - start_time
        epoch_time = datetime.datetime.now() - epoch_start
        if (printing):
            print("\tnum no changes %d\n\ttotal time: %s %s" %
                  (num_error_starvation, epoch_time, elapsed_time))

        for i in range(0, len(labels)):
            plt.plot(oldTrainLabels[i], 'r', label='Treino')
            plt.plot(oldLabels[i], 'b', label='Teste')
            plt.plot(oldValLabels[i], 'g', label='Validação')
            plt.ylabel(labels[i])
            plt.savefig(os.path.join(path_graphs, labels[i] + '.png'))
            plt.clf()
        h += 1
        Stop = num_error_starvation >= num_epochs_without_change
    return best_loss, h
Code example #13
def train(model):
    x_train, y_train, x_valid, y_valid = load_data(dataset=args.dataset,
                                                   mode='train')
    print('Data set Loaded')
    num_train_batch = int(y_train.shape[0] / args.batch_size)
    if not os.path.exists(args.checkpoint_path + args.dataset):
        os.makedirs(args.checkpoint_path + args.dataset)

    with tf.Session() as sess:
        if args.restore_training:
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(args.checkpoint_path +
                                                 args.dataset)
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Model Restored')
            start_epoch = int(str(ckpt.model_checkpoint_path).split('-')[-1])
            fd_train, fd_val, best_loss_val = load_and_save_to(
                start_epoch, num_train_batch)
        else:
            saver = tf.train.Saver(tf.global_variables())
            tf.global_variables_initializer().run()
            print('All variables initialized')
            fd_train, fd_val = save_to()
            start_epoch = 0
            best_loss_val = np.infty
        print('Start Training')
        acc_batch_all = loss_batch_all = np.array([])
        train_writer = tf.summary.FileWriter(args.log_dir + args.dataset,
                                             sess.graph)
        for epoch in range(start_epoch, args.epoch):
            print(
                '_____________________________________________________________________________'
            )
            print('Training Epoch #{}'.format(epoch + 1))
            x_train, y_train = randomize(x_train, y_train)
            for step in range(num_train_batch):
                start = step * args.batch_size
                end = (step + 1) * args.batch_size
                global_step = epoch * num_train_batch + step
                x_batch, y_batch = get_next_batch(x_train, y_train, start, end)
                feed_dict_batch = {
                    model.X: x_batch,
                    model.Y: y_batch,
                    model.mask_with_labels: True
                }
                if not (global_step % args.tr_disp_sum):
                    _, acc_batch, loss_batch, summary_tr = sess.run(
                        [
                            model.train_op, model.accuracy, model.total_loss,
                            model.summary_now
                        ],
                        feed_dict=feed_dict_batch)
                    train_writer.add_summary(summary_tr, global_step)
                    acc_batch_all = np.append(acc_batch_all, acc_batch)
                    loss_batch_all = np.append(loss_batch_all, loss_batch)
                    mean_acc = np.mean(acc_batch_all)
                    mean_loss = np.mean(loss_batch_all)
                    summary_tr = tf.Summary(value=[
                        tf.Summary.Value(tag='Accuracy', simple_value=mean_acc)
                    ])
                    train_writer.add_summary(summary_tr, global_step)
                    summary_tr = tf.Summary(value=[
                        tf.Summary.Value(tag='Loss/total_loss',
                                         simple_value=mean_loss)
                    ])
                    train_writer.add_summary(summary_tr, global_step)

                    fd_train.write(
                        str(global_step) + ',' + str(mean_acc) + ',' +
                        str(mean_loss) + "\n")
                    fd_train.flush()
                    print(
                        "  Step #{0}, training loss: {1:.4f}, training accuracy: {2:.01%}"
                        .format(global_step, mean_loss, mean_acc))
                    acc_batch_all = loss_batch_all = np.array([])
                else:
                    _, acc_batch, loss_batch = sess.run(
                        [model.train_op, model.accuracy, model.total_loss],
                        feed_dict=feed_dict_batch)
                    acc_batch_all = np.append(acc_batch_all, acc_batch)
                    loss_batch_all = np.append(loss_batch_all, loss_batch)

            # Run validation after each epoch
            acc_val, loss_val, _ = evaluate(sess, model, x_valid, y_valid)
            fd_val.write(
                str(epoch + 1) + ',' + str(acc_val) + ',' + str(loss_val) +
                '\n')
            fd_val.flush()
            print(
                '-----------------------------------------------------------------------------'
            )
            print(
                "Epoch #{0}, Validation loss: {1:.4f}, Validation accuracy: {2:.01%}{3}"
                .format(epoch + 1, loss_val, acc_val,
                        "(improved)" if loss_val < best_loss_val else ""))

            # And save the model if it improved:
            if loss_val < best_loss_val:
                saver.save(sess,
                           args.checkpoint_path + args.dataset +
                           '/model.tfmodel',
                           global_step=epoch + 1)
                best_loss_val = loss_val
        fd_train.close()
        fd_val.close()
Code example #14
print "Hierarchial clustering..."
hierarchy = machine_learning.recursiveCluster(X[dAll], size=500)
Y = machine_learning.flatten(hierarchy, min=40)
X = X[Y >= 0]  # Eliminating outliers
Y = Y[Y >= 0]
y_values = np.unique(Y)
for i in range(0, len(y_values)):
    Y[Y == y_values[i]] = i
print "Done."

print "Visualizing..."
machine_learning.pca(X[dAll], Y)
machine_learning.hist(X[["time"]], Y)
print "Done."

print "Shifting and randomizing..."
shift = 10
X, Y = utils.shiftLabels(X, Y, shift)
X, Y = utils.randomize(X, Y)
print 'Done.'

print "Choosing best parameters for classifier..."
clf, clf_acc, test_acc = machine_learning.best_classifier(X[All], Y)
print "Done."
print "Classifier accuracy: ", clf_acc
print "Test data accuracy: ", test_acc
print "Classifier model: ", clf

utils.save(clf, 'SVM.spkl')
print "Classifier saved."
Code example #15
File: validation.py Project: amobiny/chest_xray
def validation(x_valid, y_valid, val_batch_size, num_classes, sess, model, epoch, start_time, w_plus):
    loss_batch_all = np.array([])
    acc_batch_all = y_pred_all = logits_all = np.zeros((0, num_classes))
    model.is_train = False
    x_valid, y_valid = randomize(x_valid, y_valid)
    step_count = int(len(x_valid) / val_batch_size)

    for step in range(step_count):
        start = step * val_batch_size
        end = (step + 1) * val_batch_size
        x_batch, y_batch = get_next_batch(x_valid, y_valid, start, end)

        feed_dict_val = {model.x: x_batch, model.y: y_batch, model.w_plus: w_plus}
        acc_valid, loss_valid, y_pred, logits = sess.run(
            [model.accuracy, model.loss, model.prediction, model.get_logits],
            feed_dict=feed_dict_val)

        acc_batch_all = np.concatenate((acc_batch_all, acc_valid.reshape([1, num_classes])))
        y_pred_all = np.concatenate((y_pred_all, y_pred.reshape([val_batch_size, num_classes])))
        logits_all = np.concatenate((logits_all, logits.reshape([val_batch_size, num_classes])))
        loss_batch_all = np.append(loss_batch_all, loss_valid)

    mean_acc = np.mean(acc_batch_all, axis=0)
    mean_loss = np.mean(loss_batch_all)
    num_examples = np.sum(y_valid, axis=0)
    num_preds = np.sum(y_pred_all, axis=0)
    epoch_time = time.time() - start_time
    print('******************************************************************************'
          '********************************************************')
    print('--------------------------------------------------------Validation, Epoch: {}'
          ' -----------------------------------------------------------'.format(epoch + 1))
    print("Atlc\tCrdmg\tEffus\tInflt\tMass\tNodle\tPnum\tPntrx\tConsd"
          "\tEdma\tEmpys\tFbrss\tTkng\tHrna\t|Avg.\t|Loss\t|Run Time")
    for accu in mean_acc:
        print('{:.01%}\t'.format(accu), end='')
    print('|{0:.01%}\t|{1:0.02}\t|{2}'.format(np.mean(mean_acc), mean_loss, epoch_time))

    for exm in num_examples:
        print('{:}\t'.format(exm), end='')
    print("Count of pathologies")
    for pred in num_preds:
        print('{:}\t'.format(pred), end='')
    print("Count of recognized pathologies")

    # allocate P and R separately so they don't alias the same array
    P, R = np.zeros((1, args.n_cls)), np.zeros((1, args.n_cls))
    for cond in range(args.n_cls):
        y_true = y_valid[:, cond]
        y_pred = y_pred_all[:, cond]
        P[0, cond], R[0, cond] = precision_recall(y_true, y_pred)
    P = np.reshape(P, args.n_cls)
    R = np.reshape(R, args.n_cls)

    for p in P:
        print('{:0.03}\t'.format(p), end='')
    print("Precision")
    for r in R:
        print('{:0.03}\t'.format(r), end='')
    print("Recall")

    plot_precision_recall_curve(y_valid[:logits_all.shape[0], :], logits_all, epoch)
    write_acc_loss_csv(mean_acc, mean_loss, epoch)
    write_precision_recall_csv(P, R, epoch)

    return mean_acc, mean_loss
Code example #16
File: index.py Project: lyricalpaws/CoffeeAPI
@app.route("/assets/images/<filename>")
def template_images(filename):
    return send_from_directory("templates/images", filename)


@app.route("/random")
def randomcoffee():
    choose_random = random.choice(cache_images)
    name = choose_random.split(".")

    return send_file(
        f"{config.imagefolder}/{choose_random}",
        mimetype=f"image/{name[-1] if name[-1] != 'jpg' else 'jpeg'}",
        attachment_filename=choose_random)


@app.route("/random.json")
def randomcoffeeJSON():
    domain = config.domain

    if config.localhost:
        domain = f"http://localhost:{config.port}/"

    return jsonify({"file": domain + random.choice(cache_images)})


if __name__ == '__main__':
    utils.randomize(config.imagefolder, config.suffix)
    app.run(port=config.port, debug=config.debug, extra_files=extra_files)
Code example #17
File: pilotfish.py Project: therden/pilotfish
import PySimpleGUI as sg

from utils import shark_icon, pilot_icon, images
from utils import update_images, randomize
from utils import make_shark, make_pilotfish, position_pilotfish

direction, position = randomize()
shark_list = [make_shark(direction)]
pilotfish_list = [make_pilotfish(shark_list[0])]

while shark_list:
    for shark in shark_list:
        event, values = shark.read(timeout=15)
        if event == sg.TIMEOUT_KEY:
            shark_x, shark_y = shark.CurrentLocation()
            last_shark_x = shark.metadata['last_shark_x']
            last_shark_y = shark.metadata['last_shark_y']
            if (shark_x, shark_y) != (last_shark_x, last_shark_y):
                if shark_x > (last_shark_x + 10):
                    direction = 'right'
                elif shark_x < (last_shark_x - 10):
                    direction = 'left'
                pilot = pilotfish_list[shark_list.index(shark)]
                update_images(shark, pilot, direction)
                position_pilotfish(pilot, direction)
                shark.metadata['last_shark_x'] = shark_x
                shark.metadata['last_shark_y'] = shark_y
        elif event == 'Add another pair':
            direction, position = randomize()
            next_shark_str = 'shark' + str(len(shark_list))
            next_pilot_str = 'pilot' + str(len(shark_list))
Code example #18
File: wappclient.py Project: sigidagi/wappy
from datetime import datetime
import re
import sys
import logging
from configuration import wappy
from json.decoder import JSONDecodeError
from utils import randomize, Periodic
from bastard import Bastard

TCP_TIMEOUT = 20.0

module_logger = logging.getLogger(__name__)

networkRPC = {
    "jsonrpc": "2.0",
    "id": randomize("POST"),
    "method": "POST",
    "params": {
        "data": {
            "name": "WappyName",
            "meta": {
                "id": wappy.conf.backend.network_id,
                "type": "network",
                "version": "2.0"
            },
        },
        "url": "/network",
        "meta": {
            "send_time": datetime.utcnow().isoformat() + "Z"
        }
    }
}