Example #1
    def update_r(cond, i, x, r):
        x_adv = x + (1 + over_shoot) * r
        logits = model(x_adv)
        prob = tf.nn.softmax(logits)
        log_prob = tf.nn.log_softmax(logits)

        prob_t = tf.reduce_sum(prob * targets_onehot, axis=1)
        log_prob_t = tf.reduce_sum(log_prob * targets_onehot, axis=1)

        f = tf.abs(log_prob_t - tf.log(confidence))
        f = tf.reshape(f, (-1, ) + (1, ) * (ndims - 1))
        w = tf.gradients(log_prob_t, x_adv)[0]

        w_norm = tf.sqrt(epsilon +
                         tf.reduce_sum(w**2, axis=reduce_ind, keepdims=True))
        r_upd = (epsilonsqrt + f / w_norm) * w / w_norm
        if clip_dist is not None:
            r_upd = tf.clip_by_norm(r_upd, clip_dist, axes=reduce_ind)

        # select and update
        is_high_confidence = tf.greater_equal(prob_t, confidence)
        is_target_hit = tf.equal(prediction(logits), targets)
        target_is_label = tf.equal(labels, targets)
        selector = tf.logical_or(
            tf.logical_and(is_target_hit, is_high_confidence), target_is_label)
        r_new = tf.where(selector, r, r + r_upd)
        cond = tf.logical_not(tf.reduce_all(selector))
        if boxmin is not None and boxmax is not None:
            x_adv_new = x + (1 + over_shoot) * r_new
            r_new = (tf.clip_by_value(x_adv_new, boxmin, boxmax) -
                     x) / (1 + over_shoot)
        return cond, i + 1, x, r_new
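This update_r is only the body of a tf.while_loop; the enclosing attack has to drive it to a fixed point. A minimal sketch of that driver, assuming max_iter and over_shoot are defined in the enclosing scope (Example #17 below shows the same pattern in full):

    def should_continue(cond, i, x, r):
        return tf.logical_and(cond, tf.less(i, max_iter))

    cond = tf.constant(True, tf.bool)
    i = tf.constant(0)
    r0 = tf.zeros_like(x)
    r = tf.while_loop(should_continue,
                      update_r, [cond, i, x, r0],
                      back_prop=False)[-1]
    x_adv = tf.stop_gradient(x + (1 + over_shoot) * r)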
Example #2
def test_epoch(model, test_loader, loss_fn):
    print('{}  Validation Begins...'.format(
        datetime.now().strftime(_format)[:-3]))
    model.eval()
    counter = 0
    total_loss = 0
    post_acc = 0
    pipe_acc = 0

    for val_data, val_label in tqdm(test_loader):
        # val_data(?, 7, 80), val_label(?, 7)
        val_data = val_data[0]
        val_label = val_label[0]
        val_data = torch.as_tensor(val_data,
                                   dtype=torch.float32).to(DEVICE)
        val_label = torch.as_tensor(val_label,
                                    dtype=torch.float32).to(DEVICE)
        counter += 1
        midnet_output, postnet_output, alpha = model(val_data)
        postnet_accuracy, pipenet_accuracy = prediction(
            val_label, midnet_output, postnet_output)
        post_acc += float(postnet_accuracy)
        pipe_acc += float(pipenet_accuracy)
        loss, _, _, _ = loss_fn(model, val_label, postnet_output,
                                midnet_output, alpha)
        total_loss += loss.detach().item()

    # clear cache
    gc.collect()
    torch.cuda.empty_cache()
    print('{}  Validation Finished...'.format(
        datetime.now().strftime(_format)[:-3]))
    print('Loss: {:.5f}, post acc: {:.4f}, pipe acc: {:.4f}'.format(
        total_loss / counter, post_acc / counter, pipe_acc / counter))
    return total_loss / counter, post_acc / counter
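test_epoch calls model.eval() but never disables autograd, so the forward passes still build graphs. A caller can wrap it in torch.no_grad() to cut memory use; a minimal sketch, assuming model, test_loader, and loss_fn come from the surrounding script:

import torch

with torch.no_grad():  # no gradients are needed for validation
    val_loss, val_post_acc = test_epoch(model, test_loader, loss_fn)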
Example #3
def api_predict():
    """
        Renvoie un texte decrivant le résultat de la prédiction
    """

    assert isinstance(
        request.json['title'],
        str), "The title of the article is not defined or not string"
    assert isinstance(
        request.json['text'],
        str), "The text of the article is not defined or not string"

    title = request.json['title']
    date = request.json['date']
    text = request.json['text']
    subject = request.json['subject']

    # Create dataframe
    data = formate_dataset(
        pd.DataFrame(data={
            "title": [title],
            "date": [date],
            "text": [text],
            "subject": [subject]
        }))
    # Return prediction
    return jsonify(message(int(prediction(data))))
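A hedged client-side check for this endpoint; the route path and port are assumptions, since the @app.route decorator is not part of the snippet:

import requests

resp = requests.post('http://localhost:5000/api_predict',  # route is an assumption
                     json={'title': 'Some headline', 'date': '2020-01-01',
                           'text': 'Article body...', 'subject': 'news'})
print(resp.json())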
Example #4
def predict():
    """
        Renvoie un template decrivant le résultat de la prédiction
    """
    assert isinstance(
        request.form['title'],
        str), "The title of the article is not defined or not string"
    assert isinstance(
        request.form['text'],
        str), "The text of the article is not defined or not string"

    title = str(request.form["title"])
    date = str(request.form["date"])
    text = str(request.form["text"])
    subject = str(request.form["subject"])

    # Create dataframe
    data = formate_dataset(
        pd.DataFrame(data={
            "title": [title],
            "date": [date],
            "text": [text],
            "subject": [subject]
        }))
    # Return prediction
    return render_template('result.html',
                           prediction=prediction(data),
                           title=title)
Example #5
def handler(event, context):

    # Initialize Logger
    log = init_logger()
    log = add_handler(log)

    input_data = json.loads(event['body'])
    log.info(f"Input data: {input_data}")

    # Retrieve inputs
    input_url = input_data['input_url']
    n_predictions = input_data['n_predictions']

    # Download image
    input_image = download_image(input_url)

    # Process input image
    log.info(f"INFO -- Processing Image")
    batch = preprocess_image(input_image)

    # Generate prediction
    log.info(f"INFO -- Generating Prediction")
    pred = prediction(input_batch=batch, mdl=mobilenet_v2)

    # Top n results
    log.info(f"INFO -- Generating Top n predictions")
    n_results = number_output(mdl_output=pred,
                              mdl_labels=labels,
                              top_n=n_predictions)

    response = {"statusCode": 200, "body": json.dumps(n_results)}

    return response
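A minimal local smoke test for this handler; the event mirrors API Gateway's proxy payload, and the image URL is a placeholder assumption:

import json

event = {'body': json.dumps({'input_url': 'https://example.com/dog.jpg',
                             'n_predictions': 3})}
print(handler(event, context=None))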
Example #6
def predict():

    if request.method == 'POST':
        message = request.form['message']
        my_prediction = prediction(message)
        return render_template('home.html',
                               prediction=my_prediction,
                               question=message)
    # on a non-POST request, render the page without referencing the
    # unbound `message` / `my_prediction` names (fixes a NameError)
    return render_template('home.html')
Example #7
def test_prediction():
    """
    test prediction
    """
    dataf = pd.DataFrame(
        data={"title": ["1st title"],
              "date": ["1st date"],
              "text": ["1st text"],
              "subject": ["1st subject"]})
    assert utils.prediction(dataf) in (0, 1)
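The test can be run on its own; one way, assuming pytest is installed and the file is on the collection path:

import pytest

pytest.main(['-q', '-k', 'test_prediction'])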
Example #8
    def autoLabel(self):
        if self.loadImage is not None:
            Utils.changeCursor(Qt.WaitCursor)
            image = load_image(self.loadImage.filePath)
            boundingBoxes = prediction(image, self.yolo)

            oldH, oldW, _ = image.shape
            newH, newW = self.viewer.height(), self.viewer.width()
            # rescale YOLO boxes from image coordinates to viewer coordinates
            for box in boundingBoxes:
                box[:] = [
                    box[0] * newW / oldW, box[1] * newH / oldH,
                    box[2] * newW / oldW, box[3] * newH / oldH
                ]

            self.viewer.autoLabeling(boundingBoxes)
            Utils.changeCursor(Qt.ArrowCursor)
Example #9
    def update_r(cond, i, x, r):
        x_adv = x + r
        logits = model(x_adv)
        x_adv_os = x + (1 + over_shoot) * r
        logits_os = model(x_adv_os)
        pred = prediction(logits_os)

        # closest boundary
        if targets is None:
            l = find_next_target(x, logits, labels, random=False)
        else:
            l = targets
        l_idx = batch_indices * num_classes + l

        logits_flt = tf.reshape(logits, (-1, ))
        logits_labels = tf.gather(logits_flt, labels_idx)
        logits_targets = tf.gather(logits_flt, l_idx)

        f = logits_targets - logits_labels
        w = tf.gradients(f, x_adv)[0]
        reduce_ind = list(range(1, ndims))
        w2_norm = tf.sqrt(tf.reduce_sum(w**2, axis=reduce_ind))
        if ord == 2:
            dist = tf.abs(f) / w2_norm
        else:
            dist = tf.abs(f) / tf.reduce_sum(tf.abs(w), axis=reduce_ind)
        # avoid numerical instability and clip max value
        if clip_dist is not None:
            dist = tf.clip_by_value(dist, 0, clip_dist)
        if ord == 2:
            r_upd = w * tf.reshape(((dist + epsilon) / w2_norm),
                                   (-1, ) + (1, ) * (ndims - 1))
        else:
            r_upd = tf.sign(w) * tf.reshape(dist, (-1, ) + (1, ) * (ndims - 1))

        # select and update
        is_mistake = tf.not_equal(labels, pred)
        # if targets is provided and equal to the class label
        target_is_label = tf.equal(labels, l)
        selector = tf.logical_or(is_mistake, target_is_label)
        r_new = tf.where(selector, r, r + r_upd)
        if boxmin is not None and boxmax is not None:
            x_adv_new = x + (1 + over_shoot) * r_new
            r_new = (tf.clip_by_value(x_adv_new, boxmin, boxmax) -
                     x) / (1 + over_shoot)
        cond = tf.logical_not(tf.reduce_all(selector))
        return cond, i + 1, x, r_new
Example #10
def main():

    args = vars(ap.parse_args())

    image_path = args['image_path']
    model = args['model']
    top_k = args['top_k']
    category_names = args['category_names']

    with open(category_names, 'r') as f:
        class_names = json.load(f)

    reloaded_SavedModel = tf.keras.models.load_model(
        model, custom_objects={'KerasLayer': hub.KerasLayer})

    image = preprocess(image_path)

    image = np.expand_dims(image, axis=0)

    ps = reloaded_SavedModel(image, training=False).numpy()

    if top_k > 1:
        preds, classes = prediction(ps, class_names, top_k)
        print("Top", top_k, "classes are:")
        for i in range(top_k):
            print(classes[i], "with probability of", preds[i])
    else:
        class_idx = np.argmax(ps[0])
        class_name = class_names[str(class_idx + 1)]
        print("The given image is", class_name, "with probability of",
              max(ps[0]))
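The snippet reads a parser ap defined elsewhere; a plausible definition consistent with the argument names used above (an assumption, not the original code):

import argparse

ap = argparse.ArgumentParser(description='Predict a flower class from an image')
ap.add_argument('image_path', help='path to the input image')
ap.add_argument('model', help='path to a saved Keras model')
ap.add_argument('--top_k', type=int, default=5, help='how many top classes to print')
ap.add_argument('--category_names', default='label_map.json',
                help='JSON file mapping class indices to names')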
Example #11
import datetime
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential, save_model, load_model
from tensorflow.keras.layers import Dense, Dropout, LSTM, Activation
import numpy as np
import seaborn as sns
import h5py
import quandl
from utils import create_dataset, preprocess, calcAccuracy, prediction
from train import training
from flask import Flask, render_template  # needed by the app and routes below

app = Flask(__name__)

nextDayPrice, price, lAcc, lPctAcc, gAcc, gPctAcc = prediction()


@app.route('/')
def hello_world():
    # accuracy,pctChangeAccuracy,nextDayPrice,price = run_model()
    return render_template('index.html', tomorrow=nextDayPrice, today=price)


@app.route('/results')
def results():
    return render_template('accuracy.html',
                           lAcc=lAcc,
                           lPctAcc=lPctAcc[0],
                           gAcc=gAcc,
                           gPctAcc=gPctAcc[0])
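The module builds the Flask app but never starts it; a conventional dev-server entry point (the debug flag and default host/port are assumptions):

if __name__ == '__main__':
    app.run(debug=True)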
Example #12
ts9 = 15
for ts in (ts1, ts2, ts3, ts4, ts5, ts6, ts7, ts8, ts9):
    mask_ts_[mask_tiles == ts] = 1

# Load model
model = load_model(filepath + 'unet_exp_' + str(exp) + '.h5', compile=False)
area = 11
# Prediction
ref_final, pre_final, prob_reconstructed, ref_reconstructed, mask_no_considered_, mask_ts, time_ts = prediction(
    model, image_array, image_ref, final_mask, mask_ts_, patch_size, area)

# Metrics
cm = confusion_matrix(ref_final, pre_final)
metrics = compute_metrics(ref_final, pre_final)
print('Confusion matrix\n', cm)
print('Accuracy: ', metrics[0])
print('F1score: ', metrics[1])
print('Recall: ', metrics[2])
print('Precision: ', metrics[3])

# Alarm area
total = (cm[1, 1] + cm[0, 1]) / len(ref_final) * 100
print('Area to be analyzed', total)

print('training time', end_training)
Example #13
    sess = tf.Session()

    images = tf.placeholder(tf.float32, [None, 400, 400, 3])
    true_out = tf.placeholder(tf.float32, [None, 3])
    train_mode = tf.placeholder(tf.bool)

    vgg = vgg19.Vgg19('./vgg19.npy')
    vgg.build(images, train_mode)
    # print number of variables used: 143667240 variables, i.e. ideal size = 548MB
    print(vgg.get_var_count())

    sess.run(tf.global_variables_initializer())

    # test classification
    prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
    utils.prediction(prob[0])

    # simple 1-step training
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=true_out,
                                                   logits=vgg.fc8))
    train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
    sess.run(train,
             feed_dict={
                 images: batch1,
                 true_out: [img1_true_result],
                 train_mode: True
             })

    # test classification again, should have a higher probability about tiger
    prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False})
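    # Report the updated top-1 class as well, mirroring the first check above
    # (this call is an addition; the original snippet leaves `prob` unused here)
    utils.prediction(prob[0])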
Example #14
def train_epoch(model, train_loader, loss_fn, optimizer, scheduler, batch_size,
                epoch, start_step):
    model.train()
    count = 0
    total_loss = 0
    n = batch_size
    step = start_step
    examples = []
    total_loss_window = ValueWindow(100)
    post_loss_window = ValueWindow(100)
    post_acc_window = ValueWindow(100)

    for x, y in train_loader:
        count += 1
        examples.append([x[0], y[0]])

        if count % 8 == 0:
            examples.sort(key=lambda x: len(x[-1]))
            examples = (np.vstack([ex[0] for ex in examples]),
                        np.vstack([ex[1] for ex in examples]))
            batches = [(examples[0][i:i + n], examples[1][i:i + n])
                       for i in range(0,
                                      len(examples[-1]) + 1 - n, n)]

            if len(examples[-1]) % n != 0:
                batches.append(
                    (np.vstack((examples[0][-(len(examples[-1]) % n):],
                                examples[0][:n - (len(examples[0]) % n)])),
                     np.vstack((examples[1][-(len(examples[-1]) % n):],
                                examples[1][:n - (len(examples[-1]) % n)]))))

            for batch in batches:  # mini batch
                # train_data(?, 7, 80), train_label(?, 7)
                step += 1
                train_data = torch.as_tensor(batch[0],
                                             dtype=torch.float32).to(DEVICE)
                train_label = torch.as_tensor(batch[1],
                                              dtype=torch.float32).to(DEVICE)

                optimizer.zero_grad(set_to_none=True)
                midnet_output, postnet_output, alpha = model(train_data)
                postnet_accuracy, pipenet_accuracy = prediction(
                    train_label, midnet_output, postnet_output)
                loss, postnet_loss, pipenet_loss, attention_loss = loss_fn(
                    model, train_label, postnet_output, midnet_output, alpha)
                total_loss += loss.detach().item()
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 5, norm_type=2)
                optimizer.step()
                scheduler.step()
                lr = scheduler._rate

                total_loss_window.append(loss.detach().item())
                post_loss_window.append(postnet_loss.detach().item())
                post_acc_window.append(postnet_accuracy)
                if step % 10 == 0:
                    print(
                        '{}  Epoch: {}, Step: {}, overall loss: {:.5f}, postnet loss: {:.5f}, '
                        'postnet acc: {:.4f}, lr :{:.5f}'.format(
                            datetime.now().strftime(_format)[:-3], epoch, step,
                            total_loss_window.average,
                            post_loss_window.average, post_acc_window.average,
                            lr))
                if step % 50_000 == 0:
                    print('{} save checkpoint.'.format(
                        datetime.now().strftime(_format)[:-3]))
                    checkpoint = {
                        "model": model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        "epoch": epoch,
                        'step': step,
                        'scheduler_lr': scheduler._rate,
                        'scheduler_step': scheduler._step
                    }
                    if not os.path.isdir("./checkpoint"):
                        os.mkdir("./checkpoint")
                    torch.save(
                        checkpoint, './checkpoint/STAM_weights_%s_%s.pth' %
                        (str(epoch), str(step / 1_000_000)))
                    gc.collect()
                    torch.cuda.empty_cache()
            del batches, examples
            examples = []
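A hedged outer loop for train_epoch; note the function does not return the updated step, so a caller resuming training would have to recover it from the saved checkpoints (num_epochs and start_step are assumptions):

start_step = 0  # or the 'step' entry of a restored checkpoint
for epoch in range(1, num_epochs + 1):
    train_epoch(model, train_loader, loss_fn, optimizer, scheduler,
                batch_size, epoch, start_step)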
Example #15
from config import MODEL, ALPHABET, N_HEADS, ENC_LAYERS, DEC_LAYERS, DEVICE, HIDDEN
from config import WEIGHTS_PATH, PREDICT_PATH, DIR  # assumed to also live in config

import torch
from utils import generate_data, process_data, prediction
from dataset import TextCollate, TextLoader

char2idx = {char: idx for idx, char in enumerate(ALPHABET)}
idx2char = {idx: char for idx, char in enumerate(ALPHABET)}

if MODEL == 'model1':
  from models import model1
  model = model1.TransformerModel(len(ALPHABET), hidden=HIDDEN, enc_layers=ENC_LAYERS, dec_layers=DEC_LAYERS,   
                          nhead=N_HEADS, dropout=0.0).to(DEVICE)
elif MODEL == 'model2':
  from models import model2
  model = model2.TransformerModel(len(ALPHABET), hidden=HIDDEN, enc_layers=ENC_LAYERS, dec_layers=DEC_LAYERS,   
                          nhead=N_HEADS, dropout=0.0).to(DEVICE)

if WEIGHTS_PATH is not None:
  print(f'loading weights from {WEIGHTS_PATH}')
  model.load_state_dict(torch.load(WEIGHTS_PATH))

preds = prediction(model, PREDICT_PATH, char2idx, idx2char)

with open(DIR + '/predictions.tsv', 'w') as f:
  f.write('filename\tprediction\n')
  for filename, pred in preds.items():
    f.write(filename + '\t' + pred + '\n')
print(f'predictions are saved in {DIR}/predictions.tsv')
Example #16
def model(X_train,
          Y_train,
          X_test,
          Y_test,
          learning_rate=0.009,
          num_epochs=100,
          minibatch_size=64,
          print_cost=True):
    """
    Arguments:
    X_train -- training set images, of shape (None, 64, 64, 3)
    Y_train -- training set labels, of shape (None, n_y = 6)
    X_test -- test set images, of shape (None, 64, 64, 3)
    Y_test -- test set labels, of shape (None, n_y = 6)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    train_accuracy -- real number, accuracy on the train set (X_train)
    test_accuracy -- real number, accuracy on the test set (X_test)
    """

    ops.reset_default_graph()  # rerun the model without overwriting tf variables
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []  # To keep track of the cost

    images = tf.placeholder(tf.float32, [None, 400, 400, 3])
    true_out = tf.placeholder(tf.float32, [None, n_y])
    train_mode = tf.placeholder(tf.bool)
    vgg = vgg19.Vgg19('./vgg19.npy')
    vgg.build(images, train_mode)

    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=true_out,
                                                   logits=vgg.fc8))
    optimizer = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            minibatch_cost = 0.
            num_minibatches = int(
                m / minibatch_size
            )  # number of minibatches of size minibatch_size in the train set
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size)

            for minibatch in minibatches:

                (minibatch_X, minibatch_Y) = minibatch
                _, temp_cost = sess.run([optimizer, cost],
                                        feed_dict={
                                            images: minibatch_X,
                                            true_out: minibatch_Y,
                                            train_mode: True
                                        })
                minibatch_cost += temp_cost / num_minibatches

            # Print the cost every 5 epochs and record it every epoch
            if print_cost and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost:
                costs.append(minibatch_cost)

        # Calculate the correct predictions
        prob = sess.run(vgg.prob,
                        feed_dict={
                            images: X_test,
                            train_mode: False
                        })
        predict_op = utils.prediction(
            vgg.prob
        )  # NOTE: rework the prediction helper if several images are passed at once
        correct_prediction = tf.equal(predict_op, tf.argmax(true_out, 1))

        # Calculate accuracy on the train and test sets
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        train_accuracy = accuracy.eval({images: X_train, true_out: Y_train,
                                        train_mode: False})
        test_accuracy = accuracy.eval({images: X_test, true_out: Y_test,
                                       train_mode: False})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

        return train_accuracy, test_accuracy
Example #17
def deepfool(model,
             x,
             labels=None,
             targets=None,
             ord=2,
             max_iter=25,
             clip_dist=None,
             over_shoot=0.02,
             boxmin=None,
             boxmax=None,
             epsilon=1e-4):
    """Tensorflow implementation of DeepFool https://arxiv.org/abs/1511.04599
    """
    ndims = x.get_shape().ndims
    batch_size = tf.shape(x)[0]
    batch_indices = tf.range(batch_size)
    num_classes = int(model(x).get_shape()[1])

    if labels is None:
        labels = prediction(model(x))

    labels_idx = batch_indices * num_classes + labels

    def should_continue(cond, i, x, r):
        return tf.logical_and(cond, tf.less(i, max_iter))

    def update_r(cond, i, x, r):
        x_adv = x + r
        logits = model(x_adv)
        x_adv_os = x + (1 + over_shoot) * r
        logits_os = model(x_adv_os)
        pred = prediction(logits_os)

        # closest boundary
        if targets is None:
            l = find_next_target(x, logits, labels, random=False)
        else:
            l = targets
        l_idx = batch_indices * num_classes + l

        logits_flt = tf.reshape(logits, (-1, ))
        logits_labels = tf.gather(logits_flt, labels_idx)
        logits_targets = tf.gather(logits_flt, l_idx)

        f = logits_targets - logits_labels
        w = tf.gradients(f, x_adv)[0]
        reduce_ind = list(range(1, ndims))
        w2_norm = tf.sqrt(tf.reduce_sum(w**2, axis=reduce_ind))
        if ord == 2:
            dist = tf.abs(f) / w2_norm
        else:
            dist = tf.abs(f) / tf.reduce_sum(tf.abs(w), axis=reduce_ind)
        # avoid numerical instability and clip max value
        if clip_dist is not None:
            dist = tf.clip_by_value(dist, 0, clip_dist)
        if ord == 2:
            r_upd = w * tf.reshape(((dist + epsilon) / w2_norm),
                                   (-1, ) + (1, ) * (ndims - 1))
        else:
            r_upd = tf.sign(w) * tf.reshape(dist, (-1, ) + (1, ) * (ndims - 1))

        # select and update
        is_mistake = tf.not_equal(labels, pred)
        # if targets is provided and equal to the class label
        target_is_label = tf.equal(labels, l)
        selector = tf.logical_or(is_mistake, target_is_label)
        r_new = tf.where(selector, r, r + r_upd)
        if boxmin is not None and boxmax is not None:
            x_adv_new = x + (1 + over_shoot) * r_new
            r_new = (tf.clip_by_value(x_adv_new, boxmin, boxmax) -
                     x) / (1 + over_shoot)
        cond = tf.logical_not(tf.reduce_all(selector))
        return cond, i + 1, x, r_new

    cond = tf.constant(True, tf.bool)
    i = tf.constant(0)
    r0 = tf.zeros_like(x)
    r = tf.while_loop(should_continue,
                      update_r, [cond, i, x, r0],
                      back_prop=False)[-1]
    x_adv = tf.stop_gradient(x + (1 + over_shoot) * r)
    return x_adv
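A hedged usage sketch for deepfool in a TF1 graph; the input shape, the model callable, and clean_batch are assumptions standing in for the caller's setup:

x = tf.placeholder(tf.float32, [None, 28, 28, 1])   # shape is an assumption
x_adv = deepfool(model, x, boxmin=0.0, boxmax=1.0)  # model maps images to logits

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    adv_batch = sess.run(x_adv, feed_dict={x: clean_batch})  # clean_batch: numpy images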