Example #1
def swap_faces(image_a_path, image_b_path):
    image_a, image_b = load_images([
        os.path.join(IMAGES_FOLDER, 'trump', image_a_path),
        os.path.join(IMAGES_FOLDER, 'cage', image_b_path)]
    ) / 255.0

    image_a += images_B_mean - images_A_mean
    image_b += images_A_mean - images_B_mean

    # Preprocess loaded images
    image_a = cv2.resize(image_a, (64, 64))
    image_b = cv2.resize(image_b, (64, 64))

    image_a = toTensor(image_a).to(device).float()
    image_b = toTensor(image_b).to(device).float()

    # Forward with opposite encoders
    result_a = var_to_np(model(image_a, 'B'))
    result_b = var_to_np(model(image_b, 'A'))
    result_a = np.moveaxis(np.squeeze(result_a), 0, 2)
    result_b = np.moveaxis(np.squeeze(result_b), 0, 2)

    result_a = np.clip(result_a * 255, 0, 255).astype('uint8')
    result_b = np.clip(result_b * 255, 0, 255).astype('uint8')

    image_a_filename = os.path.splitext(image_a_path)[0]
    image_b_filename = os.path.splitext(image_b_path)[0]

    result_a_filename = f'{image_a_filename}-{image_b_filename}.jpg'
    result_b_filename = f'{image_b_filename}-{image_a_filename}.jpg'

    cv2.imwrite(os.path.join(SWAPS_FOLDER, result_a_filename), result_a)
    cv2.imwrite(os.path.join(SWAPS_FOLDER, result_b_filename), result_b)

    return result_a_filename, result_b_filename
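A minimal invocation sketch (not part of the original example); the two filenames are placeholders assumed to exist under IMAGES_FOLDER/trump and IMAGES_FOLDER/cage.

result_a, result_b = swap_faces('trump_01.jpg', 'cage_01.jpg')  # hypothetical filenames
print('Swapped output written to', os.path.join(SWAPS_FOLDER, result_a))
print('Swapped output written to', os.path.join(SWAPS_FOLDER, result_b))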
Example #2
def get_next(pre_words):

    model.zero_grad()
    state = model.init_hidden(1)
    # batch_size should be 1 since we generate each word in sequence
    sorted_lengths = [1]

    output_batch = None
    words = [word for word in pre_words]
    print(words)

    next_word = ''

    # input pre_words
    for w in words:
        yield w
        input_batch = torch.LongTensor([provider.vocab[w]]).view(1, 1)
        input_batch = Variable(input_batch)

        output_batch, state = model(input_batch, sorted_lengths)
        output_seqs = parse_batch(output_batch)
        next_word = output_seqs[0]

    yield next_word
    # predict
    while True:
        input_batch = torch.LongTensor([provider.vocab[next_word]]).view(1, 1)
        input_batch = Variable(input_batch)

        output_batch, state = model(input_batch, sorted_lengths)
        output_seqs = parse_batch(output_batch)

        next_word = output_seqs[0]
        # [0] since our batch size is only 1
        yield next_word
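A short consumption sketch, assuming the seed words below exist in provider.vocab: the generator first yields the seed words themselves, then one predicted word per next() call.

seed = ['the', 'old', 'man']                 # hypothetical seed words
gen = get_next(seed)
generated = [next(gen) for _ in range(30)]   # seed words followed by predicted words
print(' '.join(generated))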
Example #3
def start_training(files_benign, train_benign, files_malware, train_malware):
    with open("base.txt", "w") as f:
        for item in train_benign:
            f.write(files_benign[item] + "\n")
    with open("input.txt", "w") as f:
        for item in train_malware:
            f.write(files_malware[item] + "\n")
    train.model()
    print(
        "\n..................................................................................................................\n"
    )
Example #4
    def evaluate(self, dataloader, epoch):

        model.eval()
        val_loss_value = 0
        with torch.no_grad():
            for batch in dataloader:
                anchor_embedding = model(batch["anchor"])
                positive_embedding = model(batch["positive"])
                negative_embedding = model(batch["negative"])
                loss = self.criterion(anchor_embedding, positive_embedding,
                                      negative_embedding)
                val_loss_value += loss.item()
            val_epoch_loss = val_loss_value / len(dataloader)
        return val_epoch_loss
Example #5
    def train(self, dataloader, epoch):

        model.train()
        losses = 0
        for batch in tqdm(dataloader, total=len(dataloader)):
            model.zero_grad()
            anchor_embedding = model(batch["anchor"])
            positive_embedding = model(batch["positive"])
            negative_embedding = model(batch["negative"])
            loss = self.criterion(anchor_embedding, positive_embedding,
                                  negative_embedding)
            loss.backward()
            self.optimizer.step()
            losses += loss.item()
        epoch_loss = losses / len(dataloader)
        return epoch_loss
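A hypothetical epoch driver tying the train() and evaluate() methods above together; trainer, train_loader, val_loader, and NUM_EPOCHS are placeholders that do not appear in the snippets.

for epoch in range(NUM_EPOCHS):
    train_loss = trainer.train(train_loader, epoch)
    val_loss = trainer.evaluate(val_loader, epoch)
    print(f'epoch {epoch}: train loss {train_loss:.4f}, val loss {val_loss:.4f}')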
Example #6
def sample_iteration(config, rng, params, sample):
    """PixelCNN++ sampling expressed as a fixed-point iteration."""
    rng, dropout_rng = random.split(rng)
    out = train.model(config).apply({'params': params},
                                    sample,
                                    rngs={'dropout': dropout_rng})
    c_params = pixelcnn.conditional_params_from_outputs(out, sample)
    return conditional_params_to_sample(rng, c_params)
Example #7
    def summarize_embedding(self, dataloader, writer, epoch):

        model.eval()
        with torch.no_grad():
            for batch in dataloader:
                embeddings = model(batch["image"])
                writer.add_embedding(embeddings,
                                     global_step=epoch,
                                     label_img=batch["image"])
Example #8
def compare_results(input_list):

    print(
        sorted([get_score(u) for u in input_list],
               key=lambda x: x[1],
               reverse=True))

    M = model()
    print([(u[0], u[1]) for u in sort_list(input_list, M)])
Example #9
def load_model(x, fold):
    _, f, _ = model(x, len(cfg.ml_variables), 1, fold)
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope='model_fold{}'.format(fold))
    path = tf.train.latest_checkpoint(
        os.path.join(workdir, 'model_fold{}'.format(fold)))
    print('Load variables for fold {} from {}'.format(fold, path))
    saver = tf.train.Saver(variables)
    saver.restore(session, path)
    return f
Example #10
def predict_model_batch(model, batch, label, result_csv, device=device):
    b_input_ids = batch[0].to(device)
    b_input_mask = batch[1].to(device)
    b_labels = batch[2].to(device)

    with torch.no_grad():
        res = model(b_input_ids,
                    token_type_ids=None,
                    attention_mask=b_input_mask,
                    labels=b_labels)
        loss, logits = res['loss'], res['logits'].cpu().numpy()
        prediction = list(np.argmax(logits, axis=1).flatten())
        result_csv[label] += prediction
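A sketch of how this helper might be driven over a test DataLoader, assuming each batch is the (input_ids, attention_mask, labels) triple the function indexes; test_dataloader and the 'prediction' key are placeholders.

result_csv = {'prediction': []}
model.eval()
for batch in test_dataloader:   # hypothetical DataLoader over the test set
    predict_model_batch(model, batch, 'prediction', result_csv)
print(len(result_csv['prediction']), 'predictions collected')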
Example #11
def main(args):
    inv_fold = [1, 0][args.fold]
    x, y, w = build_dataset(os.path.join(args.workdir,
                                         'fold{}.root'.format(inv_fold)),
                            cfg.ml_classes,
                            inv_fold,
                            make_categorical=False,
                            use_class_weights=True)

    preproc = pickle.load(
        open(
            os.path.join(args.workdir,
                         'preproc_fold{}.pickle'.format(args.fold)), 'rb'))
    x_preproc = preproc.transform(x)

    x_ph = tf.placeholder(tf.float32)
    _, f = model(x_ph, len(cfg.ml_variables), len(cfg.ml_classes), args.fold)
    path = tf.train.latest_checkpoint(
        os.path.join(args.workdir, 'model_fold{}'.format(args.fold)))
    logger.debug('Load model {}'.format(path))
    config = tf.ConfigProto(intra_op_parallelism_threads=12,
                            inter_op_parallelism_threads=12)
    session = tf.Session(config=config)
    saver = tf.train.Saver()
    saver.restore(session, path)
    p = session.run(f, feed_dict={x_ph: x_preproc})
    p = np.argmax(p, axis=1)

    c = confusion_matrix(y, p, sample_weight=w)
    logger.debug('Confusion matrix (plain): {}'.format(c))
    plot(c, 'plain')

    c_norm_rows = c.copy()
    c_norm_cols = c.copy()
    for i in range(c.shape[0]):
        c_norm_rows[:, i] = c_norm_rows[:, i] / np.sum(c_norm_rows[:, i])
        c_norm_cols[i, :] = c_norm_cols[i, :] / np.sum(c_norm_cols[i, :])

    logger.debug('Confusion matrix (norm rows): {}'.format(c_norm_rows))
    plot(c_norm_rows, 'norm_rows')

    logger.debug('Confusion matrix (norm cols): {}'.format(c_norm_cols))
    plot(c_norm_cols, 'norm_cols')

    c_norm_all = c / np.sum(w)
    logger.debug('Confusion matrix (norm all): {}'.format(c_norm_all))
    plot(c_norm_all, 'norm_all')
Example #12
def gen_poem(begin_word, file, models, poem_type):
    tf.reset_default_graph()
    n = [24, 48, 32, 64]
    List = ['5jue', '5lv', '7jue', '7lv']
    if poem_type in List:
        m = n[List.index(poem_type)]  # select the character limit for this poem type
    batch_size = 1
    print('loading model from %s' % models)
    vector, word_dic, words = process_poetry(file, poem_type)  # convert the corresponding dataset to numeric form
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    loss, initial_state, last_state = model(input_data=input_data,
                                            words_len=len(words),
                                            rnn_size=128,
                                            num_layers=2,
                                            batch=batch_size)  # model prediction
    prediction = tf.nn.softmax(loss)  # output the probability vector
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        checkpoint = tf.train.latest_checkpoint(models)
        saver.restore(sess, checkpoint)  # load the trained model
        x = np.array([list(map(word_dic.get, 'B'))])  # get the dictionary index of the start token 'B'
        [predict, last_state_] = sess.run([prediction, last_state],
                                          feed_dict={input_data: x})
        begin_word_ = to_word(predict, words)  # get the poem's first character
        word = begin_word or begin_word_  # first character is user-supplied or system-generated
        poem_ = ''
        i = 0
        while word != 'E':  # stop predicting once the end token 'E' is produced
            poem_ += word
            i += 1
            if i > m:  # stop once the predicted characters exceed the poem's length
                break
            x = np.array([[word_dic[word]]])  # convert the character to its numeric index as the next input
            [predict, last_state_] = sess.run([prediction, last_state],
                                              feed_dict={
                                                  input_data: x,
                                                  initial_state: last_state_
                                              })
            word = to_word(predict, words)
        return poem_
Example #13
def generate_sample(config: ml_collections.ConfigDict, workdir: str):
    """Loads the latest model in `workdir` and samples a batch of images."""
    batch_size = config.sample_batch_size
    rng = random.PRNGKey(config.sample_rng_seed)
    rng, model_rng = random.split(rng)
    rng, dropout_rng = random.split(rng)

    # Create a model with dummy parameters and a dummy optimizer.
    init_batch = jnp.zeros((1, 32, 32, 3))

    params = train.model(config).init(
        {
            'params': model_rng,
            'dropout': dropout_rng
        }, init_batch)['params']
    optimizer_def = optim.Adam(learning_rate=config.learning_rate,
                               beta1=0.95,
                               beta2=0.9995)
    optimizer = optimizer_def.create(params)

    _, params = train.restore_checkpoint(workdir, optimizer, params)

    # Initialize batch of images
    device_count = jax.local_device_count()
    assert not batch_size % device_count, (
        'Sampling batch size must be a multiple of the device count, got '
        'sample_batch_size={}, device_count={}.'.format(
            batch_size, device_count))
    sample_prev = jnp.zeros(
        (device_count, batch_size // device_count, 32, 32, 3))

    # and batch of rng keys
    sample_rng = random.split(rng, device_count)

    # Generate sample using fixed-point iteration
    sample = sample_iteration(config, sample_rng, params, sample_prev)
    while jnp.any(sample != sample_prev):
        sample_prev, sample = sample, sample_iteration(config, sample_rng,
                                                       params, sample)
    return jnp.reshape(sample, (batch_size, 32, 32, 3))
Example #14
if __name__ == "__main__":
    #test_data_path = './dataset/test_data.txt'
    #model_path = './model.pt'
    #test_data_path = './dataset/test_data_41.txt'  # test with the 41-dimensional dataset
    #model_path = './model_41.pt'
    test_data_path = './dataset/test_data_10_bin.txt'  # test with the 10-dimensional binary dataset
    model_path = './model_10_bin.pt'
    test_data = Mydata(test_data_path)

    test_loader = DataLoader(test_data, batch_size=64, shuffle=True)
    model.load_state_dict(torch.load(model_path))
    with torch.no_grad():  # run the test
        correct = 0
        total = 0
        for features, labels in test_loader:
            #print(type(features))
            features = np.array(features).astype(float).T
            features = torch.Tensor(features)
            labels = np.array(labels).astype(float)
            labels = torch.LongTensor(labels)
            output = model(features)
            #print(output)
            _, predicted = torch.max(output, 1)
            #print(labels)
            #print(labels.size())
            total += labels.size(0)  # accumulate the total sample count
            #print("label" ,labels.shape)
            #print("predicted ",predicted.shape)
            correct += (predicted == labels).sum().item()
        print("2分类10维测试集准确率:{:.2f}".format(100 * correct / total))
Example #15
def main(args):
    inv_fold = [1, 0][args.fold]
    x, y, w = build_dataset(os.path.join(args.workdir,
                                         'fold{}.root'.format(inv_fold)),
                            cfg.ml_classes,
                            inv_fold,
                            make_categorical=False,
                            use_class_weights=True)

    preproc = pickle.load(
        open(
            os.path.join(args.workdir,
                         'preproc_fold{}.pickle'.format(args.fold)), 'rb'))
    x_preproc = preproc.transform(x)

    x_ph = tf.placeholder(tf.float32)
    _, f = model(x_ph, len(cfg.ml_variables), len(cfg.ml_classes), args.fold)
    path = tf.train.latest_checkpoint(
        os.path.join(args.workdir, 'model_fold{}'.format(args.fold)))
    logger.debug('Load model {}'.format(path))
    config = tf.ConfigProto(intra_op_parallelism_threads=12,
                            inter_op_parallelism_threads=12)
    session = tf.Session(config=config)
    saver = tf.train.Saver()
    saver.restore(session, path)

    # 1D coeffs
    grad1d_ops = []
    for i in range(len(cfg.ml_classes)):
        grad1d_ops.append(tf.gradients(f[:, i], x_ph)[0])
    grads1d = session.run(grad1d_ops, feed_dict={x_ph: x_preproc})
    grad_matrix = np.zeros((len(cfg.ml_classes), len(cfg.ml_variables)),
                           dtype=np.float32)
    for i, g in enumerate(grads1d):
        grad_matrix[i, :] = np.mean(np.abs(g), axis=0)

    plot1d(grad_matrix, 'plain')
    grad_matrix_norm = grad_matrix.copy()
    for i in range(grad_matrix.shape[0]):
        grad_matrix_norm[i, :] = grad_matrix_norm[i, :] / np.sum(
            grad_matrix_norm[i, :])
    plot1d(grad_matrix_norm, 'normrows')

    # 2D coeffs
    grad2d_ops = []
    for i in range(len(cfg.ml_classes)):
        tmp = []
        for j in range(len(cfg.ml_variables)):
            tmp.append(
                tf.reduce_mean(tf.abs(
                    tf.gradients(grad1d_ops[i][:, j], x_ph)[0]),
                               axis=0))
        grad2d_ops.append(tmp)
    grads2d = session.run(grad2d_ops, feed_dict={x_ph: x_preproc})

    for i, name in enumerate(cfg.ml_classes):
        grad2d_matrix = np.vstack(grads2d[i])
        for j in range(grad2d_matrix.shape[0]):
            grad2d_matrix[
                j, j] = grad2d_matrix[j, j] * 0.5  # see 2D Taylor expansion
        plot2d(grad2d_matrix, name, 'plain')

        grad2d_matrix = grad2d_matrix / np.sum(grad2d_matrix)
        plot2d(grad2d_matrix, name, 'normed')
Example #16
    def __init__(self):
        self.output = model()
        self.sess = tf.InteractiveSession()
Example #17
    map2 = map2.resize((640, 240), PIL.Image.ANTIALIAS)
    map3 = map3.resize((640, 240), PIL.Image.ANTIALIAS)
    mask = mask.resize((640, 240), PIL.Image.ANTIALIAS)

    final.paste(img, (0, 0, 640, 240))
    final.paste(map0, (0, 240, 640, 480))
    final.paste(map1, (0, 480, 640, 720))
    final.paste(map2, (0, 720, 640, 960))
    final.paste(map3, (0, 960, 640, 1200))
    final.paste(mask, (0, 1200, 640, 1440))
    images.append(final)


inp = Image.open(sys.argv[1])

model = model(True, (120, 320, 3), tr_model=sys.argv[2])
inp = inp.resize((320, 120))
font = ImageFont.truetype("/System/Library/Fonts/SFNSText.ttf", 16)

aimage = img_to_array(inp)
aimage = aimage.astype(np.float32) / 255
aimage = aimage - 0.5
res = model.predict(np.array([aimage]))[0]

for i in range(1, 120, 4):
    apply_mask(inp, i, 5, res)
    img = images.pop(0)
    img.save("map" + str(i) + ".jpg")
print(datetime.datetime.now() - start)
print(model_time)
Example #18
"""

import base64
from flask import Flask, render_template
from io import BytesIO
from train import process_image, model
import eventlet
import eventlet.wsgi
import numpy as np
import socketio

sio = socketio.Server()
app = Flask(__name__)
target_speed = 22
shape = (100, 100, 3)
model = model(True, shape)


@sio.on('telemetry')
def telemetry(sid, data):
    # The current image from the center camera of the car
    img_str = data["image"]
    speed = float(data["speed"])

    # Set the throttle.
    throttle = 1.2 - (speed / target_speed)

    # read and process image
    image_bytes = BytesIO(base64.b64decode(img_str))
    image, _ = process_image(image_bytes, None, False)
Example #19
    def __getitem__(self, idx):

        with torch.no_grad():
            if np.random.random() < config.SAMPLING_RATIO:
                anchor_file = self.paths[idx]
                anchor_label = self.labels[idx]

                positive_idx = np.argwhere((self.labels == anchor_label)
                                           & (self.paths != anchor_file))
                positives = self.paths[positive_idx].flatten()

                positives_model_input = get_tensors(positives)
                positives_embeddings = model(positives_model_input).detach()

                anchor_model_input = get_tensors([anchor_file])
                anchor_embedding = model(anchor_model_input)

                distaps = F.pairwise_distance(
                    anchor_embedding.repeat(len(positives_embeddings), 1),
                    positives_embeddings, 2)

                hardest_p_index = torch.argmax(distaps)
                hardest_positive = positives_model_input[hardest_p_index]

                negatives_idx = np.argwhere(self.labels != anchor_label)
                negatives = self.paths[negatives_idx].flatten()
                negatives = np.random.choice(negatives, 100, replace=False)
                negatives_model_input = get_tensors(negatives)

                negatives_embeddings = model(negatives_model_input)
                dist_nps = F.pairwise_distance(
                    anchor_embedding.repeat(len(negatives_embeddings), 1),
                    negatives_embeddings, 2)

                hardest_n_index = torch.argmin(dist_nps)
                hardest_negative = negatives_model_input[hardest_n_index]

                return {
                    "anchor": anchor_model_input.squeeze(0).to(config.DEVICE),
                    "positive": hardest_positive.to(config.DEVICE),
                    "negative": hardest_negative.to(config.DEVICE)
                }
            else:

                anchor_file = self.paths[idx]
                anchor_label = self.labels[idx]

                positive_idx = np.argwhere((self.labels == anchor_label)
                                           & (self.paths != anchor_file))
                positives = self.paths[positive_idx].flatten()
                positive = np.random.choice(positives)

                negatives_idx = np.argwhere(self.labels != anchor_label)
                negatives = self.paths[negatives_idx].flatten()
                negative = np.random.choice(negatives)
                anchors = np.array(Image.open(anchor_file))
                positives = np.array(Image.open(positive))
                negatives = np.array(Image.open(negative))

                anchors = np.transpose(anchors, (2, 0, 1)) / 255.0
                positives = np.transpose(positives, (2, 0, 1)) / 255.0
                negatives = np.transpose(negatives, (2, 0, 1)) / 255.0

                return {
                    "anchor": torch.tensor(anchors, dtype=torch.float,
                                           device=torch.device(config.DEVICE)),
                    "positive": torch.tensor(positives, dtype=torch.float,
                                             device=torch.device(config.DEVICE)),
                    "negative": torch.tensor(negatives, dtype=torch.float,
                                             device=torch.device(config.DEVICE))
                }
Example #20
import torch
from sklearn.datasets import load_digits
from random import randint
from train import model
from model import device
import matplotlib.pyplot as plt

digits = load_digits()

for _ in range(10):
    with torch.no_grad():
        X = torch.tensor(digits['data'], dtype=torch.float32).to(device)
        Y = torch.tensor(digits['target'], dtype=torch.int64).to(device)

        hypothesis = model(X)
        prediction = torch.argmax(hypothesis, 1)
        correct_prediction = prediction == Y
        accuracy = correct_prediction.float().mean()
        print('Accuracy: ', accuracy.item())

        r = randint(0, X.shape[0] - 1)
        X_single_data = X[r][:].float().to(device)
        Y_single_data = Y[r].to(device)

        print('Label: ', Y_single_data.item())
        single_prediction = model(X_single_data)
        print('Prediction: ', torch.argmax(single_prediction, dim=0).item())

        plt.imshow(X_single_data.view(8, 8).cpu(),
                   cmap='Greys',
                   interpolation='nearest')
Example #21
        '-detect',
        type=int,
        help='Turn detection module on - 1/ off - 0. Default:0',
        default=0)
    args = parser.parse_args()

    if os.path.exists(args.st_dir):
        fetch_last_img = "ls " + args.st_dir + " | tail -n1"
    else:
        print("Error: streaming directory %s doesn't exist" % args.st_dir)
        exit(1)

    auto = False
    if args.model:
        shape = (shapeY, shapeX, 3)
        model = model(True, shape, tr_model=args.model)
        auto = args.auto
        err = 0

    train = False
    if args.train:
        train = True
        img_dir = "./data_sets/" + args.train + "/data/"
        data_dir = "./model_data/"
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        # if not args.model:
        # 	model = model(load=False, shape)

    # actions = ['A', 'D', 'C', 'B']
    # links = ['/fwd', '/fwd/lf', '/fwd/rt', '/rev', '/rev/lf', '/rev/rt', '/exp' + str(args.exp_time)]
Example #22
import train
import numpy as np
import sys

X = np.load("data/X.npy")
Y = np.load("data/Y.npy")

params = np.load("params.npy").item()
params, costs, iters = train.model(X, Y, params, [X.shape[0], 12, 4], 0.02,
                                   100, True, int(sys.argv[1]))
np.save("params.npy", params)
Example #23
from train import MODEL_DIR, model, INPUT, LABEL, DROPOUT_RATE


# test accuracy on the validation set
if __name__ == '__main__':
    # directory of validation-set images
    DATA_DIR = BUTING_PATH + r"\data"
    # validation-set annotation data
    VAL_CSV_PATH = BUTING_PATH + r"\data\val.csv"
    LOG_PATH = BUTING_PATH + rf"\log"
    LOG_CSV_PATH = LOG_PATH + rf"\val_{today()}.csv"
    # create the validation-set object
    val_set = DataSet(DATA_DIR, VAL_CSV_PATH)

    # load the trained model and run prediction
    output = model()
    predict = tf.reshape(output, [-1, CATEGORY_COUNT])
    max_idx_p = tf.argmax(predict, 1)
    max_idx_l = tf.argmax(tf.reshape(LABEL, [-1, CATEGORY_COUNT]), 1)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(max_idx_p, max_idx_l), tf.float32))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(MODEL_DIR))

        all_acc = 0.0
        result = dict()
        for img, label, image_path, category_id in val_set:
            acc, pred = sess.run([accuracy, max_idx_p],
                                 feed_dict={INPUT: [img], LABEL: [label], DROPOUT_RATE: 0.})
Example #24
from flask import Flask, json, jsonify, request, render_template
from crossdomain import crossdomain
from sort_friends import *
from train import model

app = Flask(__name__)
M = model()


@app.route('/')
def hello_world():
    return 'Hello, World!'


@app.route('/influence', methods=['GET'])
@crossdomain(origin='*')
def get_influence():
    print(request.data)
    user_id = request.args.get("userid")
    results = rank_friends(user_id, M)
    print(results)
    return jsonify(influence_list=results)


@app.route('/sort', methods=['GET'])
@crossdomain(origin='*')
def get_influences():
    userids = json.loads(request.args.get("userids"))
    print(userids)
    results = sort_list(userids, M)
    print(results)
Example #25
import joblib
import pandas as pd
import train
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import csv
import numpy as np

train.model()

rf_mod = joblib.load("rf.pkl")
svm_mod = joblib.load("svm.pkl")

test_data = pd.read_csv("TestData.csv", header=None)
test_data = train.missingValueMean(test_data)

X_ttest = train.featureMat(test_data)

sc = MinMaxScaler(feature_range=(0, 1))
if len(X_ttest) > 1:
    X_ttest = sc.fit_transform(X_ttest)
else:
    X_ttest = sc.fit_transform(X_ttest.T)

pca = PCA(n_components=8)
pca.fit(X_ttest)
X_ttest_pca = pca.transform(X_ttest)

rf_predicted = rf_mod.predict(X_ttest)
print("Total Test cases:", len(rf_predicted))
print("RF Predicted:", rf_predicted)
Example #26
    parser.add_argument(
        '-url',
        type=str,
        help='Url for connection. Default: http://10.10.10.112',
        default="http://10.10.10.112")
    parser.add_argument('-data_dir',
                        type=str,
                        help='Img stream directory. Default: st_data',
                        default="st_data")
    args = parser.parse_args()

    if os.path.exists(args.data_dir):
        fetch_last_img = "ls " + args.data_dir + " | tail -n1"
    else:
        print("Error: streaming directory %s doesn't exist" % args.data_dir)
        exit(1)

    auto = False
    if args.model:
        shape = (100, 100, 3)
        model = model(True, shape, args.model)
        auto = True
        err = 0

    actions = ['A', 'D', 'C', 'B']
    links = ['/fwd', '/fwd/lt', '/fwd/rt', '/rev']
    # clinks = ['curl '+ args.url + el for el in links]
    clinks = [args.url + el for el in links]
    # curses.wrapper(drive)
    drive(auto)
Example #27
    def test_create_model(self):
        variables = train.initialized(random.PRNGKey(0), 224)
        x = random.normal(random.PRNGKey(1), (8, 224, 224, 3))
        y = train.model(train=False).apply(variables, x)
        self.assertEqual(y.shape, (8, 1000))
Example #28
                        help="path to directory that contains data")
    parser.add_argument("fruit",
                        help="name of the fruit which you which to classify")
    parser.add_argument("image_path",
                        help="path to image that you'd like to classify")
    args = parser.parse_args()

    # prepares the data
    x_train, x_test, y_train, y_test = preprocess.prepare_data(
        args.directory_path, args.fruit)

    # trains the model
    model = train.model(x_train,
                        y_train,
                        x_test,
                        y_test,
                        num_iterations=int(args.num_iterations),
                        learning_rate=float(args.learning_rate),
                        print_cost=False)

    # loads and reads the image you want to classify
    im_pix = imageio.imread(args.image_path)
    im_pix = im_pix / 255.
    im_pix = resize(im_pix, (100, 100), anti_aliasing=True)
    im_pix = im_pix.reshape(100 * 100 * 3, 1)
    prediction = train.predict(model["w"], model["b"], im_pix)

    # prints out a prediction
    if str(prediction[0][0]) == '1.0':
        print(f"Prediction: {args.fruit}")