Example no. 1
def main():
    # print "Naive Bayes"
    # NaiveBayes(train_raw, cv=6, parallism=20)
    # print "Random Forest"
    # RandomForest(train_raw, cv=6, parallism=20)
    # print "AdaBoostClassifier"
    # AdaBoost(train_raw, cv=6, parallism=20)
    # print "LogisticRegression"
    # LogisticReg(train_raw, cv=6, parallism=20)
    # print "linearSVC"
    # SVC(train_raw, cv=6, parallism=20)
    predict(train_raw, test_raw, encoder)
    pass
Example no. 2
    def run(self):
        init = sl.InitParameters()
        cam = sl.Camera()
        if not cam.is_opened():
            print("Opening ZED Camera...")
        status = cam.open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            exit()

        runtime = sl.RuntimeParameters()
        mat = sl.Mat()

        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

        while not self.stopped:
            err = cam.grab(runtime)
            if err == sl.ERROR_CODE.SUCCESS:
                cam.retrieve_image(mat, sl.VIEW.VIEW_LEFT)
                frameLocal = cv2.resize(
                    mat.get_data(), (self.image_shape[1], self.image_shape[0]))

                # Drop the alpha channel so only the three color channels remain.
                frameLocal = frameLocal[:, :, 0:3]

                self.frame = np.array(
                    helper.predict(self.sess, frameLocal, self.input_image,
                                   self.keep_prob, self.logits,
                                   self.image_shape))
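The run method above reads self.stopped and self.image_shape and writes self.frame, but the enclosing class is not shown. A minimal sketch, assuming the loop lives in a threading.Thread subclass (the class name and attributes below are illustrative, not the author's code):

import threading
import time

class CaptureThread(threading.Thread):
    # Hypothetical wrapper providing the stopped flag and frame attribute
    # that the ZED loop above relies on.
    def __init__(self):
        super().__init__(daemon=True)
        self.stopped = False
        self.frame = None

    def run(self):
        # grab / retrieve_image / predict would go here, as in the loop above
        while not self.stopped:
            time.sleep(0.01)

    def stop(self):
        self.stopped = True

cap = CaptureThread()
cap.start()      # executes run() on a background thread
time.sleep(1.0)  # the main thread would consume cap.frame in the meantime
cap.stop()
cap.join()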
Example no. 3
def get_prediction():
    req_json = request.get_json()
    file_id = req_json.get('id')
    temp = UserData.query.filter(UserData.file_name.contains(file_id)).first()
    file_name = temp.file_name
    file_name = os.path.join(UPLOAD_FOLDER, file_name)
    try:
        result = predict(file_name)
    except Exception:
        # Mark the record as failed and bail out; `result` would be undefined below.
        temp.status = False
        db.session.commit()
        return {"error": "prediction failed"}, 500
    normal_prob, pneumonic_prob = result[2]
    normal_percent, pneumonic_percent = result[2] * 100
    answer = "Normal" if normal_percent.item() > pneumonic_percent.item() else "Pneumonic"
    response = {
        "normal": {
            "prob": round(normal_prob.item(), 2),
            "percent": round(normal_percent.item(), 2)
        },
        "pneumonic": {
            "prob": round(pneumonic_prob.item(), 2),
            "percent": round(pneumonic_percent.item(), 2)
        },
        "answer": answer
    }

    temp.prediction = answer
    temp.status = True
    db.session.commit()
    return response
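A minimal client-side sketch of the JSON contract above, assuming the view is registered at a hypothetical /predict route on a locally running Flask app (the URL and file id are placeholders):

import requests

resp = requests.post("http://localhost:5000/predict", json={"id": "chest_xray_001"})
data = resp.json()
print(data["answer"], data["normal"]["percent"], data["pneumonic"]["percent"])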
Example no. 4
def main():
    
    # Create a parser object
    parser = argparse.ArgumentParser(description="Neural Network Prediction")

    # Add argument to the parser object
    parser.add_argument('--top_k', action='store', dest='top_k', type=int, default=3, help='Number of top results')
    parser.add_argument('--category_names', action='store', dest='cat', type=str, default='cat_to_name.json', help='JSON file mapping categories to names')
    parser.add_argument('--gpu', action='store_true', dest='gpu', default=False, help='Use GPU if --gpu is passed')
    parser.add_argument('--st', action='store_true', default=False, dest='start', help='Pass --st to start predicting')
    parser.add_argument('--img', action='store', dest='img', type=str, default='Sample_image.jpg', help='Image file name')
    
    # Parse the argument from standard input
    args = parser.parse_args()

    # Print out parsing/default parameters
    print('---------Parameters----------')
    print('gpu              = {!r}'.format(args.gpu))
    print('img              = {!r}'.format(args.img))
    print('top_k            = {!r}'.format(args.top_k))
    print('cat              = {!r}'.format(args.cat))
    print('start            = {!r}'.format(args.start))

    print('-----------------------------')
    
    if args.start:
        model, class_labels = helper.load_saved_models()
        cat_to_name, label_order = helper.load_json(args.cat)
        ps, labels, index = helper.predict(args.img, model, args.top_k, cat_to_name, class_labels, args.gpu)
        print("------------------Prediction------------------")
        for i in range(len(ps)):
            print("The probability of the flower to be {} is {:.2f} %.".format(labels[i], ps[i] * 100))
Example no. 5
def main():

    ap = argparse.ArgumentParser(description='predict.py')
    ap.add_argument('image_path',
                    nargs='*',
                    action="store",
                    default=["./flowers"],
                    type=str)
    ap.add_argument('checkpoint', action="store", type=str)
    ap.add_argument('--top_k',
                    dest="top_k",
                    action="store",
                    type=int,
                    default=1)
    ap.add_argument('--category_names',
                    dest="category_names",
                    action="store",
                    type=str)
    ap.add_argument('--gpu', dest="gpu", action='store_true')

    args = ap.parse_args()

    if args.category_names:
        with open(args.category_names, 'r') as f:
            cat_to_name = json.load(f)
    else:
        cat_to_name = None
    model = helper.load_model(args.checkpoint)
    ps, cl = helper.predict(args.image_path[0], model, args.top_k, args.gpu)
    helper.show_results(ps, cl, args.top_k, cat_to_name)
Example no. 6
def main():
    input_args = get_input_args()
    model, class_to_idx = helper.load_checkpoint(input_args.checkpoint)
    model.class_to_idx = class_to_idx
    #print(model)
    topk = input_args.top_k
    gpu = input_args.gpu
    image = input_args.image

    # TODO: which input args do I really need at a minimum?
    # Process the input image, then feed it to the model
    # -> this is implemented inside the predict function.

    # Remarks about the predict function:
    # - the device (GPU/CPU) is chosen via the gpu flag
    # - the full path to the image has to be passed in
    # - it returns the class probabilities from the prediction
    [probs], classes = helper.predict(image, model, topk, gpu)

    cat_to_name = helper.load_categories(input_args.category_names)
    # Output result (the classes and their probabilities are printed here)
    #print(cat_to_name, probs, classes)
    for a, b in zip(classes, probs):
        print("{} = {:.2f}%".format(cat_to_name[a], b * 100))
Example no. 7
def main(image_path, saved_model_path, top_k=1, json_path=None):
    '''
    Input: image_path, saved_model_path, top_k, json_path
    Output: a plot of the provided photo titled with its predicted class, together with a
            histogram of the top_k most likely classes and their probabilities.
    '''
    model = load_model(saved_model_path)
    probs, class_probs, image = predict(image_path, model, top_k)
    plot(image, probs, class_probs, json_path, top_k)
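The plot helper is not shown; a minimal matplotlib sketch matching the call above and the docstring (treating class_probs as the class ids and mapping them through the optional JSON file is an assumption):

import json
import matplotlib.pyplot as plt

def plot(image, probs, class_probs, json_path=None, top_k=5):
    # Show the image titled with the top class, plus a bar chart of the top_k classes.
    labels = [str(c) for c in class_probs]
    if json_path:
        with open(json_path, 'r') as f:
            class_names = json.load(f)
        labels = [class_names.get(str(c), str(c)) for c in class_probs]

    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 4))
    ax1.imshow(image)
    ax1.set_title(labels[0])
    ax1.axis('off')
    ax2.barh(labels[:top_k][::-1], list(probs[:top_k])[::-1])
    ax2.set_xlabel('probability')
    plt.tight_layout()
    plt.show()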
Example no. 8
def player(prev_play, opponent_history=[]):
  # --------- CONFIG ---------
  # the more data / epochs, the better (and slower) the bot
  n = 1000      # retrain only on every n-th move
  epochs = 10   # number of passes through the training loop

  global weight1, weight2
  info = False
  
  d = [[0, 0, 0]]
  if prev_play not in ("R", "P", "S"):
    # the very first call passes an empty string; fall back to "R"
    prev_play = "R"
  i = "RPS".index(prev_play)
  d[0][i] = 1

  # TEST with cartesian -> out of memory
  # cartesian product (order matters, with replacement)
  dataY = list(itertools.product([0,1], repeat=3))
  dataY = list(filter(lambda x: x[i]==1, dataY))
  dataY = [list(x) for x in dataY] # convert the tuples to lists

  for dy in dataY:
    bt.addRows(d, [dy])  

  # ????
  # model fails at quincy/poor2, but basically works with poor!
  #bt.addRows(d, d)
  X, y = bt.getData()

  # we will retrain our model not on every move
  pr = []

  if len(X) % n == 0:
    weight1, weight2 = hlp.train(X, y, False, False, epochs)
    info = str(len(X))

  i_pred = np.array([1,1,1])
  pr = hlp.predict([weight1, weight2], i_pred)
  #print(pr)
  #sys.exit()

  # TODO
  # check the probability of each move and return the counter-move,
  # e.g. if the most probable opponent move is R, return P
  # -> extremely slow!
  if pr[0]>pr[1] and pr[0]>pr[2]: # R
    guess = 'P'
  elif pr[1]>pr[0] and pr[1]>pr[2]: # P
    guess = 'S'
  else: # S
    guess = 'R'

  pr2 = [round(x,5) for x in pr]
  if info != False:
    print(info, pr2)
  return guess
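The if/elif chain above can also be written as a lookup; a short sketch of the same counter-move logic, assuming the R/P/S ordering of the probabilities in pr:

import numpy as np

# Beating move for each predicted opponent move.
COUNTER = {'R': 'P', 'P': 'S', 'S': 'R'}

def counter_move(pr):
    # Return the move that beats the most probable predicted opponent move.
    predicted = "RPS"[int(np.argmax(pr))]
    return COUNTER[predicted]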
Example no. 9
    def get(self):
        while not self.stopped:
            if not self.grabbed:
                self.stop()
            else:
                (self.grabbed, frameLocal) = self.stream.read()
                frameLocal = cv2.resize(
                    frameLocal, (self.image_shape[1], self.image_shape[0]))
                self.frame = np.array(
                    helper.predict(self.sess, frameLocal, self.input_image,
                                   self.keep_prob, self.logits,
                                   self.image_shape))
Example no. 10
def predict_by_model():
    """
    Build the network graph, restore the saved model, and run prediction on the
    selected data source (video, image, or ZED camera).

    Key tensors:
    - nn_last_layer: TF Tensor of the last layer in the neural network
    - num_classes: number of classes to classify
    - input_image: TF Placeholder for input images
    - keep_prob: TF Placeholder for dropout keep probability
    - vgg_layer3_out: TF Tensor for VGG Layer 3 output
    - vgg_layer4_out: TF Tensor for VGG Layer 4 output
    - vgg_layer7_out: TF Tensor for VGG Layer 7 output
    """
    if path_data is False and pred_data_from != 'zed':
        exit("Path to the video is not set; pass the proper argument")

    # Path to vgg model
    vgg_path = os.path.join('./data', 'vgg')

    # If GPU memory is exceeded, disable the GPU with the config below
    if disable_gpu:
        tf_config = tf.ConfigProto(device_count={'GPU': 0})
    else:
        tf_config = tf.ConfigProto()

    with tf.Session(config=tf_config) as sess:
        # Predict the logits
        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(
            sess, vgg_path)
        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out,
                               num_classes)
        logits = get_logits(nn_last_layer, num_classes)

        # Restore the saved model
        saver = tf.train.Saver()
        saver.restore(sess, path_model)

        if pred_data_from == 'video':
            # Predict a video
            helper.predict_video(path_data, sess, image_shape, logits,
                                 keep_prob, input_image)
        elif pred_data_from == 'image':
            # Predict an image
            image = scipy.misc.imresize(scipy.misc.imread(path_data),
                                        image_shape)
            street_im = helper.predict(sess, image, input_image, keep_prob,
                                       logits, image_shape)

            current_dir = os.path.dirname(os.path.abspath(__file__))
            imagePath = os.path.join(current_dir, "image_predicted.png")

            scipy.misc.imsave(imagePath, street_im)

            print(colored("Image saved in {}".format(imagePath), 'green'))
        elif pred_data_from == 'zed':
            helper.read_zed(sess, image_shape, logits, keep_prob, input_image)
Example no. 11
def main():

    device = "cpu"
    if (arguments.gpu):
        if (torch.cuda.is_available()):
            device = "cuda"
            print("GPU is available")
        else:
            print("GPU is not available; the CPU will be used instead")

    print(
        h.predict(arguments.input, arguments.checkpoint, arguments.top_k,
                  arguments.category_names, device))
Example no. 12
def main():
    model = helper.load_checkpoint(path)
    with open('cat_to_name.json', 'r') as json_file:
        cat_to_name = json.load(json_file)
    probabilities = helper.predict(path_image, model, topk, power,
                                   number_of_outputs)
    labels = [
        cat_to_name[str(index + 1)] for index in np.array(probabilities[1][0])
    ]
    probability = np.array(probabilities[0][0])
    i = 0
    while i < number_of_outputs:
        print("{} with a probability of {}".format(labels[i], probability[i]))
        i += 1
    print("Predicting Done!")
Example no. 13
def Load_and_predict(model_save_path, model_name, processed_path, test_file_name, saved_csv_path, saved_csv_name):
    
    #TODO: delete class!
    
    # load the model architecture from JSON and create the model
    with open(model_save_path + model_name + '.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    model.summary()
    # load weights into new model
    model.load_weights(model_save_path+model_name+'.h5')

    ## Load test data
    test_file= test_file_name
    test_data= load_pkl(processed_path, test_file)

    test_inputs = test_data['input_obs']
    test_ruitu = test_data['input_ruitu']

    test_inputs = np.expand_dims(test_inputs, axis=0)
    test_ruitu = np.expand_dims(test_ruitu, axis=0)
    #add test ids
    test_ids=[]
    for i in range(10):
        test_ids.append(np.ones(shape=(1,37))*i)
    test_ids = np.stack(test_ids, axis=-1)
    # add time
    test_size = test_inputs.shape[0]
    test_times = np.array(range(37))
    test_times = np.tile(test_times,(test_size,1))

    pred_result, pred_var_result = predict(model, test_inputs, test_ruitu, test_ids, test_times)

    print(pred_result.shape)
    print(pred_var_result.shape)

    ### save the result for submit
    df_empty = renorm_for_submit(pred_mean=pred_result[0], pred_var=pred_var_result[0], ruitu_inputs=test_ruitu[0],
                                        timestep_to_ensemble=21, alpha=1)

    df_empty = df_empty.rename(columns={"t2m":"       t2m", 
                             "rh2m":"      rh2m",
                            "w10m":"      w10m"})

    save_path = saved_csv_path
    df_empty.to_csv(path_or_buf=save_path+saved_csv_name, header=True, index=False)
    print('Ok! You can submit now!')
Example no. 14
def classify_image(path_to_image, checkpoint, top_k, category_names, gpu):
    if not torch.cuda.is_available() and gpu:
        raise RuntimeError(
            "No GPU available to run inference. Remove the --gpu argument to use the CPU."
        )
    device = ('cuda' if gpu else 'cpu')

    model = helper.load_model(checkpoint)
    image_tensor = torch.tensor(helper.load_image(path_to_image))

    (probs, classes) = helper.predict(image_tensor, model, top_k, device)
    if category_names is not None:
        #convert the classes array to hold the string representation of the category
        with open(category_names, 'r') as f:
            cat_to_name = json.load(f)
        classes = [cat_to_name[class_] for class_ in classes]

    return (classes, probs)
Example no. 15
def main():

    # Get input arguments
    args = arg_parser()

    # Check if GPU is available
    device = helper.check_gpu(args.gpu)
    print('Using {} for computation.'.format(device))

    # Load the model from checkpoint
    model = helper.load_checkpoint(args.checkpoint_path)
    print("Model has been loaded from the checkpoint.")
    print("You will get predictions in a bit...")

    # Get predictions for the chosen image
    top_probs, top_labels, top_flowers = helper.predict(
        model, args.img_path, args.top_k)

    # Print top n probabilities
    helper.print_probability(top_flowers, top_probs)
Example no. 16
def predict_labels(review, model, tokenizer, label_index):
    """
    Takes in a list of sentences, runs prediction on each sentence, and drops
    all sentences whose confidence is less than 0.5.
    - review: list of raw sentences
    - model: Model object
    - tokenizer: fitted tokenizer used to turn the sentences into padded sequences
    - label_index: mapping from label indices to label names
    Returns:
    - prediction_list: a dictionary with key: index, value: ([(confidence, label), ..., (confidence, label)], sentence)
    """
    seq = tokenizer.texts_to_sequences(review)
    padded = pad_sequences(seq, maxlen = 100)
    pred = helper.predict(model, padded)
    
    filtered_indices = np.where(np.array(pred) > 0.5)
    i_review, j_label = filtered_indices[0], filtered_indices[1]
    
    prediction_list = helper.return_labels(i_review, j_label, pred, review, label_index)
    return prediction_list
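A minimal usage sketch; the tokenizer, labels, and sentences below are illustrative placeholders, and a trained multi-label model is assumed rather than constructed here:

from tensorflow.keras.preprocessing.text import Tokenizer

tokenizer = Tokenizer(num_words=1000)
tokenizer.fit_on_texts(["battery life is great", "the screen cracked easily"])
label_index = {0: "battery", 1: "screen", 2: "shipping"}

review = ["battery lasts two days", "screen scratches quickly"]
# helper.predict(model, padded) is assumed to return an array of shape
# (len(review), len(label_index)) holding per-label confidences.
# predictions = predict_labels(review, model, tokenizer, label_index)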
Example no. 17
    def get(self):
        #User Information
        username = session["username"]

        # Look up the logged-in user directly instead of scanning every user
        user = User.query.filter_by(username=username).first()

        account = user.account
        transactions = account.transactions
        balance = account.balance

        #Retrieve the linear prediction
        linearEqn = predict(transactions)

        #Retrieve the past 10 days of balance transformation
        past10DaysBalance = getLastNDays(transactions, balance, 10)

        predictionResponse = [linearEqn[0], linearEqn[1], past10DaysBalance]

        return predictionResponse
Example no. 18
def main():

    input_args = get_input_args()
    gpu = torch.cuda.is_available() and input_args.gpu
    print("Predicting on {} using {}".format("GPU" if gpu else "CPU",
                                             input_args.checkpoint))

    model = helper.load_checkpoint(input_args.checkpoint)

    if gpu:
        model.cuda()

    use_mapping_file = False

    if input_args.cat_names:
        with open(input_args.cat_names, 'r') as f:
            cat_to_name = json.load(f)
            use_mapping_file = True

    probs, classes = helper.predict(input_args.input, model, gpu,
                                    input_args.top_k)

    for i in range(input_args.top_k):
        name = cat_to_name[classes[i]] if use_mapping_file else classes[i]
        print("probability of class {}: {}".format(name, probs[i]))
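Many of these scripts delegate to a helper.predict that is not shown. A minimal sketch of what such a PyTorch top-k helper commonly looks like; the preprocessing values, the log-softmax output, and the class_to_idx attribute are assumptions, not the author's code:

import torch
from PIL import Image
from torchvision import transforms

def predict_topk(image_path, model, use_gpu=False, topk=5):
    # Return the top-k probabilities and class labels for one image (sketch).
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    device = torch.device("cuda" if use_gpu and torch.cuda.is_available() else "cpu")
    model.to(device).eval()

    image = preprocess(Image.open(image_path).convert("RGB")).unsqueeze(0).to(device)
    with torch.no_grad():
        log_probs = model(image)                     # assumes a log-softmax head
        probs, indices = torch.exp(log_probs).topk(topk)

    # Map output indices back to dataset class labels if the mapping is attached.
    idx_to_class = {v: k for k, v in getattr(model, "class_to_idx", {}).items()}
    classes = [idx_to_class.get(i, str(i)) for i in indices[0].tolist()]
    return probs[0].tolist(), classes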
Example no. 19
import argparse
from helper import load_checkpoint, process_image, predict, convertJSON

# Get data from command line
parser = argparse.ArgumentParser( description ='Script to predict the probability of type of flower in supplied image' )
parser.add_argument("image_path", help="Path to image for processing" )
parser.add_argument("checkpoint", help="Name and path for checkpoint file containing the trained network" )
parser.add_argument("--category_name", help="The path and name of *.json file containing the mapping of flower categories")
parser.add_argument("--top_k", help="Top probabilities to return, between 1 and 102", type=int, default=3)
parser.add_argument("--gpu", help="Train model via GPU", action="store_true", default=False)

args = parser.parse_args()

# Load the checkpoint and rebuild the model
model = load_checkpoint(args.checkpoint)

# Process image
tmp = process_image(args.image_path)

# Inference
top_k_probs, classes = predict(args.image_path, model, args.top_k, args.gpu )
print("Top K probabilities: {}".format( top_k_probs ))
if args.category_name is None:
    print("classes: {}".format( classes ))
else:
    cat_to_name = convertJSON( args.category_name )
    index_to_class = {val: key for key, val in model.class_to_idx.items()}
    top_classes = [index_to_class[each] for each in classes]
    names = [cat_to_name[x] for x in top_classes]

    print("Names of top K flowers: {}".format( names ))
Example no. 20
                    dest = 'hidden_units',
                    type = int, 
                    default = 512,
                    help = 'Number of hidden units')

parser.add_argument('--arch', action = 'store',
                    dest = 'arch',
                    type = str, 
                    default = 'densenet',
                    help = 'PreTrained Model Architecture, densenet or vgg')

results = parser.parse_args()
print('---------Parameters----------')
print('gpu              = {!r}'.format(results.gpu))
print('img              = {!r}'.format(results.img))
print('top_k            = {!r}'.format(results.top_k))
print('cat              = {!r}'.format(results.cat))
print('start            = {!r}'.format(results.start))

print('-----------------------------')

if results.start:
    model, class_labels = helper.load_saved_model()
    cat_to_name, label_order = helper.load_json(results.cat)
    ps, labels, index = helper.predict(results.img, model, results.top_k, cat_to_name, class_labels, results.gpu)
    print("------------------Prediction------------------")
    for i in range(len(ps)):
        print("The probability of the flower to be {} is {:.2f} %.".format(labels[i], ps[i] * 100))
Example no. 21
import argparse
import helper
import torch
import json

# Parsing arguments
parser = argparse.ArgumentParser(description='Predict flower type from image')
parser.add_argument('image', help='path to the image')
parser.add_argument('model', help='path to the model')
parser.add_argument('--top_k', default=1, help='return top KKK most likely classes')
parser.add_argument('--category_names', default=False, help='path to a mapping of categories to real names')
parser.add_argument('--gpu', action='store_const', const=True, default=False, help='use GPU for inference')
args = parser.parse_args()

# Loading model
model = helper.load_trained_model(args.model)
device = 'cuda' if args.gpu and torch.cuda.is_available() else 'cpu'
model.to(device)

# Inference
probs, classes = helper.predict(args.image, model, topk=int(args.top_k), device=device)

# Output
print(f"Class{' '*22}Probability")
if args.category_names:
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)
        classes = [cat_to_name[cl] for cl in classes]
for p, cl in zip(probs, classes):
    print(f"{cl:27s}{p:.5f}")
Example no. 22
    for i in range(0, num_iterations):
        A2, cache = forward_prop(X, param)
        c = cost(A2, Y)
        grads = back_prop(param, cache, X, Y)
        param = update(param, grads)

        if print_cost and i % 1000 == 0:
            print('Cost is ' + str(c))

    return param


# Visual
parameters = nn_model(X, Y, 4, num_iterations=50000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T).ravel(), X,
                       Y.ravel())
predictions = predict(parameters, X)
accuracy = float(
    (np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) /
    float(Y.size) * 100)
print("Accuracy for {} hidden units: {} %".format(4, accuracy))
plt.title("Decision Boundary for hidden layer size " + str(4))
plt.show()

# # Net Size
# plt.figure(figsize=(16, 32))
# hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
# for i, n_h in enumerate(hidden_layer_sizes):
#     plt.subplot(5, 2, i+1)
#     plt.title('Hidden Layer of size %d' % n_h)
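The predict used for the decision boundary and the accuracy above is not shown; a minimal sketch consistent with the forward_prop call in the training loop (class 1 wherever the final activation exceeds 0.5):

import numpy as np

def predict(parameters, X):
    # Assumes forward_prop returns (A2, cache) with A2 of shape (1, m).
    A2, _ = forward_prop(X, parameters)
    return (A2 > 0.5).astype(int)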
Example no. 23
import os

import helper
from dataset import classes_names

from PIL import Image
import numpy as np
import pandas as pd

test_data_dir = 'test'

if __name__ == '__main__':
    model_path = helper.train_and_evaluate()
    model = helper.get_model(model_path)

    img_names = []
    img_pils = []
    for img_name in os.listdir(test_data_dir):
        img = Image.open(os.path.join(test_data_dir, img_name))
        img = img.convert('RGB')
        img_names.append(img_name)
        img_pils.append(img)

    predicts = helper.predict(model, img_pils)  # outputs are classes indexes
    predict_classes = [classes_names[cls] for cls in predicts]  # names here

    # img_names = ['1.png', '2.png']
    # predict_classes = ['hello', 'world']

    pd_data = {'file': img_names, 'species': predict_classes}
    df = pd.DataFrame(pd_data)
    df.to_csv(os.path.join(helper.outputs_dir, 'prediction.csv'), index=False)
Example no. 24
    parser.add_argument('--seed',
                        type=int,
                        default=1234,
                        metavar='S',
                        help='# random seed (default: 1234)')

    parser.add_argument('--gpu',
                        action='store_true',
                        default=False,
                        help='Use GPU if available (default: False)')

    # args holds all passed-in arguments
    args = parser.parse_args()

    device = torch.device("cuda" if (
        args.gpu and torch.cuda.is_available()) else "cpu")

    print("Using device {}.".format(device))

    torch.manual_seed(args.seed)

    #     print(args)

    model = load_checkpoint(args.checkpoint, device)
    probs, pred_classes = predict(args.image_path, model, device, args.topk)

    actual_class, pred_classes = cat_to_name(args.image_path, pred_classes,
                                             args.category_names)
    print(actual_class)
    print(probs, pred_classes)
Example no. 25
#Use the parser 
parser = argparse.ArgumentParser()
parser.add_argument("image_path", help="The path of image file")
parser.add_argument("saved_model", help="Keras model file")
parser.add_argument("--top_k", type = int, help="Top k number of classes shown", default = 5)
parser.add_argument("--category_names", help="JSON file with the labels for classes", default = 'label_map.json')
args = parser.parse_args()

#Get the arguments
image_path = args.image_path
model_file = args.saved_model
top_k = args.top_k

class_names = {}
if args.category_names:
    category_names = args.category_names
    with open(category_names, 'r') as f:
        class_names = json.load(f)  

#load saved model
model = tf.keras.models.load_model(model_file, custom_objects={'KerasLayer':hub.KerasLayer})

#predict
probs, classes = helper.predict(image_path, model, top_k)

#print out results
print(f"probabilities: {probs}")
if class_names:
    classes = [class_names[str(n)] for n in classes]
print(f"classes: {classes}")
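Here again helper.predict is external to the snippet; a minimal sketch of a TensorFlow/Keras top-k helper with the same call signature (the 224x224 input size and [0, 1] scaling are assumptions about the preprocessing):

import numpy as np
import tensorflow as tf
from PIL import Image

def predict(image_path, model, top_k=5, image_size=224):
    # Return the top-k probabilities and class indices for one image (sketch).
    image = np.asarray(Image.open(image_path).convert("RGB"), dtype=np.float32)
    image = tf.image.resize(image, (image_size, image_size)) / 255.0
    image = tf.expand_dims(image, axis=0)             # add a batch dimension

    probs = model.predict(image)[0]                   # shape: (num_classes,)
    top_values, top_indices = tf.math.top_k(probs, k=top_k)
    return top_values.numpy().tolist(), top_indices.numpy().tolist()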
Example no. 26
        param.requires_grad = False

    if checkpoint['model_arch'] == 'densenet169':
        model.classifier = model_architecture.ModelArch(
            'densenet169', checkpoint['input_size'], checkpoint['output_size'],
            checkpoint['hidden_layers'])
    elif checkpoint['model_arch'] == 'resnet34':
        model.fc = model_architecture.ModelArch('resnet34',
                                                checkpoint['input_size'],
                                                checkpoint['output_size'],
                                                checkpoint['hidden_layers'])

    model.load_state_dict(checkpoint['state_dict'])

    return model


model = load_checkpoint(args.Path_to_saved_model_checkpoint)

# Predict name of flower
probs, classes = helper.predict(args.Path_to_image_file, model, device,
                                args.top_k_classes)

# Mapping
with open(args.category_names_json, 'r') as f:
    cat_to_name = json.load(f)

# Print results
helper.print_prediction_results(args.Path_to_image_file, probs, classes,
                                cat_to_name)
Example no. 27
inputs = np.array(inputs).T
targets = np.array(targets).T

# Scale pixel values to the range [0, 1].
inputs = inputs / 255.

print("Inputs Shape : ", inputs.shape)
print("Targets Shape : ", targets.shape)

### CONSTANTS ###
layers_dims = [len(inputs), 30, 20, 10]  #  4-layer model
print("layers_dims: ", layers_dims)

parameters = L_layer_model(inputs,
                           targets,
                           layers_dims,
                           num_iterations=2500,
                           print_cost=True)

test_data, test_file = load_test_data("test_conv.txt", test_path)

test_data = np.array(test_data).T
print("\n\nTest data shape: ", test_data.shape)

test_data = test_data / 255.
predictions = predict(test_data, parameters)
print("Predict shape: ", predictions.shape)

for i in range(0, len(test_file)):
    print(test_file[i], max(predictions[i]),
          np.where(predictions[i] == predictions[i].max()))
Example no. 28
    'checkpoint',
    default='/home/workspace/aipnd-project/checkpointlearnrate1.pth',
    nargs='*',
    action="store",
    type=str)
ap.add_argument('--top_k', default=5, dest="top_k", action="store", type=int)
ap.add_argument('--category_names',
                dest="category_names",
                action="store",
                default='cat_to_name.json')
ap.add_argument('--gpu', default="gpu", action="store", dest="gpu")

pa = ap.parse_args()
img_path = pa.input_img
num_outputs = pa.top_k
power = pa.gpu
input_img = pa.input_img
path = pa.checkpoint

trainloader, vldtnloader, testloader = helper.load_data()
model = helper.load_checkpoint(path)
with open('cat_to_name.json', 'r') as json_file:
    cat_to_name = json.load(json_file)
p = helper.predict(img_path, model, num_outputs, power)
labels = [cat_to_name[str(index + 1)] for index in np.array(p[1][0])]
probability = np.array(p[0][0])
i = 0
while i < num_outputs:
    print("{} has a probability of {}".format(labels[i], probability[i]))
    i += 1
print("finished")
Example no. 29
import helper

parser = argparse.ArgumentParser(description='Image Classifier')
parser.add_argument('--inp_image',type = str, default = 'flowers/valid/1/image_06755.jpg', help = 'Path to dataset directory')
parser.add_argument('--checkpoint',type=str,default='trained1.pth',help='Checkpoint')
parser.add_argument('--gpu',type=str,default='cpu',help='GPU')
parser.add_argument('--json_class',type=str,default='cat_to_name.json',help='JSON of key value')
parser.add_argument('--top_k',type=int,default=5,help='Top k classes and probabilities')
args=parser.parse_args()


class_to_name= helper.load_class(args.json_class)

model=helper.load(args.checkpoint)
print(model)

vals=torch.load(args.checkpoint)

image = helper.process_image(args.inp_image)


helper.imshow(image)

probs, classes = helper.predict(args.inp_image, model, args.top_k, args.gpu)  

print(probs)
print(classes)

helper.display_image(args.inp_image, class_to_name, classes,probs)

Example no. 30
                    default='cat_to_name.json')
parser.add_argument('--gpu', default="gpu", action="store", dest="gpu")

pa = parser.parse_args()
image_path = pa.input_img
topk = pa.top_k
gpu_cpu = pa.gpu
input_img = pa.input_img
filepath = pa.checkpoint

training_loader, testing_loader, validation_loader = hp.load_data()

# load previously saved checkpoint
model = hp.load_checkpoint(filepath)

# load label conversion
with open('cat_to_name.json', 'r') as json_file:
    cat_to_name = json.load(json_file)

probabilities = hp.predict(image_path, model, topk, gpu_cpu)

top5_values = np.array(probabilities[0][0])
top5_value_categories = [
    cat_to_name[str(i)] for i in np.array(probabilities[1][0])
]

i = 0
while i < topk:
    print("{} is the category with a {} probability".format(
        top5_value_categories[i], top5_values[i]))
    i += 1
Example no. 31
    argparser = argparse.ArgumentParser(description='Submission Script')
    argparser.add_argument('-r',
                           '--run_folder',
                           type=str,
                           required=True,
                           help='Path to a run folder')

    args = argparser.parse_args()
    run_folder = args.run_folder

    neuroticism = 'configs/neuroticism.json'
    extroversion = 'configs/extroversion.json'
    openness = 'configs/openness.json'
    agreeableness = 'configs/agreeableness.json'
    conscientiousness = 'configs/conscientiousness.json'

    CONFIGS = OrderedDict([
        ('neuroticism', os.path.join(run_folder, neuroticism)),
        ('extroversion', os.path.join(run_folder, extroversion)),
        ('openness', os.path.join(run_folder, openness)),
        ('agreeableness', os.path.join(run_folder, agreeableness)),
        ('conscientiousness', os.path.join(run_folder, conscientiousness))
    ])

    outputfile_name = 'results.txt'

    predictions = predict(CONFIGS)
    print("Prediction Finished!")
    write_predictions(predictions, os.path.join(run_folder, outputfile_name))
    print("Wrote Predictions to {}".format(outputfile_name))