def start_predict(args):
    # ~ print(args)
    options = {
        'img_path': args.image_path,
        'weights_path': args.weights_path,
        'result_dir': args.result_image_dir
    }
    predict(options)
def main():
    
    # Measures total program runtime by collecting start time
    start_time = time()
    
    # Collect prediction parameters from the console interface
    in_arg = get_input_predict()
       
    # Check consistency of the input parameters    
    control_input_args_predict(in_arg.checkpoint, in_arg.mapping_file, in_arg.arch, in_arg.gpu, int(in_arg.topk))
    
    # Choose between GPU and CPU
    if in_arg.gpu[:1].lower() == 'y':
        device = 'cuda:0'
    else:
        device = 'cpu'
    print(f"\n Running on '{device}' device...\n")
        
    # Predict the class of the entered flower image with the help of the chosen trained model
    probs, classes = predict(in_arg.flower_image, in_arg.checkpoint, in_arg.arch, device, int(in_arg.topk))  
    
    # Determine the class with the highest probability
    i_max = 0
    prob_max = 0
    for i in range(len(probs)):
        if probs[i] > prob_max:
            i_max = i
            prob_max = probs[i]
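    # A more compact equivalent of the argmax loop above (a sketch; assumes
    # probs is a plain sequence of numbers):
    # i_max = max(range(len(probs)), key=lambda i: probs[i])
    # prob_max = probs[i_max]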
    
    # Mapping of Class value to category name
    with open(in_arg.mapping_file, 'r') as f:
        cat_to_name = json.load(f)
       
    classes_name = []
    for classe in classes:
        for key, value in cat_to_name.items():
            if str(key) == str(classe):
                classes_name.append(str(value))
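    # An equivalent lookup without the nested loops (a sketch; assumes every
    # class label appears as a key in cat_to_name):
    # classes_name = [str(cat_to_name[str(classe)]) for classe in classes]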
    
    # Printing results of the prediction
    print(f"\nThe network used is for this classification is : {in_arg.checkpoint} \n")
    print(f"Top 5 classes are: {classes} \n ")
    print(f"Top 5 classes names are: {classes_name} \n ")
    print(f"Top 5 classes probabilities  are: {probs} \n ")
    print(f"The classe with highest probability is {classes_name[i_max]} with a probability of  {probs[i_max]} \n ")

    # Measure total program runtime by collecting end time
    end_time = time()
    # Computes overall runtime in seconds & prints it in hh:mm:ss format
    tot_time = end_time - start_time
    print("\n** Total Elapsed Runtime:",
          str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
          +str(int((tot_time%3600)%60)) )
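    # The same hh:mm:ss formatting can be written with divmod (a sketch,
    # assuming tot_time is a duration in seconds):
    # hours, rem = divmod(int(tot_time), 3600)
    # minutes, seconds = divmod(rem, 60)
    # print(f"\n** Total Elapsed Runtime: {hours}:{minutes:02d}:{seconds:02d}")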
def main():

    # Parse the arguments passed by the user
    parsed_arguments = arg_parser()

    # Check if both checkpoint and image file exist
    if os.path.isfile(parsed_arguments.path_to_image) and os.path.isfile(
            parsed_arguments.checkpoint):

        # Make sure that the model is available on the used device
        device = torch.device("cuda:0" if (
            torch.cuda.is_available() and parsed_arguments.gpu) else "cpu")
        print(device)

        # Load a model from a checkpoint
        loaded_model = model_functions.load_checkpoint(
            parsed_arguments.checkpoint)
        loaded_model.to(device)
        #print(loaded_model)
        processed_image = data_functions.process_image(
            parsed_arguments.path_to_image)

        # Don't allow the top_K to be lower than 1
        topk = parsed_arguments.top_K
        if topk < 1:
            topk = 1
        probs, classes = model_functions.predict(processed_image, loaded_model,
                                                 parsed_arguments.gpu, topk)

        if os.path.isfile(parsed_arguments.category_names):
            # Create a mapping from category label to category name
            cat_to_name = data_functions.label_mapping(
                parsed_arguments.category_names)
            for item, prob in zip(classes, probs):
                print("The probability that image {} is a '{}' is {}%".format(
                    parsed_arguments.path_to_image, cat_to_name[str(item)],
                    prob))
        else:
            print(
                'Failed to get category to name mapping! File does not exist: {}'.format(
                    parsed_arguments.category_names))
            print('Probabilities {}'.format(probs))
            print('Classes {}'.format(classes))
    else:
        print(
            "Either image or the checkpoint file doesn't exist. Please check the parameters."
        )
    return
userText = input(
    'Type your tweet (up to 280 characters) to check positivity or \'end\' if you\'re done: '
)
while userText != 'end':
    assert len(userText) <= 280
    if input('Select the model to use: Tf-Idf (1) or Count Vectorizer (2): '
             ) == '1':

        model = TfIdf_Model
        vectorizer = TfIdf_Vectorizer
        modelLabel = 'Tf-Idf'

    else:
        model = CountVect_Model
        vectorizer = CountVect_Vect
        modelLabel = 'Count Vectorizer'
    tweetObj = MF.predict([userText], vectorizer, model, silence=True)

    tweetColor = np.ones((3, 3)) * tweetObj[0][2]
    tweetColor = tweetColor if tweetObj[0][1] == 'Positive' else -1 * tweetColor
    tweetColor = (tweetColor + 1) / 2

    fig, ax = plt.subplots()
    im = ax.imshow(tweetColor, cmap=cm.get_cmap('RdYlGn'), vmax=1.0, vmin=0.0)
    ax.axis('off')
    if len(userText) > 20:
        fontSize = 15
    else:
        fontSize = -3 / (2 * len(userText)) + 30
    plt.text(1,
             1,
             userText,
             fontsize=fontSize)
                    help='class_to_name json file')
parser.add_argument('--device', type=str, default='cuda', help='GPU or CPU')

arguments = parser.parse_args()

# Load in a mapping from category label to category name
class_to_name_map = utility_functions.load_json(arguments.json)

# Load pretrained network
model = model_functions.get_checkpoint(arguments.checkpoint)
print(model)

checkpoint = torch.load(arguments.checkpoint)

# Scales, crops, and normalizes a PIL image for the PyTorch model; returns a Numpy array
image = utility_functions.process_image(arguments.image_path)
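
# A minimal sketch of what a process_image helper like the one above typically
# does for torchvision models: resize, center-crop to 224x224, normalize with
# the ImageNet statistics, and reorder to channels-first. This is an assumed
# implementation, not the project's actual helper.
def process_image_sketch(image_path):
    from PIL import Image as PILImage
    import numpy as np

    img = PILImage.open(image_path).convert('RGB')
    img = img.resize((256, 256))
    left = (256 - 224) // 2
    img = img.crop((left, left, left + 224, left + 224))
    np_img = np.array(img) / 255.0
    np_img = (np_img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    return np_img.transpose((2, 0, 1))  # channels-first for PyTorch
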

# Display image
processing_functions.imshow(image)

# Highest k probabilities and the indices of those probabilities corresponding to the classes (converted to the actual class labels)
probabilities, classes = model_functions.predict(arguments.image_path, model,
                                                 arguments.topk,
                                                 arguments.device)

print(probabilities)
print(classes)

# Display the image along with the top 5 classes
processing_functions.display_image(arguments.image_path, class_to_name_map,
                                   classes)
Example #6
    return parser.parse_args()

arguments = get_input_args()

#getting the arguments ready to use
if arguments.gpu:
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
    device = "cpu"

#load checkpoint
loaded_checkpoint = load_checkpoint(arguments.checkpoint, device)
criterion = loaded_checkpoint['criterion']
optimizer = loaded_checkpoint['optimizer']
epochs = loaded_checkpoint['epochs']
model = loaded_checkpoint['model']
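
# A plausible shape for the load_checkpoint helper used above (a sketch; it
# assumes the training script saved the model together with its training state
# under these keys, which may differ from the real project):
def load_checkpoint_sketch(filepath, device):
    checkpoint = torch.load(filepath, map_location=device)
    checkpoint['model'].to(device)
    return checkpoint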

#load class names mapping
mapping_of_categories = load_classes_json(arguments.category_names)

# predict top-k probabilities
top_outputs = predict(arguments.path, model, mapping_of_categories, arguments.top_k, device)

# print("Command Line Arguments:\n    path =", arguments.path,
#       "\n    checkpoint =", arguments.checkpoint,
#       "\n    category_names =", arguments.category_names,
#       "\n    gpu =", arguments.gpu,
#       "\n    top_k =", arguments.top_k)

print(top_outputs)
Example #7
def classify():
    image = None

    #----------------- Uploading User Tree image from URL or File -------------------

    st.title(
        'Please provide an image. It will be identified as a pepper tree, a willow tree, or neither.'
    )

    #----------------- Side Bar Options for User Image ------------------------------
    st.title("Upload Options")
    input_method = st.radio("Options", ('File Upload', 'URL'))

    flag = 0
    if input_method == 'File Upload':
        user_upload_image = st.file_uploader("Upload a picture of Tree",
                                             type=['png', 'jpeg', 'jpg'])
        if user_upload_image is not None:
            file_details = {
                "FileName": user_upload_image.name,
                "FileType": user_upload_image.type,
                "FileSize": user_upload_image.size
            }
            # st.write(file_details)
            flag = 1
        if flag == 1:
            image_source = user_upload_image.name
            image = Image.open(user_upload_image)
            st.image(
                image,
                caption=user_upload_image.name + '  ' +
                user_upload_image.type + '  ' + str(user_upload_image.size) +
                ' bytes',
                width=300,
                use_column_width=True,
            )

    # Image from URL
    if input_method == 'URL':
        image_url = st.text_area("Enter the complete Url",
                                 key="user_url_choice")
        image_url_status = st.button('Upload')

        if image_url_status:
            image_source = image_url
            image = Image.open(urllib.request.urlopen(image_url))
            st.image(
                image,
                caption=image_url,
                width=300,
                use_column_width=True,
            )
        else:
            st.warning('click on upload')

    #----------------------  Choosing Classification Method ---------------------------
    st.title('Choose the model for Analysis')
    model_selected = st.radio("Options", ['Pre Trained Model'])

    # model_selected = st.sidebar.radio(
    # 	"Options",
    # 	('Pre Trained Model', 'CNN Model', 'FFN Model', 'Random Forest', 'Logistic Regression', 'Ensemble'),0)

    if model_selected == 'Pre Trained Model':
        model_selected2 = st.selectbox("Choose the Pretrained Model",
                                       ['VGG16'])

        # model_selected2 = st.sidebar.selectbox("Choose the Pretrained Model",
        # 	['VGG16','PTM2','PTM2','PTM2','PTM2','PTM2','PTM2','PTM2'])

    if model_selected2 == 'VGG16' and image is not None:
        # note that the predict function returns top_probabilities, top_classes
        model = load_checkpoint(
            '/home/kate/data_and_models/ai_models/vgg16-tree-3class-model.pth')
        probs, classes = predict(image, model)

        st.title('Tree Classification Results')

        with open('tree_to_name.json', 'r') as f:
            tree_to_name = json.load(f)
        tree_names = [tree_to_name[i] for i in classes]
        chart_data = pd.DataFrame(data=[probs], columns=tree_names)

        # st.write("chart_data type ", type(chart_data))

        if (chart_data["willow tree"][0]) > 0.5:
            tree_detected = "Willow Tree"
        elif (chart_data["pepper tree"][0]) > 0.5:
            tree_detected = "Pepper Tree"
        else:
            tree_detected = "Not a Pepper Tree or a Willow Tree"

        st.write('The image is: ', tree_detected)
        st.write('Confidence in the image identification:', chart_data)

        st.bar_chart(chart_data)
style_image = cv2.resize(style_image,
                         (style_scaled_width, content_scaled_height))

# Draw content and style images
st.image([content_image, style_image],
         caption=['Content Image', 'Style Image'],
         width=None)

# Create options dictionary for predict function
options = dict()
options['img_path'] = content_image
options['weights_path'] = weights_path
options['result_dir'] = None

# Predict and display result
predicted_img = predict(options, write_result=False)
st.write("Result image")
st.image(predicted_img[:, :, ::-1], width=500)

# Download results
result = Image.fromarray(predicted_img)


def get_image_download_link(img):
    """Generates a link allowing the PIL image to be downloaded.

    in:  PIL image
    out: href string
    """
    buffered = BytesIO()
    img.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    # Build a data-URI download link (the "result.jpg" file name is an assumption)
    href = f'<a href="data:file/jpg;base64,{img_str}" download="result.jpg">Download result image</a>'
    return href
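
# Typical Streamlit usage of the helper above (a sketch; whether the original
# script rendered the link this way is an assumption):
# st.markdown(get_image_download_link(result), unsafe_allow_html=True)
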
def check_mentions(api, since_id, time_now):
    new_since_id = since_id
    print('new check')

    for tweet in tweepy.Cursor(api.mentions_timeline,
                               since_id=since_id).items():
        user_handle = tweet.user.screen_name

        new_since_id = max(tweet.id, new_since_id)

        if time_now > tweet.created_at:
            continue

        print('new tweet since id', new_since_id)
        if tweet.in_reply_to_status_id is not None:
            continue

        if not tweet.user.following:
            tweet.user.follow()

        # Parse user input
        tweet_input = tweet.text.replace('@musicpsychic2', '')
        separator = tweet_input.find(',')
        song_name = tweet_input[:separator].strip()
        artist_name = tweet_input[separator + 1:].strip()

        print(artist_name)
        print(song_name)

        # Retrieve song dictionary
        song_dict = read_song_dict(artist_name, song_name)
        # print(song_dict['valence'])

        if song_dict == {}:
            api.update_status(
                status="@" + user_handle + " I can't find the song " +
                song_name + " or the artist " + artist_name +
                " please check your spelling and the list of artists I have data on in my pinned tweet.",
                in_reply_to_status_id=tweet.id)
            continue

        # retrieve top 5 words for song
        rank_df = get_song_tf_idf(song_dict, 5)
        # retrieve positive vs. negative prediction from predict function
        song_pred = predict(song_dict)

        # create response string
        # Change response to say: artist talks about blank, blank, ... and blank to convey generally ___ emotions
        response = ' ' + artist_name + ' uses words like'
        word_list = list(rank_df.index)
        for i in range(len(word_list)):
            if i == len(word_list) - 1:
                response += ' and ' + word_list[i]
            else:
                response += ' ' + word_list[i] + ','
        response += ' to convey '

        if song_pred == 1:
            response += "generally positive emotions in " + song_name + '.'
        else:
            response += "generally negative emotions in " + song_name + '.'

        api.update_status(status="@" + user_handle + response,
                          in_reply_to_status_id=tweet.id)
    return new_since_id
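
# A sketch of the polling loop that would drive check_mentions above; the
# credential placeholder names, the 60-second interval, and the use of a fixed
# UTC start time are assumptions:
# if __name__ == '__main__':
#     auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
#     auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
#     api = tweepy.API(auth, wait_on_rate_limit=True)
#     start_time = datetime.datetime.utcnow()
#     since_id = 1
#     while True:
#         since_id = check_mentions(api, since_id, start_time)
#         time.sleep(60)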
Example #10
                    help='class_to_name json file')
parser.add_argument('--gpu', type=str, default='cuda', help='GPU or CPU')

arguments = parser.parse_args()

# Load in a mapping from category label to category name
class_to_name_dict = processing_functions.load_json(arguments.json)

# Load pretrained network
model = model_functions.load_checkpoint(arguments.checkpoint)
print(model)

checkpoint = torch.load(arguments.checkpoint)

# Scales, crops, and normalizes a PIL image for the PyTorch model; returns a Numpy array
image = processing_functions.process_image(arguments.image_dir,
                                           checkpoint['hidden_layer_units'])

# Display image
processing_functions.imshow(image)

# Highest k probabilities and the indices of those probabilities corresponding to the classes (converted to the actual class labels)
probabilities, classes = model_functions.predict(arguments.image_dir, model,
                                                 arguments.topk, arguments.gpu)

print(probabilities)
print(classes)

# Display the image along with the top 5 classes
processing_functions.display_image(arguments.image_dir, class_to_name_dict,
                                   classes)
Example #11
                    type=int)
parser.add_argument('--category_names',
                    dest="category_names",
                    action="store",
                    default='cat_to_name.json')

parse = parser.parse_args()
outputs = parse.top_k
processor = parse.gpu
input_img = parse.img
chkt_path = parse.checkpoint

model_load = model_functions.load_checkpoint(chkt_path)

# cpu
device = torch.device("cpu")
# gpu
if parse.gpu:
    device = torch.device("cuda:0")

with open(parse.category_names) as json_file:
    cat_to_name = json.load(json_file)

top_probabilities, top_classes = model_functions.predict(
    input_img, model_load, outputs)

print(top_probabilities)
print(top_classes)
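
# cat_to_name is loaded above but never used in this snippet; mapping the
# predicted classes to readable names would look something like this (a
# sketch, assuming the class labels match the JSON keys):
# top_names = [cat_to_name[str(c)] for c in top_classes]
# print(top_names)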
parser.add_argument('image', action='store')
parser.add_argument('checkpoint', action='store')
parser.add_argument('--cat_names', action='store', default='cat_to_name.json')
parser.add_argument('--gpu', action='store', type=bool, default=True)
parser.add_argument('--top_k', action='store', type=int, default=5)

args = parser.parse_args()

##### PREDICTION
### Load model checkpoint and dictionary
#train_dataset, trainloader = load_train('flowers/train')
model, epochs, learn, optim_state, optimizer, criterion, classes_d = load_checkpoint(
    args.checkpoint)
label_dict = get_labels(args.cat_names)
#model.class_to_idx = train_dataset.class_to_idx

### Configure device to use - gpu or cpu
device = choose_device(args.gpu)

### Load and process image, run forward propagation to obtain probabilities and classes for top_k predictions
image = process_image(args.image)

probabilities, classes = predict(image, model, device, classes_d, k=args.top_k)

### Print result
print('The top {} predictions of the model are the following:'.format(
    args.top_k))

for p, c, r in zip(probabilities, classes, range(1, args.top_k + 1)):
    print('{}. {}, p = {}'.format(r, label_dict[str(c)], round(p, 3)))
Example #13
# Load dictionary mapping labels to category names
cat_to_name = 'cat_to_name.json'
label_dict = label_mapping(cat_to_name)

# Run the image through the model and obtain the topk predictions
topk = 5
# Identify the device on which the prediction will be performed
#GPU = arg_dict['gpu']
GPU = False
if GPU:
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

probs, classes = predict(image, model, topk, device)

# Create list of labels for the topk classes
labels = []
for cls in classes:
    labels.append(label_dict[cls])

# Plot the cropped input image and the topk classes with probabilities
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(4, 8))
# Undo transposition of dimensions required by PyTorch
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
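
# A sketch of how the figure would typically be finished: show the
# de-normalized image on ax1 and the top-k probabilities as a horizontal bar
# chart on ax2 (the exact plotting calls are assumptions):
# ax1.imshow(np.clip(image, 0, 1))
# ax1.axis('off')
# ax2.barh(labels, probs)
# ax2.invert_yaxis()
# plt.tight_layout()
# plt.show()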
Example #14
        vectorizer = CountVect_Vect
        modelLabel = 'Count Vectorizer'
    print('Retrieving 100 of the most recent tweets of user: ' + userDat['data'][0]['name'], '...')

    userTweets, tweetDates = user_tweets.main(userDat['data'][0]['id'],
                                              tweetCount)
    # strip punctuation
    userTweets = [
        ''.join(c for c in s if c not in string.punctuation)
        for s in userTweets
    ]
    tweetWords = [word for line in userTweets for word in line.split()]
    wordCounter = Counter({})
    wordCounter = wordCounter + Counter(tweetWords)
    tweetObj = MF.predict(userTweets, vectorizer, model, silence=True)

    stopwords = set(STOPWORDS)
    redlist = []
    greenlist = []

    # count word frequency
    wordCounterDict1 = dict(wordCounter)
    wordCounterDict1 = dict(
        sorted(wordCounterDict1.items(),
               key=lambda item: item[1],
               reverse=True))
    listDict1 = list(wordCounterDict1.keys())
    wordCounterDict = wordCounterDict1
    listDict = list(wordCounterDict.keys())
from model_functions import build_model, load_checkpoint, predict
import argparse

parser = argparse.ArgumentParser(
    description='use a trained model to predict the class of an image')
parser.add_argument('path_to_image', help='the path of image')
parser.add_argument('model_directory',
                    help='the directory of model that has been trained')
parser.add_argument('--top_K',
                    type=int,
                    default=3,
                    help='return the top K most possible classes')
parser.add_argument('--category_names',
                    default='',
                    help='output the actual class name')
parser.add_argument('--gpu',
                    action='store_true',
                    default=False,
                    help='use the GPU for prediction if available')
args = parser.parse_args()

model, other_info = load_checkpoint(args.model_directory)
image = process_image(args.path_to_image)
classes, probs = predict(image,
                         model,
                         topk=args.top_K,
                         category_name=args.category_names,
                         gpu_truth=args.gpu,
                         other_info=other_info)
print(classes)
print(probs)
Example #16
args = vars(parser.parse_args())
args_dict = dict(
    filter(lambda elem: (elem[1] is not None) and (elem[1] != False),
           args.items()))

kwargs_mapping = {
    k: v
    for (k, v) in args_dict.items() if k in ['category_names']
}
cat_to_name = utility.load_class_mapping(**kwargs_mapping)

model, optimizer = utility.load_checkpoint(args_dict['checkpoint'])

kwargs_predict = {
    k: v
    for (k, v) in args_dict.items() if k in ['top_k', 'gpu']
}
probs, classes = model_functions.predict(args_dict['image_path'], model,
                                         **kwargs_predict)

classes_names = [cat_to_name[cl] for cl in classes]
predictions = zip(classes_names, probs)
print("Prediction:")
for cl, p in predictions:
    print("{} with a probability of {:.3}".format(cl, p))

## Test: python predict.py "flowers/test/1/image_06743.jpg" checkpoint_1.pth --top_k 3