def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded to 0.0 or 1.0.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Process input_data so that it is ready to be sent to our model:
    # a fixed-length (500) integer sequence plus the original length.
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    with torch.no_grad():
        output = model(data)

    # BUG FIX: Tensor.numpy() raises on CUDA tensors — move to CPU first.
    result = np.round(output.cpu().numpy())

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        int: 1 (positive) or 0 (negative).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    with torch.no_grad():
        output = model(data)

    # BUG FIX: move the output to the CPU before .numpy(), otherwise this
    # raises when the endpoint runs on a GPU instance.
    result = int(np.round(output.cpu().numpy()))

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding 0 or 1.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Process input_data so that it is ready to be sent to our model.
    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    # BUG FIX: was assigned to `data_x` but used below as `data_X` (NameError).
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Put the model into evaluation mode.
    model.eval()

    # .item() extracts the scalar regardless of device, so no .cpu() needed.
    with torch.no_grad():
        result = np.array(round(model(data).item()))

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded and cast to int (0 or 1).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Clean the raw review, then convert it to a padded integer sequence.
    words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    model.eval()

    with torch.no_grad():
        output = model(data)

    # BUG FIX: CUDA tensors cannot be converted by .numpy() directly;
    # move the tensor to host memory first.
    result = np.round(output.cpu().numpy()).astype(int)

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded to 0.0 or 1.0.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Clean the raw review text.
    review_words = review_to_words(input_data)

    # convert_and_pad returns (padded review, unpadded length); unpack both
    # in one call. BUG FIX: was called twice and indexed, doing the
    # conversion work twice for no benefit.
    data_X, data_len = convert_and_pad(model.word_dict, review_words)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # torch.no_grad() skips autograd bookkeeping — faster and lighter on
    # memory while making predictions.
    with torch.no_grad():
        output = model(data)

    # BUG FIX: move to CPU before .numpy(); raises on CUDA tensors otherwise.
    result = np.round(output.cpu().numpy())

    return result
def convert_and_pad_data(word_dict, data, pad=500):
    """Convert and pad a batch of tokenized reviews.

    Args:
        word_dict (dict): vocabulary mapping words to integer ids.
        data (iterable): sequence of tokenized sentences.
        pad (int): fixed output length for each converted sentence.

    Returns:
        tuple[numpy.ndarray, numpy.ndarray]: (converted reviews, lengths).
    """
    result = []
    lengths = []

    for sentence in data:
        # BUG FIX: the loop target previously reused the name `lengths`,
        # clobbering the accumulator list so `lengths.append(...)` failed
        # on an int from the second iteration on.
        converted, length = convert_and_pad(word_dict, sentence, pad)
        result.append(converted)
        lengths.append(length)

    return np.array(result), np.array(lengths)
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        int: 1 (positive) or 0 (negative).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Only one review is processed at a time here, so no batch conversion
    # is needed. (A dead commented-out copy of the training-time batch
    # conversion loop was removed.)
    test_X = review_to_words(input_data)  # tokenize and filter the raw text
    data_X, data_len = convert_and_pad(model.word_dict, test_X)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    with torch.no_grad():
        output = model(data)
        # Switch to CPU so .numpy() works on GPU-backed deployments.
        output = output.to('cpu')
        result = np.round(output.numpy())
        result = int(result)

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of provided input data with the trained model.

    Args:
        input_data (str): unprocessed input data of IMDB reviews
        model (LSTMClassifier): the deserialized trained model

    Returns:
        numpy.ndarray: a scalar array holding 0.0 or 1.0

    Raises:
        Exception: if the model was loaded without its word dictionary
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Process input_data so that it is ready to be sent to our model:
    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # Construct the input tensor; the model expects 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    with torch.no_grad():
        output = model(data)

    # BUG FIX: .numpy() raises on CUDA tensors — bring to CPU first.
    result = np.round(output.cpu().numpy())

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string, with debug logging.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded and cast to int (0 or 1).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    print("Input data = {}".format(input_data))

    # First - convert to words.
    sentence = review_to_words(input_data)
    print("Converted to words = {}".format(sentence))

    # Second - convert to int and pad to 500.
    data_X, data_len = convert_and_pad(model.word_dict, sentence, 500)
    print("Converted to int array = {}".format(data_X))
    print("Length = {}".format(data_len))

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)
    print("Data formatted in a tensor = {}".format(data))

    # Make sure to put the model into evaluation mode.
    model.eval()

    # IMPROVED: no_grad skips graph construction during inference.
    with torch.no_grad():
        output = model(data)
    print("Model output = {}".format(output))

    # BUG FIX: move to CPU before .numpy() (raises for CUDA tensors).
    result = output.cpu().numpy().round().astype(int)
    print("Result = {}".format(result))

    return result
def predict_fn(input_data, model):
    """Predict the sentiment (0/1) of one raw review.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding 0.0 or 1.0.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Tokenize the review and map it onto a padded integer sequence.
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))
    data_X, data_len = np.array(data_X), np.array(data_len)

    # Pack as 'len, review[500]' — the layout the model was trained on.
    packed = np.hstack((data_len, data_X)).reshape(1, -1)
    data = torch.from_numpy(packed).to(device)

    # Evaluation mode: disables dropout and similar training-only layers.
    model.eval()

    with torch.no_grad():
        output = model(data)
        # Bring the result back to host memory (required when on GPU),
        # then round the sigmoid output to a hard 0/1 label.
        result = np.round(output.to('cpu').numpy())

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding the int label 0 or 1.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    with torch.no_grad():
        result = model(data)

    # BUG FIX: Tensor.numpy() fails on CUDA tensors; move to CPU first.
    result = result.cpu().numpy()

    # Round the sigmoid output and convert to an int label.
    result = np.array(int(np.around(result)))

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding 0.0 or 1.0.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data),
                                       pad=500)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # BUG FIX: the previous version re-wrapped the output via
    # torch.tensor(..., requires_grad=True) and then called torch.round on a
    # NumPy array, which raises a TypeError. Compute the label directly
    # under no_grad and move to CPU before converting to NumPy.
    with torch.no_grad():
        output = model(data)

    result = np.round(output.squeeze().cpu().numpy())

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded and cast to int (0 or 1).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Tokenize the review, then map words to ids and pad to length 500.
    words_in_review = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words_in_review)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # detach() drops the autograd graph; cpu() is required before numpy()
    # when the tensor lives on a CUDA device.
    output = model(data).detach().cpu().numpy()
    print(output)

    # BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int.
    result = np.round(output).astype(int)

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        int: 1 (positive) or 0 (negative).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # IMPROVED: removed redundant `data_X = None` / `data_len = None` /
    # `result = None` pre-initializations — all are unconditionally assigned.
    # Tokenize, then map to a padded integer sequence of length 500.
    words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # no_grad is memory efficient: no gradients are tracked for backward.
    with torch.no_grad():
        output = model(data)
        # Move to host memory so .numpy() works on GPU deployments.
        output = output.to('cpu')
        result = int(np.round(output.numpy()))

    return result
def predict_fn(input_data, model):
    """Predict the 0/1 sentiment label for one raw review.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding the int label 0 or 1.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Tokenize the review, then convert it to a padded id sequence.
    tokens = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, tokens)

    # Pack as 'len, review[500]' and ship it to the active device.
    packed = np.hstack((data_len, data_X)).reshape(1, -1)
    data = torch.from_numpy(packed).to(device)

    # Evaluation mode disables training-only behavior such as dropout.
    model.eval()

    # detach() severs the autograd graph; round() snaps the sigmoid output
    # to 0/1, and int() extracts the scalar (works on any device).
    output = model(data)
    label = output.detach().round()
    return np.array(int(label))
def predict_fn(input_data, model):
    """Predict the 0/1 sentiment label for one raw review.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        int: 1 (positive) or 0 (negative).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Clean the review, then convert it to a padded id sequence.
    # convert_and_pad yields (working_sentence, min(len(sentence), pad));
    # the default pad value is 500.
    tokens = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, tokens)

    # Pack as 'len, review[500]' — the layout the model was trained on.
    packed = np.hstack((data_len, data_X)).reshape(1, -1)
    data = torch.from_numpy(packed).to(device)

    # Switch off training-only behavior before inference.
    model.eval()

    # Disable autograd for the forward pass.
    with torch.no_grad():
        outputs = model(data)

    # cpu() brings the tensor into host memory, numpy() converts it, and
    # np.round + int collapse the sigmoid output to a hard label.
    return int(np.round(outputs.cpu().numpy()))
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        int: 1 (positive) or 0 (negative).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # BUG FIX: convert_and_pad returns (review, length), so hstacking the
    # raw tuple packed the length LAST. The model expects the layout
    # 'len, review[500]' — the length must come first.
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))
    data_pack = np.hstack((data_len, data_X)).reshape((1, -1))

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    return int(np.round(model(data).detach().cpu().numpy()))
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded to 0.0 or 1.0.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # First process the raw input data into filtered words.
    reviewed_words = review_to_words(input_data)
    word_dictionary = model.word_dict

    # Then convert the words into a padded integer sequence plus its length.
    data_X, data_len = convert_and_pad(word_dictionary, reviewed_words)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # One forward pass with autograd disabled (prediction only).
    with torch.no_grad():
        output = model(data)

    # BUG FIX: move the tensor to the CPU before .numpy(), otherwise this
    # raises on a GPU-backed endpoint. Round so only 0 or 1 remains.
    output = output.cpu().numpy()
    rounded_output = output.round()

    return rounded_output
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded and cast to int (0 or 1).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Tokenize, then map words to a padded integer sequence plus its length.
    review_words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, review_words)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # Forward pass; move the prediction to the CPU before converting.
    prediction = model(data).detach().cpu().numpy()
    print(prediction)

    # BUG FIX: astype(int) TRUNCATES, so any sigmoid output below 1.0
    # became 0 — round first. Also: np.int was removed in NumPy 1.24.
    result = np.round(prediction).astype(int)

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded and cast to int (0 or 1).

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Tokenize and convert the review into (padded sequence, length).
    processed_data = convert_and_pad(model.word_dict,
                                     review_to_words(input_data))
    data_X = processed_data[0]
    data_len = processed_data[1]

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    print("data input into the model is", data)
    output = model(data)
    print("output from the model is :", output)

    # BUG FIX: add .cpu() — .numpy() raises for CUDA tensors.
    result = np.around(output.detach().cpu().numpy()).astype('int')

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding the int label 0 or 1.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    input_data = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, input_data)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # BUG FIX: add .cpu() before .numpy() — it raises for CUDA tensors.
    result = float(model(data).detach().cpu().numpy())

    # Threshold the sigmoid output at 0.5 to get a hard label.
    if result >= 0.5:
        result = 1
    else:
        result = 0

    return np.array(result)
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: model output rounded to 0.0 or 1.0.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Tokenize the review and convert it to a padded id sequence.
    tokens = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, tokens)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    with torch.no_grad():
        output = model(data)

    # BUG FIX: the result was returned UNROUNDED (a raw probability), but
    # the contract requires a 0/1 value; also .cpu() is needed before
    # .numpy() on GPU deployments.
    result = np.round(output.cpu().numpy())
    print(result)

    return result
def input_handler(data, context):
    """Pre-process request input before it is sent to TensorFlow Serving.

    Args:
        data (obj): the request data, in format of dict or string
        context (Context): an object containing request and configuration
            details

    Returns:
        str: a JSON string of the form ``{"instances": [...]}`` ready for
        the TF Serving REST API.

    Raises:
        ValueError: if the request content type is not 'application/json'.
    """
    print("calling input_handler")
    print("data:", data)
    print("context:", context)

    # Load the saved word_dict. The model artifacts are unpacked at
    # /opt/ml/model on the deployed endpoint.
    word_dict_path = os.path.join('/opt/ml/model', 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        word_dict = pickle.load(f)
    print('word_dict:', word_dict)

    if context.request_content_type == 'application/json':
        # The body arrives as a stream of UTF-8 bytes.
        d = data.read().decode('utf-8')
        print('Decoded data:', d)

        # Clean + convert the review, then add a leading batch axis so the
        # payload matches TF Serving's 'instances' format.
        data_X, data_len = convert_and_pad(word_dict, review_to_words(d))
        data_X = np.array(data_X)[np.newaxis]
        data_X_json = json.dumps({'instances': data_X.tolist()})
        print('data_X_json:', data_X_json)
        return data_X_json

    raise ValueError('{{"error": "unsupported content type {}"}}'.format(
        context.request_content_type or "unknown"))
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding the int label 0 or 1.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # BUG FIX: the previous code parsed str(tensor) (the printed repr,
    # including the "1.00000e-3 *" scientific-notation prefix) to recover
    # the value — extremely fragile and repr-format dependent. Extract the
    # scalar numerically instead; .item() works regardless of device.
    with torch.no_grad():
        probability = model(data).item()

    result = np.asarray(1 if probability > 0.50 else 0)

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a single raw review string.

    Args:
        input_data (str): raw review text.
        model: trained classifier exposing a ``word_dict`` attribute.

    Returns:
        numpy.ndarray: a scalar array holding the int label 0 or 1.

    Raises:
        Exception: if the model was loaded without its word dictionary.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # review_to_words strips html tags, punctuation and stop words, and
    # lowercases everything.
    words = review_to_words(review=input_data)

    # Match the first 500 sanitized words against the model's word_dict.
    data_X, data_len = convert_and_pad(word_dict=model.word_dict,
                                       sentence=words,
                                       pad=500)

    # The model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode.
    model.eval()

    # IMPROVED: wrap inference in no_grad — the previous version built an
    # unused autograd graph on every request. float() extracts the scalar
    # from the 1-element tensor on any device.
    with torch.no_grad():
        out = model(data)

    result = np.array(round(float(out)))

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a raw text review.

    Args:
        input_data: Raw review text (string).
        model: Trained model carrying a ``word_dict`` vocabulary attribute.

    Returns:
        A numpy array with the rounded model output (0.0 or 1.0).

    Raises:
        Exception: If the model's word_dict has not been loaded.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Convert the raw review into a padded id sequence plus its true length.
    data_X, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))

    # The model expects input of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Evaluation mode disables dropout and other training-only behavior.
    model.eval()

    with torch.no_grad():
        # Call via __call__ so hooks run; .cpu() is required before .numpy(),
        # which raises TypeError on CUDA tensors (and this function selects
        # CUDA whenever it is available).
        output = model(data)

    return np.round(output.cpu().numpy())
def predict_fn(input_data, model):
    """Make a sentiment prediction on input data with the given model.

    Args:
        input_data: Raw review text (string).
        model: Trained model carrying a ``word_dict`` vocabulary attribute.

    Returns:
        int: 1 (positive) or 0 (negative).

    Raises:
        Exception: If the model's word_dict has not been loaded.
    """
    # Begin predicting:
    print("Inferring sentiment of input data...")

    # Determine the device:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Check model word dictionary presence:
    if model.word_dict is None:
        raise Exception("Model has not been loaded properly: no word_dict")

    # Process input_data so that it is ready to be sent to our model:
    data_x, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))

    # Construct an appropriate input tensor of the form 'len, review[500]':
    data_pack = np.hstack((data_len, data_x))
    data_pack = data_pack.reshape(1, -1)
    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode:
    model.eval()

    # Apply model to input tensor without building an autograd graph:
    with torch.no_grad():
        output_data = model(data)

    # NumPy doesn't support CUDA, so bring the tensor back to host memory.
    # Note: Tensor.to()/.cpu() is NOT in-place — the original discarded the
    # result of output_data.to('cpu'), so .numpy() still failed on CUDA.
    # Rebinding the name fixes that.
    output_data = output_data.cpu()
    result = int(np.round(output_data.numpy()))

    # Return prediction:
    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a raw text review.

    Args:
        input_data: Raw review text (string).
        model: Trained model carrying a ``word_dict`` vocabulary attribute.

    Returns:
        int: 1 (positive) or 0 (negative).

    Raises:
        Exception: If the model's word_dict has not been loaded.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Sanitize the review and map it onto the model's vocabulary, padded to
    # the fixed sequence length.
    words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words)

    # The model expects input of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Evaluation mode disables dropout and other training-only behavior.
    model.eval()

    with torch.no_grad():
        # Call via __call__ so hooks run. Move to CPU before .numpy() —
        # NumPy conversion raises TypeError on CUDA tensors, and this
        # function selects CUDA whenever it is available. .detach() is
        # unnecessary inside no_grad (the output carries no grad graph).
        output = model(data).round().cpu().numpy()

    result = int(float(output))

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a raw text review.

    Args:
        input_data: Raw review text (string).
        model: Trained model carrying a ``word_dict`` vocabulary attribute.

    Returns:
        A numpy array with the rounded model output (0.0 or 1.0).

    Raises:
        Exception: If the model's word_dict has not been loaded.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Sanitize the review, then map it onto the vocabulary with padding.
    input_data = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, input_data)

    # The model expects input of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    model.eval()

    # no_grad avoids "Can't call numpy() on Variable that requires grad";
    # .cpu() is additionally required because .numpy() raises TypeError on
    # CUDA tensors, and this function selects CUDA whenever available.
    with torch.no_grad():
        result = model(data).cpu().numpy()

    result = np.round(result)

    return result
def predict_fn(input_data, model):
    """Infer the sentiment of a raw text review.

    Args:
        input_data: Raw review text (string).
        model: Trained model carrying a ``word_dict`` vocabulary attribute.

    Returns:
        A numpy integer array with the rounded model output (0 or 1).

    Raises:
        Exception: If the model's word_dict has not been loaded.
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Convert string into a list of stemmed words with HTML syntax removed.
    clean_list = review_to_words(input_data)

    # Encode the word list into vocabulary ids, zero-padded to a fixed length.
    data_X, data_len = convert_and_pad(model.word_dict, clean_list)

    # The model expects input of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Evaluation mode disables dropout and other training-only behavior.
    model.eval()

    # no_grad skips the autograd graph; .cpu() is required before .numpy()
    # on CUDA tensors. astype(int) replaces astype(np.int): the np.int
    # alias was deprecated in NumPy 1.20 and removed in 1.24, so the old
    # call raises AttributeError on current NumPy.
    with torch.no_grad():
        output = np.round(model(data).cpu().numpy()).astype(int)

    return output