def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')
    
    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review

    sentence = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, sentence)

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    
    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0

    # Run a forward pass without tracking gradients, then threshold the sigmoid output.
    with torch.no_grad():
        output = model(data)

    if output < 0.5:
        result = np.intc(0)
    else:
        result = np.intc(1)

    return result
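
A minimal usage sketch of the function above (not part of the original snippet), assuming model has already been loaded elsewhere with its word_dict populated, and that review_to_words and convert_and_pad are available at module level:

# Hypothetical call; `model` is assumed to be an already-loaded sentiment model.
sample_review = "An absolute delight from start to finish."
prediction = predict_fn(sample_review, model)
print("positive" if int(prediction) == 1 else "negative")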
Example #2
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')
    
    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review
    
    words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words)

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    
    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0
    # ideas from https://github.com/AAnoosheh/MNIST_test/blob/master/test.py
    # solution with detach() taken from CloudWatch logs
    # solution with numpy() found in https://stackoverflow.com/questions/34097281/how-can-i-convert-a-tensor-into-a-numpy-array-in-tensorflow 
    with torch.no_grad():
        output = model(data).round().detach().cpu().numpy()

    result = int(output)

    return result
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    #  Process input_data so that it is ready to be sent to our model.
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review
    input1 = np.array(review_to_words(input_data))
    data_X, data_len = convert_and_pad(model.word_dict, input1)

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    #  Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0
    with torch.no_grad():
        prediction = model(data)

    # Round the sigmoid output to 0 or 1 and return it as a plain integer.
    result = np.round(prediction.cpu().numpy())

    return int(result)
Example #4
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review

    # converts string into list of stemmed words with html syntax removed
    clean_list = review_to_words(input_data)

    #encodes list of words into a list of numbers, padded with zeros to standardize length

    data_X, data_len = convert_and_pad(model.word_dict, clean_list)

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0

    with torch.no_grad():
        output = np.round(model(data).cpu().numpy()).astype(int)

    return output
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review

    input_data_words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, input_data_words)
    data_pack = np.hstack((data_len, data_X))

    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0

    with torch.no_grad():
        output = model(data)

    result = np.round(output.cpu().numpy())
    print(result)

    return result
Example #6
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review

    data_X = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, data_X)

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0
    # ref https://machinelearningmastery.com/pytorch-tutorial-develop-deep-learning-models/
    # make prediction
    yhat = model(data)
    # retrieve numpy array (detach from the graph and move to CPU first)
    yhat = yhat.detach().cpu().numpy()
    result = np.round(yhat).astype(int)

    return result
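
Every variant above packs the review length and the 500 padded word indices into a single row before building the tensor. A toy sketch of that packing step, with made-up index values:

# Toy illustration of the hstack/reshape packing used in the examples (assumes pad length 500).
import numpy as np

data_len = 3                             # the review contained 3 known words
data_X = [42, 7, 13] + [0] * 497         # word indices padded to length 500
data_pack = np.hstack((data_len, data_X)).reshape(1, -1)
print(data_pack.shape)                   # (1, 501): [length, word_1, ..., word_500]
print(data_pack[0, :5])                  # [ 3 42  7 13  0]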
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')
    
    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review
    
    input_data = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, input_data)
    
    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    
    data = torch.from_numpy(data_pack)
    data = data.to(device)

    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0

    # Note: RuntimeError: Can't call numpy() on Variable that requires grad. Use var.detach().numpy() instead.
    # We need to remove grad in order to return a numpy array
    with torch.no_grad():
        result = model(data).cpu().numpy()

    result = np.round(result)
        
    return result
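
The RuntimeError quoted in the comments above comes from calling .numpy() on a tensor that still tracks gradients. A small illustrative sketch of the two standard fixes, detach() and torch.no_grad():

# Illustration only: why .numpy() fails on a grad-tracking tensor, and two fixes.
import torch

x = torch.ones(1, requires_grad=True)
y = x * 2
# y.numpy()               # would raise: can't call numpy() on a tensor that requires grad
a = y.detach().numpy()    # fix 1: detach from the graph first
with torch.no_grad():     # fix 2: compute without tracking gradients at all
    b = (x * 2).numpy()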
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review

    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0
    # Run a forward pass only (no optimiser, backward pass, or loss at inference
    # time), then convert the result to a numpy array.
    with torch.no_grad():  # turn off gradient tracking; faster and uses less memory
        output = model(data)

    result = np.round(output.cpu().numpy())

    return result
Example #9
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    data_X, data_len = convert_and_pad(model.word_dict,
                                       review_to_words(input_data))

    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()
    with torch.no_grad():
        result = np.round(model(data).cpu().numpy())
    return result
Example #10
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # TODO: Process input_data so that it is ready to be sent to our model.
    words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words)
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    data = torch.from_numpy(data_pack)
    data = data.to(device)
    model.eval()
    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    with torch.no_grad():
        output = model.forward(data)
        output = output.to('cpu')

    result = np.round(output.numpy())

    return result
Example #11
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    to_words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, to_words)

    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)

    data = torch.from_numpy(data_pack)
    data = data.to(device)

    model.eval()
    with torch.no_grad():
        output = model(data)

    result = np.round(output.cpu().numpy())

    return result
Example #12
def predict_fn(input_data, model):
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')
    
    # TODO: Process input_data so that it is ready to be sent to our model.
    #       You should produce two variables:
    #         data_X   - A sequence of length 500 which represents the converted review
    #         data_len - The length of the review
    
    # Do we need the BeautifulSoup processing?
    def review_to_words(review):
        nltk.download("stopwords", quiet=True)
        stemmer = PorterStemmer()

        text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags
        text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case
        words = text.split() # Split string into words
        words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords
        words = [stemmer.stem(w) for w in words] # Stem each word

        return words
    
    words = review_to_words(input_data)

    def convert_and_pad(word_dict, sentence, pad=500):
        NOWORD = 0 # We will use 0 to represent the 'no word' category
        INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict

        working_sentence = [NOWORD] * pad

        for word_index, word in enumerate(sentence[:pad]):
            if word in word_dict:
                working_sentence[word_index] = word_dict[word]
            else:
                working_sentence[word_index] = INFREQ

        return working_sentence, min(len(sentence), pad)

    def convert_and_pad_data(word_dict, data, pad=500):
        result = []
        lengths = []

        for sentence in data:
            converted, leng = convert_and_pad(word_dict, sentence, pad)
            result.append(converted)
            lengths.append(leng)

        return np.array(result), np.array(lengths)
    
    data_X, data_len = convert_and_pad(model.word_dict, words)


    # Using data_X and data_len we construct an appropriate input tensor. Remember
    # that our model expects input data of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    
    data = torch.from_numpy(data_pack)
    data = data.to(device)

    # Make sure to put the model into evaluation mode
    model.eval()

    # TODO: Compute the result of applying the model to the input data. The variable `result` should
    #       be a numpy array which contains a single integer which is either 1 or 0

    with torch.no_grad():
        result = np.round(model(data).cpu().numpy())

    return result
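
A quick sanity check of the convert_and_pad helper shown above, assuming it is defined at module level rather than nested inside predict_fn, and using a made-up toy vocabulary:

# Toy vocabulary; 0 marks padding and 1 marks infrequent/unknown words, as in the helper above.
toy_word_dict = {"great": 3, "movi": 2}
converted, length = convert_and_pad(toy_word_dict, ["great", "movi", "unseenword"], pad=5)
print(converted)   # [3, 2, 1, 0, 0]
print(length)      # 3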