Example No. 1
def inference_coco(encoder_file: str, decoder_file: str, embed_size: int,
                   hidden_size: int, from_cpu: bool) -> None:
    """
    Displays an original image from coco test dataset and prints its associated caption.

    encoder_file:   Name of the encoder to load.
    decoder_file:   Name of the decoder to load.
    embed_size:     Word embedding size for the encoder.
    hidden_size:    Size of the LSTM hidden layer.
    from_cpu:       Whether the model has been saved on CPU.
    """
    # Define transform
    transform_test = transforms.Compose([
        transforms.Resize(256),  # smaller edge of image resized to 256
        transforms.RandomCrop(224),  # get 224x224 crop from random location
        transforms.ToTensor(),  # convert the PIL Image to a tensor
        transforms.Normalize(
            (0.485, 0.456, 0.406),  # normalize image for pre-trained model
            (0.229, 0.224, 0.225))
    ])

    # Device to use for inference
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Create the data loader.
    data_loader = get_loader(transform=transform_test, mode='test')

    # Obtain sample image
    _, image = next(iter(data_loader))

    # The size of the vocabulary.
    vocab_size = len(data_loader.dataset.vocab)

    # Initialize the encoder and decoder, and set each to inference mode.
    encoder = EncoderCNN(embed_size)
    encoder.eval()
    decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
    decoder.eval()

    # Load the trained weights.
    if from_cpu:
        encoder.load_state_dict(
            torch.load(os.path.join('./models', encoder_file),
                       map_location='cpu'))
        decoder.load_state_dict(
            torch.load(os.path.join('./models', decoder_file),
                       map_location='cpu'))
    else:
        encoder.load_state_dict(
            torch.load(os.path.join('./models', encoder_file)))
        decoder.load_state_dict(
            torch.load(os.path.join('./models', decoder_file)))

    # Move models to GPU if CUDA is available.
    encoder.to(device)
    decoder.to(device)

    get_prediction(encoder, decoder, data_loader, device)
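A minimal usage sketch for the function above; the checkpoint file names are placeholders, and embed_size / hidden_size must match the values used during training:

inference_coco(encoder_file='encoder.pkl',
               decoder_file='decoder.pkl',
               embed_size=256,
               hidden_size=512,
               from_cpu=True)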
Example No. 2
def upload_file():
    if request.method == 'POST':

        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']

        # if the user does not select a file, the browser
        # submits an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)

        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(filepath)

            # Upload file to cloud storage
            upload_cloud(bucket_name, filename, filepath)

            # Open image file and convert to required formats
            img, upl_img = convert_img(file)

            # Get prediction from AutoML model
            prediction = get_prediction(img, project_id, model_id)

            # Convert prediction received from model into readable results
            response = lbl_score(prediction)

            return render_template('predict.html', data=response, img=upl_img)
    return render_template('index.html')
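A hypothetical client call for the route above, assuming the view is registered at /upload on a local development server (the route path and file name are placeholders):

import requests

with open('sample.jpg', 'rb') as f:
    # 'file' matches the field name checked in request.files above
    resp = requests.post('http://localhost:5000/upload', files={'file': f})
print(resp.status_code)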
Example No. 3
    def decisionTreeClassifier(self):
        clf = tree.DecisionTreeClassifier(max_depth=5)
        clf.fit(self.x_train, self.y_train)
        # prediction = clf.predict(x_test)
        proba = clf.predict_proba(self.x_test)
        predictions = get_prediction(proba, clf.classes_)
        return predictions
Example No. 4
def instance_bbox_api(model,
                      img_path,
                      cat_names,
                      threshold=0.5,
                      rect_th=3,
                      text_size=1,
                      text_th=2):
    boxes, pred_cls, pred_id = utils.get_prediction(model, img_path, cat_names,
                                                    threshold)
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
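    # Draw a bounding box and the predicted class label for each detection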
    for i in range(len(boxes)):
        cv2.rectangle(img,
                      boxes[i][0],
                      boxes[i][1],
                      color=(0, 255, 0),
                      thickness=rect_th)
        cv2.putText(img,
                    pred_cls[i],
                    boxes[i][0],
                    cv2.FONT_HERSHEY_SIMPLEX,
                    text_size, (0, 255, 0),
                    thickness=text_th)
    if is_interactive:
        matplotlib.use('TkAgg')
        plt.figure(figsize=(20, 30))
        plt.imshow(img)
        plt.xticks([])
        plt.yticks([])
        plt.show(block=True)
    save_annos(boxes, pred_cls, pred_id, img_path, img)
Example No. 5
def predict_loan():
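    # Read the loan-application fields from the submitted form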
    appincome = int(request.form['appincome'])
    coappincome = int(request.form['coappincome'])
    loanamount = int(request.form['loanamount'])
    loanterm = int(request.form['loanterm'])
    gender = request.form['gender']
    married = request.form['married']
    education = request.form['education']
    self_employed = request.form['self_employed']
    dependents = request.form['dependents']
    credit_history = int(request.form['credit_history'])
    property_area = request.form['property_area']
    pred = utils.get_prediction(gender, married, dependents, education,
                                self_employed, appincome, coappincome,
                                loanamount, loanterm, credit_history,
                                property_area)
    if pred == 1:
        res = 'Approved'
        alert = 'alert alert-success'
    else:
        res = 'Rejected'
        alert = 'alert alert-danger'
    response = jsonify({'result': res, 'alert': alert})
    response.headers.add('Access-Control-Allow-Origin', '*')
    print(response)
    return response
Example No. 6
def predict():
    inputs = request.get_json(force=True)
    logging.warn(inputs)
    inputs = mapValues(inputs)
    logging.warn(inputs)
    prediction = get_prediction(list(inputs.values()))
    logging.warn(prediction)
    output = prediction[0][1]
    return jsonify(output)
Example No. 7
def house_price():
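    # CHAS, RM and PTRATIO are presumably Boston-housing-style features:
    # Charles River dummy, average rooms per dwelling, pupil-teacher ratio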
    CHAS = int(request.form['CHAS'])
    RM = int(request.form['RM'])
    PTRATIO = int(request.form['PTRATIO'])

    response = jsonify(
        {'estimated_price': utils.get_prediction(CHAS, RM, PTRATIO)})

    return response
Example No. 8
    def naiveBayesClassifier(self):
        clf = MultinomialNB()
        clf.fit(self.x_train, self.y_train)

        #prediction = clf.predict(self.x_test)
        proba = clf.predict_proba(self.x_test)
        predictions = get_prediction(proba, clf.classes_)

        return predictions
Example No. 9
def get_accuracy(output, targets, prob=True):
    """ Get accuracy given output and targets
    """
    pred, _ = get_prediction(output, prob)
    cnt = 0
    for batch_ind, target in enumerate(targets):
        target = [v for v in target]
        if target == pred[batch_ind]:
            cnt += 1
    return float(cnt) / len(targets)
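A small illustration of the comparison performed above, with made-up values; a batch element only counts as correct when the entire predicted sequence equals the target:

pred = [[1, 2, 3], [4, 5, 6]]      # what get_prediction might return
targets = [[1, 2, 3], [4, 0, 6]]
correct = sum(1 for i, t in enumerate(targets) if list(t) == pred[i])
print(correct / len(targets))      # 0.5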
Example No. 10
    def recognize(self):
        image_to_be_saved = cv2.resize(self.__mask, (50, 50))
        cv2.imwrite(constant.SAVED_IMAGES_DIRECTORY + 'predict.JPG',
                    image_to_be_saved)

        test_image = image.load_img(constant.SAVED_IMAGES_DIRECTORY +
                                    'predict.JPG',
                                    color_mode="grayscale")

        current_result = ut.get_prediction(test_image, self.__classifier)

        return current_result
Example No. 11
def seed_classification():
    # Get the data and prediction from model
    names = 'area,perimeter,kernel_length,kernel_width,asymmetry_coef,length_of_groove'.split(',')
    area, perimeter, kl, kw, ac, gl = [float(request.POST[i]) for i in names]
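    # Compactness = 4*pi*area / perimeter**2 (equals 1.0 for a perfect circle)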
    compact = 4 * math.pi * area / (perimeter**2)
    feature = [area, perimeter, compact, kl, kw, ac, gl]
    pred, proba = get_prediction(feature)

    data = dict(seed_label=pred, confidence=proba)

    html = render('seed.html', data)
    return html
Example No. 12
def predict():
    if request.method == 'POST':
        file = request.files['file']
        img_bytes = file.read()
        class_name = get_prediction(model, image_bytes=img_bytes)
        return jsonify(
            {
                "results": [{
                    'model': 'breed',
                    'prediction': class_name
                }],
                "image": base64.b64encode(img_bytes).decode("utf-8"),
                "status": "SUCCEEDED",
            }, )
Example No. 13
def predict():
    if request.method == 'GET':
        Pclass = request.args.get('Pclass')
        Age = request.args.get('Age')
        Sex = request.args.get('Sex')
        Parch = request.args.get('Parch')
        raw_data = pd.DataFrame({'Pclass': [Pclass], 'Age': [Age], 'Sex': [Sex], 'Parch': [Parch]})
        transf_data = transform_data(raw_data)
        prediction = get_prediction(transf_data)
        prediction = prediction[0][1].item()
        # We take the first value of our predictions, representing the probability not to churn.
        data = {'prediction': prediction}
        return jsonify(data)
    else:
        return jsonify({'error': 'Only GET requests possible'})
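A hypothetical client call, assuming this view handles GET requests at /predict; the parameter values are placeholders and must match whatever transform_data expects:

import requests

params = {'Pclass': 3, 'Age': 22, 'Sex': 'male', 'Parch': 0}
resp = requests.get('http://localhost:5000/predict', params=params)
print(resp.json())  # {'prediction': ...}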
Example No. 14
def index(request):
    data = {}

    if request.method == 'GET':
        data['message'] = 'you are not allowed to access this resource by this request method'
        return jsonify(data), 405
    elif request.method == 'POST':
        img_data = request.files.get('image')

        if img_data:
            img_filename = secure_filename(img_data.filename)
            img_data.save('/tmp/' + img_filename)

            CATEGORIES = ['nails', 'notnails']
            path = '/tmp/' + img_filename
            img = image.load_img(path, target_size=(256, 256))
            img = np.array(img)
            img = img / 255.
            xy = img
            xy = np.expand_dims(img, axis=0)

            images = np.vstack([xy])
            prediction = model.predict(images, batch_size=32)

            # check for nails or not
            res = "{} | {:2.0f}%".format(CATEGORIES[int(prediction[0][0])],
                                         100 * np.max(prediction))

            res = res.split(' | ')
            name = res[0]
            percent = res[1]

            if name == "notnails":
                data['is_nail'] = False
                data['is_disease_match'] = False
                data['name'] = ""
                data['percent'] = ""
                data['desc'] = ""
                data['treat'] = ""

            elif name == "nails":
                data = utils.get_prediction(path)

        else:
            data['message'] = 'please attach image file in your request'

        return jsonify(data)
Example No. 15
def api():
    global ips
    filestr = request.files['file'].read()
    model_ver = request.form['model_ver']
    print(model_ver)
    img = decodeImage(filestr)
    img = load_preprocess_image(img)
    print(img.shape)
    preds = get_prediction(img=img, ver=model_ver, ips=ips)
    preds = np.array(preds['predictions'][0])
    idx = np.argsort(preds)[::-1][:3]
    top_scores = preds[idx]
    top_classes = class_names[idx]
    results = {'classes': list(top_classes), 'scores': list(top_scores)}
    return jsonify(
        results
    )  #"Image received!"  #(jsonify(list(zip(class_names, scores)))) #  # jsonify(preds)
Example No. 16
def predict():
    project_id = 'stable-hybrid-249623'
    model_id = 'ICN4772510494057073039'
    message = request.get_json(force=True)
    encoded = message['image']
    decoded = base64.b64decode(encoded)
    prediction = get_prediction(decoded, project_id, model_id)
    pred_label = prediction.payload[0].display_name
    lbl = get_label(pred_label)
    score = prediction.payload[0].classification.score

    response = {
        'prediction': {
            'label': lbl,
            'score': score
        }
    }
    return jsonify(response)
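A hypothetical client call for the route above, assuming it is exposed at /predict; the image path is a placeholder:

import base64
import requests

with open('sample.jpg', 'rb') as f:
    payload = {'image': base64.b64encode(f.read()).decode('utf-8')}
resp = requests.post('http://localhost:5000/predict', json=payload)
print(resp.json())  # {'prediction': {'label': ..., 'score': ...}}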
Example No. 17
def get_options(db, student, text):

    # Question
    text = get_text(text)
    if text == 'menu':
        return [ OPTIONS ], 5
    elif text == 'more':
        return [ MORE_OPTIONS ], 5
    elif text == 'progress':
        # Performance prediction
        prediction = get_prediction(db, student['username'])
        # Work
        work = get_work(db, student['username'])
        # Message
        grade = get_grade_message(prediction['prediction'], work['cum_programs_W11'])
        response = [ grade, REMINDER % _DATE, LAB ]
        # Coverage
        if work['coverage_W11']:
            response.append( COVERAGE_YES )
        return response, 5
    elif text == 'material':
        # Recommendation
        recommendation = get_recommendation(db, student['username'])
        # Message
        if 'labsheet' in recommendation:
            lab = LABSHEET_URI % (recommendation['labsheet']) 
            labsheet = MATERIAL_LAB % lab
            resources = MATERIAL_RES % ', '.join(LABSHEET_URI % (resource) for resource in recommendation['resources'])
            return [ labsheet, resources ], 5
        else:
            return [ NO_MATERIAL ], 5
    elif text == 'program':
        # Random gist
        snippet, desc =  gist()
        program = PROGRAM % (desc)
        return [ program, snippet ], 5
    elif text == 'terms':
        return [ TERMS ], 5
    elif text == 'help':
        return [ HELP ], 5
    elif text == 'opt-out':
        return [ OPT_OUT ], 6
    else:
        return [ NOT_FOUND ], 5
Example No. 18
def detect_landmarks(image_inp):
	#image = cv2.imread(image_path)
	img_modif = image_inp
	image = imutils.resize(image_inp, width=512)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	rects = detector(gray, 1)
	# left_eye = [37, 38, 40, 41]
	# right_eye = [43, 44, 46, 47]
	for (i, rect) in enumerate(rects):

		shape = predictor(gray, rect)
		shape = shape_to_np(shape)

		# print(f"Difference between 40 and 37 in y is {shape[40][1] - shape[37][1]}")
		# print(f"Difference between 41 and 38 in y is {shape[41][1] - shape[38][1]}")
		# print(f"Difference between 46 and 43 in y is {shape[46][1] - shape[43][1]}")
		# print(f"Difference between 47 and 44 in y is {shape[47][1] - shape[44][1]}")
		med = (shape[40][1] - shape[37][1] + shape[41][1] - shape[38][1] + shape[46][1] - shape[43][1] + shape[47][1] - shape[44][1])/4
		(x, y, w, h) = rect_to_bb(rect)
		percentage = round((100*med)/h, 2)
		if percentage > 4:
			eyes = "Eyes opened"
		else:
			eyes = "Eyes closed"

		face = img_modif[y:y+h, x:x+w]
		face = cv2.resize(face, (512, 512))
		result = get_prediction(net, face, device).cpu()
		emotion = emotions[int(torch.argmax(result))]
		confidence = round(result.numpy()[0][int(torch.argmax(result))]*100, 2)
		# print(emotion, confidence)

		cv2.rectangle(img_modif, (x, y), (x + w, y + h), (0, 255, 0), 2)

		cv2.putText(img_modif, f"#{i + 1} -- {eyes} -- {emotion} {confidence}%", (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
		for (x, y) in shape:
			cv2.circle(img_modif, (x, y), 1, (0, 0, 255), -2)
	#cv2.imshow("Output", image)
	#cv2.waitKey(0)
	return img_modif
Example No. 19
def predict():
    file = request.files['file']
    result = get_prediction(file.read())
    response = jsonify(result)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
Example No. 20
    progress = st.text("Crunching Image")
    my_bar = st.progress(0)
    i = 0

    # Reading the uploaded image
    image = Image.open(io.BytesIO(uploaded_file.read()))
    st.image(np.array(
        Image.fromarray(np.array(image)).resize((700, 400), Image.ANTIALIAS)),
             width=None)
    my_bar.progress(i + 40)
    i += 40

    # Cleaning the image
    image = clean_image(image)

    # Making the predictions
    predictions, predictions_arr = get_prediction(model, image)
    my_bar.progress(i + 30)
    i += 30

    # Making the results
    result = make_results(predictions, predictions_arr)

    # Removing progress bar and text after prediction done
    my_bar.progress(i + 30)
    progress.empty()
    i = 0
    my_bar.empty()

    # Show the results
    st.write(
        f"The plant {result['status']} with {result['prediction']} prediction."
    )
Example No. 21
def play_qa_readable(args, data, DNC):
    criterion = nn.CrossEntropyLoss()
    cum_correct, cum_total = [], []

    for trial in range(args.iters):
        phase_masks = data.make_new_problem()
        n_total, n_correct, loss = 0, 0, 0
        dnc_state = DNC.init_state(grad=False)

        for phase_idx in phase_masks:
            if phase_idx == 0 or phase_idx == 1:

                inputs, msk = data.getitem()
                print(data.human_readable(inputs, msk))

                inputs = Variable(torch.cat([msk, inputs], 1))
                logits, dnc_state = DNC(inputs, dnc_state)
            else:
                final_moves = data.get_actions(mode='one')
                if final_moves == []:
                    break
                data.send_action(final_moves[0])
                mask = data.phase_oh[2].unsqueeze(0)
                vec = data.vec_to_ix(final_moves[0])
                print('\n')
                print(data.human_readable(vec, mask))

                inputs2 = Variable(torch.cat([mask, vec], 1))
                logits, dnc_state = DNC(inputs2, dnc_state)

                for _ in range(args.num_tests):
                    # ask where is ---?

                    masked_input, mask_chunk, ground_truth = data.masked_input()
                    print("Context:", data.human_readable(ground_truth))
                    print("Q:")

                    logits, dnc_state = DNC(Variable(masked_input), dnc_state)
                    expanded_logits = data.ix_input_to_ixs(logits)

                    #losses
                    lstep = l.action_loss(expanded_logits,
                                          ground_truth,
                                          criterion,
                                          log=True)

                    #update counters
                    prediction = u.get_prediction(expanded_logits, [3, 4])
                    print("A:")
                    n_total, n_correct = tick(n_total, n_correct, mask_chunk,
                                              prediction)
                    print("correct:", mask_chunk == prediction)

        cum_total.append(n_total)
        cum_correct.append(n_correct)
        sl.writer.add_scalar('recall.pct_correct', n_correct / n_total,
                             sl.global_step)
        print(
            "trial: {}, step:{}, accy {:0.4f}, cum_score {:0.4f}, loss: {:0.4f}"
            .format(trial, sl.global_step, n_correct / n_total,
                    u.running_avg(cum_correct, cum_total), loss.data[0]))
    return DNC, dnc_state, u.running_avg(cum_correct, cum_total)
Example No. 22
# Specify values for embed_size and hidden_size - we use the same values as in the training step
embed_size = 256
hidden_size = 512

# Get the vocabulary and its size
vocab = data_loader.dataset.vocab
vocab_size = len(vocab)

# Initialize the encoder and decoder, and set each to inference mode
encoder = EncoderCNN(embed_size)
encoder.eval()
decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
decoder.eval()

# Load the pre-trained weights
encoder.load_state_dict(checkpoint['encoder'])
decoder.load_state_dict(checkpoint['decoder'])

# Move models to GPU if CUDA is available.
if torch.cuda.is_available():
    encoder.cuda()
    decoder.cuda()

# In[5]:

x = get_prediction(data_loader, encoder, decoder, vocab)

# In[6]:

print(x)
Example No. 23
def train_plan(args, data, DNC, lstm_state, optimizer):
    """
        Things to test after some iterations:
         - on planning phase and on

         with goals - choose a goal and work toward that
        :param args:
        :return:
        """
    criterion = nn.CrossEntropyLoss().cuda(
    ) if args.cuda is True else nn.CrossEntropyLoss()
    cum_correct, cum_total, prob_times, n_success = [], [], [], 0
    penalty = 1.1

    for trial in range(args.iters):
        start_prob = time.time()
        phase_masks = data.make_new_problem()
        n_total, n_correct, prev_action, loss, stats = 0, 0, None, 0, []
        dnc_state = DNC.init_state(grad=False)
        lstm_state = DNC.init_rnn(grad=False)  # lstm_state,
        optimizer.zero_grad()

        for phase_idx in phase_masks:

            if phase_idx == 0 or phase_idx == 1:
                inputs = _variable(data.getitem_combined())
                logits, dnc_state, lstm_state = DNC(inputs, lstm_state,
                                                    dnc_state)
                _, prev_action = data.strip_ix_mask(logits)

            elif phase_idx == 2:
                mask = _variable(data.getmask())
                inputs = torch.cat([mask, prev_action], 1)
                logits, dnc_state, lstm_state = DNC(inputs, lstm_state,
                                                    dnc_state)
                _, prev_action = data.strip_ix_mask(logits)

            else:
                # sample from best moves
                actions_star, all_actions = data.get_actions(mode='both')
                if not actions_star:
                    break
                if args.zero_at == 'step':
                    optimizer.zero_grad()

                mask = data.getmask()
                prev_action = prev_action.cuda(
                ) if args.cuda is True else prev_action
                pr = u.depackage(prev_action)

                final_inputs = _variable(torch.cat([mask, pr], 1))
                logits, dnc_state, lstm_state = DNC(final_inputs, lstm_state,
                                                    dnc_state)
                exp_logits = data.ix_input_to_ixs(logits)

                guided = random.random() < args.beta
                # thing 1
                if guided:  # guided loss
                    final_action, lstep = L.naive_loss(exp_logits,
                                                       actions_star,
                                                       criterion,
                                                       log=True)
                else:  # pick own move
                    final_action, lstep = L.naive_loss(exp_logits,
                                                       all_actions,
                                                       criterion,
                                                       log=True)

                # penalty (TODO: test this)
                action_own = u.get_prediction(exp_logits)
                if args.penalty and not [tuple(flat(t)) for t in all_actions]:
                    final_loss = lstep * _variable([args.penalty])
                else:
                    final_loss = lstep

                if args.opt_at == 'problem':
                    loss += final_loss
                else:

                    final_loss.backward(retain_graph=args.ret_graph)
                    if args.clip:
                        torch.nn.utils.clip_grad_norm(DNC.parameters(),
                                                      args.clip)
                    optimizer.step()
                    loss = lstep

                data.send_action(final_action)

                if (trial + 1) % args.show_details == 0:
                    action_accs = u.human_readable_res(data, all_actions,
                                                       actions_star,
                                                       action_own, guided,
                                                       lstep.data[0])
                    stats.append(action_accs)
                n_total, _ = tick(n_total, n_correct, action_own,
                                  flat(final_action))
                n_correct += 1 if action_own in [
                    tuple(flat(t)) for t in actions_star
                ] else 0
                prev_action = data.vec_to_ix(final_action)

        if stats:
            arr = np.array(stats)
            correct = len([
                1 for i in list(arr.sum(axis=1)) if i == len(stats[0])
            ]) / len(stats)
            sl.log_acc(list(arr.mean(axis=0)), correct)

        if args.opt_at == 'problem':
            floss = loss / n_total
            floss.backward(retain_graph=args.ret_graph)
            if args.clip:
                torch.nn.utils.clip_grad_norm(DNC.parameters(), args.clip)
            optimizer.step()
            sl.writer.add_scalar('losses.end', floss.data[0], sl.global_step)

        n_success += 1 if n_correct / n_total > args.passing else 0
        cum_total.append(n_total)
        cum_correct.append(n_correct)
        sl.add_scalar('recall.pct_correct', n_correct / n_total,
                      sl.global_step)
        print(
            "trial {}, step {} trial accy: {}/{}, {:0.2f}, running total {}/{}, running avg {:0.4f}, loss {:0.4f}  "
            .format(trial, sl.global_step, n_correct, n_total,
                    n_correct / n_total, n_success, trial,
                    running_avg(cum_correct, cum_total), loss.data[0]))
        end_prob = time.time()
        prob_times.append(end_prob - start_prob)
    print("solved {} out of {} -> {}".format(n_success, args.iters,
                                             n_success / args.iters))
    return DNC, optimizer, lstm_state, running_avg(cum_correct, cum_total)
Example No. 24
def train_qa2(args, data, DNC, optimizer):
    """
        I am jacks liver. This is a sanity test

        0 - describe state.
        1 - describe goal.
        2 - do actions.
        3 - ask some questions
        :param args:
        :return:
        """
    criterion = nn.CrossEntropyLoss()
    cum_correct, cum_total = [], []

    for trial in range(args.iters):
        phase_masks = data.make_new_problem()
        n_total, n_correct, loss = 0, 0, 0
        dnc_state = DNC.init_state(grad=False)
        optimizer.zero_grad()

        for phase_idx in phase_masks:
            if phase_idx == 0 or phase_idx == 1:
                inputs = _variable(data.getitem_combined())
                logits, dnc_state = DNC(inputs, dnc_state)
            else:
                final_moves = data.get_actions(mode='one')
                if final_moves == []:
                    break
                data.send_action(final_moves[0])
                mask = data.phase_oh[2].unsqueeze(0)
                inputs2 = _variable(
                    torch.cat([mask, data.vec_to_ix(final_moves[0])], 1))
                logits, dnc_state = DNC(inputs2, dnc_state)

                for _ in range(args.num_tests):
                    # ask where is ---?
                    if args.zero_at == 'step':
                        optimizer.zero_grad()
                    masked_input, mask_chunk, ground_truth = data.masked_input()
                    logits, dnc_state = DNC(_variable(masked_input), dnc_state)
                    expanded_logits = data.ix_input_to_ixs(logits)

                    # losses
                    lstep = L.action_loss(expanded_logits,
                                          ground_truth,
                                          criterion,
                                          log=True)
                    if args.opt_at == 'problem':
                        loss += lstep
                    else:
                        lstep.backward(retain_graph=args.ret_graph)
                        optimizer.step()
                        loss = lstep

                    # update counters
                    prediction = u.get_prediction(expanded_logits, [3, 4])
                    n_total, n_correct = tick(n_total, n_correct, mask_chunk,
                                              prediction)

        if args.opt_at == 'problem':
            loss.backward(retain_graph=args.ret_graph)
            optimizer.step()
            sl.writer.add_scalar('losses.end', loss.data[0], sl.global_step)

        cum_total.append(n_total)
        cum_correct.append(n_correct)
        sl.writer.add_scalar('recall.pct_correct', n_correct / n_total,
                             sl.global_step)
        print(
            "trial: {}, step:{}, accy {:0.4f}, cum_score {:0.4f}, loss: {:0.4f}"
            .format(trial, sl.global_step, n_correct / n_total,
                    running_avg(cum_correct, cum_total), loss.data[0]))
    return DNC, optimizer, dnc_state, running_avg(cum_correct, cum_total)
Example No. 25
def test_shortest_path_planning(args, data, DNC, lstm_state):
    criterion = nn.CrossEntropyLoss().cuda(
    ) if args.cuda is True else nn.CrossEntropyLoss()
    cum_correct, cum_total, prob_times, n_success = [], [], [], 0
    action_score = 0
    goal_score = 0
    total_actions = 0
    total_problems = 2
    print("\n")
    print("Accuracy Test is started ....")
    output_dict = {}
    for i in range(total_problems):
        start_prob = time.time()
        phase_masks = data.make_new_graph()
        print("Shortest Path :: ", data.shortest_path)
        n_total, n_correct, prev_action, loss, stats = 0, 0, None, 0, []
        dnc_state = DNC.init_state(grad=False)
        lstm_state = DNC.init_rnn(grad=False)  # lstm_state,

        input_history = []
        access_output_v = []
        m = []
        rw = []
        ww = []
        l = []
        lw = []
        uuu = []

        for phase_idx in phase_masks:

            if phase_idx == 0 or phase_idx == 1:
                inputs = _variable(data.getitem_combined())
                logits, dnc_state, lstm_state = DNC(inputs, lstm_state,
                                                    dnc_state)
                _, prev_action = data.strip_ix_mask(logits)
                input_history.append(inputs.data[0].numpy().tolist())
                access_output_v.append(
                    torch.squeeze(dnc_state[0].data).numpy().tolist())
                m.append(torch.squeeze(dnc_state[1].data).numpy().tolist())
                rw.append(torch.squeeze(dnc_state[2].data).numpy().tolist())
                ww.append(torch.squeeze(dnc_state[3].data).numpy().tolist())
                l.append(torch.squeeze(dnc_state[4].data).numpy().tolist())
                lw.append(torch.squeeze(dnc_state[5].data).numpy().tolist())
                uuu.append(torch.squeeze(dnc_state[6].data).numpy().tolist())

            elif phase_idx == 2:
                mask = _variable(data.getmask())
                inputs = torch.cat([mask, prev_action], 1)
                logits, dnc_state, lstm_state = DNC(inputs, lstm_state,
                                                    dnc_state)
                _, prev_action = data.strip_ix_mask(logits)
                input_history.append(inputs.data[0].numpy().tolist())
                access_output_v.append(
                    torch.squeeze(dnc_state[0].data).numpy().tolist())
                m.append(torch.squeeze(dnc_state[1].data).numpy().tolist())
                rw.append(torch.squeeze(dnc_state[2].data).numpy().tolist())
                ww.append(torch.squeeze(dnc_state[3].data).numpy().tolist())
                l.append(torch.squeeze(dnc_state[4].data).numpy().tolist())
                lw.append(torch.squeeze(dnc_state[5].data).numpy().tolist())
                uuu.append(torch.squeeze(dnc_state[6].data).numpy().tolist())

            else:
                best_nodes, all_nodes = data.get_actions()
                if not best_nodes:
                    break
                mask = data.getmask()
                prev_action = prev_action.cuda(
                ) if args.cuda is True else prev_action
                pr = u.depackage(prev_action)

                final_inputs = _variable(torch.cat([mask, pr], 1))
                logits, dnc_state, lstm_state = DNC(final_inputs, lstm_state,
                                                    dnc_state)
                input_history.append(final_inputs.data[0].numpy().tolist())
                access_output_v.append(
                    torch.squeeze(dnc_state[0].data).numpy().tolist())
                m.append(torch.squeeze(dnc_state[1].data).numpy().tolist())
                rw.append(torch.squeeze(dnc_state[2].data).numpy().tolist())
                ww.append(torch.squeeze(dnc_state[3].data).numpy().tolist())
                l.append(torch.squeeze(dnc_state[4].data).numpy().tolist())
                lw.append(torch.squeeze(dnc_state[5].data).numpy().tolist())
                uuu.append(torch.squeeze(dnc_state[6].data).numpy().tolist())
                exp_logits = data.ix_input_to_ixs(logits)
                current_state = data.STATE
                guided = random.random() < args.beta
                final_action, lstep = L.naive_loss_for_shortest_path(
                    exp_logits, all_nodes, current_state, criterion, log=True)
                print(
                    str(data.current_index) + " index, from: " +
                    str(current_state) + ", to: " + str(final_action))
                if final_action in best_nodes:
                    action_score = action_score + 1
                total_actions = total_actions + 1
                action_own = u.get_prediction(exp_logits)
                data.STATE = final_action
                prev_action = torch.from_numpy(
                    np.array(data.vec_to_ix([current_state,
                                             final_action])).reshape(
                                                 (1, 61))).float()
        if data.goal == final_action:
            goal_score = goal_score + 1
        end_prob = time.time()
        prob_times.append(end_prob - start_prob)
        p_output_dict = {}
        p_output_dict["inputs"] = input_history
        p_output_dict["access_outputs"] = access_output_v
        p_output_dict["m"] = m
        p_output_dict["rw"] = rw
        p_output_dict["ww"] = ww
        p_output_dict["l"] = l
        p_output_dict["lw"] = lw
        p_output_dict["u"] = uuu
        p_output_dict["phases"] = torch.squeeze(phase_masks).numpy().tolist()
        output_dict[i] = p_output_dict
    action_score = float(action_score) / float(total_actions)
    goal_score = float(goal_score) / float(total_problems)
    print("Accuracy Test is ended .... " + "action score: " +
          str(action_score) + ", goal score: " + str(goal_score))
    print("\n")
    with open("output.json", "w") as f:
        json.dump(output_dict,
                  f,
                  ensure_ascii=False,
                  indent=4,
                  sort_keys=True,
                  separators=(',', ': '))
    return action_score, goal_score
Example No. 26
"""

from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals


from lstm_char_model import LSTMCharModel
from utils import load_squad, get_prediction
from rouge import Rouge


x, y, ids = load_squad()
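# Slice SQuAD into 6400 training, 800 dev (6400:7200) and 900 test (7200:8100) examples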

x_train = [x[0][:6400], x[1][:6400]]
y_train = [y[0][:6400], y[1][:6400]]
id_train = ids[:6400]
x_dev = [x[0][6400:7200], x[1][6400:7200]]
y_dev = [y[0][6400:7200], y[1][6400:7200]]
id_dev = ids[6400:7200]
x_test = [x[0][7200:8100], x[1][7200:8100]]
y_test = [y[0][7200:8100], y[1][7200:8100]]
ids_test = ids[7200:8100]


lstm = LSTMCharModel()
lstm.load()

y_truth, y_pred = get_prediction(lstm, x_test, y_test)

Example No. 27
def train_shortest_path_plan(args, data, DNC, lstm_state, optimizer):
    criterion = nn.CrossEntropyLoss().cuda(
    ) if args.cuda is True else nn.CrossEntropyLoss()
    cum_correct, cum_total, prob_times, n_success = [], [], [], 0
    penalty = 1.1

    for trial in range(args.iters):
        if trial % 100 == 0:
            ___score, ___goal_score = test_shortest_path_planning(
                args, data, DNC, lstm_state)
        start_prob = time.time()
        phase_masks = data.make_new_graph()
        print("Shortest Path :: ", data.shortest_path)
        n_total, n_correct, prev_action, loss, stats = 0, 0, None, 0, []
        dnc_state = DNC.init_state(grad=False)
        lstm_state = DNC.init_rnn(grad=False)  # lstm_state,
        optimizer.zero_grad()

        for phase_idx in phase_masks:

            if phase_idx == 0 or phase_idx == 1:
                inputs = _variable(data.getitem_combined())
                logits, dnc_state, lstm_state = DNC(inputs, lstm_state,
                                                    dnc_state)
                _, prev_action = data.strip_ix_mask(logits)

            elif phase_idx == 2:
                mask = _variable(data.getmask())
                inputs = torch.cat([mask, prev_action], 1)
                logits, dnc_state, lstm_state = DNC(inputs, lstm_state,
                                                    dnc_state)
                _, prev_action = data.strip_ix_mask(logits)

            else:
                best_nodes, all_nodes = data.get_actions()
                if not best_nodes:
                    break
                if args.zero_at == 'step':
                    optimizer.zero_grad()

                mask = data.getmask()
                prev_action = prev_action.cuda(
                ) if args.cuda is True else prev_action
                # print("previous action: ", prev_action)
                pr = u.depackage(prev_action)

                final_inputs = _variable(torch.cat([mask, pr], 1))
                logits, dnc_state, lstm_state = DNC(final_inputs, lstm_state,
                                                    dnc_state)
                exp_logits = data.ix_input_to_ixs(logits)
                current_state = data.STATE
                guided = random.random() < args.beta
                sup_flag = None
                if guided:  # guided loss
                    final_action, lstep = L.naive_loss_for_shortest_path(
                        exp_logits,
                        best_nodes,
                        current_state,
                        criterion,
                        log=True)
                    sup_flag = "Yes"
                else:  # pick own move
                    final_action, lstep = L.naive_loss_for_shortest_path(
                        exp_logits,
                        all_nodes,
                        current_state,
                        criterion,
                        log=True)
                    sup_flag = "No"
                action_own = u.get_prediction(exp_logits)

                final_loss = lstep
                final_loss.backward(retain_graph=args.ret_graph)
                if args.clip:
                    torch.nn.utils.clip_grad_norm(DNC.parameters(), args.clip)
                optimizer.step()
                loss = lstep
                print(
                    "Supervised: " + sup_flag + ", " +
                    str(data.current_index) + " index, from: " +
                    str(current_state) + ", to: " + str(final_action) +
                    ", loss: ", final_loss.data[0])

                data.STATE = final_action

                prev_action = torch.from_numpy(
                    np.array(data.vec_to_ix([current_state,
                                             final_action])).reshape(
                                                 (1, 61))).float()

        #### under experiment ####
        goal_loss = L.action_loss_for_shortest_path(exp_logits,
                                                    data.goal,
                                                    current_state,
                                                    criterion,
                                                    log=True)
        goal_loss.backward(retain_graph=args.ret_graph)
        optimizer.step()
        print("Goal Loss: ", goal_loss.data[0])
        ####
        end_prob = time.time()
        prob_times.append(end_prob - start_prob)
    # print("solved {} out of {} -> {}".format(n_success, args.iters, n_success / args.iters))
    return DNC, optimizer, lstm_state, 0.  # running_avg(cum_correct, cum_total)