def auto_drive(img_name):

    # Skip the position check when detection is disabled
    res = 1 if not detect else check_position()
    if res == 1:
        # If we are on the right track
        # Start a fresh blocked-action list for this step if needed
        if len(sa_lst) == len(block_lst):
            block_lst.append([])
        md_img, _ = process_image(img_name, None, False)
        pred_act = model.predict(np.array([md_img]))[0]
        print("Lft: %.2f | Fwd: %.2f | Rght: %.2f" %
              (pred_act[1], pred_act[0], pred_act[2]))
        act_i = np.argmax(pred_act)
        if block_lst:
            # Mask out actions that are already blocked at the current position
            while pred_act[act_i] >= 0 and act_i in block_lst[-1]:
                pred_act[act_i] = -1.
                act_i = np.argmax(pred_act)
        if pred_act[act_i] < 0:
            # Every action is blocked: undo the last step and back up
            block_lst.pop()
            reverse_motion()
        else:
            send_control(act_i, img_name)
        return pred_act, act_i
    elif res == -1:
        # If we cannot detect where we are
        print("Error: cannot identify position")
        return -1, -1
    else:
        # If we are outside the track
        try:
            reverse_motion()
        except Exception:
            print("Error: cannot reverse the action")
Example #2
def draw_predict():
	## Get the predicted steering angle
	md_img, _ = process_image(img_name, None, False)
	pred_prob = model.predict(np.array([md_img]))[0]
	pred_angle = get_steering(np.argmax(pred_prob))

	## Draw green prediction line
	x_shift = r * sin(pred_angle)
	y_shift = r * cos(pred_angle)
	cv2.line(img, (int(width / 2 - x_shift), int(height - y_shift)), (width // 2, height), (0, 255, 0), 2)
	return pred_angle, pred_prob
def predict_animal(file):
    print("Predicting .................................")
    ar = convert_to_array(file)
    ar = ar / 255
    a = [ar]
    a = np.array(a)
    score = model.predict(a, verbose=1)
    label_index = np.argmax(score)
    acc = np.max(score)
    animal = get_animal_name(label_index)

    print("The predicted animal is a " + animal + " with accuracy =    " + str(acc))
Example #4
def draw_predict():
	## Get the predicted steering angle and clip it to the allowed range
	md_img, _ = process_image(img_name, None, False)
	pred_angle = model.predict(np.array([md_img]))[0][0]
	if pred_angle > max_angle:
		pred_angle = max_angle
	elif pred_angle < -max_angle:
		pred_angle = -max_angle

	## Draw green prediction line
	x_shift = 1.2 * r * sin(pred_angle)
	y_shift = 1.2 * r * cos(pred_angle)
	cv2.line(img, (int(width / 2 - x_shift), int(height - y_shift)), (width // 2, height), (0, 255, 0), 3)
	return pred_angle
Example #5
def auto_drive(img_name):

    md_img, _ = process_image(img_name, None, False)
    pred_angle = model.predict(np.array([md_img]))[0][0]
    # Map the regressed steering angle onto the discrete action indices used above
    if pred_angle >= max_angle / 2.0:
        act_i = 1
        if pred_angle > max_angle:
            pred_angle = max_angle
    elif pred_angle <= -max_angle / 2.0:
        act_i = 2
        if pred_angle < -max_angle:
            pred_angle = -max_angle
    else:
        act_i = 0
    # send_control(act_i)
    return pred_angle, act_i
Example #6
def telemetry(sid, data):
    # The current image from the center camera of the car
    img_str = data["image"]
    speed = float(data["speed"])

    # Set the throttle.
    throttle = 1.2 - (speed / target_speed)

    # read and process image
    image_bytes = BytesIO(base64.b64decode(img_str))
    image, _ = process_image(image_bytes, None, False)

    # make prediction on steering
    sa = model.predict(np.array([image]))[0][0]

    print(sa, throttle)
    send_control(sa, throttle)
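telemetry and send_control are only meaningful once they are wired to a Socket.IO server that the driving simulator connects to; a minimal sketch of that wiring, assuming the usual python-socketio / eventlet / Flask setup (the event and field names follow the common Udacity-style drive.py and are not confirmed by this snippet):

import eventlet
import socketio
from flask import Flask

sio = socketio.Server()
app = Flask(__name__)

@sio.on('telemetry')
def telemetry(sid, data):
    ...  # the handler shown above

def send_control(steering_angle, throttle):
    # Push the predicted steering angle and throttle back to the simulator
    sio.emit('steer',
             data={'steering_angle': str(steering_angle),
                   'throttle': str(throttle)},
             skip_sid=True)

if __name__ == '__main__':
    app = socketio.WSGIApp(sio, app)
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)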
def label_predict(test_data):

    okt = Okt()
    clean_test = []

    for x in tqdm(test_data):

        if type(x) == str:
            clean_test.append(
                train.preprocessing(x,
                                    okt,
                                    remove_stopwords=True,
                                    stop_words=stop_words))
        else:
            clean_test.append([])

    # Refit a tokenizer on the training text so the word index matches training
    test_tokenizer = Tokenizer()
    test_tokenizer.fit_on_texts(clean_train_text)
    test_sequences = test_tokenizer.texts_to_sequences(clean_test)
    test_word_vocab = test_tokenizer.word_index
    MAX_SEQUENCE_LENGTH = 30

    test_inputs = pad_sequences(test_sequences,
                                maxlen=MAX_SEQUENCE_LENGTH,
                                padding='post')

    x_test = train.vectorize_sequences(test_inputs)

    predictions = model.predict(x_test)

    test_predict = []
    for i in range(len(test_data)):
        a = np.argmax(predictions[i])
        test_predict.append(a)

    PREDICT = []
    for i in range(len(test_data)):
        for j in range(len(LABEL)):
            if test_predict[i] == LABEL[j][0]:
                PREDICT.append(LABEL[j][1])

    with open('result.txt', 'w') as f:
        for line in PREDICT:
            f.write(line)
            f.write("\n")
		def loop():
			action=np.zeros(5)
			while self.control:
				#action = (self.cost_position, action, maxiter=1000, maxfun=10000)
				#action = fmin_bfgs(f=self.cost_position, x0=action, gtol=0.1, epsilon=0.001)
				#action = fmin_ncg(f=self.cost_position, fprime=None, x0=action, epsilon=0.001)
				(action, nfeval, rc) = fmin_tnc(self.cost_position, approx_grad=True, fprime=None, x0=action, epsilon=0.001)

				print(action)


				#exit()

				pred_y = model.predict(np.hstack([latest, action]))

				action[4] = 0
				action[0] = 0

				'''
				if len(np.zeros(action)) == 5:
					print "no data for command, doing a random"
					action = np.random.rand(5) - 0.5
					action[4] = 0
					action[0] = 0
				'''

				x = np.hstack([latest, action])
				y = applyMove(action)


				print " self.target ",  self.target
				print " x           ",  x
				print " pred dx     ",  pred_y
				print " actual      ",  y

				model.update(x,y)
plt.ylabel('dice_coef')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Draw Loss
plt.plot(results.history['loss'])
plt.plot(results.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Getting the predictions
predictions = model.predict([X_scan, X_seg])
predictions = predictions[:, :, :, 0]  # ditch the channel dimension
print(predictions.shape)

# Writing the results to the output file
for i in range(predictions.shape[0]):
    output_file = "./output/output" + str(i + 1)
    with open(output_file, 'w+') as f:
        np.savetxt(f, predictions[i])

# Visualize the segmentations
fig, axs = plt.subplots(1, 4, figsize=(15, 15))

for i in range(predictions.shape[0]):
    seg_to_show = predictions[i]
Example #10
# from lib code
model.layers[2].load_weights('weights/weights.h5')

anchor_audios = glob.glob('data/{}/*.wav'.format(
    sorted(os.listdir('data'))[0]))
anchor_audio = audio2vector(anchor_audios[random.randint(
    0,
    len(anchor_audios) - 1)])

threshold = 0.3
with open('weights/threshold.txt') as f:
    threshold = float(f.read())

# 1 if the recorded sample matches the anchor speaker, 0 otherwise
text = 0

try:
    write('sample.wav', fs, myrecording)  # Save as WAV file

    audio = audio2vector('sample.wav')
    prediction = model.predict([[audio.reshape((20, 400, 1))],
                                [anchor_audio.reshape((20, 400, 1))]])
    prediction = prediction[0, 0]

    text = 1 if prediction < threshold else 0
    # print(threshold, prediction)

except Exception:
    # On any failure, fall through and report no match
    pass

print(text, end='')
def apply_mask(img, mask_size, step, nums):
    global model_time
    width = 320
    height = 120
    mask = img.copy()
    # One heat map per action: forward, left, right, reverse (see the labels drawn below)
    map0 = Image.new("RGB", (width, height))
    map1 = Image.new("RGB", (width, height))
    map2 = Image.new("RGB", (width, height))
    map3 = Image.new("RGB", (width, height))
    pixels = mask.load()
    pixels0 = map0.load()
    pixels1 = map1.load()
    pixels2 = map2.load()
    pixels3 = map3.load()
    for i in range(mask_size):
        for j in range(mask_size):
            if i < height and j < width:
                pixels[j, i] = (127, 127, 127)
    # Slide the occlusion patch across the image and re-run the model at each position
    for i in range(0, height, step):
        for j in range(0, width, step):
            new_img = img.copy()
            pixels = new_img.load()
            for r in range(i - mask_size // 2, i + (mask_size + 1) // 2):
                for c in range(j - mask_size // 2, j + (mask_size + 1) // 2):
                    if r >= 0 and r < height and c >= 0 and c < width:
                        pixels[c, r] = (127, 127, 127)
            model_start = datetime.datetime.now()
            aimage = img_to_array(new_img)
            aimage = aimage.astype(np.float32) / 255
            aimage = aimage - 0.5
            res = model.predict(np.array([aimage]))[0]
            model_time += datetime.datetime.now() - model_start
            # print(res)
            for r in range(i, min(i + step, height)):
                for c in range(j, min(j + step, width)):
                    pixels0[c, r] = (int(
                        (1 - res[0]) * 255), 0, int(res[0] * 255))
                    pixels1[c, r] = (int(
                        (1 - res[1]) * 255), 0, int(res[1] * 255))
                    pixels2[c, r] = (int(
                        (1 - res[2]) * 255), 0, int(res[2] * 255))
                    pixels3[c, r] = (int(
                        (1 - res[3]) * 255), 0, int(res[3] * 255))
    draw0 = ImageDraw.Draw(map0)
    draw1 = ImageDraw.Draw(map1)
    draw2 = ImageDraw.Draw(map2)
    draw3 = ImageDraw.Draw(map3)
    draw = ImageDraw.Draw(mask)
    draw0.text((0, 0), "Forward %.2f" % nums[0], (0, 255, 0), font)
    draw1.text((0, 0), "Left %.2f" % nums[1], (0, 255, 0), font)
    draw2.text((0, 0), "Right %.2f" % nums[2], (0, 255, 0), font)
    draw3.text((0, 0), "Reverse %.2f" % nums[3], (0, 255, 0), font)
    draw.text((0, 0), str(mask_size), (0, 255, 0), font)
    # 640 x 1440 canvas: the input, the four action maps, and the mask preview stacked vertically
    final = Image.new("RGB", (width * 2, 12 * height))
    img = img.resize((640, 240), PIL.Image.ANTIALIAS)
    map0 = map0.resize((640, 240), PIL.Image.ANTIALIAS)
    map1 = map1.resize((640, 240), PIL.Image.ANTIALIAS)
    map2 = map2.resize((640, 240), PIL.Image.ANTIALIAS)
    map3 = map3.resize((640, 240), PIL.Image.ANTIALIAS)
    mask = mask.resize((640, 240), PIL.Image.ANTIALIAS)

    final.paste(img, (0, 0, 640, 240))
    final.paste(map0, (0, 240, 640, 480))
    final.paste(map1, (0, 480, 640, 720))
    final.paste(map2, (0, 720, 640, 960))
    final.paste(map3, (0, 960, 640, 1200))
    final.paste(mask, (0, 1200, 640, 1440))
    images.append(final)


inp = Image.open(sys.argv[1])

model = model(True, (120, 320, 3), tr_model=sys.argv[2])
inp = inp.resize((320, 120))
font = ImageFont.truetype("/System/Library/Fonts/SFNSText.ttf", 16)

aimage = img_to_array(inp)
aimage = aimage.astype(np.float32) / 255
aimage = aimage - 0.5
res = model.predict(np.array([aimage]))[0]

# Generate an occlusion map for each patch size and save it to disk
for i in range(1, 120, 4):
    apply_mask(inp, i, 5, res)
    img = images.pop(0)
    img.save("map" + str(i) + ".jpg")
print(datetime.datetime.now() - start)
print(model_time)