def conv_neural_network_model(name="conv_neural_network_model",
                              imconstpar=constantParametersImage(),
                              netconstpar=constantParametersNetwork()):

    convnet = input_data(
        shape=[None, imconstpar.height, imconstpar.width, imconstpar.channels],
        name='image_input')

    convnet = conv_2d(convnet, 32, 8, strides=4, activation='relu')
    #convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 64, 4, strides=2, activation='relu')
    #convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 64, 3, strides=1, activation='relu')
    #convnet = max_pool_2d(convnet, 2)

    convnet = fully_connected(convnet, 512, activation='relu')
    convnet = dropout(convnet, .8)

    convnet = fully_connected(convnet, 2, activation='softmax')

    convnet = regression(convnet,
                         optimizer='adam',
                         loss='mean_square',
                         name='targets')

    model = tflearn.DNN(convnet)

    return model
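
# --- Illustrative usage sketch (assumption, not part of the original code) ---
# Assuming image tensors `train_x` shaped [N, height, width, channels] and
# one-hot labels `train_y` shaped [N, 2], the tflearn.DNN returned above can be
# trained with feed dicts keyed by the layer names declared in the graph
# ('image_input' and 'targets'). The helper name and data arrays are hypothetical.
def example_train_image_model(train_x, train_y):
    model = conv_neural_network_model()
    model.fit({'image_input': train_x},
              {'targets': train_y},
              n_epoch=10,
              batch_size=128,
              show_metric=True)
    return model
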
def conv_reflectance_neural_network_model5(
    n_actions=3,
    name="conv_ref4",
    shape=None,
    imconstpar=constantParametersImage(),
    netconstpar=constantParametersNetwork()):

    convnet = input_data(
        shape=[None, imconstpar.height, imconstpar.width, imconstpar.channels],
        name='image_input')
    reflectance = tflearn.input_data(shape=[None, 6], name="reflectance_input")

    convnet = conv_2d(convnet,
                      32,
                      8,
                      strides=4,
                      activation='prelu',
                      name='conv1')
    #convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet,
                      64,
                      4,
                      strides=2,
                      activation='prelu',
                      name='conv2')
    #convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet,
                      64,
                      3,
                      strides=1,
                      activation='prelu',
                      name='conv3')
    #convnet = max_pool_2d(convnet, 2)

    convnet = fully_connected(convnet, 128, activation='softmax', name='fc1')
    convnet = dropout(convnet, .8)

    convnet = fully_connected(convnet, 3, activation='sigmoid', name='fc2')

    merged_net = merge([convnet, reflectance], mode='concat', axis=1)
    #convnet = regression(convnet, optimizer='SGD', loss='categorical_crossentropy', name='targets')

    merged_net = fully_connected(merged_net,
                                 12,
                                 activation='softmax',
                                 name='m_fc1')
    merged_net = fully_connected(merged_net,
                                 n_actions,
                                 activation='linear',
                                 name='m_fc2')
    merged_net = regression(merged_net,
                            optimizer='adam',
                            learning_rate=netconstpar.l_rate,
                            loss='mean_square',
                            name='targets')
    model = tflearn.DNN(merged_net)

    return model, name
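
# --- Illustrative usage sketch (assumption, not part of the original code) ---
# The merged model above takes two named inputs. Assuming a camera frame
# `frame` with shape (height, width, channels) and a 6-element reflectance
# reading `refl` (both numpy arrays; names are hypothetical), Q-values for the
# n_actions outputs can be read back with a feed dict, as the training loop
# later in this file does:
def example_predict_q_values(model, frame, refl,
                             imconstpar=constantParametersImage()):
    q_values = model.predict({
        'image_input': frame.reshape(
            [-1, imconstpar.height, imconstpar.width, imconstpar.channels]),
        'reflectance_input': refl.reshape([-1, 6])
    })
    return np.argmax(q_values)  # index of the highest-valued action
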
def main():
    imgPars   = constantParametersImage()
    camera    = Camera(width=imgPars.width, height=imgPars.height)
    camera.sensor_get_value()
    find_object = FindObject([camera])
    #motors    = Motors()
    #infrared  = ReflectanceSensors()
    #infrared.calibrate()
    #infrared.update()
    sleep(5)
    print("setup")
        
    # Exit the program cleanly on a keyboard interrupt (Ctrl+C)
    def signal_handler(signal, frame):
        #motors.stop()
        GPIO.cleanup()
        sys.exit(0)
       
    signal.signal(signal.SIGINT, signal_handler)
    
    
    ##motors.forward(dur=1)
    iteration = 0
    # while(not isGoal(infrared) and iteration < 5):
    #     motors.forward(dur=1)
    #     infrared.update()
    #     sleep(1)
    #     iteration += 1
    #motors.stop()
    GPIO.cleanup()
    def build_neural_network(self):
        param = constantParametersImage()
        n_classes = 2

        b_size = 128
        l_rate = 0.001
        convnet = input_data(
            shape=[None, param.height, param.width, param.channels],
            name='input')

        convnet = conv_2d(convnet, 32, 8, strides=4, activation='relu')
        #convnet = max_pool_2d(convnet, 2)

        convnet = conv_2d(convnet, 64, 4, strides=2, activation='relu')
        #convnet = max_pool_2d(convnet, 2)

        convnet = conv_2d(convnet, 64, 3, strides=1, activation='relu')
        #convnet = max_pool_2d(convnet, 2)

        convnet = fully_connected(convnet, 512, activation='relu')
        convnet = dropout(convnet, .8)

        convnet = fully_connected(convnet, n_classes, activation='softmax')

        convnet = regression(convnet,
                             optimizer='adam',
                             batch_size=b_size,
                             learning_rate=l_rate,
                             loss='categorical_crossentropy',
                             name='targets')

        model = tflearn.DNN(convnet)
        model.load('tflearncnnmug.model')

        return model
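
    # --- Illustrative usage sketch (assumption, not part of the original code) ---
    # model.load('tflearncnnmug.model') above only works if the architecture
    # built here matches the one that produced that checkpoint. Assuming a
    # single camera frame `img` shaped (height, width, channels), a class
    # prediction could be read back like this (helper name is hypothetical):
    def example_classify_frame(self, model, img):
        param = constantParametersImage()
        prediction = model.predict(
            img.reshape([-1, param.height, param.width, param.channels]))
        return np.argmax(prediction)  # index of the most likely class
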
def __main__():
    print("start")
    discount_factor = 0.8
    n_actions = 3
    constant = cpi.constantParametersImage()
    constantNet = cpi.constantParametersNetwork()

    print("setting up model")

    q_net, name = neuralnets.ref2(n_actions=n_actions)

    print("creating trainer")

    trainer = Trainer(q_net, constant, constantNet, n_actions=n_actions)

    modelh = ModelHandler()
    exph = ExperienceHandler()

    if (len(sys.argv) > 1):
        print("loading model")
        modelh.load(sys.argv[1], q_net)

    q_dash = q_net

    training = True

    experiences = exph.load_experiences()
    experi = np.array(experiences).shape
    print(experi)
    sleep(5)
    q_net = trainer.simulate_training(experiences, q_net)

    overwrite = False
    name += ".model"
    if (len(sys.argv) > 2):
        if (sys.argv[2] == 'o'):
            overwrite = True
        if (sys.argv[1] != name):
            print("input name does not match net name")
            name = sys.argv[1]

    modelh.save(name, q_net, overwrite=overwrite)
def __main__():
    #initialize all components

    def signal_handler(signal, frame):
        print("free resources")
        motors.stop()
        camera.close()
        GPIO.cleanup()
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)

    discount_factor = 0.9
    constant = cpi.constantParametersImage()
    constNet = cpi.constantParametersNetwork()
    motors = Motors()
    camera = None
    reflectance_sensors = None
    if constNet.has_camera:
        camera = Camera(constant.width, constant.height, save=True)
    print("completed stting up cam")
    print(camera)
    if constNet.has_ref:
        reflectance_sensors = ReflectanceSensors(motob=motors,
                                                 auto_calibrate=True)

    reward_function = LineFollowerRewardFunction(RewardFunction,
                                                 reflectance_sensors)
    action_executor = RobotActionExecutor(motors)
    q_net, name = neuralnets.cam1(n_actions=action_executor.n_actions)

    trainer = Trainer(q_net,
                      constant,
                      constNet,
                      n_actions=action_executor.n_actions)
    modelh = ModelHandler()

    # check if we want to load some previous model or start
    # with a fresh one
    if (len(sys.argv) > 1):
        modelh.load(sys.argv[1], q_net)

    q_dash = q_net

    #train_y = train_y.reshape([-1, 2])
    episodes = 20
    max_step = 1000
    sec_cd = 5
    experience = []
    current_cam_state = False
    updated_cam_state = False
    training = True
    for i in range(episodes):  # episode loop
        motors.stop()
        print("get ready for the next episode(" + str(i) + ")...")
        print(sec_cd)
        sleep(sec_cd)
        is_final_state = False
        step = 0

        reflectance_sensors.update()
        updated_ref_state = reflectance_sensors.get_value()
        print(updated_ref_state)
        camera.update()
        updated_cam_state = camera.get_value()

        while step < max_step and not (is_final_state):  #main while loop
            # Store current state
            action = -1
            print("step " + str(step))
            print(updated_ref_state)
            current_ref_state = updated_ref_state
            current_cam_state = updated_cam_state

            # Choose an action: explore randomly with decaying probability,
            # otherwise exploit the action with the highest predicted Q-value
            was_random = True
            if (rdm.random() < (.1 + (1 / (1 + step * i))) and training):

                action = rdm.randint(0, action_executor.n_actions - 1)
            else:
                q_values = q_net.predict({
                    #'reflectance_input': current_ref_state.reshape([-1, 6])
                    #,
                    'image_input':
                    current_cam_state.reshape([
                        -1, constant.height, constant.width, constant.channels
                    ])
                })
                print("q_values for actions are:")
                print(q_values)
                was_random = False
                action = np.argmax(q_values)
            #end random if-else

            # get reward for current state

            # Do selected action (update state)
            action_executor.do_action(action)
            sleep(0.1)
            motors.stop()
            step += 1

            reflectance_sensors.update()
            camera.update()
            updated_ref_state = reflectance_sensors.get_value()
            updated_cam_state = camera.get_value()
            reward, is_final_state = reward_function.calculate_reward(
                updated_ref_state, action, was_random)
            # Store transition of (current_state, action, reward, updated_state)
            # Change to append if you want to store all experiences
            if (training):
                q_net = trainer.train(q_net, [
                    action, reward, current_ref_state, updated_ref_state,
                    is_final_state, current_cam_state, updated_cam_state
                ],
                                      step,
                                      i,
                                      discount_factor,
                                      motors=motors)

        #end main while
    #end episode for loop
    if (training):
        print("save from main")
        trainer.save_experiences_to_file()

    motors.stop()
    overwrite = False
    if (len(sys.argv) > 2):
        if (sys.argv[2] == 'o'):
            overwrite = True
    print("saving as")
    print(name)
    modelh.save(name + ".model", q_net, overwrite=overwrite)

    print("free resources")
    motors.stop()
    if (constNet.has_camera):
        camera.close()
    GPIO.cleanup()
    sys.exit(0)

    print("done")
import tflearn
import tensorflow as tf
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from create_image_data_set import create_feature_sets_and_labels
import numpy as np
import sys
import os
from constparimg import constantParametersImage, constantParametersNetwork
# with open("image_dataset.pickle", "rb") as f:  # b for binary
#     train_x, train_y, test_x, test_y = pickle.load(f)

tf.logging.set_verbosity(tf.logging.ERROR)

constantsImg = constantParametersImage()
constantsNet = constantParametersNetwork()

train_x, train_y, test_x, test_y = create_feature_sets_and_labels(
    'posBat', 'negBat')

print(len(train_y[0]))
n_classes = len(train_y[0])
n_epochs = constantsNet.n_epochs
b_size = constantsNet.batch_size
l_rate = constantsNet.learning_rate

shape = train_x[0].shape
print("shape")
print(shape)
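
# --- Illustrative continuation sketch (assumption, not part of the original code) ---
# The snippet stops after preparing the data. A typical continuation, mirroring
# the network definitions earlier in this file, builds the CNN and fits it with
# feed dicts keyed by the 'input'/'targets' layer names. This assumes each
# sample in train_x / test_x already has shape (height, width, channels).
def example_build_and_fit():
    net = input_data(
        shape=[None, constantsImg.height, constantsImg.width,
               constantsImg.channels],
        name='input')
    net = conv_2d(net, 32, 8, strides=4, activation='relu')
    net = conv_2d(net, 64, 4, strides=2, activation='relu')
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, .8)
    net = fully_connected(net, n_classes, activation='softmax')
    net = regression(net,
                     optimizer='adam',
                     batch_size=b_size,
                     learning_rate=l_rate,
                     loss='categorical_crossentropy',
                     name='targets')
    model = tflearn.DNN(net)
    model.fit({'input': np.array(train_x)},
              {'targets': np.array(train_y)},
              n_epoch=n_epochs,
              validation_set=({'input': np.array(test_x)},
                              {'targets': np.array(test_y)}),
              show_metric=True)
    return model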