Example #1
import cv2

# `Network`, `preprocessing`, `handle_output`, and `create_output_image` are
# assumed to be project-local helpers imported elsewhere in this module.
def perform_inference(args):
    '''
    Performs inference on an input image, given a model.
    '''
    # Create a Network for using the Inference Engine
    inference_network = Network()
    # Load the model in the network, and obtain its input shape
    n, c, h, w = inference_network.load_model(args.m, args.d, args.c)

    # Read the input image
    image = cv2.imread(args.i)

    # Preprocess the input image to the network's expected input shape
    preprocessed_image = preprocessing(image, h, w)

    # Perform synchronous inference on the image
    inference_network.sync_inference(preprocessed_image)

    # Obtain the output of the inference request
    output = inference_network.extract_output()

    # Handle the output of the network, based on args.t: `handle_output`
    # returns the handler function for the model type, and the raw output
    # is then fed to that function.
    process_func = handle_output(args.t)
    processed_output = process_func(output, image.shape)

    # Create an output image based on the network output
    output_image = create_output_image(image, processed_output, args)

    # Save the resulting image
    print("You look good!")
    cv2.imwrite("outputs/tshirt_try_on.png", output_image)
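Example #1 above calls a `preprocessing` helper that is not part of the
snippet. A minimal sketch, assuming the usual OpenVINO convention of resizing
to the network's input size and reordering HWC to NCHW; the real helper may
differ:

import cv2
import numpy as np

def preprocessing(image, h, w):
    '''Resize to the network input size and convert HWC to NCHW.'''
    resized = cv2.resize(image, (w, h))        # cv2.resize expects (width, height)
    transposed = resized.transpose((2, 0, 1))  # HWC -> CHW
    return np.expand_dims(transposed, 0)       # add the batch dimension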
Example #2
    def test(self, video=False):
        env = gym.make(self.env_name)
        if video:
            # os.path.join keeps the video directory portable across platforms
            env = gym.wrappers.Monitor(env=env,
                                       directory=os.path.join(path, "videos", "assault"),
                                       force=True)
        done = False
        score = 0
        curr_frame = env.reset()

        next_frame = curr_frame

        # do nothing at the start
        for _ in range(random.randint(1, self.wait_at_start)):
            curr_frame = next_frame
            next_frame, _, _, _ = env.step(1)

        # At the start of an episode there is no preceding frame, so the
        # initial frame is stacked four times to build the history.
        state = preprocessing(next_frame, curr_frame)
        history = np.stack((state, state, state, state), axis=2)
        history = np.reshape([history], (1, 84, 84, 4))

        while not done:

            env.render()

            act = self.actor.predict(np.float32(history / 255.))[0]

            actId = np.argmax(act)

            permitted_action = actId + 1

            next_frame, reward, done, info = env.step(permitted_action)
            next_state = preprocessing(next_frame, curr_frame)
            next_state = np.reshape([next_state], (1, 84, 84, 1))
            next_history = np.append(next_state, history[:, :, :, :3], axis=3)

            score += reward
            reward = np.clip(reward, -1., 1.)
            history = next_history
        return score
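Both Example #2 above and Example #6 below call a two-frame `preprocessing`
helper that is not shown. A minimal sketch, assuming the common Atari pipeline
of taking the pixel-wise max over consecutive frames (to remove sprite
flicker), converting to grayscale, and resizing to 84x84; the actual helper
may differ:

import cv2
import numpy as np

def preprocessing(next_frame, curr_frame):
    '''Merge two consecutive frames and downscale to an 84x84 grayscale image.'''
    frame = np.maximum(next_frame, curr_frame)       # pixel-wise max removes flicker
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # drop color
    return cv2.resize(frame, (84, 84)).astype(np.uint8)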
Example #3
def telemetry(sid, data):
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)
    image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2HSV)

    image_array, _ = helper.preprocessing(image_array, augmentation=False)
    transformed_image_array = image_array[None, :, :, :]
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = float(model.predict(transformed_image_array,
                                         batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.4 if float(speed) > 7.0 else 1.0
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
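Example #3's handler passes its commands to `send_control`, which is not shown.
In the standard Udacity simulator drive.py this function emits over a
python-socketio server; a minimal sketch, assuming a module-level `sio` server
object:

def send_control(steering_angle, throttle):
    # Emit the control commands back to the simulator
    sio.emit("steer",
             data={'steering_angle': str(steering_angle),
                   'throttle': str(throttle)},
             skip_sid=True)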
Example #4
                                      top_url)
                        except Exception:
                            print("Error while crawling url")
                    else:
                        print("Not allowed by robot " + base_url)
                else:
                    print("Url already crawled")
            except Exception:
                pass
    result_file.close()


if __name__ == '__main__':
    start_time = time.time()
    maritime_keyword_list, stopword_list, aircraft_list = preprocessing()

    seed_urls = [
        'http://en.wikipedia.org/wiki/List_of_maritime_disasters',
        'http://en.wikipedia.org/wiki/Sinking_of_the_MV_Sewol',
        'https://www.nytimes.com/2019/06/10/world/asia/sewol-ferry-accident.html',
        'https://www.bbc.com/news/world-asia-39361944',
        'https://www.history.com/news/5-maritime-disasters-you-might-not-know-about'
    ]

    frontierManager = PQueue()
    score = 4
    for url in seed_urls:
        frontierManager.add_task(url, priority=-score)
        wave[url] = 1
        inlinks[url] = []
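`PQueue` is not defined in Example #4's fragment. A minimal sketch of a
priority queue with the `add_task` interface used above, following the heapq
recipe from the Python documentation (lower values pop first, hence
`priority=-score` for high-priority seeds); the real class may differ:

import heapq
import itertools

class PQueue:
    def __init__(self):
        self.heap = []
        self.counter = itertools.count()  # tie-breaker for equal priorities

    def add_task(self, task, priority=0):
        heapq.heappush(self.heap, (priority, next(self.counter), task))

    def pop_task(self):
        _, _, task = heapq.heappop(self.heap)
        return task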
Example #5
import time
import logging

import numpy as np
import tensorflow as tf

# `helper`, `FLAGS`, and `TESTING` are assumed to be module-level
# imports/globals defined elsewhere in this project.
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
             cross_entropy_loss, input_image, correct_label, keep_prob,
             learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.
           Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: Actually not the learning rate but nn_last_layer
           (see the note below)
    """

    # TODO: Split training across multiple GPUs. As I don't have multiple GPUs
    # available for this project, I didn't implement that.

    # TODO: This project does not take advantage of TF Queues, TF Datasets, or
    # Iterators because of function-signature restrictions and because I didn't
    # want to define VGG from scratch. We use inefficient placeholders instead.

    # NB: because I needed the project tests to pass as well as the summaries,
    # I simply replaced learning_rate with nn_last_layer.
    outputs = tf.cast(tf.expand_dims(tf.argmax(learning_rate, axis=-1), -1),
                      tf.uint8) * 100
    correct_lbl = tf.cast(
        tf.expand_dims(tf.argmax(correct_label, axis=-1), -1), tf.uint8) * 100

    # Stack input, ground truth, and prediction vertically for the image summary
    outputs = tf.concat([
        tf.cast(input_image, tf.uint8),
        tf.concat([correct_lbl, correct_lbl, correct_lbl], axis=-1),
        tf.concat([outputs, outputs, outputs], axis=-1)
    ], axis=1)

    loss_summ = tf.summary.scalar("training_loss", cross_entropy_loss)

    im_summ = tf.summary.image('images', outputs, max_outputs=10)

    if not TESTING:
        summary_writer = tf.summary.FileWriter(logdir='./logdir',
                                               graph=sess.graph,
                                               flush_secs=60,
                                               filename_suffix=FLAGS.test_name)

    # Initialize TF variables
    init_d = tf.global_variables_initializer()
    sess.run(init_d)

    logging.info("Going to train on %d epochs", epochs)

    counter = 1
    for epch in range(epochs):
        generator = get_batches_fn(batch_size)
        for i, batch_data in enumerate(generator):
            start_time = time.time()

            images = batch_data[0]  # Get batch images
            # Get batch labels (batch_size, h, w, num_classes) <-- bool
            labels = batch_data[1].astype(np.uint8)

            # Usually we would do augmentation with TensorFlow ops, but because
            # training for this project is fast, there are not many training
            # samples, and the project structure is largely predefined, we
            # settle for cv2/numpy transformations.

            if len(images.shape) == 4:
                images, labels = helper.preprocessing(images, labels)

            feed_dict = {
                input_image: images,
                correct_label: labels,
                keep_prob: 0.5
            }

            if counter % 30 == 0:
                ops = [train_op, cross_entropy_loss, loss_summ, im_summ]
                _, loss, lsumm, imsumm = sess.run(ops, feed_dict=feed_dict)

                if not TESTING:
                    summary_writer.add_summary(imsumm, counter)
                    summary_writer.add_summary(lsumm, counter)
            else:
                _, loss, lsumm = sess.run(
                    [train_op, cross_entropy_loss, loss_summ],
                    feed_dict=feed_dict)

                if not TESTING:
                    summary_writer.add_summary(lsumm, counter)

            counter += 1
            end_time = time.time() - start_time
            logging.info('[Time: %.3f] Epoch: %d, Batch: %d Loss: %f',
                         end_time, epch + 1, i + 1, loss)

        logging.info('Epoch %d done!', epch + 1)

    if not TESTING:
        summary_writer.close()
    logging.info('End of training!')
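The batch-level `helper.preprocessing` augmentation called in Example #5 is
not included. A minimal sketch, assuming simple random horizontal flips
applied identically to images and labels; the actual helper may do more:

import numpy as np

def preprocessing(images, labels):
    '''Randomly flip each image/label pair horizontally.'''
    for i in range(images.shape[0]):
        if np.random.rand() < 0.5:
            images[i] = images[i, :, ::-1]  # flip along the width axis
            labels[i] = labels[i, :, ::-1]
    return images, labels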
Example #6
# `run` below is a worker-thread method; gym, numpy (as np), random, and the
# two-frame `preprocessing` helper from Example #2 are assumed to be available
# at module level.
    def run(self):
        global episode_counter

        env = gym.make(self.env_name)

        step = 0

        while episode_counter < self.episodes:
            done = False
            dead = False
            score = 0
            curr_frame = env.reset()
            next_frame = curr_frame

            # do nothing at the start
            for _ in range(random.randint(1, 30)):
                curr_frame = next_frame
                next_frame, _, _, info = env.step(1)

            start_life = info['ale.lives']

            # stack the initial frame four times to build the history
            state = preprocessing(next_frame, curr_frame)
            history = np.stack((state, state, state, state), axis=2)
            history = np.reshape([history], (1, 84, 84, 4))

            while not done:
                step += 1
                self.t += 1
                curr_frame = next_frame
                # get action for the current history and go one step in environment
                action, policy = self.get_action(history)

                permitted_action = action + 1

                if dead:
                    action = 0
                    permitted_action = action + 1
                    dead = False

                next_frame, reward, done, info = env.step(permitted_action)

                # append the new frame observed after the action
                next_state = preprocessing(next_frame, curr_frame)
                next_state = np.reshape([next_state], (1, 84, 84, 1))
                next_history = np.append(next_state,
                                         history[:, :, :, :3],
                                         axis=3)

                # accumulate the max policy probability (for logging)
                self.avg_p_max += np.amax(
                    self.actor.predict(np.float32(history / 255.)))

                # if they shoot me I die but the episode continues
                if start_life > info['ale.lives']:
                    dead = True
                    start_life = info['ale.lives']

                # keep the score
                score += reward
                # clip rewards to reduce noise from large magnitudes
                reward = np.clip(reward, -1., 1.)

                # save a sample (s, a, r, s')
                self.memory(history, action, reward)

                # If the agent dies, just reset the history
                if dead:
                    history = np.stack(
                        (next_state, next_state, next_state, next_state),
                        axis=2)
                    history = np.reshape([history], (1, 84, 84, 4))
                else:
                    history = next_history

                if self.t >= self.t_max or done:
                    self.train_model(done)
                    self.update_localmodel()
                    self.t = 0

                # At the end of the episode, print the score
                if done:
                    episode_counter += 1
                    print("episode:", episode_counter, "  score:", score,
                          "  step:", step)

                    stats = [score, self.avg_p_max / float(step), step]
                    for i in range(len(stats)):
                        self.sess.run(self.update_ops[i],
                                      feed_dict={
                                          self.summary_placeholders[i]:
                                          float(stats[i])
                                      })
                    # update log files
                    summary_str = self.sess.run(self.summary_op)
                    self.summary_writer.add_summary(summary_str,
                                                    episode_counter + 1)
                    self.avg_p_max = 0
                    self.avg_loss = 0
                    step = 0
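Example #6's `run` calls `get_action`, which is not shown. A minimal sketch,
assuming the usual A3C setup where the actor outputs a softmax policy and the
action index is sampled from it; the real method may differ:

    def get_action(self, history):
        # Sample an action index from the current policy distribution
        policy = self.actor.predict(np.float32(history / 255.))[0]
        action = np.random.choice(len(policy), p=policy)
        return action, policy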