Example #1
def mail(self, name=0, **kwargs):
    name = str(name)
    cfg = Config.smtp
    Logger.debug("sending mail %s%s from %s to %s", name, str(kwargs),
                 cfg['from'], cfg['to'])
    usr = cfg.get('user')
    pswd = cfg.get('password')
    msg = self.buildMsg(name, kwargs)
    dump = kwargs.get("dump")
    if dump:
        with open(dump, "w") as f:
            f.write(msg)
        Logger.info("Message %s saved to %s", name, dump)
        return
    cli = smtplib.SMTP(cfg['host'])
    if cfg.get('ssl') == True:
        cli.starttls()
    if usr and pswd:
        cli.ehlo()
        cli.login(usr, pswd)
    else:
        cli.helo()
    cli.sendmail(cfg['from'], kwargs.get('to') or cfg['to'], msg)
    cli.quit()
    Logger.info("Message %s sent", name)
Example #2
class Client(object):
    TOP_K = 3

    def __init__(self, config):
        # Use the passed-in config consistently instead of mixing it with the
        # module-level FLAGS object
        if not os.path.exists(config.log_path):
            os.mkdir(config.log_path)
        self.logger = Logger(config.log_path, 'client')

        self.stub = service_grpc.RecognitionStub(
            grpc.insecure_channel('{}:{}'.format(config.ip, config.port)))

    def RecognizeTest(self, filepath):
        self.logger.log('--- Performing recognition ---')
        self.logger.info('--- Performing recognition ---')

        request = service_pb.Request()

        # Extract data from image file
        with open(filepath, 'rb') as file:
            request.image.data = file.read()

        # Define file extension
        request.image.format = os.path.basename(filepath).split('.')[-1]

        start_time = time.time()

        # Perform request
        responses = self.stub.Recognize(iter([request]))

        results = None
        for response in responses:
            # status.code == 0 signals a server-side error
            if response.status.code == 0:
                print('Error: {}'.format(response.status.text))
                return False

            if not response.label:
                return False

            results = sorted(response.label,
                             key=lambda l: -l.probability)[:self.TOP_K]

        end_time = (time.time() - start_time)
        self.logger.log("--- Recognition took %s seconds ---" % end_time)
        self.logger.info("--- Recognition took %s seconds ---" % end_time)

        print('Results:')
        for result in results:
            print("'{}' with probability {}%.".format(
                result.text, math.floor(result.probability * 100)))
        return True
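A small sketch of how this client could be wired up, assuming FLAGS comes from the same parse_args() used in the other examples and carries ip, port, log_path and image_path (image_path, as in Example #7, names the file to classify):

# Wiring sketch; assumes FLAGS provides ip, port, log_path and image_path.
if __name__ == '__main__':
    client = Client(FLAGS)
    if not client.RecognizeTest(FLAGS.image_path):
        print('Recognition failed; see the client log for details.')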
Example #3
def serve():
    logger = Logger(FLAGS.log_path, 'server')

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))

    service = Server(FLAGS)

    service_grpc.add_RecognitionServicer_to_server(service, server)

    server.add_insecure_port('[::]:{}'.format(FLAGS.port))

    logger.log("--- Server has been started... ---")
    logger.info("--- Server has been started... ---")

    server.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        server.stop(0)
Example #4
    (options, args) = optParser.parse_args()
    return options


# this is the main entry point of this script
if __name__ == "__main__":
    options = get_options()

    if not options.scenario_folder:
        sys.exit("You must specify the Scenario Folder using the '--scenario' option")

    Logger.set_globals(options.scenario_folder, 'desenv', logging.INFO, options.withpreemption)

    logger = Logger('Runner').get()

    logger.info(options)

    #scenario = options.scenario

    # this script has been called from the command line. It will start sumo as a
    # server, then connect and run
    if options.nogui:
        sumoBinary = checkBinary('sumo')
    else:
        sumoBinary = checkBinary('sumo-gui')

    # this is the normal way of using traci. sumo is started as a
    # subprocess and then the python script connects and runs
    #traci.start([sumoBinary, "-c", './' + scenario + '/osm.sumocfg',
    #                         "--tripinfo-output", './' + scenario + '/tripinfo.xml'])
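Once traci.start(...) has been issued, the usual pattern is to step the simulation until no vehicles remain and then close the connection. A minimal sketch using the standard TraCI calls, with the .sumocfg path as a placeholder:

# Sketch of the standard TraCI run loop; the scenario file path is a placeholder.
traci.start([sumoBinary, "-c", options.scenario_folder + "/osm.sumocfg",
             "--tripinfo-output", options.scenario_folder + "/tripinfo.xml"])
step = 0
while traci.simulation.getMinExpectedNumber() > 0:
    traci.simulationStep()  # advance the simulation by one time step
    step += 1
logger.info("Simulation finished after {} steps".format(step))
traci.close()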
Example #5
class Classifier:
    def __init__(self,
                 graph_path,
                 labels_path,
                 input_layer,
                 output_layer,
                 input_height=224,
                 input_width=224,
                 input_mean=0,
                 input_std=255):

        self.logger = Logger()
        """ -------------------------------------- Original session -------------------------------------- """

        start_time = time.time()

        # We load the protobuf file from disk and parse it to retrieve the deserialized graph_def
        with tf.gfile.GFile(graph_path, "rb") as f:
            self.graph = tf.Graph()
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        # Then we import graph_def into a new Graph and return it
        with self.graph.as_default():
            # The name argument would prefix every op/node in the graph;
            # since we load everything into a new graph, it is not needed here
            tf.import_graph_def(graph_def)

        # Create the session once so it can be reused when serving many images
        self.session = tf.Session(graph=self.graph)

        self.logger.log(
            "--- Deep Neural Network session initialization took %s seconds ---"
            % (time.time() - start_time))
        self.logger.info(
            "--- Deep Neural Network session initialization took %s seconds ---"
            % (time.time() - start_time))
        """ -------------------------------------- Input & output -------------------------------------- """
        # Access input and output nodes
        self.input_operation = self.graph.get_tensor_by_name('import/' +
                                                             input_layer)
        self.output_operation = self.graph.get_tensor_by_name('import/' +
                                                              output_layer)
        """ --------------------------------- Image preprocessing session ---------------------------------- """

        start_time = time.time()

        # Image processing graph
        self.image_graph = tf.Graph()
        with self.image_graph.as_default():
            self.image_path = tf.placeholder(tf.string)
            file_reader = tf.read_file(self.image_path, "file_reader")

            # Determine the image extension from the path (assumes a single '.')
            ext = tf.string_split([self.image_path], '.').values[1]

            def read_jpg(fr):
                return tf.image.decode_jpeg(fr, channels=3, name="jpeg_reader")

            def read_png(fr):
                return tf.image.decode_png(fr, channels=3, name="png_reader")

            def read_bmp(fr):
                return tf.image.decode_bmp(fr, name="bmp_reader")

            def read_gif(fr):
                return tf.image.decode_gif(fr, name="gif_reader")

            # Decode the image according to its extension
            image = tf.case(
                {
                    tf.equal(ext, tf.constant('jpg', dtype=tf.string)):
                    lambda: read_jpg(file_reader),
                    tf.equal(ext, tf.constant('png', dtype=tf.string)):
                    lambda: read_png(file_reader),
                    tf.equal(ext, tf.constant('bmp', dtype=tf.string)):
                    lambda: read_bmp(file_reader),
                    tf.equal(ext, tf.constant('gif', dtype=tf.string)):
                    lambda: read_gif(file_reader)
                },
                default=lambda: read_jpg(file_reader),
                exclusive=True)

            # Convert input image and transform to [0; 1) range
            if image.dtype != tf.float32:
                image = tf.image.convert_image_dtype(image, dtype=tf.float32)

            # central_crop does not work unless the tensor shape is set
            image.set_shape((None, None, None))

            # Incidentally, this central crop also improves overall accuracy
            image = tf.image.central_crop(image, central_fraction=0.875)

            # Resize the image to the specified height and width.
            if input_height and input_width:
                image = tf.expand_dims(image, 0)
                image = tf.image.resize_bilinear(image,
                                                 [input_height, input_width],
                                                 align_corners=False)

            # Transform to [-1; 1) range
            self.image_output = tf.multiply(tf.subtract(image, 0.5), 2.0)

            # float_caster = tf.cast(image, tf.float32)
            # dims_expander = tf.expand_dims(float_caster, 0)
            # resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
            # self.image_output = tf.divide(tf.subtract(resized, input_mean), input_std)

        self.image_sess = tf.Session(graph=self.image_graph)

        self.logger.log(
            "--- Image preprocessing session initialization took %s seconds ---"
            % (time.time() - start_time))
        self.logger.info(
            "--- Image preprocessing session initialization took %s seconds ---"
            % (time.time() - start_time))
        """ -------------------------------------- Labels loading -------------------------------------- """
        # Loading captions of labels
        self.labels = []
        self.load_labels(labels_path)

    # Launch image recognition session
    def recognize(self, image_path):

        start_time = time.time()

        data = self.read_tensor_from_image_file(image_path=image_path)

        self.logger.log("--- Image preprocessing took %s seconds ---" %
                        (time.time() - start_time))
        self.logger.info("--- Image preprocessing took %s seconds ---" %
                         (time.time() - start_time))

        start_time = time.time()

        probabilities = self.session.run(
            self.output_operation, feed_dict={self.input_operation: data})

        self.logger.log("--- DNN inference took %s seconds ---" %
                        (time.time() - start_time))
        self.logger.info("--- DNN inference took %s seconds ---" %
                         (time.time() - start_time))

        probabilities = np.squeeze(probabilities)

        return [(self.labels[i], prob) for i, prob in enumerate(probabilities)]

    # Launch image preprocessing session
    def read_tensor_from_image_file(self, image_path):
        return self.image_sess.run(self.image_output,
                                   feed_dict={self.image_path: image_path})

    # Load label captions from file (one "id: caption" entry per line)
    def load_labels(self, label_path):
        with open(label_path, 'r') as f:
            for line in f:
                self.labels.append(line.split(':')[1].strip())
        return self.labels
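Classifier.recognize() returns one (label, probability) pair per class, so selecting the most likely classes is a plain sort, mirroring the TOP_K selection in the gRPC client of Example #2. A usage sketch; the file paths and layer names below are placeholders:

# Usage sketch; graph/label paths and layer names are placeholders.
classifier = Classifier('output_graph.pb', 'labels.txt',
                        input_layer='input:0',
                        output_layer='final_result:0')

results = classifier.recognize('cat.jpg')
top3 = sorted(results, key=lambda r: -r[1])[:3]
for label, prob in top3:
    print("'{}' with probability {:.1f}%".format(label, prob * 100))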
Example #6
    instance_folder = os.path.join(options.scenario_folder, 'results')
    instance_folder = os.path.join(instance_folder, options.prefix)

    if not os.path.exists(instance_folder):
        os.makedirs(instance_folder, exist_ok=True)

    instance_opts = 'tripinfo_' + algorithm.instance_name()

    logfile = os.path.join(instance_folder, '{}.log'.format(instance_opts))

    Logger.set_globals(logfile, logging.INFO)

    logger = Logger('Runner').get()

    logger.info(options)

    trip_file = instance_folder + '/' + instance_opts + '.xml'

    statistics_values = StatisticsValues(options.scenario_folder,
                                         instance_folder, trip_file,
                                         options.algorithm, options.ev)

    if not statistics_values.skip_because_json_file(
            options.override, options.skip, algorithm.instance_name()):
        traci.start([
            sumoBinary, '-v', 'true', '-c',
            options.scenario_folder + '/osm-' + options.prefix + '.sumocfg',
            '--duration-log.statistics', '--tripinfo-output', trip_file,
            '--start', '--time-to-teleport', '300', '--seed',
            str(options.seedsumo), '--ignore-junction-blocker', '50'
Example #7
FLAGS = parse_args()

if __name__ == '__main__':

    if not os.path.exists(FLAGS.graph_path):
        raise FileNotFoundError('Graph file does not exist')

    if not os.path.exists(FLAGS.labels_path):
        raise FileNotFoundError('Labels file does not exist')

    if not os.path.exists(FLAGS.log_path):
        os.mkdir(FLAGS.log_path)
    logger = Logger(FLAGS.log_path, 'inference')

    start_time = time.time()
    classifier = Classifier(FLAGS.graph_path, FLAGS.labels_path,
                            FLAGS.input_layer, FLAGS.output_layer,
                            FLAGS.input_height, FLAGS.input_width,
                            FLAGS.input_mean, FLAGS.input_std)

    logger.log("--- Initialization took %s seconds ---" %
               (time.time() - start_time))
    logger.info("--- Initialization took %s seconds ---" %
                (time.time() - start_time))

    start_time = time.time()
    result = classifier.recognize(FLAGS.image_path)
    logger.log("--- %s seconds ---" % (time.time() - start_time))
    logger.info("--- %s seconds ---" % (time.time() - start_time))
Example #8
class Server(service_grpc.RecognitionServicer):
    def __init__(self, config):
        super(Server, self).__init__()

        self.logger = Logger(config.log_path, 'server')

        self.logger.log("--- Initialization started ---")
        self.logger.info("--- Initialization started ---")

        start_time = time.time()

        self.classifier = Classifier(config.graph_path, config.labels_path,
                                     config.input_layer, config.output_layer,
                                     config.input_height, config.input_width,
                                     config.input_mean, config.input_std)

        self.logger.log("--- Total initialization took %s seconds ---" %
                        (time.time() - start_time))
        self.logger.info("--- Total initialization took %s seconds ---" %
                         (time.time() - start_time))

    def Recognize(self, request_iterator, context):
        try:
            # Extract image data from the message
            request = service_pb.Request()
            request.image.data = b''

            for r in request_iterator:
                request.image.format = r.image.format

                if r.image.data:
                    request.image.data += r.image.data

            self.logger.log("--- Message has been successfully received ---")
            self.logger.info("--- Message has been successfully received ---")

            if request.image.format not in [
                    'jpg', 'jpeg', 'png', 'bmp', 'gif'
            ]:
                response = service_pb.Response()
                response.status.code = 0
                response.status.text = 'Not supported image format'

                self.logger.log("--- Not supported image format received ---")
                self.logger.info("--- Not supported image format received ---")

                return iter([response])

            filename = time.strftime("%Y-%m-%d %H:%M:%S")
            filepath = os.path.join(DIR, 'images',
                                    filename + '.' + request.image.format)

            with open(filepath, 'wb') as file:
                file.write(request.image.data)

            self.logger.log("--- Recognition started ---")
            self.logger.info("--- Recognition started ---")

            start_time = time.time()

            output = self.classifier.recognize(filepath)

            self.logger.log("--- Recognition took %s seconds ---" %
                            (time.time() - start_time))
            self.logger.info("--- Recognition took %s seconds ---" %
                             (time.time() - start_time))

            # Create and send the response
            response = service_pb.Response()
            response.status.code = 1
            response.status.text = 'Success'

            for l, p in output:
                label = response.label.add()
                label.code = 1
                label.text = l
                label.probability = p

            self.logger.log("--- Recognition response sent ---")
            self.logger.info("--- Recognition response sent ---")

            os.remove(filepath)

            return iter([response])
        except Exception as e:
            self.logger.log("--- Error: %s ---" % str(e))
            self.logger.info("--- Error: %s ---" % str(e))
            response = service_pb.Response()
            response.status.code = 0
            response.status.text = 'Error: %s' % str(e)
            return iter([response])
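Because Recognize concatenates image.data across the whole request iterator, a caller is free to stream a large image in several chunks instead of the single-message call used by RecognizeTest in Example #2. A hedged sketch of such a chunked upload; the chunk size and the generator are illustrative, and stub is assumed to be the same RecognitionStub created by the client:

# Chunked-upload sketch; CHUNK_SIZE and the generator are illustrative only.
CHUNK_SIZE = 64 * 1024

def request_chunks(filepath):
    ext = os.path.basename(filepath).split('.')[-1]
    with open(filepath, 'rb') as f:
        while True:
            data = f.read(CHUNK_SIZE)
            if not data:
                break
            request = service_pb.Request()
            request.image.format = ext
            request.image.data = data
            yield request

responses = stub.Recognize(request_chunks('cat.jpg'))
for response in responses:
    print(response.status.code, response.status.text)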