Example #1
    def run(self):
        t0 = time.time()
        while True:
            time.sleep(Constants.LOOP_DELAY)
            t1 = time.time()
            try:

                if (t1 - t0) >= 10:
                    t0 = t1
                    # self.logg.log("self test")
                    if not self.mqtt_client.connected:
                        self.logg.log("disconnect detected, reconnect")
                        try:
                            self.mqtt_client.connect()
                        except:
                            self.logg.log(
                                Utils.format_exception(
                                    self.__class__.__name__))
                    self.mqtt_client.ping("self test")

                if not self.sensor_data_q.empty():
                    recv: MQTTMessage = self.sensor_data_q.get(block=False)
                    self.update_sensor_data(recv.id, recv)
                    if Constants.conf["ENV"]["LOG_SENSOR_DATA"]:
                        self.logg.log(recv.topic + " " + str(recv.id) + " " +
                                      str(recv.data))

            except:
                self.logg.log(Utils.format_exception(self.__class__.__name__))
Example #2
    def do_action(self, conf):
        if self.action == "create-project":
            return Project(conf, self.projectname).create_project()
        elif self.action == "delete-project":
            return Project(conf, self.projectname).delete_project()
        elif self.action == "list":
            conf.list_config()
        elif self.action == "create-sqs":
            return Queue(conf, self.sqsname).create_queue()
        elif self.action == "delete-sqs":
            return Queue(conf, self.sqsname).delete_queue()
        elif self.action == "deploy-project":
            return Project(conf,
                           self.projectname).deploy_project(self.rolename)
        elif self.action == "import-project":
            return Project(conf, self.projectname).import_project()
        elif self.action == "undeploy-project":
            return Project(conf, self.projectname).undeploy_project()
        elif self.action == "deploy-lambda-proxy":
            return Ltklambdaproxy(conf, self.lambdaname).deploy_lambda_proxy(
                self.rolename, self.sqsname)
        elif self.action == "undeploy-lambda-proxy":
            return Ltklambdaproxy(conf,
                                  self.lambdaname).undeploy_lambda_proxy()
        elif self.action == "receiver":
            try:
                Receiver(conf, self.sqsname, self.projectname).receiver()
            except KeyboardInterrupt:
                self.log.info("Stopping the receiver.")
        elif self.action == "tail":
            try:
                Tail(conf, self.lambdaname).tail_log()
            except KeyboardInterrupt:
                self.log.info("Stopping the tail.")
        elif self.action == "set-default-role":
            return Role(conf, self.rolename).set_default_role()
        elif self.action == "unset-default-role":
            return Role(conf, "bypassvalidator").unset_default_role()
        elif self.action == "create-star":
            Utils.define_lambda_role(conf, self.rolename)
            queue_name = Utils.append_fifo_in_queue(self.projectname +
                                                    "_queue")
            conf = Project(conf, self.projectname).create_project()
            conf = Queue(conf, queue_name).create_queue()
            conf = Ltklambdaproxy(conf, self.projectname +
                                  "_proxy").deploy_lambda_proxy(
                                      self.rolename, queue_name)
            return Project(conf,
                           self.projectname).deploy_project(self.rolename)
        elif self.action == "delete-all-configuration":
            conf.delete_all_config()
        else:
            Help.print_help("Invalid command")

        return conf
Example #3
    def run(self):
        n_max_log = 50
        dt_log = 10
        n_max_lines_file = 100000

        msg = "[Logg] " + "running"
        print(msg)

        first = True
        cnt = 0
        cnt_file = 0

        t1_log = time.time()
        buf = []

        while True:
            time.sleep(0.01)
            t1 = time.time()

            if not self.q.empty():
                dtime = datetime.datetime.now()
                crt_time = dtime.strftime("%H:%M:%S.%f")
                p = crt_time + ': ' + str(self.q.get(block=False))
                print(p)

                p = str(dtime.date()) + ' ' + p

                buf.append(p)
                cnt += 1
                cnt_file += 1

                if (cnt >= n_max_log) or (((t1 - t1_log) >= dt_log) and
                                          (cnt > 0)):
                    open_style = "a"
                    # split into multiple files if log becomes too long
                    if cnt_file >= n_max_lines_file:
                        cnt_file = 0
                        first = True

                    if first:
                        open_style = "w"
                        first = False
                    try:
                        with open(self.folder + "/" + self.filename,
                                  open_style) as myfile:
                            for e in buf:
                                myfile.write(e + '\r\n')
                            buf = []
                            cnt = 0
                    except:
                        Utils.print_exception(self.__class__.__name__)
Example #4
    def reset_votes(self, bot, job):
        """Reset votes to zero"""
        # Reset votes
        self.votes = []
        # Send Arieeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeel
        bot.send_message(chat_id='-1001170387616',
                         text=('Ari' + ('e' * random.randint(8, 25) + 'l')))
        # Remove actual job from job queue
        job.schedule_removal()
        # Add new callback for tomorrow with a random hour
        random_time = Utils().random_time()
        tomorrow_day = Utils().tomorrow_day()
        job.job_queue.run_daily(self.reset_votes, random_time,
                                (tomorrow_day, ))
Example #5
    def run(self):
        t0 = time.time()
        while True:
            time.sleep(Constants.LOOP_DELAY)
            t1 = time.time()
            try:
                if (t1 - t0) >= self.default_log_rate or self.logstart:
                    self.logstart = False
                    t0 = t1
                    self.logg.log("Requesting ext api")
                    self.request_data()
                    # self.log_sensor_data()

            except:
                Utils.print_exception(self.__class__.__name__)
Example #6
    def connect(self):
        self.logg.log("connecting to db")
        try:
            dbconf = Constants.conf["ENV"]["DB"]
            host = dbconf["HOST"]
            user = dbconf["USER"]
            password = dbconf["PASS"]
            dbname = dbconf["NAME"]

            # db type e.g. mysql, postgresql
            dbtype = dbconf["TYPE"]

            if dbtype == "MYSQL":
                self.connection = pymysql.connect(
                    host=host,
                    user=user,
                    password=password,
                    database=dbname,
                    cursorclass=pymysql.cursors.DictCursor
                )
                # self.cursor = self.connection.cursor(pymysql.cursors.DictCursor)
                self.cursor = self.connection.cursor()
            else:
                pass

            self.connected = True
            self.logg.log("connected to db")
        except:
            self.logg.log(Utils.format_exception(self.__class__.__name__))
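
For reference, a hedged illustration (not taken from the project's actual config) of the shape that connect() reads via Constants.conf["ENV"]["DB"]; only the key names are implied by the code above, the values here are placeholders:

# assumed example configuration; key names match what connect() reads
EXAMPLE_DB_CONF = {
    "ENV": {
        "DB": {
            "TYPE": "MYSQL",       # db type, e.g. MYSQL or POSTGRESQL
            "HOST": "localhost",
            "USER": "iot",
            "PASS": "secret",
            "NAME": "sensordb",
        }
    }
}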
Example #7
    def create_sensor(self, sensor):
        sdata: MQTTMessage = sensor.current_data
        # self.logg.log(sdata.__dict__)
        if not sdata:
            return None

        self.logg.log("create sensor for topic: " + sensor.topic_name +
                      " (code " + str(sensor.topic_code) + ")")
        self.cursor.execute(
            'select * from topic where name=%s', (sensor.topic_name,))
        topic = self.cursor.fetchone()
        self.logg.log(
            "topic[" + str(sensor.topic_name) + "]: " + str(topic))
        # self.logg.log(topic["id"])
        sensor.id = Utils.get_sensor_id_encoding(
            sensor.raw_id, topic["code"])
        sql = "INSERT INTO sensor (sensor_id, log_rate, topic_code, timestamp) VALUES (%s, %s, %s, %s)"
        sensor.log_rate = topic["log_rate"]
        sensor.topic_code = topic["code"]

        ts = datetime.now()

        self.logg.log("sensor: " + str(sensor.__dict__))
        params = (sensor.id, sensor.log_rate, topic["code"], ts)
        self.logg.log(sql + str(params))
        self.cursor.execute(sql, params)
        # commit the changes to the database
        self.connection.commit()
        # close communication with the database
        # self.cursor.close()
        return sensor
Example #8
    def load_sensors(self):
        self.logg.log("load sensors")
        try:
            self.db = Database.instance()
            sensors = self.db.get_sensors()
            self.logg.log(sensors)
            t_create = time.time()
            if sensors is not None:
                for s in sensors:
                    s1: Sensor = Sensor()
                    s1.id = s["sensor_id"]
                    s1.log_rate = s["log_rate"]
                    s1.topic_name = s["topic_name"]
                    s1.topic_code = s["topic_code"]
                    s1.type = s["sensor_type_code"]
                    s1.ts = t_create
                    s1.log_ts = t_create
                    # self.logg.log(json.dumps(s1.__dict__))
                    self.sensors.append(s1)

            topics = self.db.get_topics()

            if topics is not None:
                for t in topics:
                    t1: MQTTTopic = MQTTTopic(t)
                    self.topics.append(t1)

            self.logg.log(self.topics)
            self.logg.log(self.sensors)
        except:
            self.logg.log(Utils.format_exception(self.__class__.__name__))
Example #9
    def on_disconnect(client, userdata, rc):
        # nested MQTT callback: `self` is captured from the enclosing scope
        self.logg.log("client: " + str(client) + " disconnected")
        self.connected = False
        try:
            if self.client:
                self.client.loop_stop()
        except:
            self.logg.log(Utils.format_exception(self.__class__.__name__))
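
The callback above takes the standard paho-mqtt (client, userdata, rc) arguments yet also references self, so it is presumably defined inside a method and registered as a closure. A minimal sketch of that wiring; the class and method names here are assumptions, not the project's actual ones:

import paho.mqtt.client as mqtt


class MQTTClientWrapperSketch:
    # hypothetical wrapper showing how the nested callback could be registered
    def __init__(self, logg):
        self.logg = logg
        self.connected = False
        self.client = None

    def create_client(self):
        self.client = mqtt.Client()

        def on_disconnect(client, userdata, rc):
            # closes over `self`, same shape as the snippet above
            self.logg.log("client: " + str(client) + " disconnected")
            self.connected = False

        self.client.on_disconnect = on_disconnect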
Example #10
def run1():
    """
    Part 1
    """
    filepath = 'data/day03/input.plain'
    lines = Utils.load_items(filepath)
    total = how_many_overlaps(lines)
    print(total)
Example #11
    def wrap(self, *args, **kwargs):
        # print("inside wrap")
        self.check_connect()
        try:
            return func(self, *args, **kwargs)
        except pymysql.Error as e:
            self.logg.log(Utils.format_exception(self.__class__.__name__))
            if 'MySQL server has gone away' in str(e):
                # reconnect MySQL
                self.logg.log("attempt reconnect")
                self.connect()
            else:
                # No need to retry for other reasons
                return None
        except Exception:
            self.logg.log(Utils.format_exception(self.__class__.__name__))
            return None
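
Since wrap references a free variable func, it is presumably the inner function of a decorator that guards database calls and reconnects on failure. A minimal sketch of what the enclosing decorator could look like; the name reconnect_on_failure and the use of functools.wraps are assumptions, and the body relies on the same check_connect / connect / logg / Utils helpers as the snippet above:

import functools

import pymysql


def reconnect_on_failure(func):
    # hypothetical enclosing decorator for the `wrap` body shown above
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        self.check_connect()
        try:
            return func(self, *args, **kwargs)
        except pymysql.Error as e:
            self.logg.log(Utils.format_exception(self.__class__.__name__))
            if 'MySQL server has gone away' in str(e):
                # reconnect, then let the caller retry on the next call
                self.logg.log("attempt reconnect")
                self.connect()
            else:
                # no need to retry for other reasons
                return None
        except Exception:
            self.logg.log(Utils.format_exception(self.__class__.__name__))
            return None

    return wrap


# assumed usage on a Database method:
#
# @reconnect_on_failure
# def get_sensors(self):
#     self.cursor.execute("select * from sensor")
#     return self.cursor.fetchall()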
Example #12
def run():
    """
    Part 2
    """
    filepath = 'data/input.plain'
    lines = Utils.load_items(filepath)
    # non_overlapped_coords = get_non_overlapped_coords(lines)
    non_overlapped_claim = get_non_overlapped_claim(lines)
    print(non_overlapped_claim.id)
Example #13
def get_sensors():
    try:
        data = db.get_sensors()
        return json.dumps(data)
    except:
        logg.log(Utils.format_exception(""))
        return json.dumps({
            "status": False
        })
Example #14
def run3():
    filepath = 'data/day02/input.plain'
    lines = Utils.load_items(filepath)
    freqs = []
    for line in lines:
        freq = freq_process(line)
        freqs.append(freq)
    result = check_process(freqs)
    print(result)
Example #15
def get_sensor_data_csv():
    try:
        id = request.args.get('id')
        chan = request.args.get('chan')
        limit = request.args.get('limit')
        file = request.args.get('file')
        data = db.get_sensor_data(id, chan, limit)
        # create a dynamic csv or file here using `StringIO`
        # (instead of writing to the file system)

        Utils.log(data)
        strIO = io.BytesIO()

        strdata = db.extract_csv_multichan(data)
        strIO.write(strdata.encode("utf-8"))

        # strIO.write(data)
        strIO.seek(0)

        if file:
            return send_file(strIO,
                             mimetype='text/csv',
                             attachment_filename='downloadFile.csv',
                             as_attachment=True)
        else:
            # assume bytes_io is a `BytesIO` object
            byte_str = strIO.read()

            # Convert to a "unicode" object
            # Or use the encoding you expect
            text_obj = byte_str.decode('UTF-8')

            # return json.dumps({
            #     "status": True,
            #     "data": text_obj
            # })
            return text_obj
    except:
        logg.log(Utils.format_exception(""))
        return json.dumps({
            "status": False
        })
Example #16
def run():
    filepath = 'data/input.plain'
    ids = Utils.load_items(filepath)
    for id1, id2 in product(ids, ids):
        if id1 == id2:
            continue
        elif checkrep_process(id1, id2):
            print(f'id1: {id1}, id2: {id2}')
            cc = commonchars_process(id1, id2)
            print(cc)
            break
Example #17
def get_sensor_data():
    try:
        id = request.args.get('id')
        chan = request.args.get('chan')
        limit = request.args.get('limit')
        data = db.get_sensor_data(id, chan, limit)
        return json.dumps(data, indent=4, sort_keys=True, default=str)
    except:
        logg.log(Utils.format_exception(""))
        return json.dumps({
            "status": False
        })
Example #18
        def on_message(client, userdata, message):
            try:
                # self.logg.log("message received, topic: ", message.topic, ", message: ", str(message.payload.decode("utf-8")))
                # self.logg.log("client: ", client)
                # self.logg.log("message topic =", message.topic)
                # self.logg.log("message qos =", message.qos)
                # self.logg.log("message retain flag =", message.retain)

                raw_data = str(message.payload.decode("utf-8"))
                msg = MQTTMessage()

                topic_elems = message.topic.split("/")
                n_topic_elems = len(topic_elems)

                raw_data_split = raw_data.split(",")

                # msg.topic = "/".join(topic_elems[0:n_topic_elems-2])
                msg.topic = "/".join(topic_elems)

                # self.logg.log(msg.topic)

                # the last-1 item is the sensor id
                # the last item is the input/output selector (cmd, sns)

                # msg.id = int(topic_elems[n_topic_elems-2])

                # extract sensor id, remove from data array for further processing
                msg.id = int(raw_data_split[0])

                # if len(raw_data_split) == 1:
                #     msg.data = None
                # elif len(raw_data_split) == 2:
                #     msg.data = raw_data_split[1]
                # else:
                #     msg.data = ",".join(raw_data_split[1:])

                msg.data = raw_data_split[1:]

                msg.ts = datetime.now()
                # fixed type at the moment
                msg.type = 1

                # TODO: use a different topic for each sensor type, e.g. wsn/indoor, wsn/outdoor
                # TODO: only log the known sensors in the db and filter them by the topic ID

                if not self.sensor_data_q.full():
                    self.sensor_data_q.put(msg)
            except:
                self.logg.log(Utils.format_exception(self.__class__.__name__))
Example #19
    def add(self, keypoints, descriptions, distance_threshold=5.0):
        points = Utils.kp2np(keypoints)
        if len(self.keypoints) == 0:
            self.keypoints = keypoints
            self.points = points
            self.descriptions = descriptions
            return self

        _matches = matcher.radiusMatch(points, self.points,
                                       maxDistance=distance_threshold)
        status = np.array([1 if len(match) == 0 else 0 for match in _matches])

        self.keypoints += [kp for kp, s in zip(keypoints, status) if s > 0]
        self.points = np.concatenate([self.points, points[status > 0]])
        self.descriptions = np.concatenate(
            [self.descriptions, descriptions[status > 0]])
        return self
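
The snippet depends on a module-level matcher and a Utils.kp2np helper that are not shown. A hedged sketch of plausible definitions, assuming an OpenCV brute-force matcher is run directly on (N, 2) float32 point coordinates so that maxDistance acts as a pixel radius:

import cv2
import numpy as np

# assumed: L2 brute-force matcher over float32 coordinate arrays
matcher = cv2.BFMatcher(cv2.NORM_L2)


def kp2np(keypoints):
    # assumed helper: list of cv2.KeyPoint -> float32 array of (x, y) coords
    return np.array([kp.pt for kp in keypoints], dtype=np.float32)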
Example #20
def get_sensor_data_plot():
    try:
        id = request.args.get('id')
        chan = request.args.get('chan')
        limit = request.args.get('limit')
        data = db.get_sensor_data(id, chan, limit)
        # Utils.log(data)

        if not chan:
            timeseries = []
            for i in range(len(data)):
                timeseries1 = Timeseries()
                timeseries1.x = []
                timeseries1.y = []

                for (i, row) in enumerate(data):
                    timeseries1.x.append(row["timestamp"])
                    timeseries1.y.append(row["value"])

                timeseries.append(timeseries1)

            strIO = graph.plot_timeseries_multi(
                timeseries, "sensor " + id, "time", "value")
        else:
            timeseries = Timeseries()
            timeseries.x = []
            timeseries.y = []
            for (i, row) in enumerate(data):
                timeseries.x.append(row["timestamp"])
                timeseries.y.append(row["value"])
            strIO = graph.plot_timeseries(
                timeseries, "sensor " + id + " chan " + chan, "time", "value")

        return strIO
        # Utils.log(strIO)
        # # attachment_filename = 'plot.png',
        # # as_attachment = True
        # return send_file(strIO,
        #                  mimetype='image/jpg',
        #                  attachment_filename='logo.png',
        #                  )
    except:
        logg.log(Utils.format_exception(""))
        return json.dumps({
            "status": False
        })
Example #21
    def get_args(self, args):
        try:
            opts, args = getopt.getopt(
                args, "p:q:l:r:",
                ["projectname=", "sqsname=", "lambdaname=", "rolename="])
        except getopt.GetoptError:
            Help.print_help("Getopterror")
            exit(1)
        for opt, arg in opts:
            if opt in ("-p", "--projectname"):
                self.projectname = arg
            elif opt in ("-q", "--sqsname"):
                self.sqsname = Utils.append_fifo_in_queue(arg)
            elif opt in ("-r", "--rolename"):
                self.rolename = arg
            elif opt in ("-l", "--lambdaname"):
                self.lambdaname = arg
Example #22
def run1():
    """
    minutes, guard #10
    """
    filepath = 'data/input-d-10.plain'
    lines = Utils.load_items(filepath)

    entries = []

    for line in lines:
        tokens = re.split(',', line)
        entry = {
            'date': tokens[0],
            'time': tokens[1],
            'ocurrence': tokens[2],
        }
        entries.append(entry)

    freqs = Reporter().get_freqs_by_date(entries)

    print(freqs)
Example #23
    def get_subcategories(self, category):

        logger = logging.getLogger('infoLogger')
        log_msg = '[{id}] {name}'.format(**category)
        logger.info(log_msg)

        url = self.base_path + '/' + category['url']
        content = Utils().get_url_content(url)

        soup = self.__get_soup(content)

        pattern = 'body > div[id=main] div.fill_menu_filters div.child-menu-container div.container ul'
        subcategories_html_block = soup.select(pattern)[0]

        items = subcategories_html_block.select('li a')

        categories = self.__get_subcategories_from_links(items, category['id'])

        log_msg = 'Done [{id}] {name}'.format(**category)
        logger.info(log_msg)

        return categories
Example #24
def run2():
    """
    minutes, guard #10
    """
    filepath = 'data/input-m-99.plain'
    lines = Utils.load_items(filepath)

    entries = []

    for line in lines:
        tokens = re.split(',', line)
        entry = {
            'time': tokens[0],
            'ocurrence': tokens[1],
        }
        entries.append(entry)

    print(entries)

    freqs = Reporter.get_m_freqs(entries)

    print(freqs)
    print(len(freqs))
Example #25
    def create_message_model(self, data):
        msg = MQTTMessage()
        msg.data = data
        msg.ts = datetime.now()
        return msg

    def run(self):
        t0 = time.time()
        while True:
            time.sleep(Constants.LOOP_DELAY)
            t1 = time.time()
            try:
                if (t1 - t0) >= self.default_log_rate or self.logstart:
                    self.logstart = False
                    t0 = t1
                    self.logg.log("Requesting ext api")
                    self.request_data()
                    # self.log_sensor_data()

            except:
                Utils.print_exception(self.__class__.__name__)


if __name__ == '__main__':
    Constants.load()
    Utils.log("config loaded")
    db = Database.instance()
    test = ExtApi.instance()
    print("requesting data")
    test.connect()
    test.request_data()
Example #26
def train():
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    with tf.Session() as sess:
        # Input placeholders
        with tf.name_scope('input'):
            x = tf.placeholder(tf.float32, [FLAGS.batch_size, 784],
                               name='x-input')
            y_ = tf.placeholder(tf.float32, [FLAGS.batch_size, 10],
                                name='y-input')
            keep_prob = tf.placeholder(tf.float32)

        # Model definition along with training and relevances
        with tf.variable_scope('model'):
            net = nn()
            y = net.forward(x)

        with tf.variable_scope('relevance'):
            if FLAGS.relevance:
                LRP = net.lrp(y, FLAGS.relevance_method, 1e-8)

                # LRP layerwise
                relevance_layerwise = []
                # R = y
                # for layer in net.modules[::-1]:
                #     R = net.lrp_layerwise(layer, R, 'simple')
                #     relevance_layerwise.append(R)
            else:
                LRP = []
                relevance_layerwise = []
        # Accuracy computation
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

        # Merge all the summaries and write them out
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                             sess.graph)
        test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')

        tf.global_variables_initializer().run()

        utils = Utils(sess, FLAGS.checkpoint_dir)
        if FLAGS.reload_model:
            utils.reload_model()

        trainer = net.fit(output=y,
                          ground_truth=y_,
                          loss='softmax_crossentropy',
                          optimizer='adam',
                          opt_params=[FLAGS.learning_rate])

        uninit_vars = set(tf.global_variables()) - set(
            tf.trainable_variables())
        tf.variables_initializer(uninit_vars).run()

        # iterate over train and test data
        for i in range(FLAGS.max_steps):
            if i % FLAGS.test_every == 0:
                #pdb.set_trace()
                d = feed_dict(mnist, False)
                test_inp = {x: d[0], y_: d[1], keep_prob: d[2]}
                summary, acc, relevance_test, op, rel_layer = sess.run(
                    [merged, accuracy, LRP, y, relevance_layerwise],
                    feed_dict=test_inp)
                test_writer.add_summary(summary, i)
                print('Accuracy at step %s: %f' % (i, acc))

            else:
                d = feed_dict(mnist, True)
                inp = {x: d[0], y_: d[1], keep_prob: d[2]}
                summary, _, relevance_train, op, rel_layer = sess.run(
                    [merged, trainer.train, LRP, y, relevance_layerwise],
                    feed_dict=inp)
                train_writer.add_summary(summary, i)

        # relevances plotted with visually pleasing color schemes
        if FLAGS.relevance:
            # plot test images with relevances overlaid
            # dict views are not indexable in Python 3; take the first key (x)
            images = test_inp[list(test_inp.keys())[0]].reshape(
                [FLAGS.batch_size, 28, 28, 1])
            images = (images + 1) / 2.0
            plot_relevances(
                relevance_test.reshape([FLAGS.batch_size, 28, 28, 1]), images,
                test_writer)
            # plot train images with relevances overlaid
            # images = inp[inp.keys()[0]].reshape([FLAGS.batch_size,28,28,1])
            # images = (images + 1)/2.0
            # plot_relevances(relevance_train.reshape([FLAGS.batch_size,28,28,1]), images, train_writer )

        train_writer.close()
        test_writer.close()
Example #27
def train():
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    with tf.Session() as sess:
        # Input placeholders
        with tf.name_scope('input'):
            x = tf.placeholder(tf.float32, [FLAGS.batch_size, 784],
                               name='x-input')

        # Model definition along with training and relevances
        with tf.variable_scope('model'):
            with tf.variable_scope('discriminator'):
                D = discriminator()
                D1 = D.forward(
                    x)  # Run the Discriminator with the True data distribution
                D_params_num = len(tf.trainable_variables())
            with tf.variable_scope('generator'):
                G = generator()
                Gout = G.forward(
                    tf.random_normal([FLAGS.batch_size, FLAGS.input_size
                                      ]))  # Run the generator to get Fake data

            with tf.variable_scope('discriminator') as scope:
                scope.reuse_variables()
                D2 = D.forward(
                    Gout
                )  # Run the Discriminator with the Fake data distribution

            # Image summaries
            packed = tf.concat(
                [Gout, tf.reshape(x,
                                  Gout.get_shape().as_list())], 2)
            tf.summary.image('Generated-Original',
                             packed,
                             max_outputs=FLAGS.batch_size)
            #tf.summary.image('Original', tf.reshape(x, Gout.get_shape().as_list()))

        # Extract respective parameters
        total_params = tf.trainable_variables()
        D_params = total_params[:D_params_num]
        G_params = total_params[D_params_num:]

        with tf.variable_scope('Loss'):
            # Compute every loss
            D1_loss, D2_loss = compute_D_loss(D1, D2)
            D_loss = tf.reduce_mean(D1_loss + D2_loss)
            G_loss = compute_G_loss(D2)
            # Loss summaries
            tf.summary.scalar('D_real', tf.reduce_mean(D1_loss))
            tf.summary.scalar('D_fake', tf.reduce_mean(D2_loss))
            tf.summary.scalar('D_loss', tf.reduce_mean(D_loss))
            tf.summary.scalar('G_loss', tf.reduce_mean(G_loss))

        # Create Trainers (Optimizers) for each network giving respective loss and weight parameters
        with tf.variable_scope('Trainer'):
            D_trainer = D.fit(loss=D_loss,
                              optimizer='adam',
                              opt_params=[FLAGS.D_learning_rate, D_params])
            G_trainer = G.fit(loss=G_loss,
                              optimizer='adam',
                              opt_params=[FLAGS.G_learning_rate, G_params])

        # create summaries files for D and G -
        # this is the main summaries file
        # it will store all the variables mentioned above for creating summaries
        merged = tf.summary.merge_all()
        D_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/D',
                                         sess.graph)
        G_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/G',
                                         sess.graph)

        # Init all variables
        tf.global_variables_initializer().run()

        utils = Utils(sess, FLAGS.checkpoint_dir)
        if FLAGS.reload_model:
            utils.reload_model()

        for i in range(FLAGS.max_steps):
            d = feed_dict(mnist, True)
            inp = {x: d[0]}
            # Run D once and G twice
            D_summary, _, dloss, dd1, dd2 = sess.run(
                [merged, D_trainer.train, D_loss, D1_loss, D2_loss],
                feed_dict=inp)
            G_summary, _, gloss, gen_images = sess.run(
                [merged, G_trainer.train, G_loss, Gout], feed_dict=inp)
            G_summary, _, gloss, gen_images = sess.run(
                [merged, G_trainer.train, G_loss, Gout], feed_dict=inp)

            if i % 100 == 0:
                print(gloss.mean(), dloss.mean())

            # Add summaries
            D_writer.add_summary(D_summary, i)
            G_writer.add_summary(G_summary, i)

        # save model if required
        if FLAGS.save_model:
            utils.save_model()

        D_writer.close()
        G_writer.close()
Example #28
ce = Colors.COLOR_END
# -> Patterns
plus = Patterns.PLUS
minus = Patterns.MINUS
astk = Patterns.ASTK

# --------------------------------------------------------------------
"""
Get input for:
- IP Address
- Port number
"""
try:
    hostname = raw_input(ys + "Enter IP Address: " + ce)
    # Check input
    Utils.check_ip_address_input(str(hostname))
    pass
except KeyboardInterrupt:
    Utils.print_exit_message_simple()
    sys.exit(1)

try:
    port_number = raw_input(ys + "Enter port number (1-65,535): " + ce)
    # Check input
    Utils.check_port_number_input(int(port_number))
    pass
except KeyboardInterrupt:
    Utils.print_exit_message_simple()
    sys.exit(1)
# ---------------------------------------------------------------------
"""
Example #29
def train():
    # Import data
    # train_file_path = str(FLAGS.image_dim)+"_train_y.csv"
    # test_file_path = str(FLAGS.image_dim)+"_test_y.csv"

    # mnist = TFLData( (train_file_path,test_file_path) )

    train_file_path = os.path.join("mnist_csvs", "mnist_train.csv")
    test_file_path = os.path.join("mnist_csvs", "mnist_test.csv")

    mnist = MnistData((train_file_path, test_file_path, (1000, 1000)))

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:

        #with tf.Session() as sess:
        # Input placeholders
        with tf.name_scope('input'):
            x = tf.placeholder(tf.float32,
                               [None, FLAGS.image_dim * FLAGS.image_dim],
                               name='x-input')
            y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
            keep_prob = tf.placeholder(tf.float32)

        with tf.variable_scope('model'):
            net = nn()
            inp = tf.pad(
                tf.reshape(
                    x,
                    [FLAGS.batch_size, FLAGS.image_dim, FLAGS.image_dim, 1]),
                [[0, 0], [2, 2], [2, 2], [0, 0]])
            op = net.forward(inp)
            y = tf.squeeze(op)

            trainer = net.fit(output=y,
                              ground_truth=y_,
                              loss='softmax_crossentropy',
                              optimizer='adam',
                              opt_params=[FLAGS.learning_rate])
        with tf.variable_scope('relevance'):
            if FLAGS.relevance:
                LRP = net.lrp(op, FLAGS.relevance_method, 1e-8)

                # LRP layerwise
                relevance_layerwise = []
                # R = y
                # for layer in net.modules[::-1]:
                #     R = net.lrp_layerwise(layer, R, 'simple')
                #     relevance_layerwise.append(R)

            else:
                LRP = []
                relevance_layerwise = []

        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)),
                        tf.float32))
        tf.summary.scalar('accuracy', accuracy)

        # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                             sess.graph)
        test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')

        tf.global_variables_initializer().run()

        utils = Utils(sess, FLAGS.checkpoint_reload_dir)
        if FLAGS.reload_model:
            utils.reload_model()

        for i in range(FLAGS.max_steps):
            if i % FLAGS.test_every == 0:  # test-set accuracy
                d = feed_dict(mnist, False)
                test_inp = {x: d[0], y_: d[1], keep_prob: d[2]}
                #pdb.set_trace()
                summary, acc, relevance_test, rel_layer = sess.run(
                    [merged, accuracy, LRP, relevance_layerwise],
                    feed_dict=test_inp)

                print_y = tf.argmax(y, 1)
                y_labels = print_y.eval(feed_dict=test_inp)

                test_writer.add_summary(summary, i)
                print('Accuracy at step %s: %f' % (i, acc))
                # print([np.sum(rel) for rel in rel_layer])
                # print(np.sum(relevance_test))

                # save model if required
                if FLAGS.save_model:
                    utils.save_model()

            else:
                d = feed_dict(mnist, True)
                inp = {x: d[0], y_: d[1], keep_prob: d[2]}
                summary, _, relevance_train, op, rel_layer = sess.run(
                    [merged, trainer.train, LRP, y, relevance_layerwise],
                    feed_dict=inp)
                train_writer.add_summary(summary, i)

        # relevances plotted with visually pleasing color schemes
        if FLAGS.relevance:
            #pdb.set_trace()
            relevance_test = relevance_test[:, 2:FLAGS.image_dim + 2,
                                            2:FLAGS.image_dim + 2, :]
            # plot test images with relevances overlaid
            # dict views are not indexable in Python 3; take the first key (x)
            images = test_inp[list(test_inp.keys())[0]].reshape(
                [FLAGS.batch_size, FLAGS.image_dim, FLAGS.image_dim, 1])
            #images = (images + 1)/2.0
            plot_relevances(
                relevance_test.reshape(
                    [FLAGS.batch_size, FLAGS.image_dim, FLAGS.image_dim, 1]),
                images, test_writer, y_labels)

            # plot train images with relevances overlaid
            # relevance_train = relevance_train[:,2:30,2:30,:]
            # images = inp[inp.keys()[0]].reshape([FLAGS.batch_size,28,28,1])
            # plot_relevances(relevance_train.reshape([FLAGS.batch_size,28,28,1]), images, train_writer )

        train_writer.close()
        test_writer.close()
Example #30
        # # as_attachment = True
        # return send_file(strIO,
        #                  mimetype='image/jpg',
        #                  attachment_filename='logo.png',
        #                  )
    except:
        logg.log(Utils.format_exception(""))
        return json.dumps({
            "status": False
        })


if __name__ == '__main__':

    Constants.load()
    Utils.log("config loaded")

    logg = Logg.instance()
    logg.start()

    mqtt_manager = MQTTManager()

    mqtt_manager.create_client()
    mqtt_manager.start()

    if Constants.conf["ENV"]["ENABLE_DB"]:
        Utils.log("enable db")
        db = Database.instance()
        # db.connect()
        mqtt_manager.load_sensors()
        db.run_process()