Example #1
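    # Either restore a previously saved model from LOAD_PATH, or build the
    # vocabularies from the pickled training dictionaries and construct a new model.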
    def __init__(self, config):
        self.config = config

        if config.LOAD_PATH:
            self.model = None
            self.load_model(self.config.LOAD_PATH)
        else:
            with open('{}.dict.c2s'.format(config.TRAIN_PATH), 'rb') as file:
                subtoken_to_count = pickle.load(file)
                node_to_count = pickle.load(file)
                target_to_count = pickle.load(file)
                max_contexts = pickle.load(file)
                self.num_training_examples = pickle.load(file)
                print('Num training samples: {0}'.format(
                    self.num_training_examples))
                print('Dictionaries loaded.')

            if self.config.DATA_NUM_CONTEXTS <= 0:
                self.config.DATA_NUM_CONTEXTS = max_contexts
            self.subtoken_to_index, self.index_to_subtoken, self.subtoken_vocab_size = \
                Common.load_vocab_from_dict(subtoken_to_count, add_values=[Common.PAD, Common.UNK],
                                            max_size=config.SUBTOKENS_VOCAB_MAX_SIZE)
            print('Loaded subtoken vocab. size: %d' % self.subtoken_vocab_size)

            self.target_to_index, self.index_to_target, self.target_vocab_size = \
                Common.load_vocab_from_dict(target_to_count, add_values=[Common.PAD, Common.UNK, Common.SOS],
                                            max_size=config.TARGET_VOCAB_MAX_SIZE)
            print('Loaded target word vocab. size: %d' %
                  self.target_vocab_size)

            self.node_to_index, self.index_to_node, self.nodes_vocab_size = \
                Common.load_vocab_from_dict(node_to_count, add_values=[Common.PAD, Common.UNK], max_size=None)
            print('Loaded nodes vocab. size: %d' % self.nodes_vocab_size)

            self.model = Model(self.config, self.subtoken_vocab_size,
                               self.target_vocab_size, self.nodes_vocab_size,
                               self.target_to_index)

        if self.config.TRAIN_PATH:
            self.train_dataset_reader = reader.Reader(
                subtoken_to_index=self.subtoken_to_index,
                node_to_index=self.node_to_index,
                target_to_index=self.target_to_index,
                config=self.config,
                is_evaluating=False)
        else:
            self.train_dataset_reader = None

        self.test_dataset_reader = reader.Reader(
            subtoken_to_index=self.subtoken_to_index,
            node_to_index=self.node_to_index,
            target_to_index=self.target_to_index,
            config=self.config,
            is_evaluating=True)
Example #2
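# Evaluate a trained model on the test set: restore the latest checkpoint,
# then report the MSE loss and the MAE.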
def test():
    x_image = tf.placeholder(tf.float32, [None, 66, 200, 3])
    y = tf.placeholder(tf.float32, [None, 1])
    keep_prob = tf.placeholder(tf.float32)

    model = Nivdia_Model(x_image, y, keep_prob, FLAGS, False)

    # dataset reader
    dataset = reader.Reader(FLAGS.data_dir, FLAGS)

    # model saver used to restore the model from the model directory
    saver = tf.train.Saver()

    with tf.Session() as sess:
        path = tf.train.latest_checkpoint(FLAGS.model_dir)
        if path is not None:
            saver.restore(sess, path)
        else:
            print("There is no saved model in the model directory.")
        loss = batch_eval(model.loss, dataset.test, x_image, y, keep_prob, 500,
                          sess)
        print("Loss (MSE) in test dataset:", loss)
        mae = batch_eval(model.mae, dataset.test, x_image, y, keep_prob, 500,
                         sess)
        print("MAE in test dataset: ", mae)
Example #3
    def __init__(self):

        self.rd = reader.Reader()
        self.bridge = CvBridge()
        self.register()
        self.left_features = Image()
        self.process()
Example #4
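 # Collect (set value, measured value) pairs from every measurement file,
 # for either a clock rise/fall scan or a bias scan.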
 def extractcurve(self):
     a_setparam = np.array([])
     a_measparam = np.array([])
     a_measparam1 = np.array([])
     for file in self.files:
         #            print (file)
         r = reader.Reader(file)
         meas = r.getmeas()
         if self.typeofsignal == 'clock':
             setfall = float(meas.setvalues[1])
             setrise = float(meas.setvalues[0])
             measrise = float(meas.measvalue['Rise'])
             measfall = float(meas.measvalue['Fall'])
             #                print ('measrise = ' , measrise, ' measfall = ' , measfall)
             if self.scantype == 'fall':
                 a_setparam = np.append(a_setparam, setfall)
                 a_measparam = np.append(a_measparam, measfall)
             if self.scantype == 'rise':
                 a_setparam = np.append(a_setparam, setrise)
                 a_measparam = np.append(a_measparam, measrise)
         if self.typeofsignal == 'bias':
             setval = float(meas.setvalues[0])
             measmean = float(meas.measvalue['Mean'])
             measrms = float(meas.measvalue['RMS'])
             a_setparam = np.append(a_setparam, setval)
             a_measparam = np.append(a_measparam, measmean)
             a_measparam1 = np.append(a_measparam1, measrms)
     return [a_setparam, a_measparam, a_measparam1]
Example #5
def read_ClintYaleAccentedTischendorf_write_linear():
    dir = ClintYaleAccentedTischbasedir
    suffix = ""
    rd = reader.Reader(dir, suffix)
    rd.read_NT(reader.read_AccentedTischendorf)
    rd.write_Linear()
    return rd
Example #6
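# Apply every simplifier strategy in turn to one numbered input/output file pair.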
def execute(input_folder_name, output_folder_name, file_num, data_hash):
    input_name = ('input%0{}d.txt'.format(len(
        str(number_of_files)))) % file_num
    output_name = ('output%0{}d.txt'.format(len(
        str(number_of_files)))) % file_num

    rd = reader.Reader(input_folder_name=input_folder_name)
    writer = Writer(output_folder_name=output_folder_name)

    simplifier = Simplifier()

    execution_lt = [
        (simplifier.tautoly, "tautoly"),
        (simplifier.blocked_clause, "blocked_clause"),
        (simplifier.subsumption_elimination, "subsumption_elimination"),
        (simplifier.hidden_tautoly, "hidden_tautoly"),
        (simplifier.hidden_blocked_clause, "hidden_blocked_clause"),
        (simplifier.hidden_subsumption_elimination,
         "hidden_subsumption_elimination"),
        (simplifier.asymmetric_tautoly, "asymmetric_tautoly"),
        (simplifier.asymmetric_blocked_clause, "asymmetric_blocked_clause"),
        (simplifier.asymmetric_subsumption_elimination,
         "asymmetric_subsumption_elimination"),
        (simplifier.explicits, "explicits"),
        (simplifier.hiddens, "hiddens"),
        (simplifier.asymmetrics, "asymmetrics"),
        (simplifier.complete, "Complete"),
    ]

    for function, function_name in execution_lt:
        execution(function, rd, writer, input_name, output_name, function_name,
                  data_hash)
Example #7
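# Load a frozen .pb graph, look up its input and output tensors by name,
# and run inference over batches from the test set.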
def freeze_graph_test():
    dataset = reader.Reader(FLAGS.data_dir, FLAGS)
    pb_path = './tools/pbmodel.pb'
    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()
        with open(pb_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            tf.import_graph_def(output_graph_def, name="")
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            # names of the input tensors, matching the network's input placeholders
            input_image_tensor = sess.graph.get_tensor_by_name("Placeholder:0")
            input_keep_prob_tensor = sess.graph.get_tensor_by_name(
                "Placeholder_2:0")

            # name of the output tensor
            output_tensor_name = sess.graph.get_tensor_by_name("output/add:0")

            batch_size = 10
            batch_num = 50
            for i in range(batch_num):
                batch_x, batch_y = dataset.test.next_batch(batch_size,
                                                           shuffle=False)
                res = sess.run(output_tensor_name,
                               feed_dict={
                                   input_image_tensor: batch_x,
                                   input_keep_prob_tensor: 1.0
                               })
                print(i, res)
Example #8
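    # Live-plot loop: poll the log file for new rows and redraw the antenna
    # arrow and the ground/gondola positions for each row.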
    def run(self):
        ax = plt.axes()

        rd = reader.Reader('elink.data.log', 'Data Logs')
        
        while True:
            
            data, total_rows = rd.get_unread_logs()

            # if no new data yet, wait and poll again
            if total_rows == 0:
                plt.pause(3)
                continue
            
            for data_row in data:
                data_array = data_row.split(',')
                
                try:
                    time = data_array[self.time_index]
                    self.gond_x = float(data_array[self.index_x])
                    self.gond_y = float(data_array[self.index_y])
                    self.antenna_theta = float(data_array[self.angle_index])
                except (IndexError, ValueError):  # skip malformed rows
                    continue

                min_x = min(self.gond_x, self.ground_x)
                max_x = max(self.gond_x, self.ground_x)
                distance_x = max_x - min_x
                min_y = min(self.gond_y, self.ground_y)
                max_y = max(self.gond_y, self.ground_y)

                # brief pause so matplotlib can redraw
                plt.pause(0.000001)

                self.arrow_length = distance_x * 0.1
                r_s, theta_s = self.cart2pol(0, self.arrow_length)
                theta_s = math.radians(self.antenna_theta)
                x_s, y_s = self.pol2cart(r_s, theta_s)
                ax.clear()

                padding = distance_x * 0.15
                ax.set_xlim(min_x - padding, max_x + padding)
                ax.set_ylim(min_y - padding, max_y + padding)

                ax.grid(True)
                ax.title.set_text('Antenna at {time}'.format(time=self.format_time(time)))
                ax.arrow(self.gond_x, self.gond_y, x_s, y_s, head_starts_at_zero=True,
                         width=0.0003, head_width=0.0009, head_length=0.0004,
                         fc='lightblue', ec='red')
                ax.plot(self.ground_x, self.ground_y, 'o', color='black')
                ax.plot([self.ground_x, self.gond_x], [self.ground_y, self.gond_y], '-.k')  # dashdot black

        plt.show()
Example #9
 def __init__(self, path):
     self.path = path
     self.reader = reader.Reader()
     self.abspath = os.path.join(self.root_path, path)
     self.content = None
     self._meta = None
     self._body = None
     self.initialize()
Example #10
 def create_new_reader(self):
     """
     Create all reader
     :return:
     """
     self.reader_list.clear()
     for i in range(1, 4):
         self.reader_list.append(reader.Reader(i))
Example #11
def test2():
    """Running v1 using only the WebCrawler class"""
    webcrawler = crawler.WebCrawler(url="debeukenhuisartsen.praktijkinfo.nl")
    webcrawler.crawl()
    entry = webcrawler.html[0]
    print('html\'s:', len(webcrawler.html))

    r = reader.Reader(entry['html'], entry['url'])
    r.test()
Example #12
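# Dash callback: for each selected stream, read 24 hourly averages and build
# a graph; PreventUpdate aborts the callback when there is nothing to redraw.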
def graph_average(onBoolean, subList, graphData, refresh, days):
    
    if subList is None:
        logger.debug("cant graph 24hr b/c there are no streams selected")
        raise PreventUpdate

    logger.debug(refresh)

    if graphData is not None and refresh is None:
        return graphData,graphData
    if onBoolean:
        graphs = []
        # start averaging over 24 hrs
        window = 60 * 60
        hours = 24
        start = time.time() - 24 * 60 * 60 * days
        stop = time.time() - window - 24 * 60 * 60 * days
        read = reader.Reader(config, logger)
        for stream in subList:
            figure = {
                'data': [],
                'layout': {'title': 'Graph of {} averaged over 24hrs'.format(stream),
                           'uirevision': stream},
            }
            # get the averages
            data = []
            logger.debug("starting read for stream {}".format(stream))
            for i in range(hours):
                start = start - window
                stop = stop - window
                r = read.get_stream_stat_data(stream, start=start, stop=stop)
                data.append(r)
                logger.debug("done with read {}/{} for stream {}".format(i + 1, hours, stream))
            # now format the data
            xx = []
            yy = {}
            for val in data:
                xx.append(pd.to_datetime(val['measurement_time']['start'] / (2 ** 32),
                                         unit='s', utc=True).dt.tz_convert("US/Central"))
                for key in val:
                    if key != 'measurement_time':
                        if key not in yy:
                            yy[key] = []
                        yy[key].append(val[key]['average'])

            for key in yy:
                figure['data'].append({'x': xx, 'y': yy[key], 'type': 'scatter', 'name': key})

            graphs.append(dcc.Graph(
                id='graph-{}'.format(stream),
                figure=figure))
        read.close()
        return html.Div(graphs), html.Div(graphs)
    else:
        raise PreventUpdate
Example #13
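    # Build the prediction graph lazily on the first call, then feed each input
    # line through a string placeholder and decode the top-k predictions.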
    def predict(self, predict_data_lines):
        if self.predict_queue is None:
            self.predict_queue = reader.Reader(subtoken_to_index=self.subtoken_to_index,
                                               node_to_index=self.node_to_index,
                                               target_to_index=self.target_to_index,
                                               config=self.config, is_evaluating=True)
            self.predict_placeholder = tf.placeholder(tf.string)
            reader_output = self.predict_queue.process_from_placeholder(
                self.predict_placeholder)
            reader_output = {key: tf.expand_dims(
                tensor, 0) for key, tensor in reader_output.items()}
            self.predict_top_indices_op, self.predict_top_scores_op, _, self.attention_weights_op = \
                self.build_test_graph(reader_output)
            self.predict_source_string = reader_output[reader.PATH_SOURCE_STRINGS_KEY]
            self.predict_path_string = reader_output[reader.PATH_STRINGS_KEY]
            self.predict_path_target_string = reader_output[reader.PATH_TARGET_STRINGS_KEY]
            self.predict_target_strings_op = reader_output[reader.TARGET_STRING_KEY]

            self.initialize_session_variables(self.sess)
            self.saver = tf.train.Saver()
            self.load_model(self.sess)

        results = []
        for line in predict_data_lines:
            predicted_indices, top_scores, true_target_strings, attention_weights, path_source_string, path_strings, path_target_string = self.sess.run(
                [self.predict_top_indices_op, self.predict_top_scores_op, self.predict_target_strings_op,
                 self.attention_weights_op,
                 self.predict_source_string, self.predict_path_string, self.predict_path_target_string],
                feed_dict={self.predict_placeholder: line})

            top_scores = np.squeeze(top_scores, axis=0)
            path_source_string = path_source_string.reshape((-1))
            path_strings = path_strings.reshape((-1))
            path_target_string = path_target_string.reshape((-1))
            predicted_indices = np.squeeze(predicted_indices, axis=0)
            true_target_strings = Common.binary_to_string(
                true_target_strings[0])

            if self.config.BEAM_WIDTH > 0:
                predicted_strings = [[self.index_to_target[sugg] for sugg in timestep]
                                     for timestep in predicted_indices]  # (target_length, top-k)
                # (top-k, target_length)
                predicted_strings = list(map(list, zip(*predicted_strings)))
                top_scores = [np.exp(np.sum(s)) for s in zip(*top_scores)]
            else:
                predicted_strings = [self.index_to_target[idx]
                                     for idx in predicted_indices]  # (batch, target_length)

            attention_per_path = None
            if self.config.BEAM_WIDTH == 0:
                attention_per_path = self.get_attention_per_path(path_source_string, path_strings, path_target_string,
                                                                 attention_weights)

            results.append((true_target_strings, predicted_strings,
                            top_scores, attention_per_path))
        return results
Example #14
def main():

    with tf.Session() as session:
        config = Config()
        r = reader.Reader(vocab_size=config.vocab_size - 4,
                          num_steps=config.num_steps)
        print "vocab_size", len(r.word_to_id)
        dialogue = Dialogue(config, variational=False, forward_only=False)
        summary_writer = tf.train.SummaryWriter('logdir', session.graph)
        dialogue.fit(session, summary_writer, r, nb_epoch=555)
Example #15
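# Train the dialogue model, then decode one small batch per epoch and print
# the post/response pairs with <PAD> tokens stripped.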
def main():
    with tf.Session() as session:
        config = Config()
        r = reader.Reader(vocab_size=config.vocab_size - 4,
                          num_steps=config.num_steps)
        print "vocab_size", len(r.word_to_id)

        with tf.variable_scope("Model", reuse=None):
            dialogue = Dialogue(config, variational=False, forward_only=False)
        with tf.variable_scope("Model", reuse=True):
            test_dialogue = Dialogue(config,
                                     variational=False,
                                     forward_only=True)

        tf.global_variables_initializer().run()

        for epoch in range(10):
            r.batch_size = 128
            for step, (x, y, x_early_steps,
                       y_early_steps) in enumerate(r.iterator()):
                print("************ x: ", x.shape)
                loss, weights = dialogue.step(session, x, y, x_early_steps)
                if step % 10 == 1:
                    print "step {:<4}, loss: {:.4}".format(step, loss)
                    if loss < 0.2:
                        print "weights[0] : {}".format(weights[0])
                        print "weights[1] : {}".format(weights[1])
                        print "weights[-2] : {}".format(weights[-2])
                        print "weights[-1] : {}".format(weights[-1])

            r.batch_size = 10

            for ind, (x, y, x_early_steps,
                      y_early_steps) in enumerate(r.iterator()):
                loss, indices = test_dialogue.step(session, x, y,
                                                   x_early_steps, True)

                indices = np.array(indices)
                for i in range(indices.shape[1]):
                    print "************"
                    print "post:     ", ' '.join(
                        map(
                            lambda ind: r.id_to_word[ind],
                            filter(
                                lambda ind: ind != r.control_word_to_id[
                                    '<PAD>'], x[i])))
                    print "response: ", ' '.join(
                        map(
                            lambda ind: r.id_to_word[ind],
                            filter(
                                lambda ind: ind != r.control_word_to_id[
                                    '<PAD>'], indices[:, i])))

                break  # evaluate only one batch
Example #16
def setupOBDReader():
    obdReader = reader.Reader()
    obdReader.connect()
    availablePIDs = obdReader.checkAvailable()

    if not obdReader.conn.is_connected():
        exit(1)

    obdReader.watch(availablePIDs, pidHandler, dtcHandler)

    return obdReader
Example #17
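    # Eager-style variant of predict(): run the encoder and decoder directly on
    # each processed line instead of feeding a placeholder through a session.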
    def predict(self, predict_data_lines):
        if not self.model:
            print('Model is not initialized')
            exit(-1)

        predict_reader = reader.Reader(subtoken_to_index=self.subtoken_to_index,
                                       node_to_index=self.node_to_index,
                                       target_to_index=self.target_to_index,
                                       config=self.config,
                                       is_evaluating=True)
        results = []
        for line in predict_data_lines:
            input_tensors = predict_reader.process_from_placeholder(line)

            path_source_string = input_tensors[reader.PATH_SOURCE_STRINGS_KEY]
            path_strings = input_tensors[reader.PATH_STRINGS_KEY]
            path_target_string = input_tensors[reader.PATH_TARGET_STRINGS_KEY]
            true_target_strings = input_tensors[reader.TARGET_STRING_KEY]

            batched_contexts = self.model.run_encoder(input_tensors, is_training=False)
            outputs, final_states = self.model.run_decoder(batched_contexts, input_tensors, is_training=False)

            if self.config.BEAM_WIDTH > 0:
                predicted_indices = outputs.predicted_ids
                top_scores = outputs.beam_search_decoder_output.scores
                attention_weights = [tf.no_op()]
            else:
                predicted_indices = outputs.sample_id
                top_scores = tf.constant(1, shape=(1, 1), dtype=tf.float32)
                attention_weights = tf.squeeze(final_states.alignment_history.stack(), 1)

            top_scores = np.squeeze(top_scores.numpy(), axis=0)
            path_source_string = path_source_string.numpy().reshape((-1))
            path_strings = path_strings.numpy().reshape((-1))
            path_target_string = path_target_string.numpy().reshape((-1))
            predicted_indices = np.squeeze(predicted_indices.numpy(), axis=0)
            true_target_strings = Common.binary_to_string(true_target_strings.numpy()[0])

            if self.config.BEAM_WIDTH > 0:
                predicted_strings = [[self.index_to_target[sugg] for sugg in timestep]
                                     for timestep in predicted_indices]  # (target_length, top-k)
                predicted_strings = list(map(list, zip(*predicted_strings)))  # (top-k, target_length)
                top_scores = [np.exp(np.sum(s)) for s in zip(*top_scores)]
            else:
                predicted_strings = [self.index_to_target[idx]
                                     for idx in predicted_indices]  # (batch, target_length)

            attention_per_path = None
            if self.config.BEAM_WIDTH == 0:
                attention_per_path = self.get_attention_per_path(path_source_string, path_strings, path_target_string,
                                                                 attention_weights.numpy())

            results.append((true_target_strings, predicted_strings, top_scores, attention_per_path))
        return results
Example #18
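# Read one input file, assign rides with the MCTS algorithm, and write the result.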
def main():
    filenames = ('a_example.in', 'b_should_be_easy.in', 'c_no_hurry.in',
                 'd_metropolis.in', 'e_high_bonus.in')
    filename = filenames[1]
    out_filename = '../output/{}_MCTS.out'.format(
        filename[:filename.index('.')])
    r = reader.Reader('../dataset/' + filename)

    a = algorithm.MCTSAlgorithm(r.get_rides(), r.get_meta_info())
    a.assign_rides()

    w = writer.Writer(a.assigned_rides, out_filename)
Example #19
 def load(vidname):
     import reader, trace, traj
     vname, wname, mname = fnames(vidname)
     v = reader.Reader(vname, adjuststipple=0)
     w = trace.Load_Whiskers(wname)
     try:
         m = traj.MeasurementsTable(mname)
     except Exception:
         traceback.print_exc(file=sys.stdout)
         sys.stdout.flush()
         m = None
     return v, w, m
Example #20
def handle_echo_client(ws):
    while True:
        #msg = ws.receive()
        #if msg == "quit":
        #	ws.close_connection()
        #	break
        nfc_reader = reader.Reader()
        badge_id = nfc_reader.read_badge()
        ws.send(badge_id)
        nfc_reader.release_context()
        del nfc_reader
        time.sleep(1)
Example #21
def engines():
    user_login = request.form.get("username")
    password = request.form.get("pass")
    name = request.form.get("name")
    writer = request.form.get("writer")
    user = reader.Reader(user_login, password, name, writer)

    if my_lib.check_login(user.login):
        flash("username already exists")
        return render_template("registration.html")
    my_lib.add_user(user)
    flash("thank you for registering")
    return render_template("index.html")
Example #22
def login():
    if current_user.is_authenticated:
        return redirect(url_for('search'))
    user = reader.Reader(request.form.get("usernamelog"),
                         request.form.get("passlog"), "any", "any")
    if my_lib.check_user(user):
        print('this way')
        reader1 = my_lib.get_reader(request.form.get("usernamelog"))
        login_user(reader1)
        #session['user']=request.form.get("usernamelog")
        return redirect(url_for("search"))
    else:
        flash("incorrect username or password")
        return render_template("index.html")
Example #23
def main():
    x_image = tf.placeholder(tf.float32, [None, 66, 200, 3])
    keep_prob = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32, [None, 1])

    model = Nivdia_Model(x_image, y, keep_prob, FLAGS, False)

    # dataset reader
    dataset = reader.Reader(FLAGS.data_dir, FLAGS)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        # initialize all variables
        sess.run(tf.global_variables_initializer())
        # restore model
        print(FLAGS.model_dir)
        path = tf.train.latest_checkpoint(FLAGS.model_dir)
        if path is None:
            print("Err: the model does NOT exist")
            exit(1)
        else:
            saver.restore(sess, path)
            print("Restore model from", path)

        batch_x, batch_y = dataset.train.next_batch(FLAGS.visualization_num,
                                                    False)
        y_pred = sess.run(model.prediction,
                          feed_dict={
                              x_image: batch_x,
                              keep_prob: 1.0
                          })
        masks = sess.run(model.visualization_mask,
                         feed_dict={
                             x_image: batch_x,
                             keep_prob: 1.0
                         })

    if not os.path.exists(FLAGS.result_dir):
        os.makedirs(FLAGS.result_dir)
    for i in range(FLAGS.visualization_num):
        image, mask, overlay = visualize(batch_x[i], masks[i])
        cv2.imwrite(os.path.join(FLAGS.result_dir, "image_" + str(i) + ".jpg"),
                    image)
        cv2.imwrite(os.path.join(FLAGS.result_dir, "mask_" + str(i) + ".jpg"),
                    mask)
        cv2.imwrite(
            os.path.join(FLAGS.result_dir, "overlay_" + str(i) + ".jpg"),
            overlay)
Example #24
def main():
    reader1 = our_reader.Reader(cfg['vocab_size'], cfg['buckets'])
    reader2 = cornell_reader.Reader(cfg)

    reader1.build_dict(cfg['dictionary_name'], cfg['reversed_dictionary_name'], cfg['path']['train'])
    reader1.read_data(cfg['path']['train'])

    encoder_inputs_10 = [i for i in reader1.dataset_enc if len(i) == 10]
    decoder_inputs_10 = [i for i in reader1.dataset_dec if len(i) == 10]
    encoder_inputs__toks_10 = [i for i in reader1.dataset_enc_tok if len(i) == 10]
    decoder_inputs__toks_10 = [i for i in reader1.dataset_dec_tok if len(i) == 10]

    print("done")

    model.create_placeholders()
Example #25
def main():
    if len(sys.argv) < 2:
        sys.stderr.write("usage:\n\treader.py /dev/cu.{port}\n")
        sys.exit(1)
    th = reader.Reader(sys.argv[1])
    garden = GardenServer(th)
    httpd = make_server('', 8000, garden)
    print("Serving on port 8000...")

    # Serve until process is killed
    #httpd.serve_forever()

    serve_thread = threading.Thread(target=httpd.serve_forever)
    sensor_thread = threading.Thread(target=th.run)
    serve_thread.start()
    sensor_thread.start()
Example #26
def main():
    """Program Starts here."""
    parser = argparse.ArgumentParser(prog='sync_ghs.py')
    parser.add_argument("-f",
                        nargs=1,
                        required=True,
                        help="File with the input data.")
    args = parser.parse_args()

    log_file = "logs/sync_ghs_%s.log" % time.strftime("%Y%m%d_%H%M%S")
    setup_log(log_file, logging.INFO)

    input_parser = reader.Reader(args.f[0])
    if not input_parser.read_data():
        logging.error("Parsing of input file failed.")
        sys.exit(-1)
Example #27
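    # Encode each input line and return the encoder's batched context vectors.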
    def encode(self, predict_data_lines):
        if not self.model:
            print('Model is not initialized')
            exit(-1)

        predict_reader = reader.Reader(subtoken_to_index=self.subtoken_to_index,
                                       node_to_index=self.node_to_index,
                                       target_to_index=self.target_to_index,
                                       config=self.config,
                                       is_evaluating=True)
        results = []
        for line in predict_data_lines:
            input_tensors = predict_reader.process_from_placeholder(line)
            batched_contexts = self.model.run_encoder(input_tensors, is_training=False)
            results.append(batched_contexts)
        return results
Example #28
 def __init__(self, sess, learning_rate, data_sets_path, batch_size, canvas_size, window_size, threads, max_steps,
              save_path, optimizer, kwidth, stride, is_train, beta1, summary_step, saver_step):
     self.batch_size = batch_size
     self.saver_step = saver_step
     self.summary_step = summary_step
     self.canvas_size = canvas_size
     self.beta1 = beta1
     self.is_train = is_train
     self.stride = stride
     self.kwidth = kwidth
     self.sess = sess
     self.optimizer = optimizer
     self.save_path = save_path
     self.max_step = max_steps
     self.learning_rate = learning_rate
     self.window_size = window_size
     self.wav_data, self.label_data = reader.Reader(data_sets_path, canvas_size, batch_size, window_size,
                                                    threads).get_batch()
Example #29
    def download_all(self):
        """すべて(総理大臣の情報を記したCSVファイル・画像ファイル群・縮小画像ファイル群)をダウンロードし、テーブルを応答する。"""
        if os.path.isdir(self._base_directory + "/images"):
            shutil.rmtree(self._base_directory + "/images")
        os.makedirs(self._base_directory + "/images")
        if os.path.isdir(self._base_directory + "/thumbnails"):
            shutil.rmtree(self._base_directory + "/thumbnails")
        os.makedirs(self._base_directory + "/thumbnails")

        self.download_csv()
        a_reader = reader.Reader(self._base_directory + "/PrimeMinisters.csv")
        a_table = a_reader.table()

        image_names = a_table.image_filenames()
        self.download_images(image_names)
        thumbnails_names = a_table.thumbnail_filenames()
        self.download_images(thumbnails_names)

        return a_table
Example #30
 def setupUi(self):
     #self.setStyleSheet("QWidget {border-image: url(/home/pi/guiPythonLABFAB/images/Baned.png)}")
     self.setVisible(False)
     self.imageCase = {'enrollIn':'/home/pi/guiPythonLABFAB/images/Enroll.png',
                       'nonEnroll':'/home/pi/guiPythonLABFAB/images/NonEnroll.png',
                       'nonSystemEnroll':'/home/pi/guiPythonLABFAB/images/NonSystemEnroll.png',
                       'enrollOut':'/home/pi/guiPythonLABFAB/images/EnrollOut.png',
                       'visit':'/home/pi/guiPythonLABFAB/images/Visit.png',
                       'baned':'/home/pi/guiPythonLABFAB/images/Baned.png',
                       'enrolling': 'images/Enrrolling.png',
                       'waiting': 'Wait.png'}
     self.labeltext = QLabel(self)
     self.labeltext.setStyleSheet("border-image: none; font: 80pt; color: white;")
     self.labeltext.setVisible(False)
     self.thread = reader.Reader()
     self.thread.sig1.connect(self.screenResponse)
     self.thread.sig2.connect(self.screenResponse)
     self.thread.start()
     self.lab_id = 1