# Snippet from a larger module: shared_setup, gamma, ac_space, jobs,
# evaluator_target, Timer, threading, datetime and sleep are assumed to be
# defined/imported at module level in the original source.
def learner_target(task_index, job, _):
    from distributed.session import SimpleDistributedSession
    import tensorflow as tf

    setup = shared_setup()

    s, a, r, s2 = setup.transitions
    q_s = setup.q_vals(s)
    q_s2 = setup.q_vals(s2)
    target = tf.stop_gradient(r + gamma * tf.reduce_max(q_s2, 1))
    prediction = tf.reduce_sum(q_s *
                               tf.one_hot(tf.cast(a, tf.int32), ac_space.n),
                               axis=1)
    td_error = target - prediction
    loss_op = tf.reduce_mean(tf.abs(td_error))

    minimize = tf.train.AdamOptimizer(.01).minimize(loss_op)

    # inputs = [K.layers.Input(tensor=tensor) for tensor in setup.transitions]

    # model = K.Model(inputs=inputs, outputs=[td_error])
    # model.compile(tf.train.AdamOptimizer(), K.losses.mean_absolute_error)

    epoch_complete = threading.Barrier(2)
    with tf.variable_scope('test_performance'):
        setup.test_episode_reward = tf.placeholder(tf.float32)
        setup.test_summaries = tf.summary.merge([
            tf.summary.scalar('test_episode_reward',
                              setup.test_episode_reward),
            # tf.summary.histogram('agent_qs', setup.greedy_agent.greedy_agent.q_vs)
        ])
    with tf.variable_scope('train_performance'):
        loss_summary = tf.summary.scalar('loss', loss_op)

    writer = tf.summary.FileWriter('./logs/rl/tf_distributed_np_ql/' +
                                   datetime.datetime.now().isoformat())

    with SimpleDistributedSession(jobs)(job.name, task_index) as sess:
        setup.set_session(sess)
        sleep(4)

        with Timer('Learner complete'):
            for _ in range(4):
                sess.run(tf.global_variables_initializer())
                # sess.run([it.initializer for it in setup.iters])

                # A Thread can only be started once, so create a fresh
                # evaluator thread for each epoch.
                evaluator_thread = threading.Thread(
                    target=evaluator_target,
                    args=[sess, setup, writer, epoch_complete])
                evaluator_thread.start()
                with Timer('\nEpoch complete'):
                    try:
                        while True:
                            # Fetch the summary in the same run call as the
                            # training step so it describes the same batch.
                            loss, summary, _ = sess.run(
                                [loss_op, loss_summary, minimize])
                            writer.add_summary(summary)
                            writer.flush()

                    except tf.errors.OutOfRangeError:
                        epoch_complete.wait(1000)
                        evaluator_thread.join(1000)
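The TD-error computation above relies on two small tricks: tf.stop_gradient freezes the bootstrapped target, and the one-hot/reduce_sum pair gathers Q(s, a) for the actions actually taken. A minimal NumPy sketch of the same arithmetic, with hypothetical toy values that are not part of the original code:

import numpy as np

gamma = 0.99
n_actions = 3

# Hypothetical toy batch: Q-values for s and s2, actions taken, rewards.
q_s = np.array([[1.0, 2.0, 0.5], [0.1, 0.4, 0.3]])
q_s2 = np.array([[0.2, 1.5, 0.9], [1.1, 0.0, 0.7]])
a = np.array([1, 2])
r = np.array([0.0, 1.0])

# target = r + gamma * max_a' Q(s', a'), treated as a constant
target = r + gamma * q_s2.max(axis=1)

# prediction = Q(s, a), gathered via the one-hot trick
prediction = (q_s * np.eye(n_actions)[a]).sum(axis=1)

td_error = target - prediction
print(td_error)  # per-transition TD errors, as in the loss above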
Example #2
def test_speed():

    with Pipeline() as pipeline:
        level1 = Unpack(range(N_STEPS))
        level2 = Unpack(range(N_STEPS))
        Sleep()

    with Timer("sequential") as t:
        expected_result = [
            (obj[level1], obj[level2]) for obj in pipeline.transform_stream()
        ]

    elapsed_sequential = t.elapsed

    with Pipeline() as pipeline:
        level1 = Unpack(range(N_STEPS))
        with ParallelPipeline(4) as pp:
            level2 = Unpack(range(N_STEPS))
            Sleep()

    with Timer("parallel") as t:
        result = [(obj[level1], obj[level2]) for obj in pipeline.transform_stream()]

    elapsed_parallel = t.elapsed

    assert result == expected_result

    assert elapsed_parallel < elapsed_sequential
Example #3
def get_data():
    glove = get_glove()
    tokenizer = TreebankWordTokenizer().tokenize
    text_field = Field(sequential=True,
                       tokenize=tokenizer,
                       include_lengths=True,
                       lower=True,
                       use_vocab=True)
    label_field = Field(sequential=False,
                        pad_token=None,
                        unk_token=None,
                        is_target=True,
                        use_vocab=True)
    with Timer('snli') as timer:
        print('snli{')
        splits = get_snli(text_field, label_field)
        print('}')

    text_field.build_vocab(*splits, vectors=glove)
    label_field.build_vocab(*splits)
    text_vocab = text_field.vocab
    label_vocab = label_field.vocab

    text_embeds = get_embeds(text_vocab.vectors)
    # snli = [pick_samples(ds, n=100) for ds in splits]  # TODO: comment
    snli = splits

    return (snli, text_field, label_vocab, text_embeds)
Example #4
def breakit(key, i):
    cmd = "python sign.py " + key + " " + message
    # Measure the time it takes for the response
    with Timer('timing') as timer:
        # Execute the cmd with catching the stderr
        p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
        (output, err) = p.communicate()
        # Print only successfully signed messages
        if output:
            print("output:", output)
    # The response time grows with the number of correct leading characters,
    # so compare against the per-character benchmark times the prefix length.
    return float(timer.elapsed) > (i * time_one_char)
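The driver loop at the bottom of this file (shown in Example #14) turns breakit into a classic timing side channel: a naive signer compares the key character by character, so every correct leading character buys roughly one extra comparison's worth of delay. A minimal self-contained sketch of that decision rule with a simulated oracle; slow_compare and the time_one_char value here are hypothetical stand-ins, not part of the original script:

import time

time_one_char = 0.01  # hypothetical per-character comparison cost

def slow_compare(secret, guess):
    # Simulated vulnerable check: bails out at the first mismatch.
    for s, g in zip(secret, guess):
        if s != g:
            return False
        time.sleep(time_one_char)  # one comparison's worth of work
    return len(secret) == len(guess)

def looks_correct_so_far(secret, guess, i):
    start = time.time()
    slow_compare(secret, guess)
    elapsed = time.time() - start
    # Same rule as breakit: i correct chars imply at least i comparisons.
    return elapsed > i * time_one_char

secret = "3f1a"
print(looks_correct_so_far(secret, "3zzz", 1))  # True: first char matched
print(looks_correct_so_far(secret, "zzzz", 1))  # False: no chars matched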
Example #5
    def consolidate(root_id):
        with database.engine.connect() as conn, Timer("Consolidate") as timer:
            tree = Tree(conn)

            if root_id == "all":
                print("Consolidating all projects...")
                root_ids = [p["node_id"] for p in tree.get_projects()]
            elif root_id == "visible":
                print("Consolidating visible projects...")
                root_ids = [p["node_id"] for p in tree.get_projects(True)]
            else:
                print("Consolidating {}...".format(root_id))
                root_ids = [root_id]

            for rid in root_ids:
                with timer.child(str(rid)):
                    print("Consolidating {}...".format(rid))
                    tree.consolidate_node(rid)
            print("Done.")
Example #6
    def progress(root_id, log):
        """
        Report progress on a tree
        """
        with database.engine.connect() as conn:
            tree = Tree(conn)

            if root_id is None:
                root_ids = [p["node_id"] for p in tree.get_projects()]
            else:
                root_ids = [root_id]

            with Timer("Progress") as timer:
                for rid in root_ids:
                    print("Root {}:".format(rid))
                    with timer.child(str(rid)):
                        prog = tree.calculate_progress(rid)

                    for k in sorted(prog.keys()):
                        print("{}: {}".format(k, prog[k]))
Example #7
import os
import shutil

from timer_cm import Timer

from morphocut.processing.pipeline import *

# input_path = "/data1/mschroeder/Datasets/19-02-21-FredLeMoigne/M138 T4 (wetransfer-477f42)/"
input_path = "/data1/mschroeder/Datasets/19-02-21-FredLeMoigne/M138 T4 (wetransfer-477f42)/M138 T4 300A-02.jpeg"
dump_path = "/tmp/GelDump"

# ignore_errors=True already suppresses a missing directory, so no try/except is needed
shutil.rmtree(dump_path, ignore_errors=True)

os.makedirs(dump_path)

with Timer("Total time") as timer:
    # pipeline = get_default_pipeline(input_path, export_path)

    with timer.child("Parents"):
        print("Processing parents...")
        parents = Pipeline([
            DataLoader(input_path, output_facet="raw"),
            Progress("Loaded"),
            VignetteCorrector(input_facet="raw", output_facet="color"),
            BGR2Gray(input_facet="color", output_facet="gray"),
            ThresholdOtsu(input_facet="gray", output_facet="mask")
        ])
        parents = list(parents())

    with timer.child("Children"):
        print("Processing children...")
Example #8
        return node_id, remain_message, token_offset, token_weight

    def add_edge(self, prev_node_id, node_id, prev_node_weight):
        # Round the weight so long decimal values display cleanly,
        # e.g. 2.12343234234234324 -> 2.12
        prev_node_round_weight = round(prev_node_weight, 2)

        self.G.add_edge(
            prev_node_id,
            node_id,
            weight=prev_node_weight,
            round_weight=prev_node_round_weight,
            shortest_path=False,
        )


if __name__ == "__main__":
    from timer_cm import Timer

    graph_builder = NonRecursiveAlgorithm()

    with Timer("Building DAG graph"):
        for _ in range(1):
            graph_builder.init_graph()
            graph_builder.build_graph("王小明在北京的清华大学读书。")

    graph_builder.compute_shortest_path()

    result = graph_builder.get_tokens()
    print(result)
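The __main__ block above tokenizes the sentence by building a DAG and computing a shortest path, the standard formulation of dictionary-based Chinese word segmentation: nodes are character boundaries, edges are candidate words weighted by cost, and the cheapest boundary-to-boundary path is the segmentation. A minimal networkx sketch of the idea with a hypothetical toy dictionary (the internals of NonRecursiveAlgorithm are not shown in this snippet):

import networkx as nx

sentence = "北京大学"
# Hypothetical toy dictionary: candidate word -> edge cost (lower is better)
dictionary = {
    "北": 2.0, "京": 2.0, "大": 2.0, "学": 2.0,
    "北京": 1.0, "大学": 1.0, "北京大学": 0.8,
}

G = nx.DiGraph()
for start in range(len(sentence)):
    for end in range(start + 1, len(sentence) + 1):
        word = sentence[start:end]
        if word in dictionary:
            # Edge from boundary `start` to boundary `end`, labeled with the word
            G.add_edge(start, end, weight=dictionary[word], word=word)

path = nx.shortest_path(G, 0, len(sentence), weight="weight")
tokens = [G.edges[u, v]["word"] for u, v in zip(path, path[1:])]
print(tokens)  # ['北京大学']: the single cheapest edge beats any split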
Example #9
import os
from morphocut.processing.pipeline import *
from timer_cm import Timer

input_path = "/data1/mschroeder/Datasets/18-10-15_Sediment_Trap_Fred_LeMoigne/*/"
export_path = "/tmp/M138_T4_200A_cleaned.zip"

# input_path = "/data1/mschroeder/Datasets/19-02-21-FredLeMoigne/test.jpeg"
# export_path = "/tmp/M138 T7 200A 1.zip"

num_workers = None

try:
    os.remove(export_path)
except OSError:
    pass

with Timer("Total time"):
    pipeline = get_default_pipeline(input_path, export_path)
    pipeline.execute()
Example #10
        remain_message = current_message[len_token:]

        return node_id, remain_message, token_offset, token_weight

    def add_edge(self, prev_node_id, node_id, prev_node_weight):
        # Round the weight so long decimal values display cleanly,
        # e.g. 2.12343234234234324 -> 2.12
        prev_node_round_weight = round(prev_node_weight, 2)

        self.G.add_edge(prev_node_id,
                        node_id,
                        weight=prev_node_weight,
                        round_weight=prev_node_round_weight,
                        shortest_path=False)


if __name__ == "__main__":
    from timer_cm import Timer

    graph_builder = NonRecursiveAlgorithm()

    with Timer('Building DAG graph'):
        for _ in range(1):
            graph_builder.init_graph()
            graph_builder.build_graph("王小明在北京的清华大学读书。")

    graph_builder.compute_shortest_path()

    result = graph_builder.get_tokens()
    print(result)
Example #11
def accept_recommended_objects(node_id):
    """
    Accept recommended objects.

    URL parameters:
        node_id (int): ID of the node that accepts recommendations

    Request parameters:
        request_id: URL of the recommendations.
        rejected_members: Rejected members.
        last_page: Last page of accepted recommendations.
        log_data (optional): Additional data stored in the log (only if SAVE_RECOMMENDATION_STATS is enabled).

    Returns:
        Nothing.
    """

    parameters = request.get_json()

    print(parameters)

    with Timer("accept_recommended_objects") as t:

        with t.child("assemble set of rejected objects"):
            rejected_object_ids = set(
                m[1:] for m in parameters["rejected_members"] if m.startswith("o")
            )

        with t.child("assemble list of accepted objects"):
            object_ids = []
            for page in range(parameters["last_page"] + 1):
                response = _node_get_recommended_objects(
                    node_id=node_id, request_id=parameters["request_id"], page=page
                )
                page_object_ids = (
                    v["object_id"] for v in json.loads(response.data.decode())["data"]
                )
                object_ids.extend(page_object_ids)

        # Save list of objects to enable calculation of Average Precision and the like
        if app.config.get("SAVE_RECOMMENDATION_STATS", False):
            print("Saving accept-reject stats...")
            with t.child("Save accept-reject stats") as t2:
                with t2.child("calc rejected"):
                    rejected = [o in rejected_object_ids for o in object_ids]
                with t2.child("assemble DataFrame"):
                    data = pd.DataFrame({"object_id": object_ids, "rejected": rejected})

                data_fn = os.path.join(
                    app.config["PROJECT_EXPORT_DIR"],
                    "{:%Y-%m-%d-%H-%M-%S}--accept-reject--{}.csv".format(
                        datetime.now(), node_id
                    ),
                )
                with t2.child("write data"):
                    data.to_csv(data_fn, index=False)

        with t.child("filter accepted objects"):
            # Filter object_ids
            object_ids = [o for o in object_ids if o not in rejected_object_ids]

        # print(object_ids)

        # Assemble log
        log_data = {
            "n_accepted": len(object_ids),
            "n_rejected": len(rejected_object_ids),
        }

        # Store additional log data
        addlog_data = parameters.get("log_data")
        if isinstance(addlog_data, dict):
            log_data.update(addlog_data)
        elif addlog_data is not None:
            raise ValueError(
                "Parameter log_data should be a dict, got a {}!".format(
                    type(addlog_data)
                )
            )

        with database.engine.connect() as connection:
            tree = Tree(connection)
            with t.child("save accepted/rejected to database"), connection.begin():
                tree.relocate_objects(object_ids, node_id)
                tree.reject_objects(node_id, rejected_object_ids)

            log(
                connection,
                "accept_recommended_objects",
                node_id=node_id,
                data=json_dumps(log_data),
            )

        print(
            "Node {} adopted {} objects and rejected {} objects.".format(
                node_id, len(object_ids), len(rejected_object_ids)
            )
        )

        return jsonify({})
Example #12
def _get_node_members(
    node_id,
    nodes=False,
    objects=False,
    arrange_by="",
    starred_first=False,
    descending=False,
):
    with database.engine.connect() as connection, Timer("_get_node_members") as timer:
        tree = Tree(connection)

        sorted_nodes_include = "unstarred" if starred_first else None

        result = []
        if nodes:
            with timer.child("tree.get_children()"):
                result.extend(tree.get_children(node_id, include=sorted_nodes_include))
        if objects:
            with timer.child("tree.get_objects()"):
                result.extend(tree.get_objects(node_id))

        if arrange_by == "starred_sim" or starred_first:
            with timer.child("tree.get_children(starred)"):
                starred = tree.get_children(node_id, include="starred")

        if arrange_by != "":
            result = np.array(result, dtype=object)

            if arrange_by == "sim":
                with timer.child("sim"):
                    order = _arrange_by_sim(result)
            elif arrange_by == "nleaves":
                with timer.child("nleaves"):
                    order = _arrange_by_nleaves(result)
            elif arrange_by == "starred_sim":
                with timer.child("starred_sim"):
                    # If no starred members yet, arrange by distance to regular children
                    anchors = starred if len(starred) else tree.get_children(node_id)

                    order = _arrange_by_starred_sim(result, anchors)
            elif arrange_by == "interleaved":
                with timer.child("interleaved"):
                    order = _arrange_by_sim(result)
                    if len(order):
                        order0, order1 = np.array_split(order.copy(), 2)
                        order[::2] = order0
                        order[1::2] = order1[::-1]
            elif arrange_by == "random":
                with timer.child("random"):
                    order = np.random.permutation(len(result))
            else:
                warnings.warn("arrange_by={} not supported!".format(arrange_by))
                order = ()

            if descending:
                order = order[::-1]

            # ===================================================================
            # if len(order):
            #     try:
            #         assert np.all(np.bincount(order) == 1)
            #     except:
            #         print(order)
            #         print(np.bincount(order))
            #         raise
            # ===================================================================

            result = result[order].tolist()

        if starred_first:
            result = starred + result

        result = _members(tree, result)

        return result
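The "interleaved" branch above is a small presentation trick: sort members by similarity, split the ordering in half, and interleave the first half with the reversed second half so that the most and least similar members alternate. A tiny NumPy sketch of just that reshuffle, using a plain argsort as a stand-in for _arrange_by_sim:

import numpy as np

scores = np.array([0.9, 0.1, 0.5, 0.7, 0.3, 0.8])
order = np.argsort(-scores)  # stand-in for _arrange_by_sim: best first

order0, order1 = np.array_split(order.copy(), 2)
order[::2] = order0          # even slots: top of the ranking
order[1::2] = order1[::-1]   # odd slots: bottom of the ranking, reversed
print(order)                 # alternates best, worst, 2nd best, 2nd worst, ...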
Example #13
        else:
            print("error invalid strategy")

    def scan(self, file_buffer):
        scan_promise = ScanPromise()

        for engine in self.engines:
            engine_promise = self.docker_strategy.scan(engine, file_buffer,
                                                       engine.duration)
            scan_promise.engine_promises.append(engine_promise)

        return scan_promise


if __name__ == "__main__":
    with Timer('Execution time') as timer:
        # scanner
        local_docker_strategy = LocalDockerStrategy(2)
        just_run_docker_strategy = JustRunLocalDockerStrategy()

        auto_scale_strategy = AutoScaleDockerStrategy(3, 6, 1)

        ms = MultiScanner(auto_scale_strategy)

        # add tasks
        t1 = ms.scan("task 1").engine_then(
            lambda res: print("did_fulfill: " + res),
            lambda res: print("did_reject: " + res)).then(
                lambda res: print("DONE did_fulfill: " + res),
                lambda res: print("DONE did_reject: " + res))
Example #14
        p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
        (output, err) = p.communicate()
        # Print only successfully signed messages
        if output:
            print("output:", output)
    # The response time grows with the number of correct leading characters,
    # so compare against the per-character benchmark times the prefix length.
    return float(timer.elapsed) > (i * time_one_char)


# Drive the generation of the key and
# measure the total time it takes to discover the whole key
with Timer('total') as total_time:
    for i in range(len(key_array)):
        # Pass i + 1 because it multiplies the count of already-passed chars
        if breakit(''.join(key_array), i + 1):
            # Add the newly discovered char to the printed key
            set_output(key_array[i])
            continue
        # Test for '1' separately because the hex filter below
        # would also filter out the correct hex digit if it is '1'
        key_array[i] = "1"
        if breakit(''.join(key_array), i + 1):
            set_output(key_array[i])
            continue

        d = int(key_array[i], 16)
        for j in range(0, 14):