def test_multiple_requests_pre_min():
    origin = 1
    destination = 2
    travel_time = get_travel_time(CAR_TRAVEL_TIMES, origin, destination)
    station_list = {
        origin: Station(origin, list(range(20))),
        destination: Station(destination, list(range(6, 11)))
    }
    cust_requests = [[[1, 2], [1, 2], [1, 2], [1, 2]], [[1, 2]], [[1, 2]],
                     [[1, 2]]]

    for time in range(len(cust_requests)):
        curr_customer_requests = cust_requests[time]
        # print(curr_customer_requests)
        errors = update(station_list, [], [], curr_customer_requests, time)
        print(len(station_list[destination].get_en_route_list()))
        print(len(station_list[destination].get_car_list()))

    # Checks the number of enroute vehicles
    assert len(station_list[destination].get_en_route_list()) == 7

    count = 5  # the destination station starts with five cars (ids 6-10)

    # After the travel time has elapsed, make sure the destination is adding
    # the number of cars that should be arriving each minute
    more_requests = [[] for _iter in range(6)]
    for time in range(len(more_requests)):
        # time = curr time + travel_time (-1 to account for time 1 = index 0)
        update(station_list, [], [], more_requests[time],
               time + travel_time - 1)
        assert count == len(station_list[destination].get_car_list())
        try:
            count += len(cust_requests[time])
        except IndexError:
            print("No more customer requests")
def evaluate_theme(theme):
    """Calculate & insert topic-fingerprint for a given theme"""
    themes_query = build_topicscore_query(theme)
    try:
        topicscores = helpers.query(themes_query)["results"]["bindings"]
        #helpers.log(topicscores)
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed:\n" + str(e))
        return
    topichash = {}
    for topicscore in topicscores:
        score = float(topicscore["score"]["value"])
        topicuri = topicscore["topic"]["value"]
        topichash[topicuri] = topichash.get(topicuri, 0.0) + score

    # weighting: divide by the sum of all scores (so that all topicprints sum to 1)
    total = sum(topichash.values())
    weightedhash = {k: v / total for k, v in topichash.items()}

    try:
        helpers.update(build_topicprint_update_query(theme, weightedhash))
        helpers.log(
            'Calculated & inserted topic-fingerprint for theme "{}" ({} topicprints)'
            .format(theme, len(topichash)))
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed, exiting:\n" + str(e))
        return
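
To make the weighting step concrete: raw scores {A: 2.0, B: 3.0} sum to 5.0, so the weighted fingerprint becomes {A: 0.4, B: 0.6}, which sums to 1. A minimal illustration with made-up scores:

raw = {"topicA": 2.0, "topicB": 3.0}
total = sum(raw.values())
weighted = {k: v / total for k, v in raw.items()}
assert abs(sum(weighted.values()) - 1.0) < 1e-9  # fingerprint sums to 1
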
def station_test(origin, destination, cust_requests=None):
    travel_time = get_travel_time(CAR_TRAVEL_TIMES, origin, destination)

    station_list = {
        origin: Station(origin, list(range(5))),
        destination: Station(destination, list(range(6, 11)))
    }
    if not cust_requests:
        cust_requests = [[[origin, destination]] for _iter in range(5)]

    print('Customer Requests: {}'.format(cust_requests))
    for time in range(len(cust_requests)):
        curr_customer_requests = cust_requests[time]
        errors = update(station_list, [], [], curr_customer_requests, time)

    # Checks the number of enroute vehicles
    assert len(station_list[destination].get_en_route_list()) == 5
    count = len(station_list[destination].get_car_list())

    # After the travel time has elapsed, make sure the destination is adding
    # the number of cars that should be arriving each minute
    more_requests = [[] for _iter in range(6)]
    for time in range(len(more_requests)):
        # time = curr time + travel_time (-1 to account for time 1 = index 0)
        update(station_list, [], [], more_requests[time],
               time + travel_time - 1)

        assert count == len(station_list[destination].get_car_list())
        try:
            count += len(cust_requests[time])
        except IndexError:
            print("No more customer requests")
Example No. 4
def run():
    # Query sparql
    select_query = build_select_query()
    try:
        results = helpers.query(select_query)["results"]["bindings"]
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed:\n" + str(e))
        return
    contents = {result["subject"]["value"]: result["text"]["value"]
                for result in results}  # key names depend on the ?-names in the query!
    # prepare MALLET run (text from store -> files)
    mallet_input_dir = os.getenv('INPUT_PATH')
    os.makedirs(mallet_input_dir, exist_ok=True)
    fn_map = write_mallet_input(contents, mallet_input_dir)
    # Run MALLET
    try:
        mallet_command = "/start.sh"
        # check=True raises CalledProcessError on a non-zero exit status
        subprocess.run(mallet_command, check=True)
    except subprocess.CalledProcessError as e:
        helpers.log("Failed to run MALLET ...\n" + str(e))
    # Read in MALLET results from files
    mallet_output = mallet_tools.process_file(
        os.path.join(os.getenv('OUTPUT_PATH'), 'output.txt'))
    # Make a map of weights by subject (map back from uuid to subject-url)
    weights_by_subject = {fn_map[os.path.basename(path)]: topics \
        for nr, path, topics in mallet_output}
    insert_queries = build_insert_query(weights_by_subject)
    for q in insert_queries:
        try:
            helpers.log(q)
            helpers.update(q)
        except Exception as e:
            helpers.log("Querying SPARQL-endpoint failed:\n" + str(e))
Example No. 5
def __register_start_signing_flow(signflow_uri: str):
    timestamp = datetime.now()
    update_activities_command = __update_activities_template.substitute(
        graph=sparql_escape_uri(APPLICATION_GRAPH),
        signflow=sparql_escape_uri(signflow_uri),
        start_date=sparql_escape_datetime(timestamp))
    update(update_activities_command)
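
__update_activities_template is defined elsewhere; judging from the substitute() call above it is a string.Template with $graph, $signflow and $start_date placeholders. A hedged sketch (the exact predicate is an assumption):

from string import Template

__update_activities_template = Template("""
INSERT DATA {
    GRAPH $graph {
        $signflow <http://www.w3.org/ns/prov#startedAtTime> $start_date .
    }
}
""")
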
def save_mail(sender, date, subject, content):
    str_uuid = str(uuid.uuid4())
    # Escape user-supplied values so quotes or newlines cannot break the
    # query (uses escape_helpers.sparql_escape, as in the other examples)
    insert_query = """INSERT DATA
{{
GRAPH <http://mu.semte.ch/application>
{{
<http://mail.com/examples/mail/{uuid}> a <http://mail.com/Mail>;
<http://mail.com/from> {sender};
<http://mail.com/date> {date};
<http://mail.com/content> {content};
<http://mail.com/subject> {subject};
<http://mail.com/ready> "yes";
<http://mu.semte.ch/vocabularies/core/uuid> {uuid_literal}.
}}
}}""".format(uuid=str_uuid,
             sender=escape_helpers.sparql_escape(sender),
             date=escape_helpers.sparql_escape(date),
             content=escape_helpers.sparql_escape(content),
             subject=escape_helpers.sparql_escape(subject),
             uuid_literal=escape_helpers.sparql_escape(str_uuid))
    helpers.update(insert_query)
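
A usage sketch with hypothetical values:

save_mail("alice@example.com", "2021-01-01", "Hello", "Just checking in.")
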
Example No. 7
def run():
    # select_query_form = """
    # SELECT ?url WHERE {{
    #     GRAPH <{0}> {{
    #
    #     }}
    # }}
    # """

    insert_query_form = """
    INSERT DATA {{
        GRAPH <{0}> {{
            <{1}> <{2}> {3}{4}.
        }}
    }}
    """

    select_query = os.getenv('URL_QUERY')
    # select_query = select_query_form.format(os.getenv("MU_APPLICATION_GRAPH"),
    #     os.getenv("SITE_PREDICATE"))

    try:
        results = helpers.query(select_query)["results"]["bindings"]
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed:\n{}".format(e))
        return  # results would be undefined below

    for result in results:
        try:
            url = result["url"]["value"]
        except KeyError as e:
            helpers.log('SPARQL query must contain "?url"')
            continue  # skip bindings without a ?url
        # if url in urls: #check if url already has scraped text in store
        #     continue
        try:
            helpers.log("Getting URL \"{}\"".format(url))
            doc_before = scrape(url)
            if not doc_before:
                continue
            doc_lang = get_lang(doc_before)
            doc_after = cleanup(doc_before)
            insert_query = insert_query_form.format(os.getenv('MU_APPLICATION_GRAPH'),
                url, os.getenv('CONTENT_PREDICATE'),
                escape_helpers.sparql_escape(doc_after),
                '@'+doc_lang if doc_lang else '')
            try:
                helpers.update(insert_query)
            except Exception as e:
                helpers.log("Querying SPARQL-endpoint failed:\n{}".format(e))
                continue
        except Exception as e:
            helpers.log("Something went wrong ...\n{}".format(str(e)))
            continue
Example No. 8
def assign_signers(signinghub_session: SigningHubSession, signflow_uri: str,
                   signer_uris: typing.List[str]):
    #TODO: validation: ensure signflow is in draft
    mandatees_query_command = _query_mandatees_template.substitute(
        graph=sparql_escape_uri(APPLICATION_GRAPH),
        mandatees=query_result_helpers.sparql_escape_list(
            [sparql_escape_uri(uri) for uri in signer_uris]))
    mandatee_result = query(mandatees_query_command)
    mandatee_records = query_result_helpers.to_recs(mandatee_result)
    mandatee_records_set = {r["mandatee"] for r in mandatee_records}
    mandatees_not_found = [
        uri for uri in signer_uris if uri not in mandatee_records_set
    ]
    if mandatees_not_found:
        raise exceptions.ResourceNotFoundException(
            ','.join(mandatees_not_found))

    signflow_record = signing_flow.get_signing_flow(signflow_uri)
    sh_package_id = signflow_record["sh_package_id"]
    sh_users = [{
        "user_email": r["email"],
        "user_name": ' '.join(
            [name for name in [r["first_name"], r["family_name"]] if name]),
        "role": "SIGNER"
    } for r in mandatee_records]
    signinghub_session.add_users_to_workflow(sh_package_id, sh_users)

    signing_activities = [
        _build_signing_activity(signer_uri) for signer_uri in signer_uris
    ]

    signing_activities_escaped = query_result_helpers.sparql_escape_table([[
        sparql_escape_uri(r["uri"]),
        sparql_escape_string(r["id"]),
        sparql_escape_uri(r["mandatee_uri"])
    ] for r in signing_activities])

    assign_signers_command = _assign_signers_template.substitute(
        graph=sparql_escape_uri(APPLICATION_GRAPH),
        signflow=sparql_escape_uri(signflow_uri),
        signing_activities=signing_activities_escaped)
    update(assign_signers_command)

    return signing_activities
def run_batch(batch_size, graph):

    docs_result = helpers.query(construct_select_docs_query(
        batch_size, graph))['results']['bindings']
    documents = [res['doc']['value'] for res in docs_result]

    res = helpers.query(construct_list_doc_versions_query(
        documents, graph))['results']['bindings']

    # itertools.groupby only groups consecutive items, so this assumes the
    # version query returns its bindings ordered by ?doc
    res_by_doc = itertools.groupby(res, lambda res: res['doc']['value'])

    triples = []
    for doc_uri, results in res_by_doc:
        results = list(results)
        for i, res in enumerate(results):
            try:
                title = res['stuknummerVR']['value']
            except KeyError:
                title = res['title']['value']
            versioned_title = title + LATIN_ADVERBIAL_NUMERALS[int(
                res['num']['value'])]
            triples.append((
                escape_helpers.sparql_escape_uri(res['ver']['value']),
                'dct:title',
                escape_helpers.sparql_escape_string(versioned_title),
            ))
            if i > 0:
                triples.append((
                    escape_helpers.sparql_escape_uri(res['ver']['value']),
                    'pav:previousVersion',
                    escape_helpers.sparql_escape_uri(
                        results[i - 1]['ver']['value']),
                ))
    if triples:
        query = construct_insert_triples(triples, graph)
        res = helpers.update(query)

    query = construct_migrate_docs(documents, graph)
    res = helpers.update(query)

    return documents
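
LATIN_ADVERBIAL_NUMERALS is not defined in this snippet; given how it is indexed above, it is presumably a sequence mapping a 1-based version number to a Latin suffix for the title. A hedged sketch (the exact values are an assumption):

# Hypothetical lookup: the first version keeps its plain title, later
# versions get the Latin adverbial numeral appended
LATIN_ADVERBIAL_NUMERALS = ['', '', ' BIS', ' TER', ' QUATER', ' QUINQUIES']
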
Example No. 10
def repair_updater():
    if request.method == "POST":
        global brands_check, models_check, repairs_check, colors_check, device, brand, model, repair, color
        prints = {}
        captions = {}
        inputted = request.form.get("input")
        # Loops through the repair.py dictionary
        if not colors_check:
            repair = inputted
            try:
                check = REPAIR.get(device)[0].get(brand)[0].get(model)[0].get(
                    repair)[4]
            except TypeError:
                # Loops through repairs
                if not repairs_check:
                    model = inputted
                    try:
                        check = REPAIR.get(device)[0].get(brand)[0].get(
                            model)[0]
                    except TypeError:
                        # Checks models
                        if not models_check:
                            brand = inputted
                            try:
                                check = REPAIR.get(device)[0].get(brand)[0]
                            except TypeError:
                                # Checks brands
                                if not brands_check:
                                    device = inputted
                                    check = REPAIR.get(device)[0]
                                    brands_check = True
                                    return update(check, prints, 1, captions)
                            models_check = True
                            return update(check, prints, 2, captions)
                    repairs_check = True
                    return update(check, prints, 3, captions)

            colors_check = True
            return update(check, prints, 4, captions)
        else:
            color = inputted
            return redirect("/confirmation")
Example No. 11
def insert_finalized(uuid):
    q = """
        PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
        PREFIX mu: <http://mu.semte.ch/vocabularies/core/>
        PREFIX dct: <http://purl.org/dc/terms/>

        INSERT {{ 
            GRAPH <http://mu.semte.ch/application> {{
                ?s ext:finalized {datetime}.
            }}
        }} 
        WHERE {{
            GRAPH <http://mu.semte.ch/application> {{
                ?s mu:uuid {uuid} .
            }}
        }}
        """.format(uuid=escape_helpers.sparql_escape(uuid),
                   datetime=escape_helpers.sparql_escape(
                       datetime.now(timezone('Europe/Brussels')).isoformat()))
    helpers.update(q)
Example No. 12
def run():
    try:
        results = helpers.query(_SEGMENTS_SPARQL_QUERY)["results"]["bindings"]
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed:\n{}".format(e))
        return  # results would be undefined below

    #Assemble overpass query
    query = "[out:xml][timeout:400];\n("
    for i in range(len(results)):
        if results[i]["junctionnr1"]["value"] != results[i]["junctionnr2"]["value"]:
            # NB: junction numbers are SPARQL string values, so min/max below
            # compare lexicographically rather than numerically
            query += _SEGMENTS_OVERPASS_QUERY.format(min(results[i]["junctionnr1"]["value"], results[i]["junctionnr2"]["value"]),
                                            max(results[i]["junctionnr1"]["value"], results[i]["junctionnr2"]["value"]),
                                            results[i]["lat"]["value"],
                                            results[i]["long"]["value"],
                                            i)
    query += ");\nout body;"
    #Run query
    try:
        helpers.log("Sending query: \n{}".format(query))
        response = api.query(query)
        helpers.log("Done querying")
    except Exception as e:
        helpers.log("Something went wrong while querying overpass...\n{}".format(str(e)))
        return  # response would be undefined below

    #Map overpass query results back to our data
    for result in results:
        note = "{}-{}".format(min(result["junctionnr1"]["value"], result["junctionnr2"]["value"]),
                                        max(result["junctionnr1"]["value"], result["junctionnr2"]["value"]))
        try:
            segment = next((x for x in response.relations if x.tags["note"] == note), None)
            helpers.update(build_insert_query(result["junction1"]["value"], result["junction2"]["value"], segment.id))
            helpers.log("Rel for segment {} (NOTE={}): {}\n{}".format(result["n"]["value"], note, segment,
                        build_insert_query(result["junction1"]["value"], result["junction2"]["value"], segment.id)))
        except (KeyError, AttributeError) as e:
            helpers.log("Something went wrong mapping back result {} (NOTE={})".format(str(result), note))
            continue
Example No. 13
    def train_one_step(self):
        """
        Execute one update for each of the networks. Note that if no positive advantage elements
        are returned the algorithm doesn't update the actor parameters.
        Args:
            None
        Returns:
            None
        """
        # transitions is sampled from replay buffer
        transitions = self.replay.sample_batch(self.batch_size)
        state_batch = normalize(transitions.s, self.obs_rms)
        action_batch = transitions.a
        reward_batch = normalize(transitions.r, self.ret_rms)
        next_state_batch = normalize(transitions.sp, self.obs_rms)
        terminal_mask = transitions.it

        # train critic and value
        self.critics.train(state_batch, action_batch, reward_batch,
                           next_state_batch, terminal_mask, self.target_value,
                           self.gamma, self.q_normalization)
        self.value.train(state_batch, self.target_actor, self.target_critics,
                         self.action_samples)

        # note that transitions.s represents the sampled states from the memory buffer
        states, actions, advantages = self._sample_positive_advantage_actions(
            state_batch)
        if advantages.shape[0]:
            self.actor.train(states, actions, advantages, self.mode, self.beta)

        update(self.target_actor, self.actor, self.tau)
        update(self.target_critics, self.critics, self.tau)
        update(self.target_value, self.value, self.tau)

        with self.actor.train_summary_writer.as_default():
            tf.summary.scalar('actor loss',
                              self.actor.train_loss.result(),
                              step=self.step)

        with self.critics.train_summary_writer.as_default():
            tf.summary.scalar('critic loss',
                              self.critics.train_loss.result(),
                              step=self.step)

        with self.value.train_summary_writer.as_default():
            tf.summary.scalar('value loss',
                              self.value.train_loss.result(),
                              step=self.step)

        self.step += 1
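
The update(target, source, tau) calls above perform a soft (Polyak) target-network update; the function itself lives elsewhere. A minimal sketch of the usual implementation, assuming Keras-style models exposing trainable_variables:

def update(target, source, tau):
    # Blend each target variable toward its source counterpart:
    # target <- tau * source + (1 - tau) * target. tau=1.0 copies outright,
    # which is how the target networks are initialized.
    for t_var, s_var in zip(target.trainable_variables,
                            source.trainable_variables):
        t_var.assign(tau * s_var + (1.0 - tau) * t_var)
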
def run():
    # fetch each curated theme, calculate & insert topic-fingerprint
    try:
        results = helpers.query(build_themes_query())["results"]["bindings"]
        themes = [theme["theme"]["value"] for theme in results]
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed, exiting:\n" + str(e))
        return

    for theme in themes:
        try:
            evaluate_theme(theme)
        except Exception as e:
            helpers.log("theme failed" + str(e))
            continue

    # Build a dictionary of topicscores ("topicprints") per curated theme
    try:
        results = helpers.query(
            build_topicprint_select_query())["results"]["bindings"]
        helpers.log(
            'Queried topicprints for all curated themes ({} topicprints)'.
            format(len(results)))
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed, exiting:\n" + str(e))
        return
    curatedthemes = {}
    for result in results:
        theme = result["theme"]["value"]
        topic = result["topic"]["value"]
        score = float(result["score"]["value"])
        curatedthemes.setdefault(theme, {})[topic] = score

    delta = 5000  # how many to learn in one run (SPARQL queries are capped at 10k)
    try:
        count = int(
            helpers.query(build_count_events_query())["results"]["bindings"][0]
            ["total"]["value"])
    except Exception as e:
        helpers.log("Failed to determine total number of events\n" + str(e))
        return
    for offset in range(0, count, delta):
        # Build a dictionary of topicscores per event
        try:
            results = helpers.query(build_events_query(
                delta, offset))["results"]["bindings"]
            helpers.log(
                'Queried topicscores for all curated themes ({} topicscores)'.
                format(len(results)))
        except Exception as e:
            helpers.log("Querying SPARQL-endpoint failed, exiting:\n" + str(e))
            return
        events = {}
        for result in results:
            event = result["event"]["value"]
            topic = result["topic"]["value"]
            score = float(result["score"]["value"])
            events.setdefault(event, {})[topic] = score

        # Make a map by event of its topicscores and weights by theme (topicprints) multiplied
        weights_by_event_by_cat = {}
        for event, topicscores_event in events.items():
            d = {theme: sum(multiply_dicts(topicscores_event, topicscores_theme).values()) \
                for theme, topicscores_theme in curatedthemes.items()}
            weights_by_event_by_cat[event] = d

        # Add learned themes to graph, event by event
        # TODO do this in previous loop without building dict for all events (less stack)
        for event, themes in weights_by_event_by_cat.items():
            try:
                helpers.update(build_learnedthemes_update_query(event, themes))
                helpers.log(
                    'Learned & inserted themes for event "{}" ({} themes)'.
                    format(event, len(themes)))
            except Exception as e:
                helpers.log("Querying SPARQL-endpoint failed:\n" + str(e))
        graph.add((
            topic_uri,  # type MalletTopic
            rdflib.namespace.RDF["type"],
            voc_ns["MalletTopic"]))
        graph.add((
            topic_uri,  # Topic id
            voc_ns["hasTopicId"],
            rdflib.term.Literal(topic_id)))
        graph.add((
            topic_uri,  # Topic string
            voc_ns["hasTopicString"],
            rdflib.term.Literal(' '.join(keywords))))

    serialized_triples = graph.serialize(format='nt')
    return """
    INSERT DATA {{
        GRAPH <{0}> {{
            {1}
        }}
    }}
    """.format(os.getenv('MU_APPLICATION_GRAPH'), serialized_triples)


if __name__ == "__main__":
    path = os.path.join(os.getenv('TRAIN_PATH'), "topics/keys.txt")
    query = trainingset_topics_query(path)
    print("Inserting query:", query)
    try:
        helpers.update(query)
    except Exception as e:
        helpers.log("Querying SPARQL-endpoint failed:\n" + str(e))
def prepare_signing_flow(signinghub_session: SigningHubSession,
                         signflow_uri: str, piece_uris: typing.List[str]):
    if len(piece_uris) == 0:
        raise exceptions.InvalidArgumentException(
            "No piece to add specified.")
    if len(piece_uris) > 1:
        raise exceptions.InvalidArgumentException(
            "Signflow can only add 1 piece.")
    piece_uri = piece_uris[0]

    pieces = signing_flow.get_pieces(signflow_uri)
    piece = query_result_helpers.ensure_1(pieces)
    if piece["uri"] != piece_uri:
        raise exceptions.InvalidStateException(
            f"Piece {piece_uri} is not associated to signflow {signflow_uri}.")

    get_file_query_string = _query_file_template.substitute(
        graph=sparql_escape_uri(APPLICATION_GRAPH),
        piece=sparql_escape_uri(piece_uri),
    )
    file_result = query(get_file_query_string)
    file_records = query_result_helpers.to_recs(file_result)
    file_record = query_result_helpers.ensure_1(file_records)
    piece_uri = file_record["piece"]
    file_name = file_record["piece_name"] + "." + file_record["file_extension"]
    file_path = file_record["file_path"]

    # mu.semte.ch convention: share:// URIs correspond to files under /share/
    file_path = file_path.replace("share://", "/share/")
    with open(file_path, "rb") as f:
        file_content = f.read()

    preparation_activity_id = generate_uuid()
    preparation_activity_uri = uri.resource.preparation_activity(
        preparation_activity_id)

    signinghub_package = signinghub_session.add_package({
        # package_name: "New Package", # Defaults to "Undefined"
        "workflow_mode":
        "ONLY_OTHERS"  # OVRB staff who prepare the flows will never sign
    })
    signinghub_package_id = signinghub_package["package_id"]

    signinghub_document = signinghub_session.upload_document(
        signinghub_package_id,
        file_content,
        file_name,
        SH_SOURCE,
        convert_document=False)
    signinghub_document_id = signinghub_document["documentid"]

    sh_document_muid = generate_uuid()
    signinghub_document_uri = uri.resource.signinghub_document(
        signinghub_package_id, signinghub_document_id)
    query_string = _update_template.substitute(
        graph=sparql_escape_uri(APPLICATION_GRAPH),
        signflow=sparql_escape_uri(signflow_uri),
        preparation_activity=sparql_escape_uri(preparation_activity_uri),
        preparation_activity_id=sparql_escape_string(preparation_activity_id),
        piece=sparql_escape_uri(piece_uri),
        sh_document=sparql_escape_uri(signinghub_document_uri),
        sh_document_muid=sparql_escape_string(sh_document_muid),
        sh_document_id=sparql_escape_string(str(signinghub_document_id)),
        sh_package_id=sparql_escape_string(str(signinghub_package_id)),
    )
    update(query_string)
Example No. 17
    def __init__(self,
                 action_dim,
                 state_dim,
                 buffer_size=1000000,
                 action_samples=10,
                 mode='linear',
                 beta=1,
                 tau=5e-3,
                 q_normalization=0.01,
                 gamma=0.99,
                 normalize_obs=False,
                 normalize_rewards=False,
                 batch_size=64,
                 actor='AIQN',
                 *args,
                 **kwargs):
        """
        Agent class to generate a stochastic policy.
        Args:
            action_dim (int): action dimension
            state_dim (int): state dimension
            buffer_size (int): how much memory is allocated to the ReplayMemoryClass
            action_samples (int): originally labelled K in the paper, represents how many
                actions should be sampled from the memory buffer
            mode (string): selects the distribution used to weight sampled
                actions (admittedly a poorly named variable)
            beta (float): value used in boltzmann distribution
            tau (float): update rate parameter
            batch_size (int): batch size
            q_normalization (float): q value normalization rate
            gamma (float): value used in critic training
            normalize_obs (boolean): boolean to indicate that you want to normalize
                observations
            normalize_rewards (boolean): boolean to indicate that you want to normalize
                return values (usually done for numerical stability)
            actor (string): string indicating the type of actor to use
        """
        self.action_dim = action_dim
        self.state_dim = state_dim
        self.buffer_size = buffer_size
        self.gamma = gamma
        self.action_samples = action_samples
        self.mode = mode
        self.beta = beta
        self.tau = tau
        self.batch_size = batch_size
        self.step = 0

        # normalization
        self.normalize_observations = normalize_obs
        self.q_normalization = q_normalization
        self.normalize_rewards = normalize_rewards

        # Actor
        # type of actor being used
        if actor == 'IQN':
            self.actor = StochasticActor(self.state_dim, self.action_dim,
                                         'source')
            self.target_actor = StochasticActor(self.state_dim,
                                                self.action_dim, 'target')
        elif actor == 'AIQN':
            self.actor = AutoRegressiveStochasticActor(self.state_dim,
                                                       self.action_dim)
            self.target_actor = AutoRegressiveStochasticActor(
                self.state_dim, self.action_dim)
        else:
            raise ValueError("Unsupported actor type: {}".format(actor))

        if self.normalize_observations:
            self.obs_rms = RunningMeanStd(shape=self.state_dim)
        else:
            self.obs_rms = None

        if self.normalize_rewards:
            self.ret_rms = RunningMeanStd(shape=1)
            self.ret = 0
        else:
            self.ret_rms = None

        # initialize trainable variables
        self.actor(tf.zeros([self.batch_size, self.state_dim]),
                   tf.zeros([self.batch_size, self.action_dim]))
        self.target_actor(tf.zeros([self.batch_size, self.state_dim]),
                          tf.zeros([self.batch_size, self.action_dim]))

        # Critic
        self.critics = Critic(self.state_dim, self.action_dim, 'source')
        self.target_critics = Critic(self.state_dim, self.action_dim, 'target')

        # initialize trainable variables for critics
        self.critics(tf.zeros([self.batch_size, self.state_dim]),
                     tf.zeros([self.batch_size, self.action_dim]))
        self.target_critics(tf.zeros([self.batch_size, self.state_dim]),
                            tf.zeros([self.batch_size, self.action_dim]))

        # Value
        self.value = Value(self.state_dim, 'source')
        self.target_value = Value(self.state_dim, 'target')

        # initialize value training variables
        self.value(tf.zeros([self.batch_size, self.state_dim]))
        self.target_value(tf.zeros([self.batch_size, self.state_dim]))

        # initialize the target networks.
        update(self.target_actor, self.actor, 1.0)
        update(self.target_critics, self.critics, 1.0)
        update(self.target_value, self.value, 1.0)

        self.replay = ReplayBuffer(self.state_dim, self.action_dim,
                                   self.buffer_size)
        self.action_sampler = ActionSampler(self.actor.action_dim)
Example No. 18
def sensor():
    """ Function for test purposes. """
    helpers.update()
Example No. 19
from flask import Flask, render_template, request
import helpers
from cs50 import SQL
from apscheduler.schedulers.background import BackgroundScheduler

db = SQL("sqlite:///identifier.sqlite")
app = Flask(__name__)
app.config["TEMPLATES_AUTO_RELOAD"] = True

helpers.update()

if __name__ == '__main__':
    # NB: app.run() blocks here, so the scheduler and routes defined below
    # are only set up when this module is imported, not when run directly
    app.run()


# Uses a sensor to update the database using API calls every 24h
def sensor():
    """ Function for test purposes. """
    helpers.update()


sched = BackgroundScheduler(daemon=True)
sched.add_job(sensor, 'interval', minutes=1440)
sched.start()


@app.route('/')
def index():
    average_volatility = round(
        db.execute("SELECT AVG(Volatility) FROM Volatility")[0]
        ["AVG(Volatility)"])
Example No. 20
    def make_move(self, request):
        """play the game"""
        # retrieve game entity by urlsafe key
        game = get_by_urlsafe(request.urlsafe_game_key, Game)
        if not game:
            raise endpoints.NotFoundException('No such game')

        # check if game is already over
        if game.game_status != 'ongoing':
            raise endpoints.BadRequestException('Game is already over')

        # if the user is guessing an entire word
        if len(request.guess) > 1 and request.guess.isalpha():
            # update moves left and guesses made
            game = update(game)
            # check if the user has guessed correctly
            if request.guess == reveal_answer(game.answer):
                return game_won(game, request.guess)
            else:
                # if the user guessed wrong, check if they have moves left
                if moves_gone(game.moves_left):
                    return game_over(game, request.guess, 'Incorrect! ')
                else:
                    return wrong_guess(game, request.guess)
        # if user's word guess is malformed
        elif len(request.guess) > 1 and not request.guess.isalpha():
            raise endpoints.BadRequestException('Word has to be all letters')

        # if the user is guessing letter by letter
        if len(request.guess) == 1 and not request.guess.isalpha():
            raise endpoints.BadRequestException('Provide a single letter!')

        guess = request.guess.lower()

        # update moves left and guesses made
        game = update(game)

        # iterate through entire answer to see if user guessed correctly
        correct = False
        for letter in game.answer:
            if letter[0] == guess and not letter[1]:
                letter[1] = True
                correct = True

        # if the user has guessed correctly
        if correct:
            # differentiate between cases where they won the game or not
            for letter in game.answer:
                if not letter[1]:  # they have not won the game
                    # check if moves exhausted
                    if moves_gone(game.moves_left):
                        return game_over(game, guess, 'Correct, however ...')
                    else:
                        return correct_guess(game, guess)
            # user has won the game
            return game_won(game, guess)
        # user has not guessed correctly
        else:
            # check if they have run out of moves
            if moves_gone(game.moves_left):
                return game_over(game, guess, 'Wrong! ')
            else:
                return wrong_guess(game, guess)
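
moves_gone and the other helpers used above (update, reveal_answer, game_won, game_over, wrong_guess, correct_guess) are defined elsewhere in the project; only their contracts matter to the control flow here. A plausible sketch of the simplest one (an assumption, not the project's code):

def moves_gone(moves_left):
    # Hypothetical helper: the game is out of moves once none remain
    return moves_left <= 0
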