Example #1
def get_tickets():
    """Endpoint for getting all ticket data for one customer, by provided customer ID.
    ---
    parameters:
      - name: customer_id
        in: query
        type: integer
        required: true
    definitions:
      TicketList:
        type: array
        items:
          $ref: '#/definitions/Ticket'
      Ticket:
        type: object
        properties:
          ticket_id:
            type: integer
          customer_id:
            type: integer
          stake:
            type: number
          payout:
            type: number
    responses:
      200:
        description: Array of ticket objects
        schema:
          $ref: '#/definitions/TicketList'
    """
    customer_id = request.args.get('customer_id', 0, type=int)

    tickets = db.query_by_customer_id(customer_id)

    return jsonify({"data": to_json(tickets)})
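A quick usage sketch, not part of the original example: assuming the handler above is registered on a Flask app, e.g. at a /tickets route, and that the requests package is available, a client call could look like the following. The host, port, route path, and customer_id value are assumptions for illustration.

# Hedged client-side sketch; the route path and host are assumptions.
import requests

resp = requests.get('http://localhost:5000/tickets', params={'customer_id': 42})
resp.raise_for_status()
tickets = resp.json()['data']  # the endpoint wraps the ticket list under "data"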
Example #2
def get_ticket(ticket_id: int) -> dict:
    tickets = db.query_by_ticket_id(ticket_id)

    if len(tickets) == 0:
        raise ValueError('ticket with provided ticket_id not found')

    return to_json(tickets[0])
Example #3
def respond():
    train_request = {
        "selected": True,
        "model": {
            "arch": model.to_json(),
            "weights": model.get_weights(),
            "loss": model.loss,
            "optimizer": model.optimizer,
            "metrics_names": ['accuracy'],
        },
        "version": version,
        "hparam": {
            "epochs": 1,
            "batch_size": 32,
        },
    }

    reject_message = {"selected": False}

    # Answer every ready client exactly once: selected clients receive the
    # training payload, all others receive the rejection message.
    ready_clients_copy = ready_clients.copy()
    while len(ready_clients_copy) > 0:
        request = responder.recv_pyobj()
        client_id = request["client_id"]
        if client_id in selected_clients:
            responder.send_pyobj(train_request)
        else:
            responder.send_pyobj(reject_message)

        ready_clients_copy.remove(client_id)
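For context, a minimal sketch of the client side this responder pairs with, assuming pyzmq with a REQ/REP socket pattern; the connect address and the client_id value are assumptions, not taken from the original code.

# Hedged sketch of a matching client (assumes pyzmq; address is an assumption).
import zmq

context = zmq.Context()
requester = context.socket(zmq.REQ)
requester.connect('tcp://localhost:5555')

# Announce readiness with our client id, then wait for the server's decision.
requester.send_pyobj({"client_id": "client-1"})
reply = requester.recv_pyobj()

if reply["selected"]:
    # The training payload carries the architecture, weights and hyper-parameters.
    arch = reply["model"]["arch"]
    weights = reply["model"]["weights"]
    epochs = reply["hparam"]["epochs"]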
Example #4
def save_model(model):
    json_arch = model.to_json()
    json_path = os.path.join("model", "architecture.json")
    with open(json_path, 'w') as arch_file:
        arch_file.write(json_arch)

    model_path = os.path.join("model", "weights.h5")
    model.save_weights(model_path)
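None of these examples show the loading counterpart; a hedged sketch, assuming the files written by save_model above and a TensorFlow/Keras installation (the compile arguments are placeholders, not from the original code):

# Hedged sketch: rebuild the model from the saved architecture JSON and weights.
import os
from tensorflow.keras.models import model_from_json

def load_model():
    json_path = os.path.join("model", "architecture.json")
    with open(json_path) as arch_file:
        model = model_from_json(arch_file.read())

    model.load_weights(os.path.join("model", "weights.h5"))
    # Compile before training or evaluating; loss/optimizer here are assumptions.
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model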
Example #5
def save_model(self, name, model):
    logging.info("save_model - saving model...")
    # serialize model to JSON
    model_json = model.to_json()
    with open("classifiers/" + name + ".json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("classifiers/" + name + "_weights.h5")
    logging.info("save_model - saved model to disk")
Example #6
def save_model(model, history):
	np.save('Result/'+fileName+'_histo', history)
	print(fileName+'_histo.npy has been saved!')
	#serialize model to json
	model_json = model.to_json()
	with open('Result/'+fileName+'.json', 'w') as file:
		file.write(model_json)
	print(fileName+'.json has been saved!')
	#model.save_weights(fileName+'.h5')
	print('Model saved!')
Example #7
def save_model(model):
    ###############################################################################
    # This function saves the weights and the model
    #
    # Input:
    #   model: This is the model that will be saved
    #
    # Output:
    #   None
    ###############################################################################

    model_json = model.to_json()

    with open("../resources/model.json", "w") as json_file:
        json_file.write(model_json)

    model.save_weights("../resources/weights.h5")
Example #8
def saveModel(model, name):
	json_string = model.to_json()
	with open('./model/' + name + '_model.json', 'w') as json_file:
		json_file.write(json_string)
	model.save_weights('./model/' + name + '_model_weights.h5', overwrite=True)
	print("Model saved.")
Example #9
def get_customer_tickets(customer_id: int) -> list:
    tickets = db.query_by_customer_id(customer_id)

    return to_json(tickets)
Example #10
#!/usr/bin/python
import time
import six.moves.cPickle

import model
import yelp_reader

model, tokeniser, dictionarySize = model.train(yelp_reader, oneHot=True, oneHotAveraged=True, contextHashes=False)

jsonModel = model.to_json()
open('model.json', 'w').write(jsonModel)
open('model-dictionary-size.dat', 'w').write(str(dictionarySize))
six.moves.cPickle.dump(tokeniser, open("tokeniser.pkl", "wb"))

model.save_weights('model-' + str(time.time()) + '.h5')
Example #11
def spark_application_discovery_by_workspace(workspace_config,
                                             workspace_context):
    if time.time() - workspace_context.get(
            'application_discovery_time',
            0) >= workspace_config.spark_application_discovery_interval_sec:
        metrics.application_discovery_count.labels(
            workspace_name=workspace_config.workspace_name).inc()
        try:
            print('spark application discovery...')
            bearer_token = workspace_context.get('bearer_token')
            if not bearer_token:
                return
            synapse_host = workspace_config.synapse_host()
            synapse_api_version = workspace_config.synapse_api_version
            workspace_name = workspace_config.workspace_name
            with metrics.application_discovery_duration_histogram.labels(
                    workspace_name).time():
                application_list = get_spark_applications(
                    synapse_host, synapse_api_version, bearer_token)
            workspace_scrape_configs = generate_spark_application_scrape_configs(
                application_list, workspace_name, synapse_host,
                synapse_api_version)

            if workspace_config.service_discovery_output_folder:
                folder = os.path.join(
                    workspace_config.service_discovery_output_folder,
                    f'workspace/{workspace_name}/')
                write_string_to_path(folder, 'bearer_token', bearer_token)
                write_string_to_path(folder, 'application_discovery.json',
                                     model.to_json(workspace_scrape_configs))

            workspace_context[
                'workspace_scrape_configs'] = workspace_scrape_configs
            workspace_context['application_list'] = application_list
            workspace_context['application_discovery_time'] = int(time.time())
            print(
                f'spark application discovery, found targets: {len(application_list)}.'
            )

            # spark pool metrics
            spark_pool_applications = {}
            for app in application_list:
                spark_pool_applications.setdefault(app.spark_pool_name, 0)
                spark_pool_applications[app.spark_pool_name] += 1
                print(
                    f'{app.spark_pool_name}/sessions/{app.livy_id}/applications/{app.spark_application_id}\tstate:{app.state}'
                )

            for spark_pool_name, application_count in spark_pool_applications.items(
            ):
                metrics.application_discovery_target.labels(
                    workspace_name=workspace_name,
                    spark_pool_name=spark_pool_name).set(application_count)

            # spark application metrics
            metrics.application_info._metrics = {}
            metrics.application_submit_time._metrics = {}
            metrics.application_queue_duration._metrics = {}
            metrics.application_running_duration._metrics = {}
            for app in application_list:
                app_base_labels = dict(workspace_name=workspace_name,
                                       spark_pool_name=app.spark_pool_name,
                                       name=app.name,
                                       application_id=app.spark_application_id,
                                       livy_id=app.livy_id)
                metrics.application_info.labels(
                    subscription_id=workspace_config.subscription_id,
                    resource_group=workspace_config.resource_group,
                    tenant_id=workspace_config.tenant_id,
                    **app_base_labels).set(1)
                metrics.application_submit_time.labels(**app_base_labels).set(
                    app.submit_time_seconds)
                metrics.application_queue_duration.labels(
                    **app_base_labels).set(app.queued_duration_seconds)
                metrics.application_running_duration.labels(
                    **app_base_labels).set(app.running_duration_seconds)
        except Exception:
            metrics.application_discovery_failed_count.labels(
                workspace_name=workspace_config.workspace_name).inc()
            traceback.print_exc()
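A hedged sketch of how this discovery function might be driven: a polling loop that passes each workspace its own mutable context dict, which the function reads the bearer token from (the code above returns early until some other step has placed one there) and uses to cache the scrape configs and last discovery time. The names workspace_configs and poll_interval_sec are assumptions for illustration.

# Hedged driver sketch; workspace_configs and poll_interval_sec are assumed names.
import time

def run_discovery_loop(workspace_configs, poll_interval_sec=30):
    # One context per workspace; spark_application_discovery_by_workspace
    # reads and updates it between calls.
    contexts = {cfg.workspace_name: {} for cfg in workspace_configs}
    while True:
        for cfg in workspace_configs:
            spark_application_discovery_by_workspace(cfg, contexts[cfg.workspace_name])
        time.sleep(poll_interval_sec)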