def create_file():
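    # Register a new dataset: validate the request, fetch the file at `url`
    # with a CSV-specific or generic downloader (chosen by the `type` query
    # parameter), and return the URI where it can be read.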
    service_type = request.args.get(Constants.TYPE_FIELD_NAME)
    url = request.json[Constants.URL_FIELD_NAME]
    filename = request.json[Constants.FILENAME_FIELD_NAME]

    request_errors = analyse_request_errors(request_validator, filename, url)

    if request_errors is not None:
        return request_errors

    if service_type == Constants.DATASET_CSV_TYPE:
        file_downloader = Csv(database_connector, metadata_creator)
    else:
        file_downloader = Generic(database_connector, metadata_creator)

    database = Dataset(file_downloader)

    database.add_file(url, filename)

    return (
        jsonify({
            Constants.MESSAGE_RESULT:
            f'{Constants.MICROSERVICE_URI_GET}'
            f'{request.json[Constants.FILENAME_FIELD_NAME]}'
            f'{Constants.MICROSERVICE_URI_GET_PARAMS}'
        }),
        Constants.HTTP_STATUS_CODE_SUCCESS_CREATED,
    )
Example #2
def read_files(filename):
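    # Read a page of records from the stored file; `limit`, `skip` and
    # `query` are taken from the query string, with defaults and bounds
    # applied below.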
    file_downloader = Csv(database_connector, metadata_creator)
    database = Dataset(file_downloader)

    limit = Constants.LIMIT_DEFAULT_VALUE
    skip = Constants.SKIP_DEFAULT_VALUE
    query = Constants.QUERY_DEFAULT_VALUE

    request_params = request.args.to_dict()
    if Constants.LIMIT_PARAM_NAME in request_params:
        limit = int(request_params[Constants.LIMIT_PARAM_NAME])
        if limit > Constants.LIMIT_PARAM_MAX:
            limit = Constants.LIMIT_PARAM_MAX

    if Constants.SKIP_PARAM_NAME in request_params:
        skip = int(request_params[Constants.SKIP_PARAM_NAME])
        if skip < Constants.SKIP_PARAM_MIN:
            skip = Constants.SKIP_PARAM_MIN

    if Constants.QUERY_PARAM_NAME in request_params:
        query = json.loads(request_params[Constants.QUERY_PARAM_NAME])

    file_result = database.read_file(filename, skip, limit, query)

    return jsonify({Constants.MESSAGE_RESULT: file_result}), \
           Constants.HTTP_STATUS_CODE_SUCCESS
Example #3
def read_files_descriptor():
    file_downloader = Csv(database_connector)
    database = Dataset(database_connector, file_downloader)

    return jsonify({
        MESSAGE_RESULT: database.get_files(request.args.get("type"))
    }), HTTP_STATUS_CODE_SUCCESS
Example #4
def read_files_descriptor():
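    # Return metadata for the stored files of the requested service type.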
    service_type = request.args.get(Constants.TYPE_FIELD_NAME)

    file_downloader = Csv(database_connector, metadata_creator)
    database = Dataset(file_downloader)

    return jsonify(
        {Constants.MESSAGE_RESULT: database.get_metadata_files(
            service_type)}), \
           Constants.HTTP_STATUS_CODE_SUCCESS
Example #5
def delete_file(filename):
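    # Delete the stored file, using the CSV-specific or generic downloader
    # selected by the `type` query parameter.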
    service_type = request.args.get(Constants.TYPE_FIELD_NAME)

    if service_type == Constants.DATASET_CSV_TYPE:
        file_downloader = Csv(database_connector, metadata_creator)
    else:
        file_downloader = Generic(database_connector, metadata_creator)

    database = Dataset(file_downloader)
    database.delete_file(filename)

    return jsonify(
        {Constants.MESSAGE_RESULT:
             Constants.MESSAGE_DELETED_FILE}), \
           Constants.HTTP_STATUS_CODE_SUCCESS
Example #6
def delete_file(filename):
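    # Submit the deletion to a thread pool so the response returns without
    # waiting for the delete to finish.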
    file_downloader = Csv(database_connector)
    database = Dataset(database_connector, file_downloader)

    thread_pool = ThreadPoolExecutor()
    thread_pool.submit(database.delete_file, filename)

    return jsonify({MESSAGE_RESULT:
                    MESSAGE_DELETED_FILE}), HTTP_STATUS_CODE_SUCCESS
Example #7
def read_files(filename):
    file_downloader = Csv(database_connector)
    database = Dataset(database_connector, file_downloader)

    limit, skip, query = 20, 0, {}

    request_params = request.args.to_dict()
    if "limit" in request_params:
        if int(request_params["limit"]) < PAGINATE_FILE_LIMIT:
            limit = int(request_params["limit"])
    if "skip" in request_params:
        if int(request_params["skip"]) >= 0:
            skip = int(request_params["skip"])
    if "query" in request_params:
        query = request_params["query"]

    file_result = database.read_file(filename, skip, limit, query)

    return jsonify({MESSAGE_RESULT: file_result}), HTTP_STATUS_CODE_SUCCESS
Example #8
def create_file():
    url = request.json[URL_FIELD_NAME]
    filename = request.json[FILENAME]

    request_errors = analyse_request_errors(request_validator, filename, url)

    if request_errors is not None:
        return request_errors

    file_downloader = Csv(database_connector)
    database = Dataset(database_connector, file_downloader)

    database.add_file(url, filename)

    return (
        jsonify({
            MESSAGE_RESULT:
            f'{MICROSERVICE_URI_GET}{request.json[FILENAME]}'
            f'{MICROSERVICE_URI_GET_PARAMS}'
        }),
        HTTP_STATUS_CODE_SUCCESS_CREATED,
    )
Example #9
import csv
from datetime import datetime
from database import session, Dataset, PowerProduction, BatteryPercentageState, PowerFeedIn, PowerSelfConsumption,\
    PowerPurchased, PowerConsumption

dataset = Dataset()

# session.query(Dataset).filter_by(name='josh').first().name

filepath = 'b9c5abaa-28bd-4c7b-9632-01ca823fb585.csv'
with open(filepath) as fp:
    line = fp.readline()
    cnt = 1
    while line:
        sp = line.strip().split(",")
        if sp[1] == '"timeseriestimestamp"':
            # Header row: skip it.
            print('starting')
        else:
            name = sp[0].replace('"', '')
            timestamp = datetime.strptime(sp[1].replace('"', ''), '%Y-%m-%d %H:%M:%S')
            seriesname = sp[2].replace('"', '')
            seriesvalue = sp[3].replace('"', '')

            # Reuse the Dataset row if it already exists, otherwise create it.
            n = session.query(Dataset).filter_by(name=name).first()

            if n:
                print(n.name)
            else:
                n = Dataset()
                n.name = name
                session.add(n)

        # Advance to the next line; without this the loop never terminates.
        cnt += 1
        line = fp.readline()