Code example #1
 def insert_one(self, record):
     try:
         db_session.add(record)
         db_session.commit()
     except Exception:
         db_session.rollback()
         raise
Code example #2
 def insert_many_from_csv(self, records):
     try:
         db_session.bulk_insert_mappings(SensorData, records)
         db_session.commit()
     except Exception:
         db_session.rollback()
         raise
Code example #3
 def insert_many(self, records):
     try:
         db_session.bulk_save_objects(records)
         db_session.commit()
     except Exception:
         db_session.rollback()
         raise
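
Examples #1-#3 repeat the same commit-or-rollback boilerplate around every write. A minimal sketch of how that pattern can be factored into a reusable context manager, assuming db_session is a SQLAlchemy scoped_session (the session_scope name is illustrative, not taken from the examples above):

from contextlib import contextmanager

@contextmanager
def session_scope():
    """Commit on success; roll back and re-raise on any error."""
    try:
        yield db_session
        db_session.commit()
    except Exception:
        db_session.rollback()
        raise

# Usage: the try/commit/rollback block collapses to a with statement.
def insert_one(record):
    with session_scope() as session:
        session.add(record)
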
Code example #4
File: event_reminders.py Project: baasman/speech-org
def event_reminder_complete(id):
    try:
        r = db_session.query(ReminderEvents).filter(
            ReminderEvents.id == id).first()
        r.completed_since_last = True
        db_session.commit()
        return statement(render_template('reminder_completed'))
    except Exception:
        db_session.rollback()
        return statement('Unable to find reminder {}'.format(id))
Code example #5
File: event_reminders.py Project: baasman/speech-org
def read_priority_reminders(id):
    try:
        r = db_session.query(ReminderEvents).filter(
            ReminderEvents.id == id).first()
        db_session.delete(r)
        db_session.commit()
        return statement(render_template('reminder_removed'))
    except Exception:
        db_session.rollback()
        return statement('Unable to find reminder {}'.format(id))
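
Examples #4 and #5 lean on a broad except to handle the case where first() finds no row, which also swallows unrelated errors. A sketch of the same handler with an explicit None check instead, using the same ReminderEvents model:

def event_reminder_complete(id):
    r = db_session.query(ReminderEvents).filter(
        ReminderEvents.id == id).first()
    if r is None:  # first() returns None when no row matches
        return statement('Unable to find reminder {}'.format(id))
    r.completed_since_last = True
    db_session.commit()
    return statement(render_template('reminder_completed'))
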
Code example #6
def migrate():
  batch = []
  batch_size = 70000
  print(f'TOTAL {db_session.query(Output).count()}')
  output_total = db_session.query(Output).filter(text("(data->'storage') is null")).filter(Output.is_pending==False).count()
  start = time.time()
  start_batch = start
  print(f'without storage {output_total}')

  outputs = (db_session.query(Output)
             .filter(text("(data->'storage') is null"))
             .filter(Output.is_pending==False)
             .order_by(Output.created_date.desc())
            #  .limit(10000)
            #  .yield_per(batch_size)
             .enable_eagerloads(False)
  )
  updated = 0
  now = time.time()
  for idx, o in enumerate(outputs):
      # print(o)
      if o.data is None:
        o.data = {}
      # if '/' in o.batch.ci_commit.hexsha:
      #   continue

      try:
        storage = get_storage(o)
      except Exception as e:
        print("error", o, e)
        continue
      if storage is None:
        continue
      updated += 1
      batch.append({
        "id": o.id,
        "data": {
          **o.data,
          "storage": storage,
        }, 
      })
      if idx and idx % batch_size == 0:
          print(o)
          now = time.time()
          print(f"{idx/output_total:.1%} [{batch_size/(now - start_batch):.1f}/s] [est. total left {(now - start_batch) * ((output_total-idx)/batch_size) / 3600:.2f}h] [elapsed time: {now - start:.1f}s]")
          start_batch = now
          db_session.bulk_update_mappings(Output, batch)
          db_session.flush()
          batch = []
          # break

  print(f"DONE, now committing configurations [elapsed time: {now - start:.1f}s]")
  db_session.bulk_update_mappings(Output, batch)
  db_session.flush()
  db_session.commit()
  return updated
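
Example #6 (and the migrate variants below) interleave the same chunking logic with their per-row work. A minimal sketch of that batching pattern pulled out into a helper, assuming a SQLAlchemy session and mapped class; the flush_in_batches name is illustrative:

def flush_in_batches(session, model, mappings, batch_size=500):
    """Emit bulk UPDATEs in fixed-size chunks, flushing after each chunk."""
    batch = []
    for mapping in mappings:
        batch.append(mapping)
        if len(batch) >= batch_size:
            session.bulk_update_mappings(model, batch)
            session.flush()
            batch = []
    if batch:  # don't forget the final, partially filled chunk
        session.bulk_update_mappings(model, batch)
        session.flush()
    session.commit()
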
Code example #7
def migrate():
    batch = []
    batch_size = 500
    # batch_size = 1_000
    # batch_size = 70_000
    print(f'TOTAL {db_session.query(Output).count()}')
    output_total = db_session.query(Output).filter(
        text("output_dir_override is null")).count()
    start = time.time()
    start_batch = start
    print(f'without overrides: {output_total}')

    outputs = (
        db_session.query(Output).filter(
            text("output_dir_override is null")).order_by(
                Output.created_date.desc()).limit(100000)
        #  .yield_per(batch_size)
        #  .enable_eagerloads(False)
    )
    updated = 0
    now = time.time()
    for idx, o in enumerate(outputs):
        # print(f"{o.output_dir_override} => {o.output_dir}")
        # o.output_dir_override = str(o.output_dir)
        try:
            output_dir = o.output_dir
        except Exception as e:
            if not ("poc" in o.batch.ci_commit.project.id
                    or "arthur" in o.batch.ci_commit.project.id):
                print(
                    f"WTF {o.batch.ci_commit} in {o.batch.ci_commit.project}")
                print(e)
            continue
        updated += 1
        batch.append({
            "id": o.id,
            "output_dir_override": str(output_dir),
        })
        if idx and idx % batch_size == 0:
            print(o)
            now = time.time()
            print(
                f"{idx/output_total:.1%} [{batch_size/(now - start_batch):.1f}/s] [est. total left {(now - start_batch) * ((output_total-idx)/batch_size) / 3600:.2f}h] [elapsed time: {now - start:.1f}s]"
            )
            start_batch = now
            db_session.bulk_update_mappings(Output, batch)
            db_session.flush()
            batch = []
            # break

    print(
        f"DONE, now committing configurations [elapsed time: {now - start:.1f}s]"
    )
    db_session.bulk_update_mappings(Output, batch)
    db_session.flush()
    db_session.commit()
    return updated
Code example #8
File: tasks.py Project: isabella232/matching-tool
def write_match_log(db_session, match_job_id, upload_id, match_start_at,
                    match_complete_at, match_status, match_runtime):
    db_object = MatchLog(id=match_job_id,
                         upload_id=upload_id,
                         match_start_timestamp=match_start_at,
                         match_complete_timestamp=match_complete_at,
                         match_status=match_status,
                         runtime=match_runtime)
    db_session.add(db_object)
    db_session.commit()
Code example #9
	def execute(self, args, diagnostics):
		print("Setting kernel test data to 0")
		diagnostic = Diagnostic()
		diagnostic.data = 0
		diagnostics["counter"] = diagnostic

		range_arg = RangeArgument(range(0, 15), 0)
		args["range"] = range_arg

		db_session.add(diagnostic)
		db_session.add(range_arg)
		db_session.commit()
Code example #10
File: app.py Project: ToxicEnvelope/TodoApp-BackEnd
def update_todo_task_by_id(task_id):
    if request.method == 'PUT':
        session_data = get_jwt_decode_data()
        optional_user = Users.query.filter_by(id=session_data['sub']).first()
        if optional_user is None:
            response = {
                "status": Fail,
                "timestamp": stamp(),
                "reason": "no such user"
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        todo_obj = Todos.query.filter_by(user_id=optional_user.id, id=task_id).first()
        if not todo_obj:
            response = {
                'timestamp': stamp(),
                'status': Fail,
                'data': {}
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        payload = request.get_json()
        task_description = payload['taskDescription']
        task_status = payload['isCompleted']
        if task_status:
            todo_obj.task_status = __COMPLETED__
            todo_obj.is_completed = True
        else:
            todo_obj.task_status = __NOT_STARTED__
            todo_obj.is_completed = False
        todo_obj.task_description = task_description
        todo_obj.time_created = datetime.datetime.now()
        db_session.commit()
        response = {
            'timestamp': stamp(),
            'status': Success,
            'data': {
                'taskId': todo_obj.id,
                'taskUpdateTime': todo_obj.time_created
            }
        }
        resp = make_response(response)
        resp.status_code = 202
        return resp
    response = {
        'timestamp': stamp(),
        'status': Fail
    }
    resp = make_response(response)
    resp.status_code = 403
    return resp
Code example #11
def migrate_batch():
    batch = []
    batch_size = 500
    print(f'TOTAL {db_session.query(Batch).count()}')
    total = db_session.query(Batch).filter(
        text("batch_dir_override is null")).count()
    start = time.time()
    start_batch = start
    print(f'without overrides: {total}')

    batches = (
        db_session.query(Batch).filter(
            text("batch_dir_override is null")).order_by(
                Batch.created_date.desc()).limit(20000)
        #  .yield_per(batch_size)
        #  .enable_eagerloads(False)
    )
    updated = 0
    now = time.time()
    for idx, b in enumerate(batches):
        try:
            batch_dir = b.batch_dir
        except Exception as e:
            if not ("poc" in b.ci_commit.project.id
                    or "arthur" in b.ci_commit.project.id):
                print(f"WTF {b.ci_commit} in {b.ci_commit.project}")
                print(e)
            continue
        # print(f"{b.batch_dir_override} => {batch_dir}")
        # exit(0)
        updated += 1
        batch.append({
            "id": b.id,
            "batch_dir_override": str(batch_dir),
        })
        if idx and idx % batch_size == 0:
            print(b)
            now = time.time()
            print(
                f"{idx/total:.1%} [{batch_size/(now - start_batch):.1f}/s] [est. total left {(now - start_batch) * ((total-idx)/batch_size) / 3600:.2f}h] [elapsed time: {now - start:.1f}s]"
            )
            start_batch = now
            db_session.bulk_update_mappings(Batch, batch)
            db_session.flush()
            batch = []
            # break

    print(
        f"DONE, now committing configurations [elapsed time: {now - start:.1f}s]"
    )
    db_session.bulk_update_mappings(Batch, batch)
    db_session.flush()
    db_session.commit()
    return updated
Code example #12
def insert_debug(msg):
    m_in = None  # defined up front so the except handler can print it safely
    try:
        print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
        m_in = msg.payload.decode("UTF-8").split(";")

        if isinstance(m_in, list) and len(m_in) == 3:
            db_session.add(
                SensorDebug(device_id=m_in[0], code=m_in[1], message=m_in[2]))
            db_session.commit()
        else:
            print("Debug Message is not with the right format")

    except Exception as e:
        print(f"error trying to insert {m_in}")
Code example #13
def migrate_commits():
    batch = []
    batch_size = 500
    total = db_session.query(CiCommit).filter(
        text("commit_dir_override is null")).count()
    start = time.time()
    start_batch = start
    print(f'without overrides: {total}')

    commits = (db_session.query(CiCommit).filter(
        text("commit_dir_override is null")).order_by(
            CiCommit.authored_datetime.desc()).limit(20000))
    updated = 0
    now = time.time()
    for idx, c in enumerate(commits):
        try:
            artifacts_dir = c.artifacts_dir
        except Exception as e:
            if not ("poc" in c.project.id or "arthur" in c.project.id):
                print(f"WTF {c} in {c.project}")
                print(e)
            continue
        # print(f"{c.commit_dir_override} => {artifacts_dir}")
        # continue
        # exit(0)

        updated += 1
        batch.append({
            "id": c.id,
            "commit_dir_override": str(artifacts_dir),
        })
        if idx and idx % batch_size == 0:
            print(c)
            now = time.time()
            print(
                f"{idx/total:.1%} [{batch_size/(now - start_batch):.1f}/s] [est. total left {(now - start_batch) * ((total-idx)/batch_size) / 3600:.2f}h] [elapsed time: {now - start:.1f}s]"
            )
            start_batch = now
            db_session.bulk_update_mappings(CiCommit, batch)
            db_session.flush()
            batch = []
            # break

    print(
        f"DONE, now committing configurations [elapsed time: {now - start:.1f}s]"
    )
    db_session.bulk_update_mappings(CiCommit, batch)
    db_session.flush()
    db_session.commit()
    return updated
Code example #14
def generate(email, password):
    user = UserRepository().get_by(email, as_dict=False)

    if not isinstance(user, User):
        return user

    if not user.valid_password(password):
        return {"message": "invalid password"}, 403

    valid_until = str(datetime.now() + timedelta(days=180))
    user.auth_token = {"email": email, "valid_until": valid_until}

    db_session.add(user)
    db_session.commit()

    return {"token": user.auth_token.decode()}
Code example #15
File: app.py Project: shabisan/TodoApp-BackEnd
def delete_todo_task_by_id(task_id):
    '''
        :description:
            - delete an existing task resource, associated with the user by token, from the todo-tasks db
        :param:
            - URL param
                'task_id'
        :return:
            - JSON response
    '''
    if request.method == 'DELETE':
        session_data = get_jwt_decode_data()
        optional_user = Users.query.filter_by(id=session_data['sub']).first()
        if optional_user is None:
            response = {
                "status": __STATUS_FAIL,
                "timestamp": stamp()
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        todo_obj = Todos.query.filter_by(user_id=optional_user.id, id=task_id).first()
        if todo_obj is None:
            response = {
                'timestamp': stamp(),
                'status': __STATUS_NOT_FOUND,
                'data': {}
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        db_session.delete(todo_obj)
        db_session.commit()
        response = {
            'timestamp': stamp(),
            'status': __STATUS_SUCCESS
        }
        resp = make_response(response)
        resp.status_code = 200
        return resp
    response = {
        'timestamp': stamp(),
        'status': __STATUS_FAIL
    }
    resp = make_response(response)
    resp.status_code = 403
    return resp
Code example #16
File: app.py Project: ToxicEnvelope/TodoApp-BackEnd
def user_login():
    if request.method == 'POST':
        data = request.get_json()
        if data is None or len(data) == 0:
            response = {
                "status": Fail,
                "timestamp": stamp(),
                "reason": "data is missing!"
            }
            resp = make_response(response)
            resp.status_code = 403
            return resp
        user_email = data['email']
        user_password = data['password']
        optional_user = Users.query.filter_by(email=user_email).first()
        if optional_user and check_password_hash(optional_user.password, user_password):
            session['logged_in'] = True
            auth_token = optional_user.encode_auth_token(optional_user.id)
            optional_user.token = auth_token
            assert optional_user.token == auth_token
            db_session.add(optional_user)
            db_session.commit()
            response = {
                "status": Success,
                "timestamp": stamp()
            }
            resp = make_response(response)
            resp.headers['Authorization'] = optional_user.token
            resp.status_code = 200
            return resp
        else:
            response = {
                "status": Fail,
                "timestamp": stamp(),
                "reason": "unauthorized! email / password is not correct"
            }
            resp = make_response(response)
            resp.status_code = 401
            return resp
    response = {
        "status": Fail,
        "timestamp": stamp(),
        "reason": "Unable to verify WWW-Authenticate: Basic realm 'login realm'"
    }
    resp = make_response(response)
    resp.status_code = 405
    return resp
Code example #17
File: app.py Project: ToxicEnvelope/TodoApp-BackEnd
def user_register():
    if request.method == 'POST':
        data = request.get_json()
        if data is None or data.get("email") is None or data.get("password") is None:
            response = {
                "status": Fail,
                "timestamp": stamp(),
                "reason": "data is missing!"
            }
            resp = make_response(response)
            resp.status_code = 403
            return resp
        user_name = data['name']
        user_email = data['email']
        hashed_password = generate_password_hash(data['password'], method='sha256')
        optional_user = Users.query.filter_by(email=user_email).first()
        if optional_user is None:
            new_user = Users(user_name, user_email, hashed_password)
            new_user.token = new_user.encode_auth_token(new_user.id)
            db_session.add(new_user)
            db_session.commit()
            session['logged_in'] = True
            response = {
                "status": Success,
                "timestamp": stamp()
            }
            resp = make_response(response)
            resp.headers['Authorization'] = new_user.token
            resp.status_code = 201
            return resp
        else:
            response = {
                "status": Fail,
                "timestamp": stamp(),
                "reason": "user already exists!"
            }
            resp = make_response(response)
            resp.status_code = 406
            return resp
    response = {
        "status": Fail,
        "timestamp": stamp(),
        "reason": "unable to process request!"
    }
    resp = make_response(response)
    resp.status_code = 405
    return resp
Code example #18
    def insert(self, email, password):
        user = User.query.filter(User.email == email)
        if user.count():
            return {"message": "email or passowrd incorrect"}, 400
        try:
            user = User()
            user.password = password.encode()
            user.email = email
            db_session.add(user)
            db_session.commit()
        except ValueError as e:
            db_session.rollback()
            return {"message": f"{str(e)}"}, 400

        return {
            "message": f"user with email {email} successfully created"
        }, 200
Code example #19
def insert_sensor_data(msg):
    m_in = None  # defined up front so the except handler can print it safely
    try:
        print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
        m_in = json.loads(msg.payload)

        for key, value in m_in["channels"].items():
            db_session.add(
                SensorData(
                    app_key=m_in["app_key"],
                    net_key=m_in["net_key"],
                    device_id=m_in["device_id"],
                    field_name=key,
                    value=value,
                    unit_string=m_in["unit"],
                ))
        db_session.commit()
    except Exception:
        db_session.rollback()
        print(f"error trying to insert {m_in}")
Code example #20
def merge_duplicates(dryrun):
    # batches = (db_session.query(Batch)\
    #                     .filter(CiCommit.id == commit_id, Batch.label == label)\
    #                     .all())
    sql = "SELECT ci_commit_id, label, count(*) as qty FROM batches GROUP BY ci_commit_id, label HAVING count(*)> 1;"
    result = db_session.execute(sql)
    duplicates = list(result)
    # for commit_id, label, count in duplicates:
    #   print(commit_id, label, count)
    print(len(duplicates), "duplicates")

    for commit_id, label, count in duplicates:
        print(f"commit_id: {commit_id}, label={label}, count={count}")
        # .filter(and_(CiCommit.id == commit_id, Batch.label == label))\
        batches = (db_session.query(Batch)\
                            .join(CiCommit)\
                            .filter(Batch.label == label)\
                            .filter(CiCommit.id == commit_id)\
                            .all())
        print(f"  found {len(batches)}, expected: {count}")
        for b in batches:
            print("  ", b, b.label, b.ci_commit.id)
            print("  ", b.ci_commit, b.ci_commit.project)
            print('--')
        assert len(batches) == count

        final_batch, *other_batches = list(batches)
        if not final_batch.data:
            final_batch.data = {}
        if not final_batch.data.get('commands'):
            final_batch.data['commands'] = {}
        print('BEFORE', final_batch.data['commands'])

        for b in other_batches:
            if b.data and b.data.get('commands'):
                final_batch.data['commands'].update(b.data['commands'])
            for o in b.outputs:
                o.batch = final_batch
            db_session.delete(b)
        print('AFTER', final_batch.data['commands'])

        if not dryrun:
            db_session.add(final_batch)
            db_session.commit()
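
A caveat with example #20: mutating final_batch.data['commands'] in place is invisible to SQLAlchemy's change tracking unless the column type is wrapped in MutableDict. A sketch of the usual workaround, assuming data is a plain JSON column on Batch:

from sqlalchemy.orm.attributes import flag_modified

# After updating the nested dict in place, mark the attribute as dirty
# so the UPDATE is actually emitted on commit.
final_batch.data['commands'].update(b.data['commands'])
flag_modified(final_batch, 'data')
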
Code example #21
File: tasks.py Project: isabella232/matching-tool
def upsert_raw_table_to_master(raw_table_name, jurisdiction, event_type,
                               upload_id, db_session):
    create_merged_table(jurisdiction, event_type, db_session)
    master_table_name = generate_master_table_name(jurisdiction, event_type)
    goodtables_schema = load_schema_file(event_type)
    base_column_list = column_list_from_goodtables_schema(goodtables_schema)
    # use new postgres 'on conflict' functionality to upsert
    update_statements = [
        ' "{column}" = EXCLUDED."{column}"'.format(column=column_def[0])
        for column_def in base_column_list
    ]
    start_ts = datetime.today()
    insert_sql = '''
        insert into {master}
        select raw.*, '{new_ts}' inserted_ts, '{new_ts}' updated_ts, row_number() over ()::text || '{event_type}' as matched_id
        from "{raw}" as raw
        on conflict ({primary_key})
        do update set {update_string}, updated_ts = '{new_ts}'
    '''.format(raw=raw_table_name,
               master=master_table_name,
               event_type=event_type,
               primary_key=', '.join([
                   "\"{}\"".format(col)
                   for col in goodtables_schema['primaryKey']
               ]),
               update_string=', '.join(update_statements),
               new_ts=start_ts.isoformat())
    logging.info('Executing insert: %s', insert_sql)
    db_session.execute(insert_sql)
    end_ts = datetime.today()
    merge_log = MergeLog(
        upload_id=upload_id,
        total_unique_rows=total_unique_rows(raw_table_name,
                                            goodtables_schema['primaryKey'],
                                            db_session),
        new_unique_rows=new_unique_rows(master_table_name, start_ts,
                                        db_session),
        merge_start_timestamp=start_ts,
        merge_complete_timestamp=end_ts,
    )
    db_session.add(merge_log)
    db_session.execute('drop table "{}"'.format(raw_table_name))
    db_session.commit()
    return merge_log.id
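
Example #21 interpolates the timestamp into the SQL with str.format. Identifiers (table and column names) cannot be bound as parameters, but the timestamp values can be. A minimal sketch of the same ON CONFLICT shape with bound parameters via SQLAlchemy's text(); the table and key names here are placeholders, not the ones generated above:

from sqlalchemy import text

insert_sql = text('''
    insert into master_table
    select raw.*, :new_ts as inserted_ts, :new_ts as updated_ts
    from raw_table as raw
    on conflict (id)
    do update set updated_ts = :new_ts
''')
# Values travel as bound parameters instead of being spliced into the string.
db_session.execute(insert_sql, {'new_ts': start_ts.isoformat()})
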
Code example #22
File: app.py Project: shabisan/TodoApp-BackEnd
def add_new_todo_task():
    '''
        :description:
            - create a new task associated with user by token
        :param:
            - accepts JSON with
                'taskDescription'
        :return:
            - JSON response
    '''
    if request.method == 'POST':
        session_data = get_jwt_decode_data()
        optional_user = Users.query.filter_by(id=session_data['sub']).first()
        if optional_user is None:
            response = {
                "status": __STATUS_FAIL,
                "timestamp": stamp()
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        payload = request.get_json()
        task_description = payload['taskDescription']
        new_todo = Todos(user_id=optional_user.id, todo_description=task_description)
        db_session.add(new_todo)
        db_session.commit()
        response = {
            'timestamp': stamp(),
            'status': __STATUS_SUCCESS,
            'data': {
                'taskId': new_todo.id,
                'taskCreationTime': new_todo.time_created
            }
        }
        resp = make_response(response)
        resp.status_code = 201
        return resp
    response = {
        'timestamp': stamp(),
        'status': __STATUS_FAIL
    }
    resp = make_response(response)
    resp.status_code = 403
    return resp
Code example #23
File: upload.py Project: isabella232/matching-tool
def merge_file():
    upload_id = request.args.get('uploadId', None)
    if not upload_id:
        return jsonify(status='invalid', reason='uploadId not present')
    has_access = False
    try:
        has_access = can_access_file(upload_id)
        if has_access:
            upload_log = db_session.query(Upload).get(upload_id)
            logger.info('Retrieved upload log, merging raw table to master')
            raw_table_name = 'raw_{}'.format(upload_id)
            logger.info('Merging raw table to master')
            merge_id = upsert_raw_table_to_master(raw_table_name,
                                                  upload_log.jurisdiction_slug,
                                                  upload_log.event_type_slug,
                                                  upload_id, db_session)
            logger.info('Syncing merged file to s3')

            bootstrap_master_tables(upload_log.jurisdiction_slug, db_session)

            sync_merged_file_to_storage(upload_log.jurisdiction_slug,
                                        upload_log.event_type_slug,
                                        db_session.get_bind())
            merge_log = db_session.query(MergeLog).get(merge_id)
            try:
                logger.info('Merge succeeded. Now querying matcher')
                notify_matcher(upload_log.jurisdiction_slug, upload_id)
            except Exception as e:
                logger.error('Error matching: %s', e)
                db_session.rollback()
                return make_response(jsonify(status='error'), 500)
            db_session.commit()
            return jsonify(status='success',
                           new_unique_rows=merge_log.new_unique_rows,
                           total_unique_rows=merge_log.total_unique_rows)
        else:
            return jsonify(status='not authorized')
    except ValueError as e:
        logger.error('Error merging: %s', e)
        db_session.rollback()
        return make_response(jsonify(status='error'), 500)
Code example #24
def migrate():
  batch = []
  batch_size = 500
  output_total = 1_700_000
  start = time.time()
  start_batch = start
  outputs = (db_session.query(Output)
             .yield_per(batch_size)
             .enable_eagerloads(False)
             .order_by(Output.created_date.desc())
  )
  for idx, o in enumerate(outputs):
      if o.is_pending:
        continue
      if o.data is None:
        o.data = {}
      if o.data.get('storage'):
        continue

      storage = get_storage(o)
      if storage is None:
        continue
      batch.append({
        "id": o.id,
        "data": {
          **o.data,
          "storage": storage,
        }, 
      })
      if idx % batch_size == 0:
          print(o)
          now = time.time()
          print(f"{idx/output_total:.1%} [{batch_size/(now - start_batch):.1f}/s] [est. total left {(now - start_batch) * ((output_total-idx)/batch_size) / 3600:.2f}h] [elapsed time: {now - start:.1f}s]")
          start_batch = now
          db_session.bulk_update_mappings(Output, batch)
          db_session.flush()
          batch = []

  print(f"DONE, now committing configurations [elapsed time: {now - start:.1f}s]")
  db_session.bulk_update_mappings(Output, batch)
  db_session.flush()
  db_session.commit()
Code example #25
File: tasks.py Project: isabella232/matching-tool
def write_upload_log(db_session, upload_id, jurisdiction_slug, event_type_slug,
                     user_id, given_filename, upload_start_time,
                     upload_complete_time, upload_status, validate_start_time,
                     validate_complete_time, validate_status, num_rows,
                     file_size, file_hash, s3_upload_path):
    db_object = Upload(id=upload_id,
                       jurisdiction_slug=jurisdiction_slug,
                       event_type_slug=event_type_slug,
                       user_id=user_id,
                       given_filename=given_filename,
                       upload_start_time=upload_start_time,
                       upload_complete_time=upload_complete_time,
                       upload_status=upload_status,
                       validate_start_time=validate_start_time,
                       validate_complete_time=validate_complete_time,
                       validate_status=validate_status,
                       num_rows=num_rows,
                       file_size=file_size,
                       file_hash=file_hash,
                       s3_upload_path=s3_upload_path)
    db_session.add(db_object)
    db_session.commit()
Code example #26
def insert_data(n_users=10, n_sticky=5, n_events=10):
    fake = Faker()

    users = set([fake.name() for _ in range(n_users)])
    print(len(users))

    for user in users:
        u = User(username=user, password='******')
        for i in range(n_sticky):
            sticky = ReminderSticky(reminder=fake.text())
            u.reminders_sticky.append(sticky)
        for i in range(n_events):
            event = ReminderEvents(reminder=fake.text(),
                                   time_of_reminder=generate_random_datetime(),
                                   how_often=generate_how_often())
            u.reminders_events.append(event)
        db_session.add(u)

        try:
            db_session.commit()
        except Exception as e:
            db_session.rollback()
            print(str(e))
Code example #27
File: app.py Project: ToxicEnvelope/TodoApp-BackEnd
def delete_todo_task_by_id(task_id):
    if request.method == 'DELETE':
        session_data = get_jwt_decode_data()
        optional_user = Users.query.filter_by(id=session_data['sub']).first()
        if optional_user is None:
            response = {
                "status": Fail,
                "timestamp": stamp(),
                "reason": "no such user"
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        todo_obj = Todos.query.filter_by(user_id=optional_user.id, id=task_id).first()
        if todo_obj is None:
            response = {
                'timestamp': stamp(),
                'status': Fail,
                'data': {}
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        db_session.delete(todo_obj)
        db_session.commit()
        response = {
            'timestamp': stamp(),
            'status': Success
        }
        resp = make_response(response)
        resp.status_code = 200
        return resp
    response = {
        'timestamp': stamp(),
        'status': Fail
    }
    resp = make_response(response)
    resp.status_code = 403
    return resp
Code example #28
File: app.py Project: ToxicEnvelope/TodoApp-BackEnd
def heartbeat():
    data = get_jwt_decode_data()
    exp = data["exp"]
    now = datetime.datetime.utcnow() - datetime.timedelta(days=0, seconds=600)
    now = int(now.timestamp())
    state = exp - now
    if state <= 0:
        session['logged_in'] = False
        resp = make_response({'message': 'Invalid token'})
        resp.status_code = 403
        return resp
    user_obj = Users.query.filter_by(id=data["sub"]).first()
    new_token = user_obj.encode_auth_token(user_obj.id)
    user_obj.token = new_token
    db_session.add(user_obj)
    db_session.commit()
    response = {
        "status": Success,
        "timestamp": stamp()
    }
    resp = make_response(response)
    resp.status_code = 200
    return resp
Code example #29
File: app.py Project: ToxicEnvelope/TodoApp-BackEnd
def add_new_todo_task():
    if request.method == 'POST':
        session_data = get_jwt_decode_data()
        optional_user = Users.query.filter_by(id=session_data['sub']).first()
        if optional_user is None:
            response = {
                "status": Fail,
                "timestamp": stamp(),
                "reason": "no such user"
            }
            resp = make_response(response)
            resp.status_code = 404
            return resp
        payload = request.get_json()
        task_description = payload['taskDescription']
        new_todo = Todos(user_id=optional_user.id)
        new_todo.task_description = task_description
        db_session.add(new_todo)
        db_session.commit()
        response = {
            'timestamp': stamp(),
            'status': Success,
            'data': {
                'taskId': new_todo.id,
                'taskCreationTime': new_todo.time_created
            }
        }
        resp = make_response(response)
        resp.status_code = 201
        return resp
    response = {
        'timestamp': stamp(),
        'status': Fail
    }
    resp = make_response(response)
    resp.status_code = 403
    return resp
Code example #30
 def test_download_file(self):
     with full_rig_with_s3() as (app, engine):
         # Create matched jail_bookings
         table_name = 'jail_bookings'
         create_and_populate_master_table(table_name, engine,
                                          MATCHED_BOOKING_FILE)
         # Create matched hmis_service_stays
         table_name = 'hmis_service_stays'
         create_and_populate_master_table(table_name, engine,
                                          MATCHED_HMIS_FILE)
         db_session.commit()
         response = app.get(
             '/api/chart/download_source?jurisdiction=boone&eventType=jail_bookings'
         )
         assert response.status_code == 200
         assert response.headers[
             "Content-Disposition"] == "attachment; filename=jail_bookings.csv"
         assert response.headers["Content-type"] == "text/csv"
         data = response.get_data()
         # csv.reader expects text in Python 3, so decode the response bytes
         reader = csv.reader(data.decode('utf-8').splitlines())
         with open(MATCHED_BOOKING_FILE, 'r') as source_file:
             source_reader = csv.reader(source_file)
             for returned_row, expected_row in zip(reader, source_reader):
                 assert returned_row == expected_row
Code example #31
File: upload.py Project: isabella232/matching-tool
def validate_async(uploaded_file_name, jurisdiction, full_filename, event_type,
                   flask_user_id, upload_id):
    validate_start_time = datetime.today()
    sync_upload_metadata_partial = partial(sync_upload_metadata,
                                           upload_id=upload_id,
                                           event_type=event_type,
                                           jurisdiction=jurisdiction,
                                           flask_user_id=flask_user_id,
                                           given_filename=uploaded_file_name,
                                           local_filename=full_filename,
                                           db_session=db_session)
    try:
        # 1. validate header
        validate_header(event_type, full_filename)
        # 2. preprocess file
        filename_with_all_fields = add_missing_fields(event_type,
                                                      full_filename)

        # 3. validate body
        body_validation_report = two_pass_validation(event_type,
                                                     filename_with_all_fields)

        validate_complete_time = datetime.today()
        if not body_validation_report['valid']:
            sync_upload_metadata_partial(
                validate_start_time=validate_start_time,
                validate_complete_time=validate_complete_time,
                validate_status=False,
            )

            return {
                'validation_report': body_validation_report,
                'event_type': event_type,
                'jurisdiction': jurisdiction,
                'filename_with_all_fields': filename_with_all_fields,
                'uploaded_file_name': uploaded_file_name,
                'full_filename': full_filename
            }

        try:
            # 4. upload to s3
            upload_start_time = datetime.today()
            final_upload_path = upload_path(jurisdiction, event_type,
                                            upload_id)
            upload_to_storage(final_upload_path, filename_with_all_fields)

            # 5. load into raw table
            copy_raw_table_to_db(final_upload_path, event_type, upload_id,
                                 db_session.get_bind())

            upload_complete_time = datetime.today()
            # 6. sync upload metadata to db
            sync_upload_metadata_partial(
                s3_upload_path=final_upload_path,
                validate_start_time=validate_start_time,
                validate_complete_time=validate_complete_time,
                validate_status=True,
                upload_start_time=upload_start_time,
                upload_complete_time=upload_complete_time,
                upload_status=True)
        except ValueError as e:
            sync_upload_metadata_partial(
                validate_start_time=validate_start_time,
                validate_complete_time=validate_complete_time,
                validate_status=False,
                upload_start_time=upload_start_time,
                upload_status=False,
            )
            body_validation_report = {
                'valid': False,
                'tables': [{
                    'headers': [],
                    'errors': [{
                        'column-number': None,
                        'row-number': None,
                        'message': str(e)
                    }]
                }]
            }

        db_session.commit()

        return {
            'validation_report': body_validation_report,
            'event_type': event_type,
            'jurisdiction': jurisdiction,
            'filename_with_all_fields': filename_with_all_fields,
            'uploaded_file_name': uploaded_file_name,
            'full_filename': full_filename
        }

    except ValueError as e:
        sync_upload_metadata_partial(validate_start_time=validate_start_time,
                                     validate_status=False)

        db_session.commit()
        return format_error_report(str(e))
Code example #32
	def execute(self, args, diagnostics):
		if args["range"].validate(diagnostics["counter"].data + 1):
			diagnostics["counter"].data = diagnostics["counter"].data + 1
			print("Incrementing test data")
			db_session.commit()