Example #1
def getBoxData(request):
    qs = DataPoint.queryBoxData(
        x = request.GET.get('x', None),
        y = request.GET.get('y', None))

    response = {'data': list(qs)}
    return HttpResponse(json.dumps(response), content_type="application/json")
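A sketch of wiring this view up in urls.py, matching the old-style Django used throughout these examples (the URL pattern and module name are assumptions):

from django.conf.urls import url
from charts import views  # hypothetical module holding getBoxData

urlpatterns = [
    url(r'^boxdata/$', views.getBoxData),
]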
Example #2
def getdata(request):
    """
    Add a DataPoint
    """

    if request.method != 'POST':
        raise Http404

    # Create a new DataPoint
    date_time = request.POST.get('when', '')
    if not date_time:
        return JsonResponse(
            {'message': 'Please enter a valid date time in the format: 6/21/2014 3:00PM'},
            status=500, content_type='application/json')

    try:
        date_time = datetime.datetime.strptime(date_time, '%m/%d/%Y %I:%M%p')
        date_time = pytz.utc.localize(date_time)
    except ValueError:
        return JsonResponse(
            {'message': 'Please enter a valid date time in the format: 6/21/2014 3:00PM'},
            status=500, content_type='application/json')

    # Dates cannot be more than 48 hours of each other
    if not DataPoint.is_within_time_constraint(date_time, 48, 'hours'):
        return JsonResponse({
            'message': 'The difference between the earliest and latest date cannot be more than {0} {1}.'.format(
                48, 'hours')}, status=500, content_type='application/json')

    identifier = request.POST.get('identifier', '').upper()
    if not identifier:
        return JsonResponse(
            {'message': 'Please enter an identifier.'}, status=500, content_type='application/json')

    value = request.POST.get('value', '')
    if not value:
        return JsonResponse(
            {'message': 'Please enter a valid value (Integer).'},
            status=500, content_type='application/json')

    try:
        value = int(value)
    except ValueError:
        return JsonResponse({'message': 'Please enter a valid value (Integer).'},
                            status=500, content_type='application/json')

    try:
        DataPoint.objects.create(
            date_time=date_time,
            identifier=identifier,
            value=value,
        )
        return HttpResponse(json.dumps([]), status=200, content_type='application/json')
    except Exception:
        return JsonResponse(
            {'message': 'Unable to create DataPoint. Please try again.'},
            status=500, content_type='application/json')
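A quick way to exercise this endpoint is Django's test client; the URL below is an assumption, not part of the example.

from django.test import Client

client = Client()
# Hypothetical URL; adjust to wherever getdata is routed in urls.py.
response = client.post('/datapoint/add/', {
    'when': '6/21/2014 3:00PM',
    'identifier': 'abc',
    'value': '42',
})
print(response.status_code, response.content)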
Example #3
    async def on_c_got_step_results(self, sid, results):
        with db.connection_context():
            # Get the datapoint associated with the step (should be generated when step is sent).
            # first() returns None when no row matches, which the check below relies on.
            datapoint = DataPoint.select().where(
                DataPoint.step == results['step_id']).order_by(
                    DataPoint.created).first()

            # Then we save the datapoint
            if datapoint is not None:
                datapoint.save_cryo_data(results)
Example #4
def printLogs(request):
    logData = DataPoint.csvHeader() + "\n"
    # parse the log file
    with open('gofish.log', 'r') as f:
        logData += f.read()

    # render the collected log data as CSV
    context = RequestContext(request)
    context_dict = {'logData': logData}
    return render_to_response('charts/log_print.csv', context_dict, context)
Example #5
def parseLog(request):
    numEntries = 0
    logCopy = 'logs-perf/gofish-' + str(time.time()) + '.log'
    # parse the log file
    with open('gofish.log', 'r') as f:
        with open(logCopy, 'w') as fw:
            for line in f:
                if line != '\n':
                    DataPoint.insertFromLine(line)
                    numEntries += 1
                    fw.write(line)

    # truncate the log file now that its lines have been copied
    with open('gofish.log', 'w') as f:
        pass

    # report on the number of processed lines
    context = RequestContext(request)
    context_dict = {'numEntries': numEntries}
    return render_to_response('charts/log_parsed.html', context_dict, context)
Example #6
def getData(request):
    qs = DataPoint.query(
        username  = request.GET.get('username', None),
        gameNum   = request.GET.get('gameNum', None),
        cueDetail = request.GET.get('cueDetail', None),
        level     = request.GET.get('level', None),
        moveCost  = request.GET.get('moveCost', None),
        endGame   = request.GET.get('endGame', None))

    response = {'data': [el.toDict() for el in qs]}
    return HttpResponse(json.dumps(response), content_type="application/json")
Example #7
def get_tags(
    tag: str,
    start: str = None,
    end: str = None,
    unit: str = "day",
):
    if unit not in ['week', 'day', 'month', 'year']:
        return {
            'status': 'error',
            'msg': "unit should be one of: week, day, month, year"
        }
    if start is not None:
        start = dateparser.parse(str(start))

    # Default the end of the range to now; otherwise parse the supplied value
    if end is None:
        end = datetime.now()
    else:
        end = dateparser.parse(str(end))
    if start is None:
        start = end - relativedelta(days=1000)
    if not validate_daterange(start, end):
        return {
            'status': 'error',
            'msg': "Invalid daterange, start date must be earlier than end date"
        }

    daily_metrics = []
    datapoints = DataPoint.select().where((DataPoint.key == 'tag')
                                          & (DataPoint.value == tag))
    if datapoints.exists():
        for datapoint in datapoints:
            for point in datapoint.metrics:
                m = point
                m.pop('tag')
                m['region'] = datapoint.region.region_id
                time = datetime.strptime(m['time'].split(' ')[0], "%Y-%m-%d")
                if start <= time <= end:
                    daily_metrics.append(m)
    return {
        'status': 'ok',
        'date': {
            'start': start.strftime('%Y-%m-%d'),
            'end': end.strftime('%Y-%m-%d')
        },
        'results': daily_metrics
    }
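validate_daterange is referenced but not shown; a minimal sketch consistent with how it is called here:

def validate_daterange(start, end):
    # A well-formed range has its start strictly before its end.
    return start < end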
Example #8
    def test_add_data_point(self):
        Node(node_id='TEST').put()
        data_point_json = {
            'node_id': 'TEST',
            'temperature': 50,
            'humidity': 40,
            'pressure': 34,
            'light': 14,
            'saturation': 12,
        }
        response = self.app.post_json('/datapoint/', data_point_json)
        self.assertEqual(201, response.status_code)
        self.assertEqual(
            {'status': 'success'},
            json.loads(response.body))
        self.assertEqual(1, DataPoint.query().count())
Example #9
def suggestion(search: str, ratio=0.5, top=20):
    edit = int(len(search) * ratio)

    exp = NodeList([
        SQL("levenshtein("), DataPoint.value,
        SQL(", '{}') <= {}".format(search, edit)),
        SQL(" order by levenshtein("), DataPoint.value,
        SQL(", '{}')".format(search))
    ], glue='')
    datapoints = DataPoint.select().where(exp)
    tags = []
    if datapoints.exists():
        for datapoint in datapoints[:top]:
            tags.append(datapoint.value)
    return {'tags': tags}
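Note that this interpolates search directly into raw SQL. A sketch of an equivalent, parameterized query using peewee's fn helper (assuming the database exposes a levenshtein() function, e.g. Postgres fuzzystrmatch):

from peewee import fn

def suggestion_parameterized(search: str, ratio=0.5, top=20):
    edit = int(len(search) * ratio)
    distance = fn.levenshtein(DataPoint.value, search)
    query = (DataPoint
             .select()
             .where(distance <= edit)
             .order_by(distance)
             .limit(top))
    return {'tags': [dp.value for dp in query]}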
Example #10
def getchart(request):
    """
    Get DataPoints
    """

    if request.method != 'POST':
        raise Http404

    try:
        data_type = request.POST.get('data_type', 'chart')
        data_filter = request.POST.get('filter', '')
        rollup = request.POST.get('rollup', '')
        data_points = DataPoint.get_data(data_type, data_filter, rollup)
    except Exception:
        return JsonResponse(
            {'message': 'Unable to retrieve data.'}, status=500, content_type='application/json')

    return HttpResponse(json.dumps(data_points), status=200, content_type='application/json')
Example #11
def on_message(client, userdata, msg):
    """Mock on_message method.

    Arguments:
        client {[type]} -- [description]
        userdata {[type]} -- [description]
        msg {[type]} -- [description]
    """
    client = client
    userdata = userdata
    data = json.loads(msg.payload.decode())
    data_point = DataPoint(identifier=data['id'],
                           date=datetime.fromisoformat(
                               data['content']['time_of_measurement']),
                           temperature_f=data['content']['temperature_f'],
                           temperature_c=data['content']['temperature_c'],
                           type=data['type'])
    session.add(data_point)
    session.commit()
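For reference, a payload with the shape this handler expects might look like (illustrative values only):

{
    "id": "sensor-01",
    "type": "temperature",
    "content": {
        "time_of_measurement": "2021-06-01T12:00:00",
        "temperature_f": 71.6,
        "temperature_c": 22.0
    }
}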
Example #12
def submit_metric():
    """
    Submits the metric data for a specific computer
    if the computer doesn't exist it is created.
    """

    # The client posts a JSON-encoded string, so the body is decoded twice
    gson = json.loads(request.get_json())

    new_point = DataPoint(computer_name=gson["computer_name"],
                          cpu_percentage=gson["cpu_percentage"],
                          memory_percentage=gson["memory_percentage"],
                          timestamp=gson["timestamp"])

    with lock:
        if not instances.get(new_point.computer_name):
            instances[new_point.computer_name] = Timeline(
                maxsize=int(os.environ.get("COLLECTOR_BUFFER_SIZE")))
        instances[new_point.computer_name].append(new_point)

    return Response(status=200)
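A minimal sketch of a matching client call (the URL and values are assumptions). Note the body is serialized twice, matching the json.loads(request.get_json()) on the server:

import json
import requests

payload = json.dumps({
    "computer_name": "host-01",
    "cpu_percentage": 12.5,
    "memory_percentage": 48.0,
    "timestamp": "2021-06-01T12:00:00",
})
requests.post("http://localhost:5000/submit_metric", json=payload)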
Example #13
def submit_data():
    data = request.get_json()

    sensors = {}
    for data_point in data:
        if data_point['sensor_id'] not in sensors:
            sensor = Sensor.query.filter_by(id=data_point['sensor_id']).one()
            sensors[data_point['sensor_id']] = sensor
        else:
            sensor = sensors[data_point['sensor_id']]
        dp = DataPoint.createDataPoint(sensor, data_point['data'])
        dp.date = datetime.fromtimestamp(data_point['date'])

        db.session.add(dp)

    socketio.emit('new_data_point', data, namespace='/api/submit_data')

    db.session.commit()

    return jsonify(success=True)
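The expected request body is a JSON list; a sample matching the fields read above (illustrative values only):

[
    {"sensor_id": 1, "data": 21.5, "date": 1622548800},
    {"sensor_id": 2, "data": 35.0, "date": 1622548860}
]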
Example #14
    async def get_next_step(self):
        with db.connection_context():
            try:
                # Get the next step
                step = ExperimentStep.select().where(
                    ExperimentStep.step_done == False).order_by(
                        ExperimentStep.id).first()

                # Check if the step is none, and skip to the catch clause if it is
                if step is None:
                    raise DoesNotExist('Step does not exist')

                # Check if the step has an associated datapoint
                if DataPoint.select().where(
                        DataPoint.step == step).count() < 1:
                    step.generate_datapoint()

                # Convert step to dict
                step_d = model_to_dict(step)

                # Set the experiment id (different from the step id)
                step_d['experiment_configuration_id'] = step_d[
                    'experiment_configuration']['id']

                # Remove datetime and experiment configuration from the dict;
                # the client does not need them, and datetimes are not directly JSON serializable
                del step_d['created']
                del step_d['experiment_configuration']

                # Return the step if it exists
                return step_d
            # Check if the step even exists
            except DoesNotExist:
                # It is OK if it does not exist, we should just stop measuring
                print('No more steps ready')

                # Return None if no step exists
                return None
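A sketch of how a measurement loop might consume this coroutine, stopping once no step remains (run_step is hypothetical, not part of the example):

async def run_all_steps(client):
    while True:
        step = await client.get_next_step()
        if step is None:
            break  # no more steps ready
        await client.run_step(step)  # hypothetical: perform the measurement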
Example #15
def get_post_data():
    if request.method == "GET":
        # We're sending all of our data to the user!
        data = session.query(DataPoint).all()
        return jsonify([i.serialize for i in data])

    elif request.method == "POST":
        # There's a new data point in town!
        name = request.form.get("name") or "Anonymous"
        Rstar = request.form.get("Rstar", type=float)
        fp = request.form.get("fp", type=float)
        ne = request.form.get("ne", type=float)
        fl = request.form.get("fl", type=float)
        fi = request.form.get("fi", type=float)
        fc = request.form.get("fc", type=float)
        L = request.form.get("L", type=float)

        # The Drake equation; note any missing field comes back as None
        # and will raise a TypeError here.
        N = Rstar * fp * ne * fl * fi * fc * L

        new_data = DataPoint(name=name,
                             N=N,
                             Rstar=Rstar,
                             fp=fp,
                             ne=ne,
                             fl=fl,
                             fi=fi,
                             fc=fc,
                             L=L)

        session.add(new_data)
        session.commit()

        return jsonify(new_data.serialize)

    else:
        # The only two requests that we support are "GET" and "POST"
        return "Unsupported HTTP request", 400
Example #16
def get_tags(tag: str,
             start: str = None,
             end: str = None,
             unit: str = "day",
             ratio: float = 1,
             top: int = 5):
    if unit not in ['week', 'day', 'month', 'year']:
        return {
            'status': 'error',
            'msg': "unit should be one of: week, day, month, year"
        }
    if start is not None:
        start = dateparser.parse(str(start))

    # Default the end of the range to now; otherwise parse the supplied value
    if end is None:
        end = datetime.now()
    else:
        end = dateparser.parse(str(end))
    if start is None:
        start = end - relativedelta(days=1000)
    if not validate_daterange(start, end):
        return {
            'status': 'error',
            'msg': "Invalid daterange, start date must be earlier than end date"
        }

    daily_metrics = []

    edit = int(len(tag) * ratio)

    exp = NodeList([
        SQL("levenshtein("), DataPoint.value,
        SQL(", '{}') <= {}".format(tag, edit)),
        SQL(" order by levenshtein("), DataPoint.value,
        SQL(", '{}')".format(tag))
    ], glue='')
    datapoints = DataPoint.select().where(exp)

    if datapoints.exists():
        for datapoint in datapoints[:top]:
            datapoint_metrics = []
            for point in datapoint.metrics:
                m = point
                m.pop('tag')
                m['region'] = datapoint.region.region_id
                time = datetime.strptime(m['time'].split(' ')[0], "%Y-%m-%d")
                if start <= time <= end:
                    datapoint_metrics.append(m)
            daily_metrics.append({
                'tag': datapoint.value,
                'data': datapoint_metrics
            })
    return {
        'status': 'ok',
        'date': {
            'start': start.strftime('%Y-%m-%d'),
            'end': end.strftime('%Y-%m-%d')
        },
        'results': daily_metrics
    }
Example #17
    def get_historical_data(self,
                            stock: str,
                            current_time: datetime,
                            number_of_days: int = 10) -> List[DataPoint]:
        spark_data_frame_for_stock = self.spark.createDataFrame([],
                                                                self.schema)

        for i in range(number_of_days + 1):
            start_time = (current_time -
                          timedelta(days=i)).strftime("%Y-%m-%d")
            end_time = (current_time -
                        timedelta(days=i - 1)).strftime("%Y-%m-%d")

            historical_data_path = "./datasets/historical_data/"
            folder_path = historical_data_path + start_time + "/"
            try:
                Path(folder_path).mkdir(parents=True, exist_ok=True)
            except OSError:
                print(f'Creation of the directory {folder_path} failed')
            # else:
            # print(f'Successfully created the directory {folder_path}')

            stock_file = Path(folder_path + stock + ".csv")
            if stock_file.is_file():
                # if stock data already downloaded, just load it
                stock_data_spark_df = self.spark.read \
                    .csv(str(stock_file), schema=self.schema, timestampFormat="yyyy-MM-dd HH:mm:ss", header=True)
                spark_data_frame_for_stock = spark_data_frame_for_stock.union(
                    stock_data_spark_df)
            else:
                # download if not downloaded
                stock_data = yf.download(stock,
                                         start=start_time,
                                         end=end_time,
                                         interval="1m")
                if len(stock_data) < 1:
                    print(f'stock data not found on yahoo finance: {stock}')
                    continue

                stock_data = stock_data.rename(
                    columns={"Adj Close": "AdjustedClose"})
                stock_data = stock_data.reset_index()
                stock_data.dropna(inplace=True)
                stock_data["Datetime"] = stock_data["Datetime"].astype(
                    str).str[:-6].astype('datetime64[ns]')
                stock_data["Volume"] = stock_data["Volume"].astype(float)
                stock_data["Symbol"] = stock
                stock_data.set_index('Datetime')
                if (current_time - timedelta(days=i)
                        < datetime.now() - timedelta(days=1)):
                    stock_data.to_csv(path_or_buf=stock_file, index=False)
                stock_data_spark_df = self.spark.createDataFrame(
                    stock_data, self.schema)
                spark_data_frame_for_stock = spark_data_frame_for_stock.union(
                    stock_data_spark_df)

        spark_data_frame_for_stock_sorted = spark_data_frame_for_stock\
            .where(spark_data_frame_for_stock.Datetime <= current_time.strftime("%Y-%m-%d %H:%M:%S"))\
            .sort("Datetime")\
            .collect()

        list_of_data_points = [
            DataPoint(row.Open, row.Close, row.High, row.Low, row.Volume,
                      row.Datetime)
            for row in spark_data_frame_for_stock_sorted
        ]

        return list_of_data_points
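A sketch of a call site (the provider instance and ticker symbol are assumptions):

from datetime import datetime

points = provider.get_historical_data("AAPL", datetime.now(), number_of_days=5)
print(len(points), "one-minute data points up to now")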
Example #18
def main(csvfile):
    # TODO use Pandas
    reader = csv.reader(csvfile, delimiter=',', quotechar='"')

    points = []
    feature_vectors = []

    idxs = set()
    names = set()
    preexist = set(FeatureVector.objects.all().values_list("exact_name",
                                                           flat=True))

    now = timezone.now()

    count = 0
    for i, row in enumerate(reader):
        if i == 0:
            # The first row is the header; build the column-name -> index mapping
            mapping = get_mapping(row)
            continue
        if row == [] or len(row) < max(mapping.values()):
            continue
        try:
            try:
                exact_name = get_exact_name(row[mapping["Name"]])
                try:
                    decay_feature = get_decay_feature_vector(exact_name)
                    feature_vector = True
                    if exact_name not in names and exact_name not in preexist:
                        temp = FeatureVector(exact_name=exact_name,
                                             type=FeatureVector.DECAY,
                                             vector=decay_feature,
                                             created=now)

                        temp.clean_fields()
                        feature_vectors.append(temp)
                        names.add(exact_name)

                        if len(feature_vectors) > 150:
                            FeatureVector.objects.bulk_create(feature_vectors)
                            feature_vectors = []

                except Exception:
                    feature_vector = None
            except Exception:
                feature_vector = None
                exact_name = None

            band_gap = row[mapping["BandGap"]]
            data = {
                "name": row[mapping["Name"]],
                "options": row[mapping["Options"]],
                "h**o": row[mapping["H**O"]],
                "lumo": row[mapping["LUMO"]],
                "homo_orbital": row[mapping["HomoOrbital"]],
                "dipole": row[mapping["Dipole"]],
                "energy": row[mapping["Energy"]],
                "band_gap": band_gap if band_gap != '---' else None,
                "exact_name": exact_name,
                "created": now,
            }

            point = DataPoint(**data)
            point.clean_fields()
            points.append(point)
            if len(points) > 50:
                DataPoint.objects.bulk_create(points)
                points = []
            if feature_vector is not None:
                idxs.add(count)

            count += 1
        except Exception:
            pass

    DataPoint.objects.bulk_create(points)
    FeatureVector.objects.bulk_create(feature_vectors)

    Through = DataPoint.vectors.through

    temp = DataPoint.objects.filter(created=now).values_list(
        "pk", "exact_name")
    temp2 = FeatureVector.objects.all().values_list("exact_name", "pk")
    groups = dict(temp2)

    final = []
    for i, (pk, name) in enumerate(temp):
        if i in idxs:
            final.append(
                Through(datapoint_id=pk, featurevector_id=groups[name]))

            if len(final) > 200:
                Through.objects.bulk_create(final)
                final = []
    Through.objects.bulk_create(final)

    return count
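A minimal invocation sketch (the file name is an assumption):

with open('molecules.csv') as csvfile:
    imported = main(csvfile)
print(imported, 'rows imported')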
Example #19
def main(csvfile):
    reader = csv.reader(csvfile, delimiter=',', quotechar='"')

    points = []
    feature_vectors = []

    idxs = set()
    names = set()
    preexist = set(
        FeatureVector.objects.all().values_list("exact_name", flat=True))

    now = timezone.now()

    count = 0
    for row in reader:
        if row == []:
            continue
        try:
            try:
                exact_name = get_exact_name(row[1])
                try:
                    decay_feature = get_decay_feature_vector(exact_name)
                    feature_vector = True
                    if exact_name not in names and exact_name not in preexist:
                        temp = FeatureVector(
                            exact_name=exact_name,
                            type=FeatureVector.DECAY,
                            vector=decay_feature,
                            created=now)

                        temp.clean_fields()
                        feature_vectors.append(temp)
                        names.add(exact_name)

                        if len(feature_vectors) > 150:
                            FeatureVector.objects.bulk_create(feature_vectors)
                            feature_vectors = []

                except Exception:
                    feature_vector = None
            except Exception:
                feature_vector = None
                exact_name = None

            data = {
                "name": row[1],
                "options": row[4],
                "h**o": row[5],
                "lumo": row[6],
                "homo_orbital": row[7],
                "dipole": row[8],
                "energy": row[9],
                "band_gap": row[10] if row[10] != '---' else None,
                "exact_name": exact_name,
                "created": now,
            }

            point = DataPoint(**data)
            point.clean_fields()
            points.append(point)
            if len(points) > 50:
                DataPoint.objects.bulk_create(points)
                points = []
            if feature_vector is not None:
                idxs.add(count)

            count += 1
        except Exception:
            pass

    DataPoint.objects.bulk_create(points)
    FeatureVector.objects.bulk_create(feature_vectors)

    Through = DataPoint.vectors.through

    temp = DataPoint.objects.filter(
        created=now).values_list("pk", "exact_name")
    temp2 = FeatureVector.objects.all().values_list("exact_name", "pk")
    groups = dict(temp2)

    final = []
    for i, (pk, name) in enumerate(temp):
        if i in idxs:
            final.append(
                Through(datapoint_id=pk, featurevector_id=groups[name]))

            if len(final) > 200:
                Through.objects.bulk_create(final)
                final = []
    Through.objects.bulk_create(final)

    return count
Example #20
    def get_latest_data_point(self, stocks: List[str],
                              current_time: datetime) -> Dict[str, DataPoint]:

        spark_data_frame_for_stock = self.spark.createDataFrame([],
                                                                self.schema)
        pandas_data_frame = pandas\
            .DataFrame(columns=['Datetime', 'Open', 'High', 'Low', 'Close', 'AdjustedClose', 'Volume', 'Symbol'])

        download_list = []

        stocks_dict = {}

        start_time = current_time.strftime("%Y-%m-%d")
        end_time = (current_time + timedelta(days=1)).strftime("%Y-%m-%d")

        historical_data_path = "./datasets/historical_data/"
        folder_path = historical_data_path + start_time + "/"

        for stock in stocks:
            try:
                Path(folder_path).mkdir(parents=True, exist_ok=True)
            except OSError:
                print(f'Creation of the directory {folder_path} failed')

            stock_file = Path(folder_path + stock + ".csv")
            if (stock_file.is_file()
                    and current_time < datetime.now() - timedelta(hours=24)):
                # if stock data already downloaded, just load it
                # stock_data_spark_df = self.spark.read .csv(str(stock_file), schema=self.schema, timestampFormat="yyyy-MM-dd HH:mm:ss", header=True)
                # spark_data_frame_for_stock = spark_data_frame_for_stock.union(stock_data_spark_df)
                stock_data = pandas.read_csv(str(stock_file))
                stock_data['Datetime'] = pandas.to_datetime(
                    stock_data['Datetime'], format='%Y-%m-%d')
                pandas_data_frame = pandas.concat(
                    [pandas_data_frame, stock_data])

            else:
                # add stock to download list
                download_list.append(stock)

        if len(download_list) > 0:
            stocks_data = yf.download(download_list,
                                      start=start_time,
                                      end=end_time,
                                      interval="1m")
            if len(stocks_data) > 0:
                for stock in download_list:

                    stock_data = stocks_data if len(
                        download_list) == 1 else stocks_data[stock]
                    stock_data = stock_data.rename(
                        columns={"Adj Close": "AdjustedClose"})
                    stock_data = stock_data.reset_index()
                    stock_data.dropna(inplace=True)
                    stock_data["Datetime"] = stock_data["Datetime"].astype(
                        str).str[:-6].astype('datetime64[ns]')
                    stock_data["Volume"] = stock_data["Volume"].astype(float)
                    stock_data["Symbol"] = stock
                    stock_data.set_index('Datetime')

                    if current_time < datetime.now() + timedelta(hours=-24):
                        stock_file = Path(folder_path + stock + ".csv")
                        stock_data.to_csv(path_or_buf=stock_file, index=False)

                    pandas_data_frame = pandas.concat(
                        [pandas_data_frame, stock_data])

            # date_filter_string = "Datetime < '" + current_time.strftime("%Y-%m-%d %H:%M:%S") + "'"
            # spark_data_frame_for_stock = spark_data_frame_for_stock.union(self.spark.createDataFrame(stock_data, self.schema))
            # data_frame = self.spark.createDataFrame(stock_data, self.schema)

        spark_data_frame_for_stock = self.spark.createDataFrame(
            pandas_data_frame, self.schema)
        for stock in stocks:
            last_point_row = spark_data_frame_for_stock \
                .where(spark_data_frame_for_stock.Datetime <= current_time.strftime("%Y-%m-%d %H:%M:%S")) \
                .where(spark_data_frame_for_stock.Symbol == stock)\
                .sort("Datetime", ascending=False) \
                .limit(1) \
                .select("*") \
                .first()

            data_point = DataPoint(last_point_row.Open, last_point_row.Close,
                                   last_point_row.High, last_point_row.Low,
                                   last_point_row.Volume,
                                   last_point_row.Datetime)
            stocks_dict[stock] = data_point

        return stocks_dict
Example #21
def dataByUser(request):
    choices = DataPoint.describeData()
    context = RequestContext(request)
    context_dict = {'choices': choices}
    return render_to_response('charts/data_user.html', context_dict, context)
Example #22
    def setUp(self):
        thing1 = Thing(name="Thing1")
        thing1.save()
        thing1 = Thing.objects.get(name="Thing1")

        thing2 = Thing(name="Thing2")
        thing2.save()
        thing2 = Thing.objects.get(name="Thing2")

        # Seven points for thing1, three for thing2, saved in the original order
        states = [
            ("open", thing1), ("closed", thing1), ("frobbed", thing1),
            ("closed", thing1), ("frobbed", thing1), ("closed", thing1),
            ("frobbed", thing1),
            ("blipped", thing2), ("blipped", thing2), ("blipped", thing2),
        ]
        for state, thing in states:
            DataPoint(value={"state": state}, thing=thing).save()
Example #23
async def export_data(request):
    # Check id exists
    if 'id' not in request.query:
        return web.Response(text='Could not find the requested id',
                            content_type='text/html')

    # Grab the id
    config_id = request.query['id']

    # Now we want to start the export
    # Open connection to database
    with db.connection_context():
        # Grab the configuration first
        ecl = ExperimentConfiguration.select().where(
            ExperimentConfiguration.id == config_id).dicts()

        # Check if we have a result
        if len(ecl) > 0:
            # Grab the first result (There should only be one when we query by id)
            ec = ecl[0]

            # Convert date format
            ec['created'] = ec['created'].isoformat()

            # Compute the number of points taken
            ec['n_points_taken'] = ExperimentStep.select() \
                .where(ExperimentStep.experiment_configuration == ec['id']) \
                .where(ExperimentStep.step_done == True) \
                .count()

            # Compute the number of points taken
            ec['n_points_total'] = ExperimentStep.select() \
                .where(ExperimentStep.experiment_configuration == ec['id']) \
                .count()

            # Add an empty array to contain steps
            ec['steps'] = []

            # Now we're done processing the configuration
            # Next we get all the datapoints that were saved
            # We start by iterating over all the steps in the experiment
            for step in ExperimentStep.select().where(
                    ExperimentStep.experiment_configuration ==
                    ec['id']).dicts():
                # Convert date format
                step['created'] = step['created'].isoformat()

                # Add an empty array to contain datapoints
                step['datapoints'] = []

                # And we iterate through all the datapoints for the step
                for dp in DataPoint.select().where(
                        DataPoint.step == step['id']):
                    # Create a dict to contain the collected information
                    datapoint_dict = {
                        'id': dp.id,
                        'created': dp.created.isoformat(),
                        'magnetism_datapoints': [],
                        'temperature_datapoints': [],
                        'pressure_datapoints': []
                    }

                    # Next we find the magnetism datapoint
                    for mdp in MagnetismDataPoint.select().where(
                            MagnetismDataPoint.datapoint == dp):
                        # For this we find the magnetism measurements (where we actually store the data)
                        mdps = MagnetismMeasurement.select().where(
                            MagnetismMeasurement.magnetism_data_point == mdp)

                        # Save it to the datapoint dict
                        for magnetism_datapoint in list(mdps.dicts()):
                            datapoint_dict['magnetism_datapoints'].append(
                                magnetism_datapoint)

                    # And we find the cryodatapoint
                    for cdp in CryogenicsDataPoint.select().where(
                            CryogenicsDataPoint.datapoint == dp):
                        # Similarly we find pressure and temperature datapoints
                        pdps = PressureDataPoint.select().where(
                            PressureDataPoint.cryo_data_point == cdp)
                        tdps = TemperatureDataPoint.select().where(
                            TemperatureDataPoint.cryo_data_point == cdp)

                        # Save them to the datapoint dict
                        for pressure_datapoint in list(pdps.dicts()):
                            datapoint_dict['pressure_datapoints'].append(
                                pressure_datapoint)

                        for temperature_datapoint in list(tdps.dicts()):
                            # Convert date format
                            temperature_datapoint[
                                'created'] = temperature_datapoint[
                                    'created'].isoformat()

                            # Append the temperature
                            datapoint_dict['temperature_datapoints'].append(
                                temperature_datapoint)

                    # Save the datapoint to the step
                    step['datapoints'].append(datapoint_dict)

                # Save the step to the configuration
                ec['steps'].append(step)

            # And finally we send the response data
            return web.json_response(
                headers={'Content-Disposition': 'Attachment'},
                body=json.dumps(ec))
        else:
            return web.Response(text='Attempted to export ' + str(config_id) +
                                ' but no such config found',
                                content_type='text/html')
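A sketch of registering this handler with an aiohttp application (the route path is an assumption):

from aiohttp import web

app = web.Application()
app.router.add_get('/export', export_data)
web.run_app(app)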