Example #1
def download_archive_stream(request, share, subdir=None):
    # Validate every requested path before streaming the archive.
    selection = request.GET.get('selection', '').split(',')
    for item in selection:
        test_path(item)
    try:
        return share.create_archive_stream(items=selection, subdir=subdir)
    except Exception as e:
        return json_error([str(e)])
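The views on this page all call test_path on user-supplied names before touching the filesystem, but its implementation is not included here. A minimal sketch of such a guard, assuming its job is to reject absolute paths and traversal components (the body below is an assumption, not the project's code):

import os

def test_path(path):
    # Hypothetical guard; the real test_path used by these views is not
    # shown on this page.
    if os.path.isabs(path) or '..' in path.replace('\\', '/').split('/'):
        raise Exception('Illegal path: %s' % path)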
Example #2
def move_paths(request, share, subdir=None, json=None):
    # A mutable default ({}) is a Python pitfall; default to None instead.
    json = json if json is not None else {}
    response = {'moved': [], 'failed': []}
    for item in json['selection']:
        test_path(item)
        item_subpath = item if subdir is None else os.path.join(subdir, item)
        try:
            if share.move_path(item_subpath, json['destination']):
                response['moved'].append(item)
            else:
                response['failed'].append(item)
        except Exception:
            # Record the failure instead of silently dropping the item.
            response['failed'].append(item)
    # Mirror delete_paths below: report the outcome as JSON.
    return json_response(response)
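The shape of the JSON body is not documented on this page, but it can be inferred from the keys the view reads. A request payload would look roughly like this (the file names are made up for illustration):

# Hypothetical payload for move_paths, inferred from json['selection']
# and json['destination'] above:
payload = {
    'selection': ['reads_1.fastq', 'reads_2.fastq'],
    'destination': 'archive',
}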
Example #3
def delete_paths(request, share, subdir=None, json=None):
    # A mutable default ({}) is a Python pitfall; default to None instead.
    json = json if json is not None else {}
    response = {'deleted': [], 'failed': []}
    for item in json['selection']:
        test_path(item)
        item_path = item if subdir is None else os.path.join(subdir, item)
        try:
            if share.delete_path(item_path):
                response['deleted'].append(item)
            else:
                response['failed'].append(item)
        except Exception:  # a bare except would also swallow KeyboardInterrupt
            response['failed'].append(item)
    # Record the deletion in the share's audit log.
    ShareLog.create(share=share, user=request.user,
                    action=ShareLog.ACTION_DELETED,
                    paths=json['selection'], subdir=subdir)
    return json_response(response)
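These views return through json_response and json_error, whose definitions are not included on this page. A minimal sketch, assuming they are thin wrappers over Django's HttpResponse (Django is implied by request.GET and request.user, but the helpers themselves are assumptions):

import json as jsonlib

from django.http import HttpResponse

def json_response(data):
    # Hypothetical wrapper; the project's real helper is not shown here.
    return HttpResponse(jsonlib.dumps(data), content_type='application/json')

def json_error(messages):
    # Hypothetical counterpart pairing with json_response.
    return json_response({'errors': messages})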
Example #4
    def test_etrade_for_bogus_data(self):
        importer = EtradeImporter()
        resource = utils.test_path('test_data/etrade/2020-09-09/TSLA.json')
        chain = importer.from_json(resource)
        # Bogus input should produce NaN greeks rather than raising.
        self.assertTrue(
            math.isnan(
                chain.get_option('put', datetime(2020, 9, 11), 20).theta))
Example #5
    def test_etrade_for_dates_with_missing_file(self):
        importer = EtradeImporter(utils.test_path('test_data/etrade'))
        daterange = pd.date_range(start='2020-09-09', end='2020-12-04')
        # Days without a data file yield falsy entries; filter them out.
        chains = [i for i in importer.for_dates('TSLA', daterange) if i]
        self.assertEqual(2, len(chains))
Example #6
    def test_etrade_for_dates(self):
        importer = EtradeImporter(utils.test_path('test_data/etrade'))
        daterange = pd.date_range(start='2020-09-09', end='2020-09-10')
        chains = list(importer.for_dates('TSLA', daterange))
        self.assertEqual(2, len(chains))
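Both date-range tests rely on for_dates yielding a falsy value for days without a data file, which is why Example #5 filters with `if i`. A sketch of behavior consistent with the tests, assuming the directory layout visible in Example #4 (test_data/etrade/<date>/<symbol>.json); the real method is not shown on this page:

import os

def for_dates(importer, symbol, dates, root):
    # Hypothetical free-function sketch of EtradeImporter.for_dates: yield
    # one chain per day, or None when that day's file is missing, so that
    # callers can filter the results with `if i`.
    for date in dates:
        path = os.path.join(root, date.strftime('%Y-%m-%d'), symbol + '.json')
        yield importer.from_json(path) if os.path.exists(path) else None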
Example #7
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun  1 15:30:20 2019

@author: pedro
"""

import pandas as pd

from utils import test_path, j
from feature_engineering import create_rolling_features_dataframe

import glob
import os

# glob already returns paths that include the directory prefix.
files = glob.glob(j(test_path(), "seg*.csv"))

chunksize = 1500

for file in files:
    # `file` is already a full path, so joining test_path() again is redundant.
    df_chunks = pd.read_csv(file, chunksize=chunksize)
    chunk_list = []
    for chunk in df_chunks:
        chunk = chunk.reset_index(drop=True)
        feat_row = create_rolling_features_dataframe(
            chunk.acoustic_data.values)
        chunk_list.append(feat_row)
    train_data = pd.concat(chunk_list)
    train_data = train_data.reset_index(drop=True)
    train_data.to_csv(j(test_path(), 'feats_1500_' + os.path.basename(file)))
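create_rolling_features_dataframe comes from the local feature_engineering module and is not shown on this page. A minimal sketch of a helper with that shape, assuming it reduces one chunk of the raw signal to a single row of rolling-window statistics (every feature name below is an assumption):

import pandas as pd

def create_rolling_features_dataframe(values, window=100):
    # Hypothetical sketch; the real helper in feature_engineering is not
    # shown here. Reduce one signal chunk to a single feature row.
    s = pd.Series(values)
    rolled = s.rolling(window)
    return pd.DataFrame([{
        'mean': s.mean(),
        'std': s.std(),
        'roll_mean_max': rolled.mean().max(),
        'roll_std_max': rolled.std().max(),
        'roll_abs_max': s.abs().rolling(window).max().max(),
    }])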
Example #8
"""
Created on Sat Apr 13 16:18:54 2019

@author: pedro
"""

import os
import numpy as np
import pandas as pd
import scipy.signal as signal
import matplotlib.pyplot as plt
from utils import j, train_path, test_path, train_resampled_path

df_tr = pd.read_csv(j(train_path(), 'train.csv'), nrows=150000)
# too much data crushes my home laptop
#df_tr = utils.read_file(j(train_path(), 'train.csv'))
df_te = pd.read_csv(j(test_path(), 'seg_0a0fbb.csv'))

#df_tr.plot()

t = df_tr.time_to_failure
#t.plot()
# Sampling frequency estimated from the mean time step of the first samples.
freq = np.abs(1. / np.mean(t[0:100].diff()))
dt = df_tr.time_to_failure.diff()
dt[0] = dt[1]  # diff() leaves a NaN in the first slot; copy the next value
#dt.plot()

# Weird jumps: steps much larger than the typical sample spacing mark
# discontinuities in the recording.
weirdjumps = t[dt < 10 * dt[0:100].mean()]
freq_jumps = np.abs(1. / np.mean(weirdjumps.diff()))
weirdjumps.plot()
# use them to guess the frequencies
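The script stops at the comment above. One way the jump positions could be used, assuming samples between consecutive jumps are evenly spaced (a sketch of a possible continuation, not the author's code):

# Per-segment sampling-frequency estimate between consecutive jumps
# (a sketch of a possible continuation, not the original script's code).
jump_idx = list(weirdjumps.index)
for start, end in zip(jump_idx[:-1], jump_idx[1:]):
    seg = t.loc[start:end]
    if len(seg) > 1:
        seg_freq = np.abs((len(seg) - 1) / (seg.iloc[-1] - seg.iloc[0]))
        print(start, end, seg_freq)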