Example no. 1
def main(cmd, dataset, run, conf, make_videos):   
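    # Build world-coordinate tracks for a single video (cmd is a video name)
    # or for every video in the dataset (cmd == "findvids").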
    from pathlib import Path
    
    if make_videos:
        from visualize_tracking import render_video
        from config import DatasetConfig
        from apply_mask import Masker
        
        mask = Masker(dataset)
        dc = DatasetConfig(dataset)
        
    config_path = runs_path / "{}_{}".format(dataset,run) / "world_tracking_optimization.pklz"
    if config_path.is_file():
        config = load(config_path)
    else:
        #raise(ValueError("No world tracking optimized configuration exists at {}".format(config_path)))
        config = WorldTrackingConfig(default_config)
    
    calib = Calibration(dataset)    
    munkres = Munkres()
    ts = Timestamps(dataset)
    
    start_stop = None
    
    if cmd == "findvids":
        vidnames = (datasets_path / dataset / "videos").glob('*.mkv')
        vidnames = [x.stem for x in vidnames]
        vidnames.sort()
        
        outfolder = runs_path / "{}_{}".format(dataset,run) / "tracks_world"
        mkdir(outfolder)
    else:
        vidnames = [cmd]
        outfolder = Path('./')
        start_stop = (0,500)
            
    for v in vidnames:
        print_flush(v) 
        out_path = outfolder / (v+'_tracks.pklz')   
        
        print_flush("Loading data...")
        det_path = runs_path / "{}_{}".format(dataset,run) / "detections_world" / (v+'_world.csv')
        detections3D = pd.read_csv(det_path)
        
        klt_path = det_path.with_name(det_path.stem + '_klt.pklz')
        klts = load(klt_path)
        
        print_flush("Tracking...")
        tracks = make_tracks(dataset, v, detections3D, klts, munkres, ts, calib, config, start_stop=start_stop)
        
        print_flush("Saving tracks...")
        save(tracks, out_path)
        
        if make_videos:            
            vidpath = datasets_path / dataset / "videos" / (v+'.mkv')
            print_flush("Rendering video...")
            render_video(tracks, vidpath, out_path.with_suffix('.mp4'), calib=calib, mask=mask, fps=dc.get('video_fps'))

    print_flush("Done!")
Example no. 2
def main(cmd, dataset, run, conf, make_videos):   
    if make_videos:
        from visualize_tracking import render_video
        from config import DatasetConfig
        from apply_mask import Masker
        
        mask = Masker(dataset)
        dc = DatasetConfig(dataset)
        
    config_path = "{rp}{ds}_{rn}/world_tracking_optimization.pklz".format(rp=runs_path, ds=dataset, rn=run)
    if isfile(config_path):
        config = load(config_path)
    else:
        #raise(ValueError("No world tracking optimized configuration exists at {}".format(config_path)))
        config = WorldTrackingConfig(default_config)
    
    calib = Calibration(dataset)    
    munkres = Munkres()
    ts = Timestamps(dataset)
    
    start_stop = None
    
    if cmd == "findvids":
        from glob import glob
        vidnames = glob('{dsp}{ds}/videos/*.mkv'.format(dsp=datasets_path, ds=dataset))
        vidnames = [right_remove(x.split('/')[-1], '.mkv') for x in vidnames]
        vidnames.sort()
        
        outfolder = '{}{}_{}/tracks_world/'.format(runs_path, dataset, run)
        mkdir(outfolder)
    else:
        vidnames = [cmd]
        outfolder = './'
        start_stop = (0,500)
            
    for v in vidnames:
        print_flush(v)    
        out_path = "{of}{v}_tracks.pklz".format(of=outfolder, v=v)
        
        print_flush("Loading data...")
        det_path = "{rp}{ds}_{rn}/detections_world/{v}_world.csv".format(rp=runs_path, ds=dataset, rn=run, v=v)
        detections3D = pd.read_csv(det_path)
        
        klt_path = det_path.replace('.csv', '_klt.pklz')
        klts = load(klt_path)
        
        print_flush("Tracking...")
        tracks = make_tracks(dataset, v, detections3D, klts, munkres, ts, calib, config, start_stop=start_stop)
        
        print_flush("Saving tracks...")
        save(tracks, out_path)
        
        if make_videos:

            vidpath = "{dsp}{ds}/videos/{v}.mkv".format(dsp=datasets_path, ds=dataset, v=v)
            print_flush("Rendering video...")
            render_video(tracks, vidpath, out_path.replace('.pklz','.mp4'), calib=calib, mask=mask, fps=dc.get('video_fps'))

    print_flush("Done!")
Example no. 3
def get_shop(callback):
    if hasattr(storage, 'id'):
        # If logged in

        print(f'Shop ok: id = {storage.id}')
        print('Shop sending:', storage.id)

        res = storage.load('/shop', {
            'action': 'get-state',
            'id': storage.id,
        })

        print(res)

        if (res['status'] == 'OK'):
            callback.Call({
                'id': storage.id,
                'shop': res['shop'],
                'amount': res['amount'],
            })
        else:
            print('Shop: Error "Bad shop requests"')

    else:
        # Any case when user not logged in
        print('Shop Error: storage has no \'id\' property.')
Example no. 4
def validate_token(auth):
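    # Confirm the client is authorized by replaying its Authorization header
    # against the index's /v1/repositories/<ns>/<repo>/images endpoint.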
    full_repos_name = auth.get('repository', '').split('/')
    if len(full_repos_name) != 2:
        logger.debug('validate_token: Invalid repository field')
        return False
    cfg = config.load()
    index_endpoint = cfg.index_endpoint
    if index_endpoint is None:
        index_endpoint = 'https://index.docker.io'
    index_endpoint = index_endpoint.strip('/')
    url = '{0}/v1/repositories/{1}/{2}/images'.format(index_endpoint,
                                                      full_repos_name[0],
                                                      full_repos_name[1])
    headers = {'Authorization': flask.request.headers.get('authorization')}
    resp = requests.get(url, verify=True, headers=headers)
    logger.debug('validate_token: Index returned {0}'.format(resp.status_code))
    if resp.status_code != 200:
        return False
    store = storage.load()
    try:
        images_list = [i['id'] for i in json.loads(resp.text)]
        store.put_content(store.images_list_path(*full_repos_name),
                          json.dumps(images_list))
    except json.JSONDecodeError:
        logger.debug('validate_token: Wrong format for images_list')
        return False
    return True
Example no. 5
 def _generate_index(self, session):
     store = storage.load()
     Base.metadata.create_all(self._engine)
     session.add(Version(id=self.version))
     for repository in self._walk_storage(store=store):
         session.add(Repository(**repository))
     session.commit()
Example no. 6
def getall():
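  # Return every stored record, embedding each record's key in its payload.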
  data = request.json
  keys = getkeys()
  results = {k: load(k) for k in keys}
  for k in results:
    results[k]['key'] = k
  results = {'data': [results[k] for k in results]}
  return jsonify(results)
Example no. 7
def _query(configuration, restrict='title', select=None):
    books = storage.load(configuration, 'library')
    if select is None:
        return books
    else:
        return [book for book in books
                if restrict in book.keys()
                and select.upper()
                in unicode(book[restrict]).upper()]
Example no. 8
def get_world_tracking_config(dataset_name, run_name):
    dataset_name, run_name = map(quote, (dataset_name, run_name))
    path = "{rp}{dn}_{r}/world_tracking_optimization.pklz".format(rp=runs_path, dn=dataset_name, r=run_name)

    if isfile(path):
        wtc = load(path)
        return (wtc.get_dict(), 200)
    else:
        return (NoContent, 404)
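
Several snippets read and write .pklz files through bare load/save helpers whose implementation is never shown. A minimal sketch, assuming .pklz denotes a gzip-compressed pickle (the helper names match the snippets; the bodies are an assumption):

import gzip
import pickle

def load(path):
    # Assumed helper: read one object from a gzip-compressed pickle file.
    with gzip.open(str(path), 'rb') as f:
        return pickle.load(f)

def save(obj, path):
    # Assumed helper: write 'obj' as a gzip-compressed pickle file.
    with gzip.open(str(path), 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)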
Example no. 9
 def execute(self):
     books = storage.load(self._configuration, 'library')
     fields = set()
     for book in books:
         for field in book.keys():
             if field[0] != '_':
                 fields.add(field)
     self.out('\n'.join(fields))
     return None, None
Example no. 10
def _query(configuration, restrict='title', select=None):
    books = storage.load(configuration, 'library')
    if select is None:
        return books
    else:
        return [
            book for book in books if restrict in book.keys()
            and select.upper() in unicode(book[restrict]).upper()
        ]
Example no. 11
def generateClassesAccuracies(filePath):
    """ generate csv containing accuracy per class
    """
    filename = filePath.stem
    processedJAFFE = load(str(filePath))
    processedDF = pd.DataFrame(processedJAFFE)
    processedDF.columns = ['name', 'data', 'emotion']
    processedDF = processedDF.sort_values(by=['name', 'emotion'])
    grouped = processedDF.groupby(['name', 'emotion'])
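    # The first two images in each (name, emotion) group form the training
    # set; the remaining three form the test set.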
    train = grouped.nth([0, 1])
    test = grouped.nth([2, 3, 4])

    yTrain = train.index.get_level_values(1).tolist()
    xTrain = train.values.ravel().tolist()
    yTest = test.index.get_level_values(1).tolist()
    xTest = test.values.ravel().tolist()
    cgamma = [
        (2e-3, 1e-2),
        (1e-2, 1e-3),
        (1e-1, 1.1e-4),
        (1e-1, 7e-5),
        (1e+1, 1e-6),
        (1e+2, 1e-7),
    ]
    output = []
    output.append(['C', 'Gamma', 'AN', 'DI', 'FE', 'HA', 'NE', 'SA', 'SU'])
    for C, gamma in cgamma:
        row = [C, gamma]
        print('C = %s \t gamma = %s' % (C, gamma))
        svc = OneVsRestClassifier(SVC(random_state=0,
                                      decision_function_shape='ovr',
                                      C=C,
                                      kernel='rbf',
                                      gamma=gamma),
                                  n_jobs=4)
        svc.fit(xTrain, yTrain)
        yTrue, yPred = yTest, svc.predict(xTest)
        yTrue = np.array(yTrue, dtype=np.unicode_)
        yPred = np.array(yPred, dtype=np.unicode_)
        correct = np.sum(yTrue == yPred)

        print("accuracy: %d/%d = " % (correct, len(yTrue)),
              D('%.2f' % (correct / len(yTrue) * 100)))
        report = classification_report(yTrue, yPred, output_dict=True)
        row.append(report['AN']['precision'])
        row.append(report['DI']['precision'])
        row.append(report['FE']['precision'])
        row.append(report['HA']['precision'])
        row.append(report['NE']['precision'])
        row.append(report['SA']['precision'])
        row.append(report['SU']['precision'])

        output.append(row)

    with open('../csv/%s_precision.csv' % filename, 'w') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerows(output)
Example no. 12
 def execute(self):
     books = storage.load(self._configuration, 'library')
     fields = set()
     for book in books:
         for field in book.keys():
             if field[0] != '_':
                 fields.add(field)
     self.out('\n'.join(fields))
     return None, None
Example no. 13
def get_world_tracking_config(dataset_name, run_name):
    dataset_name, run_name = map(quote, (dataset_name, run_name))
    path = runs_path / "{}_{}".format(
        dataset_name, run_name) / "world_tracking_optimization.pklz"

    if path.is_file():
        wtc = load(path)
        return (wtc.get_dict(), 200)
    else:
        return (NoContent, 404)
Example no. 14
 def load(self, filename):
     """ save current state of todos list
     Args:
         filename: string
     Returns:
         Boolean
     """
     pyobj = storage.load(filename)
     self._id = pyobj["_id"]
     self.id_to_todo = pyobj["id_to_todo"]
     return True
Example no. 15
def send_login(login, password, callback):
    res = storage.load('/login', {
        'login': login,
        'password': password,
        'type': 'login',
    })

    if res['status'] == 'OK':
        storage.id = res['id']

    callback.Call(res)
Example no. 16
def get_levels(callback):
    res = storage.load('/login', {
        'id': storage.id,
        'type': 'levels',
        'login': '',
        'password': '',
    })

    if res['status'] == 'OK':
        storage.level = res['levels']
        callback.Call(res['levels'])
Example no. 17
def provision_from_storage(key, value, app_name, namespace):
  """Provisions a resource for a property specified from storage."""
  raw_manifest = storage.load(value)

  manifest = yaml.safe_load(raw_manifest)
  if 'metadata' not in manifest:
    manifest['metadata'] = {}
  resource_name = dns1123_name("{}-{}".format(app_name, key))
  manifest['metadata']['name'] = resource_name
  manifest['metadata']['namespace'] = namespace

  return resource_name, add_preprovisioned_labels([manifest], key)
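
provision_from_storage relies on a dns1123_name helper that is not shown. A rough sketch, assuming it coerces an arbitrary string into a valid Kubernetes (DNS-1123) resource name; the exact normalization rules are an assumption:

import re

def dns1123_name(name):
    # Assumed behavior: lower-case, replace disallowed characters with '-',
    # trim to the 253-character subdomain limit, and strip edge punctuation.
    name = re.sub(r'[^a-z0-9.-]', '-', name.lower())
    return name[:253].strip('.-')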
Example no. 18
def register(login: str, password: str, callback):
    print('Register sending:', login, password)

    res = storage.load('/register', {
        'login': login,
        'password': password,
    })

    if (res['status'] == 'OK'):
        storage.id = res['id']

    callback.Call(res['status'])
Example no. 19
def get():
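  # Return stored records whose match score against the request body exceeds
  # 'threshold', ordered by ascending score.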
  data = request.json
  keys = getkeys()
  results = []
  for k in keys:
    value = load(k)
    match = search(data, value)
    if match > threshold:
      results.append((value, match))
  result = jsonify({'data': [i[0] for i in sorted(results, key=lambda tup: tup[-1])]})
  print(result.data)
  return result
Example no. 20
    def wrapper(namespace, repository, *args, **kwargs):
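        # Mirror tag lookups: serve locally when possible; on a 404, consult
        # the configured source and cache the returned tag data in Redis.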
        cfg = config.load()
        mirroring_cfg = cfg.mirroring
        resp = f(namespace, repository, *args, **kwargs)
        if not mirroring_cfg:
            return resp
        source = mirroring_cfg['source']
        tags_cache_ttl = mirroring_cfg.get('tags_cache_ttl',
                                           DEFAULT_CACHE_TAGS_TTL)

        if resp.status_code != 404:
            logger.debug('Status code is not 404, no source '
                         'lookup required')
            return resp

        if not cache.redis_conn:
            # No tags cache, just return
            logger.warning('mirroring: Tags cache is disabled, please set a '
                           'valid `cache\' directive in the config.')
            source_resp = lookup_source(
                flask.request.path, stream=False, source=source
            )
            if not source_resp:
                return resp
            return toolkit.response(data=source_resp.content,
                                    headers=source_resp.headers, raw=True)

        store = storage.load()
        request_path = flask.request.path

        if request_path.endswith('/tags'):
            # client GETs a list of tags
            tag_path = store.tag_path(namespace, repository)
        else:
            # client GETs a single tag
            tag_path = store.tag_path(namespace, repository, kwargs['tag'])

        data = cache.redis_conn.get('{0}:{1}'.format(
            cache.cache_prefix, tag_path
        ))
        if data is not None:
            return toolkit.response(data=data, raw=True)
        source_resp = lookup_source(
            flask.request.path, stream=False, source=source
        )
        if not source_resp:
            return resp
        data = source_resp.content
        cache.redis_conn.setex('{0}:{1}'.format(
            cache.cache_prefix, tag_path
        ), tags_cache_ttl, data)
        return toolkit.response(data=data, headers=source_resp.headers,
                                raw=True)
Example no. 21
def create_game(callback):
    res = storage.load('/create-game', {
        'id': storage.id,
    })

    if res['status'] == 'OK':
        print('Create Game: respond OK')
        print('Create Game: SID =', res['sid'])
        storage.sid = res['sid']
        storage.run_server()
        res = storage.load(
            '/game', {
                'action': 'connect',
                'id': storage.id,
                'sid': storage.sid,
                'host': f'{storage.host}:{storage.port}',
            })

        if res['status'] == 'OK':
            storage.connection_count = res['connection_count']

    callback.Call(res)
Example no. 22
def login(login: str, password: str, callback):
    print('Login sending:', login, password)

    res = storage.load('/login', {
        'login': login,
        'password': password,
    })

    print(res)

    if (res['status'] == 'OK'):
        storage.id = res['id']

    callback.Call(res['status'])
Example no. 23
def storage_status():
    message = ''
    try:
        _storage = storage.load(_config.storage)
        key = toolkit.gen_random_string()
        value = toolkit.gen_random_string()
        _storage.put_content(key, value)
        stored_value = _storage.get_content(key)
        _storage.remove(key)
        if value != stored_value:
            message = 'Set value is different from what was received'
    except Exception as e:
        message = str(e)
    return {'storage': message}
Example no. 24
    def __init__(self, nickname, server, port=6667):
        SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
        self.ircobj.add_global_handler("all_events", self.dispatcher, 0)

        self.logged_in = False
        
        loghandler.Handler(self)
        self.storage = storage.load()
        self.storage.dirty_cb = self.storage_dirty_cb

        self.ipc = ipc.ListeningConnection(self.ircobj)

        self.timers = []
        Dekisu.instance = self
        Timer.bot = self
Example no. 25
def shop_select(slider_name, product_name, callback):
    res = storage.load(
        '/shop', {
            'action': 'select',
            'id': storage.id,
            'slider': slider_name,
            'product': product_name,
        })

    if res['status'] == 'OK':
        callback.Call(res)
    else:
        print(
            f'Shop: Error "Bad product select request ({slider_name}, {product_name})"'
        )
Example no. 26
 def docker_pull(self, namespace, repos):
     # Test pull
     # Docker -> Index
     resp = requests.get('{0}/v1/repositories/{1}/{2}/images'.format(
         self.index_endpoint, namespace, repos),
                         auth=tuple(self.user_credentials),
                         headers={'X-Docker-Token': 'true'})
     self.assertEqual(resp.status_code, 200)
     token = resp.headers.get('x-docker-token')
     # Here we should use the 'X-Endpoints' returned in a real environment
     # Docker -> Registry
     resp = requests.get('{0}/v1/repositories/{1}/{2}/tags/latest'.format(
         self.registry_endpoint, namespace, repos),
                         headers={'Authorization': 'Token ' + token})
     self.assertEqual(resp.status_code, 200, resp.text)
     self.cookies = resp.cookies
     # Docker -> Registry
     image_id = json.loads(resp.text)
     resp = requests.get('{0}/v1/images/{1}/ancestry'.format(
         self.registry_endpoint, image_id),
                         cookies=self.cookies)
     self.update_cookies(resp)
     self.assertEqual(resp.status_code, 200, resp.text)
     ancestry = json.loads(resp.text)
     # We got the ancestry, let's fetch all the images there
     for image_id in ancestry:
         json_data, checksum, blob = self.fetch_image(image_id)
         # check queried checksum and local computed checksum from the image
         # are the same
         tmpfile = StringIO.StringIO()
         tmpfile.write(blob)
         tmpfile.seek(0)
         computed_checksum = checksums.compute_simple(tmpfile, json_data)
         tmpfile.close()
         self.assertEqual(checksum, computed_checksum)
     # Remove image tags
     resp = requests.delete('{0}/v1/repositories/{1}/{2}/tags'.format(
         self.registry_endpoint, namespace, repos),
                            cookies=self.cookies)
     self.assertEqual(resp.status_code, 200, resp.text)
     self.update_cookies(resp)
     # Remove image_id, then parent_id
     store = storage.load()
     store.remove(os.path.join(store.images, self.image_id))
     store.remove(os.path.join(store.images, self.parent_id))
Example no. 27
def shop_buy(slider_name, product_name, callback):
    res = storage.load(
        '/shop', {
            'action': 'buy',
            'id': storage.id,
            'slider': slider_name,
            'product': product_name,
        })

    if res['status'] == 'OK':
        callback.Call(res)
    elif res['status'] == 'Deprecated':
        callback.Call(res)
        print('Shop: Deal deprecated because of small amount')
    else:
        print(
            f'Shop: Error "Bad product select request ({slider_name}, {product_name})"'
        )
Example no. 28
def connect_game(sid, callback):
    storage.sid = sid
    print('Connect Game: SID =', storage.sid)

    storage.run_server()

    res = storage.load(
        '/game', {
            'action': 'connect',
            'id': storage.id,
            'sid': storage.sid,
            'host': f'{storage.host}:{storage.port}',
        })

    if res['status'] == 'OK':
        storage.connection_count = res['connection_count']

    callback.Call(res)
Example no. 29
 def __init__(self, configuration):
     term = configuration['terminal']
     self._configuration = configuration
     try:
         db = storage.load(configuration, 'isbndb')
         self._rate = db['rate']
         if self._rate.date < date.today():
             term.debug("Resetting limit, expired %s" % self._rate.date)
             self._rate = Rate(limit=configuration['isbndb']['limit'],
                               date=date.today())
     except Exception:
         self._rate = Rate(limit=configuration['isbndb']['limit'],
                           date=date.today())
     if self._rate is not None:
         term.debug('%s ISBNDB requests permitted on %s.',
                    self._rate.limit, self._rate.date)
     else:
         term.debug('ISBNDB requests not limited.')
Example no. 30
 def __init__(self, configuration):
     term = configuration['terminal']
     self._configuration = configuration
     try:
         db = storage.load(configuration, 'isbndb')
         self._rate = db['rate']
         if self._rate.date < date.today():
             term.debug("Resetting limit, expired %s" % self._rate.date)
             self._rate = Rate(limit=configuration['isbndb']['limit'],
                               date=date.today())
     except Exception:
         self._rate = Rate(limit=configuration['isbndb']['limit'],
                           date=date.today())
     if self._rate is not None:
         term.debug('%s ISBNDB requests permitted on %s.',
                    self._rate.limit, self._rate.date)
     else:
         term.debug('ISBNDB requests not limited.')
Example no. 31
 def docker_pull(self, namespace, repos):
     # Test pull
     # Docker -> Index
     resp = requests.get('{0}/v1/repositories/{1}/{2}/images'.format(
         self.index_endpoint, namespace, repos),
         auth=tuple(self.user_credentials),
         headers={'X-Docker-Token': 'true'})
     self.assertEqual(resp.status_code, 200)
     token = resp.headers.get('x-docker-token')
     # Here we should use the 'X-Endpoints' returned in a real environment
     # Docker -> Registry
     resp = requests.get('{0}/v1/repositories/{1}/{2}/tags/latest'.format(
                         self.registry_endpoint, namespace, repos),
                         headers={'Authorization': 'Token ' + token})
     self.assertEqual(resp.status_code, 200, resp.text)
     self.cookies = resp.cookies
     # Docker -> Registry
     image_id = json.loads(resp.text)
     resp = requests.get('{0}/v1/images/{1}/ancestry'.format(
         self.registry_endpoint, image_id),
         cookies=self.cookies)
     self.update_cookies(resp)
     self.assertEqual(resp.status_code, 200, resp.text)
     ancestry = json.loads(resp.text)
     # We got the ancestry, let's fetch all the images there
     for image_id in ancestry:
         json_data, checksum, blob = self.fetch_image(image_id)
         # check queried checksum and local computed checksum from the image
         # are the same
         tmpfile = StringIO.StringIO()
         tmpfile.write(blob)
         tmpfile.seek(0)
         computed_checksum = checksums.compute_simple(tmpfile, json_data)
         tmpfile.close()
         self.assertEqual(checksum, computed_checksum)
     # Remove image tags
     resp = requests.delete('{0}/v1/repositories/{1}/{2}/tags'.format(
         self.registry_endpoint, namespace, repos), cookies=self.cookies)
     self.assertEqual(resp.status_code, 200, resp.text)
     self.update_cookies(resp)
     # Remove image_id, then parent_id
     store = storage.load()
     store.remove(os.path.join(store.images, self.image_id))
     store.remove(os.path.join(store.images, self.parent_id))
Example no. 32
def load_graph(fname):
    """ Loads a graph from the given file
    """
    sav = storage.load(fname)

    ver = sav['version']
    
    SAVE_FORMAT_VERSION = 5
    if ver > SAVE_FORMAT_VERSION:
        print "File format version {} incompatible!".format(ver)
        sys.exit()

    leaf = sav['leaf']
    tree = sav['tree']
    filt = sav['filtration']
    remv = sav['removed-edges']
    prun = sav['pruned']

    return leaf, tree, filt, remv, prun
Example no. 33
def format_tracks_from_file(tpath, tracks_format, coords='pixels'):
    if tracks_format == 'custom_text':
        convert_track = convert_track_custom_text
    elif tracks_format == 'csv':
        convert_track = convert_track_csv
    else:
        raise ValueError('Tracks format {} is invalid'.format(tracks_format))

    if tpath.is_file():
        tracks = load(tpath)
        text = ""

        tracks.sort(key=lambda x: x.id)

        for i_track, track in enumerate(tracks):
            text += convert_track(track, coords, i_track)

        return text
    else:
        raise FileNotFoundError('Track {} not found'.format(tpath))
Example no. 34
        def wrapper(*args, **kwargs):
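            # Pull-through mirroring: on a local 404, fetch JSON or layer data
            # from the configured source, optionally persisting it locally.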
            cfg = config.load()
            mirroring_cfg = cfg.mirroring
            resp = f(*args, **kwargs)
            if not mirroring_cfg:
                return resp
            source = mirroring_cfg['source']
            if index_route:
                source = mirroring_cfg.get('source_index', source)
            logger.debug('Source provided, registry acts as mirror')
            if resp.status_code != 404:
                logger.debug('Status code is not 404, no source '
                             'lookup required')
                return resp
            source_resp = lookup_source(
                flask.request.path, stream=stream, source=source
            )
            if not source_resp:
                return resp

            store = storage.load()

            if not stream:
                logger.debug('JSON data found on source, writing response')
                resp_data = source_resp.content
                if cache:
                    store_mirrored_data(
                        resp_data, flask.request.url_rule.rule, kwargs,
                        store
                    )
                return toolkit.response(
                    data=resp_data,
                    headers=source_resp.headers,
                    raw=True
                )
            logger.debug('Layer data found on source, preparing to '
                         'stream response...')
            layer_path = store.image_layer_path(kwargs['image_id'])
            return _handle_mirrored_layer(source_resp, layer_path, store)
Example no. 35
def traintestsplit(filepath):
    """ split data into train/test subsets
    """
    proba1 = load(filepath)
    processedDF = pd.DataFrame(proba1)
    
    # sort emotion dictionary and get its values
    processedDF[1] = processedDF[1].apply(lambda y: sorted(y.items(), key=lambda x: x[0]))
    processedDF[1] = processedDF[1].apply(lambda y: list(map(lambda x: x[1], y)))
    
    processedDF.columns = ['name', 'data', 'emotion']
    processedDF = processedDF.sort_values(by=['name', 'emotion'])
    grouped = processedDF.groupby(['name', 'emotion'])
    
    # split train and test data
    train = grouped.nth([0, 1])
    test = grouped.nth([2, 3, 4])
    _yTrain = train.index.get_level_values(1).tolist()
    _xTrain = train.values.ravel().tolist()
    _yTest = test.index.get_level_values(1).tolist()
    _xTest = test.values.ravel().tolist()
    return _xTrain, _yTrain, _xTest, _yTest
Example no. 36
def extractProbabilities(filePath, C, gamma):
    """ build dataset containing the probabilities of each class of every image
        in the test set from the original dataset
    """
    filename = filePath.stem
    processedJAFFE = load(str(filePath))
    processedDF = pd.DataFrame(processedJAFFE)
    processedDF.columns = ['name', 'data', 'emotion']
    processedDF = processedDF.sort_values(by=['name', 'emotion'])
    grouped = processedDF.groupby(['name', 'emotion'])

    # extract train data
    train = grouped.nth([0, 1])
    yTrain = train.index.get_level_values(1).tolist()
    xTrain = train.values.ravel().tolist()

    # train our model
    svc = OneVsRestClassifier(SVC(random_state=0,
                                  decision_function_shape='ovr',
                                  C=C,
                                  kernel='rbf',
                                  gamma=gamma,
                                  probability=True),
                              n_jobs=4)
    svc.fit(xTrain, yTrain)

    classes = svc.classes_
    for index, (_name, data, _emotion) in enumerate(processedJAFFE):
        proba = svc.predict_proba(data.reshape(1, -1))[0]
        processedJAFFE[index][1] = dict(zip(classes, proba))
        pprint(processedJAFFE[index])
        print(svc.predict(data.reshape(1, -1)))
        print('-' * 50)
    newFilename = filename + '_probabilities_c_%s_gamma_%s' % (C, gamma)
    print('saving file:', '../data/probabilities/%s' % newFilename)
    save('../data/probabilities/%s' % newFilename, processedJAFFE)
Example no. 37
import storage

count = storage.load()
print "Total patterns: ", len(storage.TREE.patterns)
print "Total messages: ", count
 def setUp(self):
     self._storage = storage.load("selectel")
Example no. 39
 def _load_map_points_from_model(self):
     model = storage.load(self._args.model)[0]
     return model.normalized_observed_reductions
Example no. 40
 def setUp(self):
     self._storage = storage.load('local')
Example no. 41
    saveload_group = parser.add_mutually_exclusive_group()
    saveload_group.add_argument('-l', '--load', help="Load saved analyzed"
            " data instead of graph file", action='store_true')
    saveload_group.add_argument('-s', '--save', 
            help="Save analyzed data in pickle",
            type=str, default="")
    saveload_group.add_argument('-r', '--reanalyze', 
            help="Reanalyzes the given"
            " file, i.e. does everything except calculating"
            " the tree layout.", action='store_true')

    args = parser.parse_args()
    print "Loading file."
    
    if args.load or args.reanalyze:
        sav = storage.load(args.INPUT)

        horton_strahler = sav['horton-strahler-index']
        shreve = sav['shreve-index']
        tree_asymmetry = sav['tree-asymmetry']
        tree_asymmetry_no_ext = sav['tree-asymmetry-no-ext']
        areas = sav['tree-areas']
        marked_tree = sav['marked-tree']
        marked_tree_no_ext = sav['marked-tree-no-ext']
        tree_pos = sav['tree-positions']

        graph_file = sav['graph-file']

        leaf, tree, filt, remv, prun = load_graph(graph_file)
    else:
        graph_file = args.INPUT
Example no. 42
 def setUp(self):
     self._cfg = config.load()
     conn = boto.connect_s3(self._cfg.s3_access_key, self._cfg.s3_secret_key)
     conn.create_bucket(self._cfg.s3_bucket)
     self._storage = storage.load('s3')
Example no. 43
 def load_memory():
     filename = QtGui.QFileDialog.getOpenFileName(self, "Load memory", filter="Memory (*.mem)")
     if filename:
         memory.set_frames(storage.load(filename))
         recall_behavior.reset()
Example no. 44
 def load_state(self):
     self.voted_for, self.current_term, self.log = storage.load(self.id)
     # print "voted for", self.voted_for
     print self.current_term
     print self.log
Example no. 45
 def setUp(self):
     self._storage = storage.load('s3')
Example no. 46
        improvise_params,
        preferred_location,
        MAX_NOVELTY)

improvise_params = ImproviseParameters()
improvise_params.set_values_from_args(args)
improvise_behaviors = {
    model_name: _create_improvise_behavior(model_name)
    for model_name in MODELS}

set_up_logging()

index = 0
memory = Memory()
if args.memory:
    memory.set_frames(storage.load(args.memory))
recall_behavior = RecallBehavior()
master_behavior = MasterBehavior() 
avatar = Avatar(index, master_entity, master_behavior)

def clear_memory():
    recall_behavior.reset()
    memory.clear()
            
avatars = [avatar]

application = Application(
    students[args.model], avatars, args, receive_from_pn=True, create_entity=create_entity, z_up=Z_UP)

set_model(args.model)
set_max_angular_step(args.max_angular_step)
Example no. 47
def main():
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    if args.fib:
        for x in fib.generate_fib_sequence(args.fib):
            print(x)

    if args.sto:
        store = storage.Storage()  # avoid shadowing the 'storage' module
        command = ""
        while command != "exit":
            line = input("Enter command:")
            items = line.split()
            if len(items) == 0:
                print("Command required")
                continue
            command = items[0]
            del items[0]
            if command == "add":
                for x in items:
                    store.add(x)
            elif command == "remove":
                for x in items:
                    store.remove(x)
            elif command == "find":
                for x in items:
                    store.find(x)
            elif command == "list":
                store.list()
            elif command == "save":
                print(items[0])
                store.save(items[0])
            elif command == "load":
                store.load(items[0])
            elif command == "grep":
                store.grep(items[0])
            elif command != "exit":
                print("Invalid command")

    if args.text:
        try:
            with open(args.text, "r") as file:
                text = file.read()
        except IOError:
            print("File {} doesn't exist".format(args.text))
            return

        print("Average wordcount: {}".format(text_statistics.average_wordcount(text)))
        print("Median wordcount: {}".format(text_statistics.median_wordcount(text)))
        print("Word count: {}".format(text_statistics.count_words(text)))
        try:
            n = input("Enter N:")
            number = input("Enter K:")
            print("Top {0} of {1}-grams is:".format(number, n))
            print(text_statistics.top_ngrams(text, int(n), int(number)))
        except ValueError:
            print("Invalid input. Integer required!")

    if args.sort:
        try:
            with open(args.sort, "r") as file:
                ara = file.read().replace("\n", "").replace(" ", "").split(",")
                ara = list(map(int, ara))
        except IOError:
            print("File {} doesn't exist".format(args.sort))
            return
        print("quick sort")
        sort.demo(ara, sort.quick_sort)
        print("merge sort")
        sort.demo(ara, sort.merge_sort)
        print("radix sort")
        sort.demo(ara, sort.radix_sort)
Example no. 48
#!/usr/bin/python

import sys
import resources
import storage
from twisted.internet.protocol import DatagramProtocol
from twisted.web import server
from twisted.application import service, internet
from twisted.python import log
from django.conf import settings

storage.load()

settings.configure(TEMPLATE_DIRS=('templates',))

class UDPServer(DatagramProtocol):
    def datagramReceived(self, datagram, address):
        data = datagram.strip().decode('cp1251')
        storage.put(data)

class Service(service.Service):
    def startService(self):
        log.startLogging(sys.stdout)
        service.Service.startService(self)
        log.msg('ErrorDigest server started')

_topService = service.MultiService()

_service = Service()
_service.setServiceParent(_topService)
Example no. 49
 def setUp(self):
     self._storage = storage.load('swift')
     self._storage._swift_connection.put_container(
         self._storage._swift_container
     )
Example no. 50
 def setUp(self):
     self._storage = storage.load("local")
Example no. 51
import datetime
import functools
import logging
import time

import flask
import simplejson as json

import checksums
import storage
import toolkit

from .app import app


store = storage.load()
logger = logging.getLogger(__name__)


def require_completion(f):
    """This make sure that the image push correctly finished."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if store.exists(store.image_mark_path(kwargs['image_id'])):
            return toolkit.api_error('Image is being uploaded, retry later')
        return f(*args, **kwargs)
    return wrapper


def set_cache_headers(f):
    """Returns HTTP headers suitable for caching."""
Example no. 52
 def setUp(self):
     self._storage = storage.load('gcs')