Example #1
 def __init__(self):
     self.parser.add_argument('-a',
                              dest='algorithm',
                              type=int,
                              help=stringify(algorithms))
     self.parser.add_argument('-p',
                              dest='problem',
                              type=int,
                              help=stringify(problems))
     self.parser.add_argument('-g',
                              default=10000,
                              dest='n_generations',
                              type=int,
                              help='number of generations, default 10000')
     self.parser.add_argument('-o',
                              default=50,
                              dest='population',
                              type=int,
                              help='population size, default 50')
     self.parser.add_argument('-r',
                              default=0.5,
                              dest='probability',
                              type=float,
                              help='operation probability, default 0.5')
     self.args = vars(self.parser.parse_args())
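Example #1 builds its help strings with a `stringify(options)` helper that is not shown. A minimal sketch of what it plausibly does, assuming `algorithms` and `problems` are index-to-name mappings (the mapping format is an assumption, not taken from the source):

def stringify(options):
    # Render {index: name} pairs as "0: foo, 1: bar" for argparse help text.
    return ', '.join('%d: %s' % (k, v) for k, v in sorted(options.items()))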
Example #2
    def on_message(self, msg):
        try:
            dl_msg_type = msg[1]
            if dl_msg_type == DataLoggerMessages.EXTRACT_GENERAL:
                self.uid = msg[2]
                self.n_columns = parse_uint32(msg[3:7])
                self.n_entries = parse_uint32(msg[7:11])
                self.schema_name = stringify(msg[11:])
            elif dl_msg_type == DataLoggerMessages.EXTRACT_COLUMN:
                column_idx = msg[2]
                self.column_done[column_idx] = True
                self.column_type[column_idx] = msg[3]
                self.column_name[column_idx] = stringify(msg[4:])
            elif dl_msg_type == DataLoggerMessages.EXTRACT_DATA:
                entry_idx = parse_uint32(msg[2:6])
                self.entry_done[entry_idx] = True
                entry = []
                offset = 6
                for column_type in self.column_type:
                    if column_type == ColumnType.FLOAT:
                        entry.append(parse_float(msg[offset:(offset+4)]))
                        offset += 4
                    elif column_type == ColumnType.UINT32:
                        entry.append(parse_uint32(msg[offset:(offset+4)]))
                        offset += 4
                self.entry[entry_idx] = tuple(entry)

            if (self.schema_name is not None and
                    all(self.column_done) and
                    all(self.entry_done)):
                self.done = True
        except Exception as e:
            print(e)
            self.failure = True
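Example #2 leans on byte-level helpers (`parse_uint32`, `parse_float`, `stringify`) that are defined elsewhere in its project. A sketch of compatible implementations using the standard struct module; the little-endian byte order and UTF-8 decoding are assumptions:

import struct

def parse_uint32(data):
    # Unsigned 32-bit integer, little-endian (assumed).
    return struct.unpack('<I', bytes(data))[0]

def parse_float(data):
    # IEEE-754 single-precision float, little-endian (assumed).
    return struct.unpack('<f', bytes(data))[0]

def parse_int(data):
    # Signed 16-bit integer, matching the two-byte slices in Examples #34 and #39 (assumed).
    return struct.unpack('<h', bytes(data))[0]

def stringify(data):
    # Decode a byte sequence to text; UTF-8 with replacement is an assumption.
    return bytes(data).decode('utf-8', errors='replace')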
Example #3
    def create_splits(self, test_prop=.1, dev_prop=.1):
        """
        Takes the train data and creates train-dev-test splits:
            * if a `test_prop` (float) is passed, a test set
              will be created and saved (for later comparisons).
            * if a `dev_prop` (float) is passed, a dev set will
              be created on the basis of the training material 
              which remains after creating the test set.
        """
        if test_prop:
            self.X_train, self.X_test, self.Y_train, self.Y_test, \
                self.train_tokens, self.test_tokens = \
                train_test_split(self.X_train, self.Y_train,
                    self.train_tokens, test_size=test_prop, random_state=42982)

            # save test data for comparison to Bouma et al:
            gt = utils.pred_to_classes(self.Y_test)
            with open('../data/test_gold.txt', 'w') as f:
                for i in [utils.stringify(o, p) for o, p in
                          zip(self.test_tokens, gt)]:
                    f.write(i+'\n')
            with open('../data/test_input.txt', 'w') as f:
                for i in self.test_tokens:
                    f.write(''.join(i)+'\n')

        if dev_prop:
            self.X_train, self.X_dev, self.Y_train, self.Y_dev, \
                self.train_tokens, self.dev_tokens = \
                train_test_split(self.X_train, self.Y_train,
                    self.train_tokens, test_size=dev_prop, random_state=4767)
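Note that `train_test_split` accepts any number of parallel sequences and returns a train/test pair for each, which is what lets the method split `X`, `Y`, and the raw tokens in one call. A self-contained illustration with dummy data:

import numpy as np
from sklearn.model_selection import train_test_split

X, Y, tokens = np.zeros((100, 5)), np.zeros(100), list(range(100))
X_tr, X_te, Y_tr, Y_te, tok_tr, tok_te = train_test_split(
    X, Y, tokens, test_size=0.1, random_state=42982)
print(len(X_tr), len(X_te))  # 90 10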
Example #4
 def episodes_list_Menu():
     xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
     url = vars.params.get("url")
     serie_title = vars.params.get("serie_title")
     seasonidx = int(vars.params.get("seasonidx"))
     response = utils.stringify(urllib2.urlopen(url).read())
     utils.log("episodeListMenu: response: %s" % response, xbmc.LOGDEBUG)
     jsonresponse = json.loads(response)
     episodes = jsonresponse['results']['seasons'][seasonidx]['episodes']
     for episode in episodes:
         name = episode['title']
         release_date = episode['releaseDate'].split('T')[0]
         plot = episode['description']
         runtime = episode['program']['runtimeHours'].split(':')
         seconds = int(runtime[-1])
         minutes = int(runtime[-2])
         duration = minutes * 60 + seconds
         if len(runtime) == 3:
             hours = int(runtime[0])
             duration = duration + hours * 3600
         thumb = episode['image']
         infoList = {
                 "mediatype": "episode",
                 "title": name,
                 "TVShowTitle": serie_title,
                 "duration": duration,
                 "plot": plot,
                 "aired": str(release_date)
                 }
         common.addListItem(url=str(episode['program']['id']), name=name,
                            mode='nba_tv_play_serieepisode', iconimage=thumb,
                            infoList=infoList)
     xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
Example #5
 def season_Menu():
     xbmcplugin.setContent(int(sys.argv[1]), 'seasons')
     slug = vars.params.get("slug")
     serie_title = vars.params.get("serie_title")
     page = int(vars.params.get("page", 1))
     per_page = 20
     utils.log("seasonListMenu: tag is %s, page is %d" % (slug, page), xbmc.LOGDEBUG)
     base_url = "https://content-api-prod.nba.com/public/1/endeavor/video-list/nba-tv-series/%s?"
     params = urlencode({
         "sort": "releaseDate desc",
         "page": page,
         "count": per_page
     })
     url = base_url % slug + params
     response = utils.stringify(urllib2.urlopen(url).read())
     utils.log("seasonListMenu: response: %s" % response, xbmc.LOGDEBUG)
     jsonresponse = json.loads(response)
     seasonicon = jsonresponse['results']['series']['coverImage']['portrait']
     # idx is the index of the season in the json data
     # TODO: avoid fetching the same page for season and episodes
     for idx, season in enumerate(jsonresponse['results']['seasons']):
         name = 'Season %s' % season['season']
         common.addListItem(name, '',
                 'nba_tv_episode',
                 seasonicon,
                 isfolder=True,
                 customparams={'url': url, 'seasonidx': idx, 'serie_title': serie_title})
Example #6
async def get_students():
    """/student route."""
    students = current_app.config['student_manager']
    data = await students.get()
    if not data:
        return '', HTTPCode.NOTFOUND
    return stringify(data), HTTPCode.OK
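The Quart routes in Examples #6, #7, #10, and the similar ones below all return `stringify(data)` bodies, which suggests a helper that JSON-encodes the manager's model objects. A minimal sketch under that assumption (the `vars()`-based serialization is hypothetical; the real objects may expose their own mapping):

import json

def stringify(objects):
    # JSON-encode a list of plain model objects via their attribute dicts.
    return json.dumps([vars(obj) for obj in objects])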
Example #7
async def get_all_tasks(auth_obj):
    """Route that gets all the tasks in the database. Any authentication necessary.
    Teacher auth -> all tasks returned
    Student auth -> student's tasks returned
    No auth -> BADREQUEST"""
    tasks = current_app.config['task_manager']
    is_completed = request.args.get(
        "is_completed"
    )  # Set to "True" if the client wants the "has_completed" attribute
    is_mine = request.args.get(
        "mine"
    ) == "True"  # Used when a teacher wants only their own tasks
    # TODO: perhaps make returning only the teacher's own tasks the default teacher functionality

    if isinstance(auth_obj, Student):
        # Get only student's tasks
        if is_completed == "True":
            data = await tasks.get(student_id=auth_obj.id, get_completed=True)
        else:
            data = await tasks.get(student_id=auth_obj.id)
    else:
        # Get the teacher's tasks (all the tasks from the database)
        if is_mine:
            data = await tasks.get(teacher_id=auth_obj.id)
        else:
            data = await tasks.get()

    if data:
        return stringify(data), HTTPCode.OK
    else:
        return '', HTTPCode.NOTFOUND
Example #8
def main():
    # params:
    config = {'learning_rate': 0.001,
              'nr_epochs': 200,
              'batch_size': 8}
    batch_size = 1
    model_name = stringify(["model_"] +
                           [str(k) + '_' + str(config[k]) for k in config
                            if isinstance(config[k], (int, float))]) + '.pt'
    model_path = ''
    # data_path = ''
    
    # train_dataset = dataset()
    # train_loader = DataLoader(train_dataset, batch_size=batch_size,
    #                           shuffle=True)
    test_dataset = dataset()
    val_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    model_inference = model_def()
    model_inference.load_state_dict(torch.load(
                   os.path.join(model_path, model_name), map_location=device))
    model_inference.eval()

    labels = np.array([])
    outputs = np.array([])
    for batch_idx, (data, target) in enumerate(val_loader):
        prob = model_inference.forward_inference(data)
        labels = np.append(labels, target.data.to('cpu'))
        outputs = np.append(outputs, prob.data.to('cpu'))
    print(labels)
    print(outputs)
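The `stringify` used for `model_name` above appears to concatenate name fragments into one file-name stem. A sketch under that assumption (plain concatenation; the real helper may insert separators):

def stringify(parts):
    # ["model_", "nr_epochs_200", "batch_size_8"] -> "model_nr_epochs_200batch_size_8"
    return ''.join(str(p) for p in parts)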
Example #9
def header_img_to_latex(elem, doc):
    """
    Convert Header -> LaTeX RawInline
    """

    if isinstance(elem, Header):
        modified = False
        # Will contain the elements without the Images, replaced by LaTeX RawInlines
        new_content = []

        stringified = stringify(elem).strip()  # before we include the latex includegraphics

        for item in elem.content:
            if isinstance(item, Image):
                modified = True
                convert_px_to_in(96, item)
                new_content.append(
                    RawInline('\\raisebox{-0.2\\height}{' +
                              generate_includegraphics(item) + '}\\enspace',
                              format='tex'))
            else:
                new_content.append(item)

        if modified:
            elem.content = new_content
            return generate_latex_header(elem, stringified)
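Like all panflute filters, this one is meant to be driven by `run_filter`; `convert_px_to_in` and `generate_includegraphics` are project helpers not shown here. A plausible entry point, using panflute's standard API:

import panflute

def main(doc=None):
    return panflute.run_filter(header_img_to_latex, doc=doc)

if __name__ == '__main__':
    main()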
Example #10
async def task_completed(id, auth_obj):
    """Route that sets a task to completed for a given student. This route must have a 
    'completed' field in the form-data if the task is completed. If not given, this defaults to False."""
    if not id.isdigit():
        return '', HTTPCode.BADREQUEST

    tasks = current_app.config['task_manager']
    task_id = int(id)

    if request.method == "POST":
        form = await request.form
        completed = form.get("completed") == "true"

        try:
            await tasks.student_completed(completed, auth_obj.id, task_id)
            return '', HTTPCode.OK
        except PermissionError:
            return '', HTTPCode.UNAUTHORIZED  # Unauthorized to change other people's task statuses

    elif request.method == "GET":
        marks = current_app.config['mark_manager']
        mark = await marks.get(student_id=auth_obj.id, task_id=task_id)
        if mark:
            return stringify([mark]), HTTPCode.OK
        else:
            return '', HTTPCode.NOTFOUND
Example #11
def get_status():
    '''
    Returns either a status object or its text representation when given a valid
    token string.
    curl localhost:8081/status?token=4f7b38cf02f0ba5c38000000
    curl 'localhost:8081/status?token=4f7b38cf02f0ba5c38000000&human_readable=true'
    '''
    token = utils.str_to_obj(request.query.get('token'))
    if not token:
        raise HTTPResponse('Please specify a valid token.\n', 400)

    # Output formatting
    status = utils.get_status(token, no_id=True)
    human_readable = request.query.get('human_readable')
    if status:
        if human_readable:  # Return text depending on the deployment's status
            if status['Deployment finished']:
                return 'Deployment finished.\n'
            elif status['Error occured']:  # key spelling matches the stored status object
                return 'Error occurred during deployment.\n'
            else:
                try:
                    return '%s.\n' % status['running step']
                except KeyError:
                    return 'Error occurred before the beginning of deployment.\n'
        else:  # Just return the whole status object
            return utils.stringify(status)
    else:
        raise HTTPResponse('No status found for this token\n', 404)
Example #12
def html_img_to_image(elem, doc):
    """
    Apply the translations, see module doc
    """

    # We are only interested in image elements
    if not isinstance(elem, Image):
        return

    if not elem.content:
        return RawInline(etree.tostring(E.img(src=elem.url, **elem.attributes),
                                        encoding='utf-8',
                                        xml_declaration=False,
                                        pretty_print=False).decode('utf-8'),
                         format='html')
    html = E(
        "figure",
        E.img(CLASS("figure"), src=elem.url, **elem.attributes),
        E("figcaption", CLASS("caption"), stringify(elem)),
    )
    if hasattr(elem.content[0], 'identifier'):
        html.set('id', elem.content[0].identifier)
    return RawInline(etree.tostring(html,
                                    encoding='utf-8',
                                    xml_declaration=False,
                                    pretty_print=True).decode('utf-8'),
                     format='html')
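For reference, the names this filter relies on come from lxml's element builder and panflute; a plausible import block (the exact import paths used by the source are not shown):

from lxml import etree
from lxml.builder import E
from lxml.html.builder import CLASS
from panflute import Image, RawInline, stringify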
Example #15
async def create_teacher():
    """Creates a new teacher."""
    data = await request.form
    teachers = current_app.config['teacher_manager']

    try:
        forename = data.get("forename")
        surname = data.get("surname")
        username = data.get("username")
        title = data.get("title")
        password = data.get("password")
        if not (forename and surname and title and password):
            return '', HTTPCode.BADREQUEST
        if not is_password_sufficient(password):  # check strength only after presence, avoiding a KeyError
            return '', HTTPCode.BADREQUEST

        await teachers.create(forename, surname, username or "",
                              title, password)
        all_teachers = await teachers.get()
        teacher = max(all_teachers,
                      key=lambda x: x.id)  # Returns the newest teacher
        return stringify([teacher]), HTTPCode.CREATED, {
            "Location": bp.url_prefix + "/" + str(teacher.id)
        }
    except UsernameTaken:
        return '', HTTPCode.BADREQUEST  # Username taken
Example #16
 def __str__(self):
     if self.cla is not None:
         s = ".{}".format(self.cla)
     elif self.elem is not None:
         s = "#{}".format(self.elem.id)
     else:
         s = "#foo"
     return stringify(s)
Example #17
async def username_taken():
    """Route that returns true if the username is taken. Requires some type of authentication."""
    data = await request.form
    username = data.get("username")
    if not username:
        return '', HTTPCode.BADREQUEST
    taken = await current_app.config['student_manager'].is_username_taken(username)
    return stringify([taken]), HTTPCode.OK
Example #18
 def __str__(self):
     s = "{ "
     for prop in self.props:
         s += "\"{}\": [".format(prop)
         for value in self.props[prop]:
             s += stringify(str(value)) + ", "
         s += "], "
     s += " }"
     return s
Example #19
async def get_group(id):
    if not id.isdigit():
        return '', HTTPCode.BADREQUEST

    groups = current_app.config['group_manager']
    data = await groups.get(group_id=int(id))
    if not data:
        return '', HTTPCode.NOTFOUND
    return stringify([data]), HTTPCode.OK
Example #20
async def get_task(id):
    """Route that gets a task from the database. Any authentication necessary."""
    if not id.isdigit():
        return '', HTTPCode.BADREQUEST

    tasks = current_app.config['task_manager']
    task = await tasks.get(id=int(id))
    if not task:
        return '', HTTPCode.NOTFOUND
    return stringify([task]), HTTPCode.OK
Example #21
def print_routes():
    '''
    Kinda HATEOAS ;).
    See http://article.gmane.org/gmane.comp.python.bottle.general/480/match=routes+list
    curl localhost:8081/
    '''
    routes = []
    for route in default_app().routes:
        routes.append('%s %s' % (route.method, route.rule))
    return utils.stringify(routes)
Example #22
def generate_latex_header(elem, stringified):
    """
    Generate a LaTeX header
    """

    levels = ['section', 'subsection', 'subsubsection', 'paragraph']

    return RawBlock('\\%s[%s]{%s}\n' %
                    (levels[elem.level - 1], stringified, stringify(elem)),
                    format='tex')
Example #24
def find_params(model, feature_set, y, subsample=None, grid_search=False):
    """
    Return parameter set for the model, either predefined
    or found through grid search.
    """
    model_name = model.__class__.__name__
    params = INITIAL_PARAMS.get(model_name, {})
    y = y if subsample is None else y[subsample]

    try:
        with open('saved_params.json') as f:
            saved_params = json.load(f)
    except IOError:
        saved_params = {}

    if (grid_search and model_name in PARAM_GRID
            and stringify(model, feature_set) not in saved_params):
        X, _ = get_dataset(feature_set, subsample, [0])
        clf = GridSearchCV(model,
                           PARAM_GRID[model_name],
                           cv=10,
                           n_jobs=6,
                           scoring="roc_auc")
        clf.fit(X, y)
        logger.info("found params (%s > %.4f): %s",
                    stringify(model, feature_set), clf.best_score_,
                    clf.best_params_)
        params.update(clf.best_params_)
        saved_params[stringify(model, feature_set)] = params
        with open('saved_params.json', 'w') as f:
            json.dump(saved_params,
                      f,
                      indent=4,
                      separators=(',', ': '),
                      ensure_ascii=True,
                      sort_keys=True)
    else:
        params.update(saved_params.get(stringify(model, feature_set), {}))
        if grid_search:
            logger.info("using params %s: %s", stringify(model, feature_set),
                        params)

    return params
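`stringify(model, feature_set)` serves as the cache key in both `saved_params` and the log lines. A minimal sketch of such a key builder (the exact format is an assumption):

def stringify(model, feature_set):
    # e.g. "LogisticRegression-basic"
    return "%s-%s" % (model.__class__.__name__, feature_set)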
Example #25
async def get_task_marks(id):
    """Gets all the marks avaliable for the given task. This route can be used to see who has completed a task, too."""
    if not id.isdigit():
        return '', HTTPCode.BADREQUEST

    marks = current_app.config['mark_manager']
    data = await marks.get(task_id=int(id))
    if not data:
        return '', HTTPCode.NOTFOUND
    else:
        return stringify([data]), HTTPCode.OK
Example #26
async def get_group_tasks(id):
    """Route that gets all the tasks relating to a group. Any authentication level needed."""
    if not id.isdigit():
        return '', HTTPCode.BADREQUEST

    tasks = current_app.config['task_manager']
    data = await tasks.get(group_id=int(id))
    if not data:
        return '', HTTPCode.NOTFOUND
    else:
        return stringify(data), HTTPCode.OK
Example #27
async def teacher_get_current_feedback(id):
    """Route that returns pre-existing feedback for a student and a task. The student_id is given as a query string parameter under `student`."""
    student_id = request.args.get("student")
    if not id.isdigit() or not student_id or not student_id.isdigit():
        return '', HTTPCode.BADREQUEST

    marks = current_app.config['mark_manager']
    mark = await marks.get(student_id=int(student_id), task_id=int(id))
    if mark:
        return stringify([mark]), HTTPCode.OK
    else:
        return '', HTTPCode.NOTFOUND
Example #28
 def dumpDatabase():
     collections = DatabaseManagement.db.collection_names()
     dump = {}
     for collection_name in collections:
         col = getattr(DatabaseManagement.db, collection_name)
         collection = col.find()
         dump[collection_name] = []
         for document in collection:
             for attribute in document:
                 document[attribute] = stringify(document[attribute])
             dump[collection_name].append(document)
     return dump
Example #29
async def get_group_students(id):
    """Get all the students in a given group."""
    if not id.isdigit():
        return '', HTTPCode.BADREQUEST

    groups = current_app.config['group_manager']
    group = await groups.get(group_id=int(id))
    if not group:
        return '', HTTPCode.NOTFOUND

    data = await groups.students(int(id))
    return stringify(data), HTTPCode.OK
Example #30
 def nba_tv_videoPlay():
     xbmcplugin.setContent(int(sys.argv[1]), 'videos')
     slug = vars.params.get("slug")
     page = int(vars.params.get("page", 1))
     per_page = 22
     utils.log("nba_tv_videoPlay: collection is %s, page is %d" % (slug, page), xbmc.LOGDEBUG)
     base_url = "https://content-api-prod.nba.com/public/1/endeavor/video-list/collection/%s?"
     params = urlencode({
         "sort": "releaseDate desc",
         "page": page,
         "count": per_page
     })
     url = base_url % slug + params
     utils.log("nba_tv_videoPlay: %s: url of collection is %s" % (slug, url), xbmc.LOGDEBUG)
     response = utils.stringify(urllib2.urlopen(url).read())
     utils.log("nba_tv_videoPlay: response: %s" % response, xbmc.LOGDEBUG)
     jsonresponse = json.loads(response)
     for video in jsonresponse['results']['videos']:
         name = video['title']
         entitlement = video['entitlements']
         release_date = video['releaseDate'].split('T')[0]
         plot = video['description']
         thumb = video['image']
         runtime = video['program']['runtimeHours'].split(':')
         seconds = int(runtime[-1])
         minutes = int(runtime[-2])
         duration = minutes * 60 + seconds
         if len(runtime) == 3:
             hours = int(runtime[0])
             duration = duration + hours * 3600
         infoList = {
                 "mediatype": "video",
                 "title": name,
                 "duration": duration,
                 "plot": plot,
                 "aired": str(release_date)
                 }
         if entitlement == 'free':
             common.addListItem(url=str(video['program']['id']), name=name,
                                mode='videoplay', iconimage=thumb, infoList=infoList)
         else:
             common.addListItem(url=str(video['program']['id']), name=name,
                                mode='nba_tv_play_serieepisode', iconimage=thumb,
                                infoList=infoList)
     if vars.params.get("pagination") and page+1 <= jsonresponse['results']['pages']:
         next_page_name = xbmcaddon.Addon().getLocalizedString(50008)
 
         # Add "next page" link
         custom_params = {
             'slug': slug,
             'page': page + 1,
             'pagination': True
         }
         common.addListItem(next_page_name, '', 'nba_tv_videolist', '', True, customparams=custom_params)    
     xbmcplugin.endOfDirectory(handle=int(sys.argv[1]))
Example #31
def get_model():
    '''
    curl localhost:8081/environments?env=dev1 or
    curl localhost:8081/environments
    '''
    try:
        models = pymongo.Connection().sds.models
    except pymongo.errors.AutoReconnect:
        raise HTTPResponse('Couldn\'t connect to SDS db\n', 500)
    company = request.creds.company
    env = request.query.get('env')
    if not env:  # Will list all models for the given company
        models = [m['env'] for m in models.find({'company': company})]
        if len(models) == 0:
            return 'You have no models\n'
        return utils.stringify(models)
    else:
        try:
            model = models.find_one({'company': company, 'env': env})['model']
        except (TypeError, KeyError):  # no matching document, or document lacks 'model'
            raise HTTPResponse('Model %s not found\n' % env, 404)
        return utils.stringify(model)
Example #33
def links_to_footnotes(elem, doc):
    """
    Will shift a header level from the filter-header-shift
    metadata value (which must exist)
    """
    if doc.format != 'latex':
        return

    if isinstance(elem, Link):
        if elem.url.startswith('#'):
            return
        if elem.url.startswith('mailto:'):
            return
        if elem.url == stringify(elem):
            return
        return [
            elem,
            Note(
                Para(RawInline(stringify(elem), format='tex'), Str(':'),
                     Space(),
                     Link(Str(elem.url), title=elem.title, url=elem.url)))
        ]
Example #34
def parse_message(msg):
    parsed_message = []
    i = 0
    last_open_str = 0

    def store_string_so_far():
        if last_open_str != i:
            parsed_message.append(stringify(msg[last_open_str:i]))

    while i < len(msg):
        if stringify(msg[i:(i+2)]) == '%d':
            store_string_so_far()
            int_bytes = msg[(i+2):(i+4)]
            parsed_int = parse_int(int_bytes)
            parsed_message.append(parsed_int)
            last_open_str = i + 4
            i = i + 4
        elif stringify(msg[i:(i+2)]) == '%f':
            store_string_so_far()
            float_bytes = msg[(i+2):(i+6)]
            parsed_float = parse_float(float_bytes)
            parsed_message.append(parsed_float)
            last_open_str = i + 6
            i = i + 6
        elif stringify(msg[i:(i+2)]) == '%l':
            store_string_so_far()
            uint32_bytes = msg[(i+2):(i+6)]
            parsed_uint32 = parse_uint32(uint32_bytes)
            parsed_message.append(parsed_uint32)
            last_open_str = i + 6
            i = i + 6
        else:
            i += 1
            if i == len(msg):
                store_string_so_far()

    return parsed_message
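A hypothetical round trip, using the struct-based helpers sketched under Example #2 (the format codes consume 2 bytes for %d and 4 bytes for %f and %l, matching the offsets above):

import struct

msg = b'charge %d' + struct.pack('<h', 42) + b' soc %f' + struct.pack('<f', 0.87)
print(parse_message(msg))  # ['charge ', 42, ' soc ', 0.8700000047683716]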
Example #35
 def nba_tv_videoMenu():
     xbmcplugin.setContent(int(sys.argv[1]), 'videos')
     url = "https://content-api-prod.nba.com/public/1/endeavor/layout/watch/nbatv"
     json_parser = json.loads(utils.stringify(urllib2.urlopen(url).read()))
     for category in json_parser['results']['carousels']:
         if category['type'] == "video_carousel":
             common.addListItem(category['title'], '',
                     'nba_tv_videoplay', category['value']['videos'][0]['image'], True,
                     customparams={'slug': category['value']['slug'], 'pagination': True})
         elif category['type'] == "collection_cards":
             for collection in category['value']['items']:
                 common.addListItem(collection['name'], '',
                         'nba_tv_videoplay', collection['image'], True,
                         customparams={'slug': collection['slug'], 'pagination': True})
Example #36
def main():
    config = {'learning_rate': 0.001,
              'nr_epochs': 200,
              'batch_size': 8}
    data_path = 'temp/dataset'
    model_path = 'temp/saved_models'
    model_name = stringify(["model_"] +
                           [str(k) + '_' + str(config[k]) for k in config
                            if isinstance(config[k], (int, float))]) + '.pt'
    model_file = os.path.join(model_path, model_name)
    model_inference = model_def()
    model_inference.load_state_dict(torch.load(model_file,
                                               map_location=device))
    model_inference.eval()  # switch to inference mode before exporting
    test_dataset = dataset(data_path, split='test')
    img, _ = test_dataset[0]
    img.unsqueeze_(0)
    img = img.data.numpy()
    dummy_input = torch.from_numpy(img).float().to(device)
    dummy_out1, dummy_out2, dummy_out3, dummy_out4 = model_inference(
                                                                   dummy_input)
    print(dummy_out1.size())
    print(dummy_out2.size())
    print(dummy_out3.size())
    print(dummy_out4.size())

    torch.onnx.export(model_inference, dummy_input,
                      os.path.join(model_path, 'model.onnx'),
                      input_names=['input'],
                      output_names=['out1', 'out2', 'out3', 'out4'])
    model_onnx = onnx.load(os.path.join(model_path, 'model.onnx'))
    tf_rep = prepare(model_onnx)
    tf_rep.export_graph(os.path.join(model_path, 'model.pb'))
    graph_def_file = os.path.join(model_path, 'model.pb')
    input_arrays = ["input"]
    output_arrays = ["out1", "out2", "out3", "out4"]
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, input_arrays, output_arrays)
    tflite_model = converter.convert()
    with open(os.path.join(model_path, "model.tflite"), "wb") as f:
        f.write(tflite_model)

    # Generate sub-models
    for i in range(1, 5):
        output_arrays = ['out'+str(i)]
        converter = tf.lite.TFLiteConverter.from_frozen_graph(
            graph_def_file, input_arrays, output_arrays)
        tflite_model = converter.convert()
        model_name = "model_{}.tflite".format(i)
        open(os.path.join(model_path, model_name), "wb").write(tflite_model)
Example #37
def find_params(model, feature_set, y, subsample=None, grid_search=False):
    """
    Return parameter set for the model, either predefined
    or found through grid search.
    """
    model_name = model.__class__.__name__
    params = INITIAL_PARAMS.get(model_name, {})
    y = y if subsample is None else y[subsample]

    try:
        with open('saved_params.json') as f:
            saved_params = json.load(f)
    except IOError:
        saved_params = {}

    if (grid_search and model_name in PARAM_GRID
            and stringify(model, feature_set) not in saved_params):
        X, _ = get_dataset(feature_set, subsample, [0])
        clf = GridSearchCV(model, PARAM_GRID[model_name], cv=10, n_jobs=4,
                           scoring="roc_auc")
        # grid search for the best parameters for the learning model
        clf.fit(X, y)
        logger.info("found params (%s > %.4f): %s",
                    stringify(model, feature_set),
                    clf.best_score_, clf.best_params_)
        params.update(clf.best_params_)
        saved_params[stringify(model, feature_set)] = params
        with open('saved_params.json', 'w') as f:
            json.dump(saved_params, f, indent=4, separators=(',', ': '),
                      ensure_ascii=True, sort_keys=True)
    else:
        params.update(saved_params.get(stringify(model, feature_set), {}))
        if grid_search:
            logger.info("using params %s: %s", stringify(model, feature_set),
                        params)

    return params
Example #38
    def syllabify(self, data=None, inp=None, outp=None):
        """
        * Eats new, unsyllabified words either as a list (`data`)
          or from a file (`inp`)
        * Returns the syllabified words in string format
          (e.g. ser-uaes) and saves these to a file if `outp`
          is specified.
        """
        if inp:
            data = utils.load_data(inp)

        X = []
        for token in data:
            token = list(token)[:self.max_input_len]
            token = ['%'] + token + ['@']  # add beginning and end markers
            while len(token) < (self.max_input_len + 2):
                token.append('PAD')
            x = []
            for char in token:
                try:
                    x.append(self.char_lookup[char])
                except KeyError:
                    x.append(self.filler)
            X.append(x)

        new_X = np.array(X, dtype='float32')

        new_inputs = {'char_input': new_X}

        preds = self.model.predict(new_inputs,
                                   batch_size=self.batch_size,
                                   verbose=0)
        preds = utils.pred_to_classes(preds)

        syllabified = [utils.stringify(o, p) for o, p in zip(data, preds)]
        if outp:
            with open(outp, 'w') as f:
                for s in syllabified:
                    f.write(s + '\n')
        else:
            return syllabified
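Both this method and Example #3 pass the original token and the predicted classes to `utils.stringify` and get back a hyphenated form such as "ser-uaes". A sketch of a compatible helper, assuming class 1 marks a syllable boundary before the character (the class convention is hypothetical):

def stringify(token, pred):
    out = []
    for char, cls in zip(token, pred):
        if cls == 1 and out:
            out.append('-')
        out.append(char)
    return ''.join(out)

# stringify('seruaes', [0, 0, 0, 1, 0, 0, 0]) -> 'ser-uaes'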
Example #39
    def handle(self, msg):
        if len(msg) == 0:
            print('WARNING: Empty message')
            return
        msg_type = msg[0]
        if msg_type == ToComputer.DEBUG:
            # debug message
            subsystems = [
                'INFO',
                'ERROR',
                'CRON',
            ]
            if not 0 <= msg[1] < len(subsystems):
                print("WARNING: Unknown debug category: %d." % (msg[1],))
                subsystem = 'UNKNOWN'
                print(stringify(msg[2:]))
            else:
                subsystem = subsystems[msg[1]]

            content = parse_message(msg[2:])
            content = ''.join([str(m) for m in content])
            content = '[%s] %s' % (subsystem, content)
            self.log(content)
        elif msg_type == ToComputer.GET_SETTINGS_REPLY:
            time_since_epoch_s = parse_uint32(msg[1:5])
            date = datetime.fromtimestamp(time_since_epoch_s)
            box_uid = msg[5]
            box_node_type = chr(msg[6])
            box_balance = parse_uint32(msg[7:11])
            state_of_charge       = parse_float(msg[11:15])
            uncertainty_of_charge = parse_float(msg[15:19])
            battery_capacity = parse_float(msg[19:23])

            off_threshold = parse_float(msg[23:27])
            red_threshold = parse_float(msg[27:31])
            yellow_threshold = parse_float(msg[31:35])

            balance_update_hours = parse_int(msg[35:37])
            balance_update_minutes = parse_int(msg[37:39])
            balance_update_ammount = parse_uint32(msg[39:43])

            #self.log('Time on device is ' + str(date))

            self.update_if_not_focused(self.ui_root.settings.box_time, str(date))
            self.update_if_not_focused(self.ui_root.settings.box_uid, str(box_uid))
            self.update_if_not_focused(self.ui_root.settings.box_node_type, str(box_node_type))
            self.update_if_not_focused(self.ui_root.settings.box_balance, str(box_balance))
            self.update_if_not_focused(self.ui_root.settings.state_of_charge,       str(state_of_charge))
            self.update_if_not_focused(self.ui_root.settings.uncertainty_of_charge, str(uncertainty_of_charge))
            self.update_if_not_focused(self.ui_root.settings.battery_capacity, str(battery_capacity))

            self.update_if_not_focused(self.ui_root.settings.off_threshold, str(off_threshold)[:6])
            self.update_if_not_focused(self.ui_root.settings.red_threshold, str(red_threshold)[:6])
            self.update_if_not_focused(self.ui_root.settings.yellow_threshold, str(yellow_threshold)[:6])

            self.update_if_not_focused(self.ui_root.settings.balance_update_hours, str(balance_update_hours))
            self.update_if_not_focused(self.ui_root.settings.balance_update_minutes, str(balance_update_minutes))
            self.update_if_not_focused(self.ui_root.settings.balance_update_ammount, str(balance_update_ammount))

        elif msg_type == ToComputer.DATA_LOGGER_REPLY:
            controller.get.data_logger.on_message(msg)
        else:
            print('WARNING: Unknown message type:', msg_type)
Example #40
 def store_string_so_far():
     if last_open_str != i:
         parsed_message.append(stringify(msg[last_open_str:i]))
Example #41
spread = []
market_performance = []
dates_shown = []
STEP = 30

for i in range(STEP, len(stocks[0].values) - STEP, STEP):
    previous = i - STEP
    now = i
    upcoming = i + STEP  # renamed from "next" to avoid shadowing the builtin
    strong_stocks = [s for s in stocks
                     if s.getOffensive(previous, now) >= 100 and s.getDefensive(previous, now) <= 100]
    weak_stocks = [s for s in stocks
                   if s.getOffensive(previous, now) <= 100 and s.getDefensive(previous, now) >= 100]

    mean_strong_performance = utils.mean([s.getPerformance(now, upcoming) for s in strong_stocks])
    mean_weak_performance = utils.mean([s.getPerformance(now, upcoming) for s in weak_stocks])
    spread.append(mean_strong_performance - mean_weak_performance)
    market_performance.append(market.getPerformance(previous, now))  # to look for correlations with the spread
    dates_shown.append(dates[now])  # dates to be shown on the X axis

# format the data properly and save it in a file
columns = (['dates'] + dates_shown, 
           ['spread'] + utils.stringify(spread), 
           ['market'] + utils.stringify(market_performance))
rows = zip(*columns)
with open('spread.txt', 'w') as out:
    for row in rows:
        out.write(' '.join(row))
        out.write('\n')
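Here `utils.stringify` evidently turns a list of floats into writable strings (each row is joined with spaces), and `utils.mean` is a plain average. Compatible sketches, with the four-decimal formatting being an assumption:

def mean(values):
    return sum(values) / len(values)

def stringify(values):
    # One formatted string per number, ready for ' '.join(row).
    return ['%.4f' % v for v in values]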
Example #42
    def fit_predict(self, y, train=None, predict=None, show_steps=True):
        """
        Fit each model on the appropriate dataset, then return the average
        of their individual predictions. If train is specified, use a subset
        of the training set to train the models, then predict the outcome of
        either the remaining samples or (if given) those specified in predict.
        If train is omitted, train the models on the full training set, then
        predict the outcome of the full test set.

        Options:
        ------------------------------
        - y: numpy array. The full vector of the ground truths.
        - train: list. The indices of the elements to be used for training.
            If None, take the entire training set.
        - predict: list. The indices of the elements to be predicted.
        - show_steps: boolean. Whether to compute metrics after each stage
            of the computation.
        """
        y_train = y[train] if train is not None else y
        if train is not None and predict is None:
            predict = [i for i in range(len(y)) if i not in train]

        stage0_train = []
        stage0_predict = []
        for model, feature_set in self.models:
            X_train, X_predict = get_dataset(feature_set, train, predict)

            identifier = train[0] if train is not None else -1
            cache_file = stringify(model, feature_set) + str(identifier)

            model_preds = self._get_model_preds(
                model, X_train, X_predict, y_train, cache_file)
            stage0_predict.append(model_preds)

            # if stacking, compute cross-validated predictions on the train set
            if self.stack:
                model_cv_preds = self._get_model_cv_preds(
                    model, X_train, y_train, cache_file)
                stage0_train.append(model_cv_preds)

            # verbose mode: compute metrics after every model computation
            if show_steps:
                if train is not None:
                    mean_preds, stack_preds, fwls_preds = self._combine_preds(
                        np.array(stage0_train).T, np.array(stage0_predict).T,
                        y_train, train, predict,
                        stack=self.stack, fwls=self.fwls)

                    model_auc = compute_auc(y[predict], stage0_predict[-1])
                    mean_auc = compute_auc(y[predict], mean_preds)
                    stack_auc = compute_auc(y[predict], stack_preds) \
                        if self.stack else 0
                    fwls_auc = compute_auc(y[predict], fwls_preds) \
                        if self.fwls else 0

                    logger.info(
                        "> AUC: %.4f (%.4f, %.4f, %.4f) [%s]", model_auc,
                        mean_auc, stack_auc, fwls_auc,
                        stringify(model, feature_set))
                else:
                    logger.info("> used model %s:\n%s", stringify(
                        model, feature_set), model.get_params())

        if self.model_selection and predict is not None:
            best_subset = self._find_best_subset(y[predict], stage0_predict)
            stage0_train = [pred for i, pred in enumerate(stage0_train)
                            if i in best_subset]
            stage0_predict = [pred for i, pred in enumerate(stage0_predict)
                              if i in best_subset]

        mean_preds, stack_preds, fwls_preds = self._combine_preds(
            np.array(stage0_train).T, np.array(stage0_predict).T,
            y_train, stack=self.stack, fwls=self.fwls)

        if self.stack:
            selected_preds = stack_preds if not self.fwls else fwls_preds
        else:
            selected_preds = mean_preds

        return selected_preds