Example #1
def main(args: list):
    if len(args) != 3:
        error(f"Invalid argument count: {len(args)}!")

    ast: dict = loadJson(args[1])
    patterns: list = loadJson(args[2])
    parser: Parser = Parser(patterns)
    parser.parseFunc(ast["type"], ast)
    vulnerabilities: Set[Vulnerability] = parser.getEvaluator().getVulnerabilities()
    dumpJson(args[1].replace(".json", ".output.json"), vulnerabilities)
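The helper functions used throughout these examples (loadJson, dumpJson, error) are not part of the snippets themselves; the following is a minimal sketch of what such thin wrappers around the json module could look like, inferred from the call sites here rather than taken from any of the original projects:

import json
import sys


def loadJson(path: str):
    # Parse a JSON file and return the resulting dict/list.
    with open(path, "r", encoding="utf-8") as fp:
        return json.load(fp)


def dumpJson(path: str, data) -> None:
    # Write data back out as JSON; default=str is a defensive assumption
    # for values the json module cannot encode natively.
    with open(path, "w", encoding="utf-8") as fp:
        json.dump(data, fp, indent=2, default=str)


def error(msg: str) -> None:
    # Report the problem and abort, as Example #1 does on a bad argument count.
    print(msg, file=sys.stderr)
    sys.exit(1)

The signatures clearly differ between the projects aggregated here (some variants take a file mode or no argument at all), so this is only one plausible shape.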
Example #2
    def getContextList(self, channel, position):
        utils.log('getContextList, channel = ' + str(channel) + ', position = ' + str(position))
        position = self.fixPlaylistIndex(channel, position)
        item = self.channelLST.channels[channel - 1].guidedata[position]
        contextLST = utils.loadJson(item.get('contextmenu', '[]') or '[]')
        utils.log('getContextList, contextLST = ' + str(contextLST))
        return contextLST
    def _loadFuncs(self):
        self.log('Start files loading...')
        filesMap = self.filesMap
        for key in filesMap:
            filePath = filesMap[key]
            self.log(f'Try to load {key} file: {filePath}')

            func = None
            if filePath.endswith('.pickle'):
                grid = utils.loadPickle(filePath, 'rb')
                if grid is None:
                    self.error(
                        f'Unable to load {key} function as pickle:\n{filePath}.'
                    )
                func = num_methods.interpolation.SplineInterpolation(grid)
            elif filePath.endswith('.json'):
                jsn = utils.loadJson(filePath, 'r')
                if jsn is None:
                    self.error(
                        f'Unable to load {key} function interpolation as json:\n{filePath}.'
                    )
                func = num_methods.interpolation.SplineInterpolation(
                    None).load_from_dict(jsn)
            else:
                grid = utils.loadCSV(filePath)
                if grid is None:
                    self.error(
                        f'Unable to load {key} function as csv:\n{filePath}.')
                func = num_methods.interpolation.SplineInterpolation(grid)

            self.funcDict[key] = func
            self.log('Successful')
        self.log('All files loaded')
Example #4
def predict(string):
    dic = vars(params)
    dic['embedding'] = None
    dic['is_train'] = False
    saved = loadJson()
    # merging config, using dic to overwrite saved when same key
    dic = {**saved, **dic}
    return inference_online(dic, string)
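The {**saved, **dic} merge relies on later entries in a dict literal overwriting earlier ones, so values from dic win on key collisions; a small standalone illustration (the config keys are made up):

saved = {"lr": 0.001, "batch_size": 32, "is_train": True}   # hypothetical saved config
dic = {"is_train": False, "embedding": None}                # runtime overrides

merged = {**saved, **dic}
print(merged)
# {'lr': 0.001, 'batch_size': 32, 'is_train': False, 'embedding': None}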
Example #5
def main(verbose=False):

    try:
        #Initialize
        stats_authors = dict()

        # Get current location
        path = utils.getPath()
        params = utils.loadJson(path + "/docs/parameters.json")

        # Get selenium driver
        driver = sf.getDriver(path)

        # Iterate over tags
        for tagName in params["tags"]:
            print("--------------- Starting tag " + tagName +
                  " ---------------")
            # Get tag URL
            driver.get('https://www.tiktok.com/tag/' + tagName)

            # Scroll in tags
            login_form, driver = sf.scrollPage(driver,
                                               scope="tag",
                                               maxNScrolls=50)

            #Get authors names:
            authors_list = sf.get_authors(login_form, driver)
            print("--------------- Found ", len(authors_list),
                  "users: ---------------")
            #Extract statistics from each author:
            stats_authors = sf.get_stats_author(driver,
                                                authors_list,
                                                params,
                                                stats_authors,
                                                useTikster=True)

        #Compute metrics for each author:
        metrics_author = sf.compute_metrics(stats_authors)

        #print(metrics_author)

    except Exception as e:
        traceback.print_exc()

    finally:
        # Check whether driver has been initialized yet
        try:
            driver
        except NameError:
            driver = None

        # Always close drivers
        if driver is not None:
            driver.close()
            driver.quit()
            print("Driver closed")
def main():
    src = sys.argv[1]
    dest = sys.argv[2]
    stats = utils.loadJson(sys.argv[3])
    threshold = float(sys.argv[4])

    # keep executing only if stats were loaded
    if stats is not None:
        stats = stats['classes']
        cache = utils.loadJson(getCacheName(stats))
        if cache is None:
            cache = {}

        applySegmentation(src, dest, threshold, stats, cache)
        utils.saveJson(cache, getCacheName(stats))
    else:
        print("No stats available, can't process images")

    print('Finished')
    def __init__(self, parent=None):
        super(ControlPanel, self).__init__(parent)
        self._config = loadJson()
        self.ctrlpanelWidth = self._config['controlpanel']['width']
        self.ctrlpanelHeight = self._config['controlpanel']['height']
        self.initUI()
        self.timer = QtCore.QTimer()
        self.timer.setInterval(50)
        self.timer.timeout.connect(self.on_update)
        self.timer.start()
Example #8
def run():
    # convert to dict
    dic = vars(params)
    dic['embedding'] = None

    if dic['is_train']:
        train(dic)
    else:
        # load config
        saved = loadJson()
        # merging config, using dic to overwrite saved when same key
        dic = {**saved, **dic}
        inference(dic)
def main():
    src = sys.argv[1]
    dest = sys.argv[2]
    pixels = {}

    for f in os.listdir(src):
        print('Processing ' + f)

        data = utils.loadJson(src + f)
        for key in data:
            if key != 'height' and key != 'width' and len(data[key]) > 0:
                if key not in pixels:
                    pixels[key] = []

                extracted = utils.extractRGB(data[key])
                pixels[key] = pixels[key] + extracted

    for key in pixels:
        print('Extracted ' + str(len(pixels[key])) + ' pixels for ' + key)

    print('Saving')
    utils.saveJson(pixels, dest + 'pixels.json')

    print('Finished')
Example #10
    def __init__(self, parent=None):
        super(TouchPanel, self).__init__(parent)
        self._config = loadJson()
        self.touchpanelWidth = self._config['touchpanel']['width']
        self.touchpanelHeight = self._config['touchpanel']['height']
        if sys.platform == "win32":
            self.setWindowFlags(QtCore.Qt.WindowDoesNotAcceptFocus
                                | QtCore.Qt.Tool
                                | QtCore.Qt.FramelessWindowHint
                                | QtCore.Qt.WindowStaysOnTopHint)
            self.setGeometry(self._config['touchpanel']['left_win32'],
                             self._config['touchpanel']['top_win32'],
                             self.touchpanelWidth, self.touchpanelHeight)
        else:
            self.setWindowFlags(QtCore.Qt.WindowDoesNotAcceptFocus
                                | QtCore.Qt.Tool
                                | QtCore.Qt.FramelessWindowHint
                                | QtCore.Qt.WindowStaysOnTopHint
                                | QtCore.Qt.X11BypassWindowManagerHint)
            self.setGeometry(self._config['touchpanel']['left'],
                             self._config['touchpanel']['top'],
                             self.touchpanelWidth, self.touchpanelHeight)

        self.initUI()
Example #11
if __name__ == '__main__':
    if utils.getProperty('PseudoTVRunning') != "True":
        try: params = dict(arg.split('=') for arg in sys.argv[1].split('&'))
        except: params = {}
        dataType = None
        utils.log('params = ' + str(params))
        for type in ['json','property','listitem']:
            try:
                data = params[type]
                dataType = type
                break
            except: pass
            
        hasChannels = False
        channelLST = ChannelList()
        channelLST.incHDHR      = (utils.loadJson(utils.unquote(params.get('include_hdhr','')))          or channelLST.incHDHR)
        channelLST.skinPath     = ((utils.loadJson(utils.unquote(params.get('skin_path',''))))           or channelLST.chkSkinPath())
        channelLST.mediaFolder  = os.path.join(channelLST.skinPath,'resources','skins','default','media')
        channelLST.refreshPath  = utils.loadJson(utils.unquote(params.get('refresh_path',''))            or utils.ADDON_ID)
        channelLST.refreshIntvl = int(utils.loadJson(utils.unquote(params.get('refresh_interval','')))   or '0')
        channelLST.skinFolder   = os.path.join(channelLST.skinPath,'resources','skins','default','1080i',) if xbmcvfs.exists(os.path.join(channelLST.skinPath,'resources','skins','default','1080i','%s.guide.xml'%utils.ADDON_ID)) else os.path.join(channelLST.skinPath,'resources','skins','default','720p')
        utils.setProperty('uEPG.rowCount',utils.loadJson(utils.unquote(params.get('row_count',''))       or '9'))
        channelLST.pluginName, channelLST.pluginAuthor, channelLST.pluginIcon, channelLST.pluginFanart, channelLST.pluginPath = utils.getPluginMeta(channelLST.refreshPath)
        
        utils.log('dataType = '     + str(dataType))
        utils.log('skinPath = '     + str(channelLST.skinPath))
        utils.log('skinFolder = '   + str(channelLST.skinFolder))
        utils.log('rowCount = '     + utils.getProperty('uEPG.rowCount'))
        utils.log('refreshPath = '  + str(channelLST.refreshPath))
        utils.log('refreshIntvl = ' + str(channelLST.refreshIntvl))
        utils.setProperty('PluginName'   ,channelLST.pluginName)
Example #12
def initChannels(params, data, dataType):
    utils.log('initChannels, params = ' + str(params))
    with busy_dialog():
        hasChannels = False
        channelLST = ChannelList()
        channelLST.incHDHR = (
            params.get('include_hdhr', '')
            or utils.REAL_SETTINGS.getSetting('Enable_HDHR')) == 'true'
        channelLST.skinPath = (utils.unquote(params.get('skin_path', ''))
                               or channelLST.chkSkinPath())
        channelLST.mediaFolder = os.path.join(channelLST.skinPath, 'resources',
                                              'skins', 'default', 'media')
        channelLST.refreshPath = (utils.unquote(params.get('refresh_path', ''))
                                  or utils.ADDON_ID)
        channelLST.refreshIntvl = int(
            params.get('refresh_interval', '') or '0')
        channelLST.skinFolder = os.path.join(
            channelLST.skinPath,
            'resources',
            'skins',
            'default',
            '1080i',
        ) if xbmcvfs.exists(
            os.path.join(channelLST.skinPath, 'resources', 'skins', 'default',
                         '1080i', '%s.guide.xml' %
                         utils.ADDON_ID)) else os.path.join(
                             channelLST.skinPath, 'resources', 'skins',
                             'default', '720p')
        utils.setProperty('uEPG.rowCount',
                          (params.get('row_count', '') or '9'))
        channelLST.pluginName, channelLST.pluginAuthor, channelLST.pluginIcon, channelLST.pluginFanart, channelLST.pluginPath = utils.getPluginMeta(
            channelLST.refreshPath)

        utils.log('dataType = ' + str(dataType))
        utils.log('skinPath = ' + str(channelLST.skinPath))
        utils.log('skinFolder = ' + str(channelLST.skinFolder))
        utils.log('rowCount = ' + utils.getProperty('uEPG.rowCount'))
        utils.log('refreshPath = ' + str(channelLST.refreshPath))
        utils.log('refreshIntvl = ' + str(channelLST.refreshIntvl))
        utils.setProperty('PluginName', channelLST.pluginName)
        utils.setProperty('PluginIcon', channelLST.pluginIcon)
        utils.setProperty('PluginFanart', channelLST.pluginFanart)
        utils.setProperty('PluginAuthor', channelLST.pluginAuthor)
        utils.setProperty('pluginPath', channelLST.pluginPath)

        #show optional load screen
        # if channelLST.uEPGRunning == False and utils.getProperty('uEPGSplash') != 'True' and xbmcvfs.exists(os.path.join(channelLST.skinFolder,'%s.splash.xml'%utils.ADDON_ID)) == True:
        # mySplash   = epg.Splash('%s.splash.xml'%utils.ADDON_ID,channelLST.skinPath,'default')
        # mySplash.show()
        # xbmc.sleep(100)

        if utils.HDHR().hasHDHR():
            if utils.REAL_SETTINGS.getSetting(
                    'FirstTime_HDHR') == "true" and not channelLST.incHDHR:
                utils.REAL_SETTINGS.setSetting('FirstTime_HDHR', 'false')
                if utils.yesnoDialog(
                    (utils.LANGUAGE(30012) % (channelLST.pluginName)),
                        custom='Later'):
                    utils.REAL_SETTINGS.setSetting('Enable_HDHR', 'true')
                    channelLST.incHDHR = True
        utils.log('incHDHR = ' + str(channelLST.incHDHR))

    if dataType == 'wrap':
        hasChannels = channelLST.prepareJson(data)
    elif dataType == 'json':
        hasChannels = channelLST.prepareJson(
            utils.loadJson(utils.unquote(data)))
    elif dataType == 'property':
        hasChannels = channelLST.prepareJson(
            utils.loadJson(utils.unquote(utils.getProperty(data))))
    elif dataType == 'listitem':
        hasChannels = channelLST.prepareVFS(utils.unquote(data))

    if hasChannels:
        if utils.REAL_SETTINGS.getSetting('FirstTime_Run') == "true":
            utils.REAL_SETTINGS.setSetting('FirstTime_Run', 'false')
            utils.textViewer(
                utils.LANGUAGE(30008),
                '%s / %s' % (utils.ADDON_NAME, channelLST.pluginName))

        # if utils.getProperty('uEPGSplash') == 'True':
        # mySplash.close()
        # del mySplash
        # xbmc.sleep(100)

        if channelLST.refreshIntvl > 0 and channelLST.refreshPath is not None:
            channelLST.startRefreshTimer()
        if not channelLST.uEPGRunning and utils.getProperty(
                'uEPGGuide') != 'True':
            channelLST.myEPG = epg.uEPG('%s.guide.xml' % utils.ADDON_ID,
                                        channelLST.skinPath, 'default')
            channelLST.myEPG.channelLST = channelLST
            channelLST.myEPG.doModal()
            del channelLST.myEPG
            xbmc.sleep(100)
    else:
        utils.log("invalid uEPG information", xbmc.LOGERROR)
        # utils.notificationDialog(utils.LANGUAGE(30002)%(channelLST.pluginName,channelLST.pluginAuthor),icon=channelLST.pluginIcon)
        # utils.REAL_SETTINGS.openSettings()
    del utils.KODI_MONITOR
Example #13
        text_dir = [
            os.path.join(dest_dir, 'train'),
            os.path.join(dest_dir, 'test')
        ]

        generate_text([train_data, test_data],
                      data_ext,
                      text_dir,
                      args.data_type,
                      topn_sent=args.topn_sent,
                      is_stat=args.is_stat)

    elif args.data_type == 'tree':

        test_file = os.path.join(dest_dir, '{}_test_seg'.format(data_name))
        test_data = loadJson(test_file)
        train_file = os.path.join(dest_dir, '{}_train_seg'.format(data_name))
        train_data = loadJson(train_file)

        data_ext = []
        summary, Y = text_train.ref[:train_ids], text_train.Y[:train_ids]
        name, pos = text_train.name[:train_ids], text_train.pos[:train_ids]
        # text = text_train.text[:train_ids]
        data_ext.append([summary, Y, name, pos])
        summary, Y = text_test.ref[:test_ids], text_test.Y[:test_ids]
        name, pos = text_test.name[:test_ids], text_test.pos[:test_ids]
        # text = text_test.text[:train_ids]
        data_ext.append([summary, Y, name, pos])
        text_dir = [
            os.path.join(dest_dir, 'train'),
            os.path.join(dest_dir, 'test')
Example #14
def main():
    src = sys.argv[1]
    dest = sys.argv[2]
    attempts = 10
    ratio = 0.5
    thres = 0.95

    pixels = utils.loadJson(src)
    bestSamples = {}
    bestStats = {'classes': []}

    minVal = [1e10000]
    minTrain = []
    minTest = []
    for n in range(attempts):
        print('** Attempt ' + str(n))
        samples = {}
        stats = {'classes': []}

        # get sets and stats from data
        for key in pixels:
            print('\tProcessing color ' + key)
            sets = getBestSets(key, pixels[key], ratio, attempts)
            stats['classes'].append(sets['stats'])
            samples[key] = sets

        # calculate the classification error
        totalTrain = 0.0
        totalVal = 0.0
        totalTest = 0.0
        errTrain = 0
        errVal = 0
        errTest = 0
        cache = {}
        for key in samples:
            clsTrain = classify(samples[key]['train'], thres, stats['classes'], cache)
            errTrain += classificationError(clsTrain, key)
            totalTrain += len(samples[key]['train'][utils.R])

            clsVal = classify(samples[key]['val'], thres, stats['classes'], cache)
            errVal += classificationError(clsVal, key)
            totalVal += len(samples[key]['val'][utils.R])

            clsTest = classify(samples[key]['test'], thres, stats['classes'], cache)
            errTest += classificationError(clsTest, key)
            totalTest += len(samples[key]['test'][utils.R])

        print('\tError train: \t' + str(errTrain) + '\t' + str(errTrain / totalTrain))
        print('\tError val: \t' + str(errVal) + '\t' + str(errVal / totalVal))
        print('\tError test: \t' + str(errTest) + '\t' + str(errTest / totalTest))

        if errVal < minVal[0]:
            minVal = [errVal, totalVal]
            minTrain = [errTrain, totalTrain]
            minTest = [errTest, totalTest]
            bestSamples = samples
            bestStats = stats

        if minVal[0] == 0:
            break

    print('Min error')
    print('\ttrain: \t' + str(minTrain[0]) + '\t' + str(minTrain[0] / minTrain[1]))
    print('\tval: \t' + str(minVal[0]) + '\t' + str(minVal[0] / minVal[1]))
    print('\ttest: \t' + str(minTest[0]) + '\t' + str(minTest[0] / minTest[1]))

    bestSamples = {'samples': bestSamples, 'errors': {}}
    bestSamples['errors']['train'] = minTrain
    bestSamples['errors']['val'] = minVal
    bestSamples['errors']['test'] = minTest

    print('Saving data')
    utils.saveJson(bestStats, dest + 'learnedStats.json')
    utils.saveJson(bestSamples, dest + 'learnedSamples.json')

    print('Finished')
Example #16
    else:
        word = re.findall(r'\d+', word)
        if not word:
            return og
        if len(word) > 1:
            return False
        elif og[-2:] in ["st", "nd", "rd", "th"]:
            word = word[0]
            return num2words(int(word), ordinal=True)
        else:
            return False


#data to make
for z in ["val", "train"]:
    data = loadJson(
        "data_vqa/v2_mscoco_{}2014_annotations".format(z))["annotations"]
    total_data += len(data)
    if z == "train":
        train_data = data
    else:
        val_data = data
    for a in tqdm(data):
        label2score = {}
        qid = a["question_id"]
        img_id = a["image_id"]
        a_type = a["answer_type"]
        gt = stripPunc(a["multiple_choice_answer"].lower(), a_type)
        if gt:
            ans_count[gt] += 1
        if gt and gt not in ans2num:
            ans2num[gt] = num_ans
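The ordinal branch near the top of this example converts digit strings such as "21st" via num2words; assuming the standard num2words package, its ordinal mode behaves like this:

from num2words import num2words

print(num2words(3, ordinal=True))   # third
print(num2words(21, ordinal=True))  # twenty-first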
Example #17
            params = dict(arg.split('=') for arg in sys.argv[1].split('&'))
        except:
            params = {}
        dataType = None
        utils.log('params = ' + str(params))
        for type in ['json', 'property', 'listitem']:
            try:
                data = params[type]
                dataType = type
                break
            except:
                pass

        hasChannels = False
        channelLST = ChannelList()
        channelLST.incHDHR = (utils.loadJson(
            utils.unquote(params.get('include_hdhr', '')))
                              or channelLST.incHDHR)
        channelLST.skinPath = ((utils.loadJson(
            utils.unquote(params.get('skin_path', ''))))
                               or channelLST.chkSkinPath())
        channelLST.mediaFolder = os.path.join(channelLST.skinPath, 'resources',
                                              'skins', 'default', 'media')
        channelLST.refreshPath = utils.loadJson(
            utils.unquote(params.get('refresh_path', '')) or utils.ADDON_ID)
        channelLST.refreshIntvl = int(
            utils.loadJson(utils.unquote(params.get('refresh_interval', '')))
            or '0')
        channelLST.skinFolder = os.path.join(
            channelLST.skinPath,
            'resources',
            'skins',
        split = word.split(" ")
        if (split[0] in attributes \
                or split[0].isdigit()\
                or len(split[0]) ==1)\
                or split[0] in relations\
                and split[1] in set2check:
                    return [split[0]]
    if word in INVALID_NOUNS or (objs and word in relations):
        return []
    cands = word2alternatives(word)
    if og not in cands:
        cands.append(og)
    return cands

print("LOADING")
vg = loadJson("data_visual_genome/scene_graphs")
for scene in tqdm(vg):

    img_id = scene["image_id"]
    objs = scene["objects"]
    relationships = scene["relationships"]
    img_info = []

    #GET OBJECTS
    for o in objs:
        #get attributes
        try:
            attrs = o["attributes"]
            attrs = None
            fil_attrs = []
            if attrs is not None:
Example #19
    def __init__(self, parent=None):
        super(Keyboard, self).__init__(parent)
        self._config = loadJson()
        self.keyboardWidth = self._config['keyboard']['width']
        self.keyboardHeight = self._config['keyboard']['height']
        self.initUI()
from nltk import word_tokenize, pos_tag, download
from nltk.corpus import wordnet as wn  # download('wordnet')
import inflect
from copy import deepcopy
from substring import substring, getLCS

PRINT = False
ISGQA = True

relations = loadTxt("phase_1/relations")
attributes = loadTxt("phase_1/attributes")
objects = loadTxt("phase_1/objects")
vg_relations = loadTxt("phase_2/vg_relations")
vg_objects = loadTxt("phase_2/vg_relations")
vg_attributes = loadTxt("phase_2/vg_relations")
vg_id2obj = loadJson("phase_2/id2obj")
graphs = loadJson("phase_2/v
    splits = ["val"]
    subsets = ["all"]
    train_annos = []
    val_annos = []
    all_answers = set()

qid2ambgOverlap = defaultdict(list)
qid2normOverlap = defaultdict(list)
ambigous_objects = defaultdict(list)
find = FindInParens(id2obj, ("OBJECTS", objects))
uniq_rels = deepcopy(relations)
uniq_attrs = deepcopy(attributes)
uniq_objs = deepcopy(objects)
#!/usr/bin/env python3
import os
import json
import math
import utils
import datetime
from consts import *

# check cache file
data = utils.loadJson(CACHE_USAGE)
if not data:

	try:

		# get config
		gSheetId = utils.getConfigValue(None, XFINITY_GSHEET)
		gSheetUrl = utils.getGoogleSheetUrl(gSheetId)

		# update current usage
		book = utils.openGoogleSheet(gSheetId)
		dataSheet = book.get_worksheet(0)
		date = dataSheet.acell(DATE_CELL).value
		cap = int(dataSheet.acell(CAP_CELL).value.split()[0])
		target = int(dataSheet.acell(TARGET_CELL).value.split()[0])
		usage = int(dataSheet.acell(USAGE_CELL).value.split()[0])
		today = int(dataSheet.acell(TODAY_CELL).value.split()[0])

		# build data
		data = {
			'date': date,
			'cap': cap,
Example #22
import os
import requests
from utils import loadJson
from tabulate import tabulate
from lib.query import QueryCurrentFoodTrucks

userInput = loadJson()
def query_api(offset):
    query = QueryCurrentFoodTrucks(
        offset=offset
    ).build_query()
    url = "{0}{1}".format(base_url, query)

    if os.environ.get('FT_APP_TOKEN') is not None:
        header = {'X-App-token': os.environ.get('FT_APP_TOKEN')}
        request = requests.get(url, headers=header)
    else:
        request = requests.get(url)

    return request
def go_to_page(page, num):
    request = query_api(page)
    page += num
    keep_paging = True
    return (page,request, keep_paging)

def print_results_to_terminal(results):
    trucks = []
    for foodtruck in results:
        trucks.append([foodtruck['applicant'], foodtruck['location']])
    columns = ["NAME", "LOCATION"]
Example #23
from collections import defaultdict, Counter
from tqdm import tqdm
import re
from shapely.geometry import Polygon
import statistics
from nltk import word_tokenize, pos_tag, download
from nltk.corpus import wordnet as wn
import inflect
from copy import deepcopy

PRINT = False
ISGQA = True
relations = loadTxt("phase_1/relations")
attributes = loadTxt("phase_1/attributes")
objects = loadTxt("phase_1/objects")
id2obj = loadJson("phase_1/gqa_id2obj")
if ISGQA:
    graphs = loadJson("phase_1/gqa_img2info")
    splits = ["val", "train"]
    subsets = ["balanced", "all"]
    train_annos = []
    val_annos = []
    all_answers = set()

qid2ambgOverlap = defaultdict(list)
qid2normOverlap = defaultdict(list)
ambigous_objects = defaultdict(list)
find = FindInParens(id2obj, ("OBJECTS", objects))
uniq_rels = deepcopy(relations)
uniq_attrs = deepcopy(attributes)
uniq_objs = deepcopy(objects)
Example #24
import re
from shapely.geometry import Polygon
import statistics
from nltk import word_tokenize, pos_tag, download
from nltk.corpus import wordnet as wn  # download('wordnet')
import inflect
from copy import deepcopy
from substring import substring, getLCS

PRINT = False
ISGQA = True

relations = loadTxt("phase_1/relations")
attributes = loadTxt("phase_1/attributes")
objects = loadTxt("phase_1/objects")
id2obj = loadJson("phase_1/id2obj")
obj_synonyms = loadJson("phase_2/visual_genome_obj_synonyms.json")
obj_synonyms = loadJson("phase_2/visual_genome_rel_synonyms.json")
vg_atts = loadJson("phase_2/visual_genome_attributes")
vg_objs = loadJson("phase_2/visual_genome_attributes")
vg_rels = loadJson("phase_2/visual_genome_attributes")
graphs = loadJson("phase_2/visual_genome_graphs")
vqa_ans = loadJson("phase_2/vqa_val_qid2ans")
vg_objs = loadJson("phase_2/visual_genome_attributes")

if ISGQA:
    img2sg_bb = loadJson("phase_1/img2sg_bb")
    imgId2objs = loadJson("phase_1/img2objs")
    splits = ["val"]
    subsets = ["all"]
    train_annos = []
Example #25
            params = dict(arg.split('=') for arg in sys.argv[1].split('&'))
        except:
            params = {}

        dataType = None
        utils.log('params = ' + str(params))
        for type in ['json', 'property', 'listitem']:
            try:
                data = params[type]
                dataType = type
                break
            except:
                pass

        channelLST = ChannelList()
        channelLST.skinPath = ((utils.loadJson(
            utils.unquote(params.get('skin_path', ''))))
                               or channelLST.chkSkinPath())
        channelLST.mediaFolder = os.path.join(channelLST.skinPath, 'resources',
                                              'skins', 'default', 'media')
        channelLST.refreshPath = utils.loadJson(
            utils.unquote(params.get('refresh_path', None) or None))
        channelLST.refreshIntvl = int(
            utils.loadJson(utils.unquote(params.get('refresh_interval', '0')))
            or '0')
        channelLST.skinFolder = os.path.join(
            channelLST.skinPath,
            'resources',
            'skins',
            'default',
            '1080i',
        ) if xbmcvfs.exists(