Example #1
0
 def upload_fixture_image(
     self, fixture_image_path,
     verification_stats_path, verification_result_path, headline='test'
 ):
     """Ingest a fixture image and queue its expected verification data.

     Uploads the image at ``fixture_image_path`` into the app's media
     storage, generates picture renditions for it, and posts an item to
     the 'ingest' resource.  The JSON contents of the two verification
     fixture files are appended to the test's expectation lists.

     Returns whatever the 'ingest' service's post() returns (the new
     item id(s)).
     """
     with self.app.app_context():
         with open(fixture_image_path, mode='rb') as f:
             file_name = ntpath.basename(fixture_image_path)
             file_type = 'image'
             # imghdr.what() sniffs the image subtype (png/jpeg/...) from
             # the header and restores the file position afterwards.
             content_type = '%s/%s' % (file_type, imghdr.what(f))
             file_id = app.media.put(
                 f, filename=file_name,
                 content_type=content_type,
                 resource=get_resource_service('ingest').datasource,
                 metadata={}
             )
             inserted = [file_id]
             # NOTE(review): f has already been consumed by media.put()
             # above; presumably generate_renditions rewinds it -- confirm.
             renditions = generate_renditions(
                 f, file_id, inserted, file_type, content_type,
                 rendition_config=config.RENDITIONS['picture'],
                 url_for_media=url_for_media
             )
         data = [{
             'headline': headline,
             'slugline': 'rebuild',
             'renditions': renditions,
             'type': 'picture'
         }]
         image_id = get_resource_service('ingest').post(data)
     # Load the expected verification outputs for later assertions.
     with open(verification_result_path, 'r') as f:
         self.expected_verification_results.append(json.load(f))
     with open(verification_stats_path, 'r') as f:
         self.expected_verification_stats.append(json.load(f))
     return image_id
def get_proportional_hash_area(period):
	"""Compute each miner's share of total hash power over *period*.

	Takes in periods accepted by P2Pool - hour, day, week, month or
	year - fetches the live and dead hash-rate graph data from the
	P2Pool web server running on localhost, and integrates each miner's
	rate over time ("hash area" = rate * interval length).

	Returns a tuple (hash_areas, proportions):
	hash_areas  -- dict mapping miner address to its net hash area,
	               scaled down by 1e15 for readability
	proportions -- dict mapping miner address to its fraction of the
	               total net hash area
	"""
	import urllib2, json
	base = 'http://localhost:9332/web/graph_data/'
	live_rows = json.load(urllib2.urlopen(base + 'miner_hash_rates/last_' + period))
	dead_rows = json.load(urllib2.urlopen(base + 'miner_dead_hash_rates/last_' + period))

	# Each row is (timestamp, {address: rate}, interval_length); the
	# product rate * interval_length is the "area" contributed.
	hash_areas = {}
	total_hash_area = 0
	for row in live_rows:
		for address, rate in row[1].items():
			area = rate * row[2]
			hash_areas[address] = hash_areas.get(address, 0) + area
			total_hash_area += area

	# Subtract dead (rejected) work.  BUG FIX: use .get() so an address
	# that only appears in the dead data cannot raise KeyError.
	for row in dead_rows:
		for address, rate in row[1].items():
			area = rate * row[2]
			hash_areas[address] = hash_areas.get(address, 0) - area
			total_hash_area -= area

	proportions = {}
	for address in hash_areas.keys():
		proportions[address] = hash_areas[address] / total_hash_area
		hash_areas[address] /= 1000000000000000  # scale to PH-seconds

	return hash_areas, proportions
Example #3
0
    def saveList(self):
        """Persist the current food list and register its name.

        Writes the form's restaurants/foods data to
        ./foodlists/<name>.json, reads it back as a sanity check, adds
        the name to the ./data/_lisnamedata index (case-insensitively),
        then refreshes the UI list and shows a confirmation message.
        """
        nameOfList = self.LNEDTlistName.text()
        restsOfEachDay = self.collect_restaurants()
        foods = self.collect_foods()
        pyJson = {}
        pyJson["version"] = self.form_data_version
        pyJson["restsOfEachDay"] = restsOfEachDay
        pyJson["foods"] = foods

        # Write the list to its own JSON file (context manager closes it
        # even on error, unlike the previous open/close pair).
        list_path = './foodlists/' + nameOfList + '.json'
        with open(list_path, 'w') as list_file:
            list_file.write(json.dumps(pyJson))

        # Read it back as a sanity check that valid JSON was written.
        with open(list_path, 'r') as list_file:
            json.load(list_file)

        # Register the list name in the name index, case-insensitively.
        with open('./data/_lisnamedata', 'r+') as datas_file:
            datas_decoded = json.load(datas_file)
            if nameOfList.upper() not in (name.upper() for name in datas_decoded["listNames"]):
                datas_decoded["listNames"].append(nameOfList)
            datas_file.seek(0, 0)  # go to beginning of file
            datas_file.write(json.dumps(datas_decoded))
            # BUG FIX: truncate after rewriting; otherwise a shorter
            # payload leaves trailing bytes of the old content behind
            # and corrupts the index file.
            datas_file.truncate()

        self.fill_list_of_listname()
        self.showInfoMessage(u" لیست جدید", nameOfList + u" ذخیره شد ")
Example #4
0
def install_analytics():
    """Reload the Category and Algorithm tables from the JSON fixtures.

    Deletes all existing rows, re-creates them from data/categories.json
    and data/algorithms.json (categories first, since each algorithm
    references a category), and returns a JSON HttpResponse with the
    number of rows created.
    """
    Category.objects.all().delete()
    Algorithm.objects.all().delete()

    base_dir = os.path.dirname(__file__)

    # Load categories.  BUG FIX: context managers close the fixture
    # files even if a create() call raises mid-loop.
    with open(os.path.join(base_dir, 'data/categories.json')) as categories_fp:
        data = json.load(categories_fp)
    n_of_categories = 0
    for c in data:
        Category.objects.create(pk=c['pk'], name=c['fields']['name'],
                                description=c['fields']['description'])
        n_of_categories += 1

    # Load algorithms; each row points at an already-created category.
    with open(os.path.join(base_dir, 'data/algorithms.json')) as algorithms_fp:
        data = json.load(algorithms_fp)
    n_of_algorithms = 0
    for a in data:
        Algorithm.objects.create(pk=a['pk'],
                                 category=Category.objects.get(pk=a['fields']['category']),
                                 name=a['fields']['name'], description=a['fields']['description'])
        n_of_algorithms += 1

    return HttpResponse(json.dumps({
        'n_of_categories': n_of_categories,
        'n_of_algorithms': n_of_algorithms
        }), status=200, content_type="application/json")
def main(we_file, w2i_file, sen):
    """Train a GloVe model on Reuters data and save the word embeddings.

    Reuses cached word2idx/sentence JSON files when they exist;
    otherwise builds them from the Reuters corpus and caches them.
    """
    cc_matrix = "./input/cc_matrix.npy"

    if os.path.isfile(w2i_file):
        # Cached vocabulary and sentences exist -- load them.
        with open(w2i_file) as fh:
            word2idx = json.load(fh)
        with open(sen) as fh:
            sentences = json.load(fh)
    else:
        sentences, word2idx = get_reuters_data(n_vocab=2000)
        with open(w2i_file, 'w') as fh:
            json.dump(word2idx, fh)
        with open(sen, 'w') as fh:
            json.dump(sentences, fh)

    vocab_size = len(word2idx)
    model = Glove(50, vocab_size, 10)
    # Alternating least squares alternative:
    # model.fit(sentences, cc_matrix=cc_matrix, epochs=20)
    model.fit(
        sentences,
        cc_matrix=cc_matrix,
        learning_rate=3*10e-5,
        reg=0.01,
        epochs=2000,
        gd=True,
        use_theano=True
    )  # full-batch gradient descent
    model.save(we_file)
 def getJSON (self, name):
   """Load <repo>/data/<name>.json into self.json_data.

   If the file does not exist yet, run ./get_residue.sh to generate it
   and then load the result.
   """
   file_name = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data", name+".json")
   try:
     with open(file_name, 'r') as fh:
       self.json_data = json.load(fh)
   except IOError:
     # BUG FIX: with shell=True a list's extra items become shell
     # positional parameters, so file_name was never actually passed
     # to the script.  Run it directly instead.
     subprocess.call(["./get_residue.sh", file_name])
     with open(file_name, 'r') as fh:
       self.json_data = json.load(fh)
Example #7
0
    def json(self, json_file):
        """
        Reads and parses the input of a json file handler or file.

        Json files are parsed differently depending on if the root is a dictionary or an array.

        1) If the json's root is a dictionary, these are parsed into a sequence of (Key, Value)
        pairs

        2) If the json's root is an array, these are parsed into a sequence
        of entries

        >>> seq.json('examples/users.json').first()
        [u'sarah', {u'date_created': u'08/08', u'news_email': True, u'email': u'sarah@gmail.com'}]

        :param json_file: path or file containing json content
        :return: Sequence wrapping jsonl file
        """
        if isinstance(json_file, str):
            # Path given: pick an opener that transparently handles
            # compressed input (unless compression support is disabled).
            file_open = get_read_function(json_file, self.disable_compression)
            input_file = file_open(json_file)
            # NOTE(review): input_file is never closed explicitly here;
            # presumably relies on GC -- consider closing after load.
            json_input = jsonapi.load(input_file)
        elif hasattr(json_file, 'read'):
            # Already a file-like object; parse it directly.
            json_input = jsonapi.load(json_file)
        else:
            raise ValueError('json_file must be a file path or implement the iterator interface')

        # Dict roots become (key, value) pairs; array roots become entries.
        if isinstance(json_input, list):
            return self(json_input)
        else:
            return self(six.viewitems(json_input))
Example #8
0
def load_config():
    """Populate the module-level configuration globals from JSON files.

    Reads config.json, data/pkmn.json and data/icon.json into the
    corresponding globals, then seeds the spell-checker with the
    Pokemon name list.
    """
    global config
    global pkmn_info
    global type_chart
    global type_list
    global raid_info
    global egg_timer
    global raid_timer
    global icon_list
    global GOOGLE_API_KEY
    global GOOGLE_MAPS_URL

    # Main bot configuration.
    with open("config.json", "r") as config_fd:
        config = json.load(config_fd)

    # Pokemon metadata.
    with open(os.path.join('data', 'pkmn.json'), "r") as pkmn_fd:
        pkmn_info = json.load(pkmn_fd)

    # Icon lookup table.
    with open(os.path.join('data', "icon.json"), "r") as icon_fd:
        icon_list = json.load(icon_fd)

    # Restrict the spelling dictionary to known Pokemon names.
    spelling.set_dictionary(pkmn_info['pokemon_list'])
  def test_cache_invalidation(self):
    """End-to-end check that row-cache invalidations reach the replica.

    Waits for the replica to catch up to the master position, verifies
    the invalidation counter is advancing, then confirms that the first
    read of a row is a cache miss and the second read is a cache hit.
    """
    utils.debug("===========test_cache_invalidation=========")
    master_position = utils.mysql_query(62344, 'vt_test_keyspace', 'show master status')
    #The sleep is needed here, so the invalidator can catch up and the number can be tested.
    replica_tablet.mquery('vt_test_keyspace', "select MASTER_POS_WAIT('%s', %d)" % (master_position[0][0], master_position[0][1]), 5)
    time.sleep(5)
    # Totals/Invalidations is the replica-wide invalidation counter.
    invalidations = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['Totals']['Invalidations']
    invalidatorStats = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/vars" % replica_host)))['CacheInvalidationProcessor']
    utils.debug("Invalidations %d InvalidatorStats %s" % (invalidations, invalidatorStats))
    self.assertTrue(invalidations > 0, "Invalidations are flowing through.")

    res = replica_tablet.mquery('vt_test_keyspace', "select min(id) from vt_insert_test")
    self.assertNotEqual(res[0][0], None, "Cannot proceed, no rows in vt_insert_test")
    id = int(res[0][0])
    # Snapshot the per-table miss/hit counts before the probe queries.
    stats_dict = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['vt_insert_test']
    utils.debug("vt_insert_test stats %s" % stats_dict)
    misses = stats_dict['Misses']
    hits = stats_dict["Hits"]
    # First read: the row is not cached yet, so Misses must grow by 1.
    replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), path='test_keyspace/0')
    stats_dict = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['vt_insert_test']
    self.assertEqual(stats_dict['Misses'] - misses, 1, "This shouldn't have hit the cache")

    # Second read of the same row: now cached, so Hits must grow by 1.
    replica_tablet.vquery("select * from vt_insert_test where id=%d" % (id), path='test_keyspace/0')
    stats_dict = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['vt_insert_test']
    self.assertEqual(stats_dict['Hits'] - hits, 1, "This should have hit the cache")
    def load_data(self):
        """Load movie features and the dev/test splits, then vectorize.

        Populates self.data, self.dev_movie_ids, self.test_movie_ids,
        self.feature_keys, and the dev/test feature and score lists used
        for training.  Python 2 code (dict.iteritems).
        """
        logging.info("Starting data loads.")
        # BUG FIX: use context managers so the JSON file handles are
        # closed deterministically instead of leaking until GC.
        with open('data/features.json') as f:
            self.data = json.load(f)
        helpers.normalize_features(self.data)

        with open('data/dev.json') as f:
            self.dev_movie_ids = set(json.load(f))
        with open('data/test.json') as f:
            self.test_movie_ids = set(json.load(f))

        # Collect the union of all feature keys across every movie so
        # each vector below has a consistent length and ordering.
        logging.info("Loading feature keys...")
        feature_keys = set()
        for movie_id, features in self.data.iteritems():
            feature_keys.update(features['features'].keys())

        self.feature_keys = list(feature_keys)
        logging.info("Feature keys loaded.")

        logging.info("Vectorizing features...")
        self.dev_features = []
        self.dev_scores = []
        self.test_features = []
        self.test_scores = []
        for movie_id, features in self.data.iteritems():
            # Anything not in the dev split is treated as test data.
            if int(movie_id) in self.dev_movie_ids:
                features_list = self.dev_features
                scores_list = self.dev_scores
            else:
                features_list = self.test_features
                scores_list = self.test_scores

            # Missing features default to 0 so all vectors align with
            # self.feature_keys.
            features_list.append([features['features'].get(feature_key, 0) for feature_key in self.feature_keys])
            scores_list.append([features['rating']])
        logging.info("Features vectorized.")
Example #11
0
 def _fetch_json_with_retries(self, url, species, index):
     """Fetch and decode JSON from *url*, retrying connection errors.

     Retries up to 3 times on URLError; after that records
     CONNECTION_ERROR in self.err_list[index].  Any other error is
     recorded as JSON_ERROR.  Returns the decoded object, or None if
     an error entry was recorded.
     """
     attempts = 0
     while True:
         try:
             return json.load(urllib2.urlopen(url))
         except URLError:
             if attempts < 3:
                 attempts = attempts + 1
             else:
                 self.err_list[index] = (species, constants.CONNECTION_ERROR)
                 return None
         except Exception:
             # Narrowed from a bare `except:` so KeyboardInterrupt and
             # SystemExit are no longer swallowed as JSON errors.
             self.err_list[index] = (species, constants.JSON_ERROR)
             return None

 def get_first_image_specific_implementation(self, species, index):
     """Store the URL of the first EOL image found for *species*.

     On success puts (species, media_url) in self.img_list[index];
     on any failure puts (species, error_code) in self.err_list[index]
     and returns without raising.
     """
     (return_status, ids) = self.get_ids(species)
     if not return_status:
         # get_ids failed; ids carries the error payload.
         self.err_list[index] = (species, ids)
         return

     page_url = 'http://eol.org/api/pages/1.0/' + str(ids[0]) + '.json?images=10&videos=0&sounds=0&maps=0&text=0&iucn=false&subjects=overview&licenses=all&details=false&common_names='
     data_pages = self._fetch_json_with_retries(page_url, species, index)
     if data_pages is None:
         return

     pages_list = data_pages['dataObjects']
     if len(pages_list) == 0:
         self.err_list[index] = (species, constants.NO_IMAGES_FOR_SPECIES)
         return

     object_id = pages_list[0]['dataObjectVersionID']
     object_url = 'http://eol.org/api/data_objects/1.0/' + str(object_id) + '.json'
     data_object = self._fetch_json_with_retries(object_url, species, index)
     if data_object is None:
         return

     try:
         # Keep the original error categorization: a malformed payload
         # (missing 'dataObjects') counts as a JSON error.
         image_list = data_object['dataObjects']
     except KeyError:
         self.err_list[index] = (species, constants.JSON_ERROR)
         return

     if len(image_list) == 0:
         self.err_list[index] = (species, constants.NO_IMAGES_FOR_SPECIES)
         return

     self.img_list[index] = (species, image_list[0]['mediaURL'])
def dynamic_params_from_model(model):
    '''Get user-submitted dynamic parameters from a DynamicSaveInputs model'''
    # Round-trip the model through the Django JSON serializer to get a
    # plain dict of its fields.
    serialized = serializers.serialize('json', [model])
    inputs = json.loads(serialized)[0]['fields']

    base_dir = os.path.split(__file__)[0]

    # Names of the parameters a user may modify.
    with open(os.path.join(base_dir, "ogusa_user_modifiable.json"), "r") as fh:
        USER_MODIFIABLE_PARAMS = json.load(fh)["USER_MODIFIABLE_PARAMS"]

    # Default OGUSA parameter definitions.
    with open(os.path.join(base_dir, "ogusa_parameters.json"), "r") as fh:
        OGUSA_PARAMS = json.load(fh)

    params = {name: inputs[name] for name in USER_MODIFIABLE_PARAMS}

    # Blank submissions fall back to the stringified default value.
    for name, value in params.items():
        if value == '':
            params[name] = str(OGUSA_PARAMS[name]['value'])

    return params
Example #13
0
def get_jscs_options(path):
    """Collect JSCS options applying to *path*.

    Looks for .jscsrc, .jscs.json and package.json (jscsConfig key) up
    the directory tree, then merges them so that configs in deeper
    directories override shallower ones.  Returns the merged dict.
    """
    option_sets = []

    # .jscsrc may contain comments, so use the lenient parser.
    # BUG FIX throughout: open files via context managers so the
    # handles are closed even if parsing raises.
    jscsrc_path = findup(path, '.jscsrc')
    if os.path.isfile(jscsrc_path):
        with open(jscsrc_path, 'r', encoding='UTF-8') as f:
            jscsrc = lazy_parse_comment_json(f)
        option_sets.append((jscsrc_path, jscsrc))

    jscs_json_path = findup(path, '.jscs.json')
    if os.path.isfile(jscs_json_path):
        with open(jscs_json_path, 'r', encoding='UTF-8') as f:
            jscs_json = json.load(f)
        option_sets.append((jscs_json_path, jscs_json))

    package_path = findup(path, 'package.json')
    if os.path.isfile(package_path):
        with open(package_path, 'r', encoding='UTF-8') as f:
            package = json.load(f)
        if 'jscsConfig' in package:
            option_sets.append((package_path, package['jscsConfig']))

    # Sort sets by dirname length so deeper (more specific) configs are
    # merged last and win.
    option_sets.sort(key=lambda x: len(os.path.dirname(x[0])))

    # Merge options together.  (Renamed loop variable: the original
    # shadowed the `path` parameter.)
    options = dict()
    for config_path, option_set in option_sets:
        options.update(option_set)

    return options
def read_attrs(glider_config_path, glider_name):
    """Load glider attribute configuration files into one dict.

    Reads the institute-wide global_attributes.json plus the glider's
    deployment.json and instruments.json, then overlays the deployment's
    'global_attributes' on top of the institute globals.

    Returns a dict with keys 'global', 'deployment' and 'instruments'.
    """
    glider_dir = os.path.join(glider_config_path, glider_name)

    def _load_json(config_path):
        # Helper: read and decode one JSON configuration file.
        with open(config_path, 'r') as fh:
            return json.load(fh)

    attrs = {
        'global': _load_json(
            os.path.join(glider_config_path, "global_attributes.json")),
        'deployment': _load_json(
            os.path.join(glider_dir, "deployment.json")),
        'instruments': _load_json(
            os.path.join(glider_dir, "instruments.json")),
    }

    # Deployment-level globals override institute-level ones.
    attrs['global'].update(attrs['deployment']['global_attributes'])

    return attrs
def merge_data_from_crawl(): #To refactor far too long
    """Merge multi-part crawl output files into single per-category files.

    Scans data/data_from_crawl for category "index" files (names without
    digits), groups the part files belonging to each category by
    substring match, merges their JSON contents, and writes the merged
    result into data/data_to_use.  Python 2 code (iteritems/unicode).
    """
    category_file = sorted(os.listdir(os.getcwd()+"/data/data_from_crawl"))
    # Category files are the ones whose names contain no digits.
    movies_category = [file_name for file_name in category_file if not any(c.isdigit() for c in file_name)]
    for category in movies_category:
        if category.endswith('.json'):
            category = category[:-5]
        # NOTE(review): the 'die' special case with the length cap
        # presumably avoids one category name matching as a substring of
        # a longer category's files -- confirm against the data set.
        if category.endswith('die'):
            movies_with_part = [file_name for file_name in category_file if (len(file_name) < 30 and category in file_name)]
        else:
            movies_with_part = [file_name for file_name in category_file if category in file_name]
        if len(movies_with_part) > 1:
            # Several parts: merge all their movie entries into one dict
            # (later parts overwrite duplicate titles).
            all_part_data = {}
            for movies_part in movies_with_part:
                with open('data/data_from_crawl/' + movies_part) as part:
                    data = json.load(part)
                for movie_title, movie_data in data.iteritems():
                    all_part_data[movie_title] = movie_data
            with io.open("data/data_to_use/" + movies_with_part[0] , "w+", encoding='utf8') as all_part:
                output = json.dumps(all_part_data, ensure_ascii=False, encoding='utf8')
                all_part.write(unicode(output))
        # Exactly one part (and not a hidden file): copy it through.
        if len(movies_with_part) == 1 and  movies_with_part[0][0] != '.':
            with open('data/data_from_crawl/' + movies_with_part[0]) as part:
                data = json.load(part)
            with io.open("data/data_to_use/" + movies_with_part[0] , "w+", encoding='utf8') as all_part:
                output = json.dumps(data, ensure_ascii=False, encoding='utf8')
                all_part.write(unicode(output))
 def test_stop_replication(self):
   """Verify the row-cache invalidator resumes after a replication pause.

   Inserts rows, stops the replica's SQL thread long enough for the
   binlog stream to hit EOF, restarts it, and then checks that the
   invalidator is still enabled and that the invalidation counter kept
   advancing after the pause.
   """
   utils.debug("===========test_stop_replication=========")
   utils.run_vtctl('ChangeSlaveType test_nj-0000062345 replica')
   time.sleep(10)
   perform_insert(100)
   master_position = utils.mysql_query(62344, 'vt_test_keyspace', 'show master status')
   #The sleep is needed here, so the invalidator can catch up and the number can be tested.
   replica_tablet.mquery('vt_test_keyspace', "select MASTER_POS_WAIT('%s', %d)" % (master_position[0][0], master_position[0][1]), 5)
   time.sleep(5)
   # Baseline invalidation count before the replication pause.
   inv_count1 = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['Totals']['Invalidations']
   replica_tablet.mquery('vt_test_keyspace', "stop slave")
   perform_insert(100)
   # EOF is returned after 30s, sleeping a bit more to ensure we catch the EOF
   # and can test replication stop effectively.
   time.sleep(35)
   replica_tablet.mquery('vt_test_keyspace', "start slave")
   master_position = utils.mysql_query(62344, 'vt_test_keyspace', 'show master status')
   #The sleep is needed here, so the invalidator can catch up and the number can be tested.
   replica_tablet.mquery('vt_test_keyspace', "select MASTER_POS_WAIT('%s', %d)" % (master_position[0][0], master_position[0][1]), 5)
   time.sleep(10)
   invalidatorStats = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/vars" % replica_host)))['CacheInvalidationProcessor']
   utils.debug("invalidatorStats %s" % invalidatorStats)
   # Invalidation count after the pause; it must have increased.
   inv_count2 = framework.MultiDict(json.load(urllib2.urlopen("http://%s/debug/table_stats" % replica_host)))['Totals']['Invalidations']
   utils.debug("invalidator count1 %d count2 %d" % (inv_count1, inv_count2))
   self.assertEqual(invalidatorStats["States"]["Current"], "Enabled", "Row-cache invalidator should be enabled")
   self.assertTrue(inv_count2 - inv_count1 > 0, "invalidator was able to restart after a small pause in replication")
Example #17
0
def populate_recipe_tables():  # pylint: disable=too-many-locals
    """Populate tables about recipes.

    Reads the craft-plan and plan-name JSON resources, keeps only
    non-faction recipes ('bcc'/'bcmj' prefixes, with 'bcf' Matis recipes
    remapped to 'bcc' for higher quality), derives name/quality/category
    metadata from the plan keypath, and inserts recipe, category and
    component rows through the insert_* helpers.
    """
    # Get all plans and recipes
    with open(JsonRes.craftplan, 'r', encoding='UTF-8') as f_recipes:
        recipes = json.load(f_recipes)

    with open(JsonRes.plans_en, 'r', encoding='UTF-8') as f_plans:
        plans = flatten_nested_dict(json.load(f_plans))

    for recipe, component in recipes.items():
        # filter matis affiliadted recipe to get higher quality recipe
        if recipe[:3] == 'bcf':
            recipe = recipe.replace('bcf', 'bcc')

        # Filter recipes to keep only those are not affiliated to a faction
        if recipe[:3] != 'bcc' and recipe[:4] != 'bcmj':
            continue

        quality = get_plan_quality(recipe)

        try:
            # Split the recipe code to separate the quality code from the rest
            # pylint: disable=unpacking-non-sequence
            keypath, infos = get_plan(plans, recipe.split('_')[0])
        except TypeError:
            # get_plan returned something non-unpackable: unknown code.
            print(
                "The recipe code: {} was not found in the ".format(recipe) +
                "flatten dict of plans.",
                file=sys.stderr
            )
            continue

        # The keypath is '|'-separated; the last element is the recipe
        # name, everything before it is the category chain.
        *categories, recipe_name = keypath.split("|")

        if "ammo" in recipe_name:
            # recipe_name += " of " + categories[-1].lower()
            recipe_name = categories[-1] + " " + recipe_name.lower()
            categories[-1] = None

        # For weapons, the last category encodes handedness (1 or 2).
        if categories[0] == "Weapon":
            if "One" in categories[-1]:
                categories[-1] = 1
            elif "Two" in categories[-1] or "hand" in categories[-1]:
                categories[-1] = 2
            else:
                categories[-1] = None

        recipe_cat_id = insert_recipe_category(*categories)

        recipe_id = insert_recipe(
            recipe_name, quality, infos['icon'] + '.png', recipe_cat_id
        )

        # Recipe already exists ('stop' sentinel from insert_recipe)
        if recipe_id == 'stop':
            continue

        # Loop over plan's component
        for comp, amount in component['mpft'].items():
            insert_recipe_component(recipe_id, comp, amount)
Example #18
0
def load_options():
    """Read various proselintrc files, allowing user overrides.

    Tries each default location in order and keeps the first readable
    config, then overlays 'max_errors' and per-check settings from
    ~/.proselintrc if that file exists.  Returns the options dict.
    """
    possible_defaults = (
        '/etc/proselintrc',
        os.path.join(proselint_path, '.proselintrc'),
    )
    options = {}
    user_options = {}
    has_overrides = False

    # First readable default config wins.
    for filename in possible_defaults:
        try:
            # BUG FIX: close the file handle instead of leaking it.
            with open(filename) as f:
                options = json.load(f)
            break
        except IOError:
            pass

    try:
        with open(os.path.expanduser('~/.proselintrc')) as f:
            user_options = json.load(f)
        has_overrides = True
    except IOError:
        pass

    if has_overrides:
        if 'max_errors' in user_options:
            options['max_errors'] = user_options['max_errors']
        if 'checks' in user_options:
            for (key, value) in user_options['checks'].items():
                try:
                    # KeyError only fires if the defaults had no
                    # 'checks' section at all; then overrides are
                    # silently dropped.
                    options['checks'][key] = value
                except KeyError:
                    pass

    return options
Example #19
0
def getcommits_from_project(project):
    """Fetch all commits of the hard-coded GitHub repo and persist them.

    Authenticates with the module-level OAuth token, downloads the
    commit list for js2839/Fasta, classifies each commit message with
    getPercentage(), and stores the results via save_to_db().
    Python 2 code (print statements).
    """
    global access_token
    # Identify the authenticated user.
    # NOTE(review): `person` is never used afterwards -- dead code?
    url1 = 'https://api.github.com/user'
    request1=Request(url1)
    request1.add_header('Authorization', 'token %s' % access_token)
    response1 = urlopen(request1)
    result1 = json.load(response1)
    person = result1['login']
    # Repository is hard-coded: owner 'js2839', repo 'Fasta'.
    repo_info=['Fasta','js2839']
    owner= repo_info[1]
    repo = repo_info[0]
    url = 'https://api.github.com/repos/'+owner+'/'+repo+'/commits'
    data=[]
    request = Request(url)
    request.add_header('Authorization', 'token %s' % access_token)
    response = urlopen(request)
    result = json.load(response)
    # Collect (message, author name, author date) for every commit.
    for i in range(len(result)):
        print 'result0'
        data.append([result[i]['commit']['message'], result[i]['commit']['author']['name'], result[i]['commit']['author']['date']])
        print data[i]
    # Classify each message and save the percentage/subject to the DB.
    for com in data:
        (per,sub_name)=getPercentage(com[0])
        err = save_to_db( per, sub_name, com[1], project, com[2])
    return 
def crash_recover():
	global crash_obj
	tidy_reset=0
	try:
		with open(crash_file):
			print "recovering from %s" % crash_file
			crash_json=open(crash_file)
			crash_progress=json.load(crash_json)
			tidy_reset=1
			pass
	except IOError:	
		print "no crash log found.  Executing as normal"
		
		pass
	
	if tidy_reset:
		print "\tRestoring progress"
		crash_json = open(crash_file)
		crash_obj=json.load(crash_json)
	else:
		validate_delete = raw_input("Delete all entries to %s in %s.%s?  (Y/N)" % (start_date,db_schema,db_name))
		if validate_delete.upper() == 'Y':
			db_cursor.execute("DELETE FROM %s WHERE date>='%s'" % (db_name,start_date))
			db.commit()
			print "\tCleaning up ALL entries to %s" % start_date
		else:
			print "\tWARNING: values may be wrong without scrubbing duplicates"
			#Initialize crash_obj
		crash_obj={}
		crash_obj["parsed_data"]={}
		crash_obj["progress"]={}
Example #21
0
def ReadJson(config_file):
    """Read in a JSON configuration file and return the corresponding dicts.

    A JSON file only defines a single dict.  However to be parallel to the
    functionality of ReadYaml, the output is a list with a single item,
    which is the dict defined by the JSON file.

    @param config_file      The name of the configuration file to read.

    @returns [config_dict]
    """
    import json

    with open(config_file) as fin:
        try:
            # Preserve key order, per
            # http://stackoverflow.com/questions/6921699/can-i-get-json-to-load-into-an-ordereddict-in-python
            config = json.load(fin, object_pairs_hook=OrderedDict)
        except TypeError:  # pragma: no cover
            # Python 2.6's json lacks object_pairs_hook: fall back to
            # simplejson, or to a plain dict if that is unavailable.
            # A failed load may leave the handle mid-file, so rewind first.
            fin.seek(0)
            try:
                import simplejson
                config = simplejson.load(fin, object_pairs_hook=OrderedDict)
            except ImportError:
                config = json.load(fin)

    # A JSON file defines exactly one job; wrap it to match ReadYaml.
    return [config]
Example #22
0
def _validate_json_patch(jp_obj):
    """Raise CommandError unless *jp_obj* is a well-formed JSON-patch list."""
    if not isinstance(jp_obj, list):
        raise exceptions.CommandError('JSON-patch must be a list of changes')
    for change in jp_obj:
        if 'op' not in change or 'path' not in change:
            raise exceptions.CommandError('Every change in JSON-patch must '
                                          'contain "op" and "path" keys')
        op = change['op']
        if op not in ['add', 'replace', 'remove']:
            raise exceptions.CommandError('The value of "op" item must be '
                                          '"add", "replace" or "remove", '
                                          'got {0}'.format(op))
        if op != 'remove' and 'value' not in change:
            raise exceptions.CommandError('"add" or "replace" change in '
                                          'JSON-patch must contain "value" '
                                          'key')


def do_environment_model_edit(mc, args):
    """Edit an environment's object model.

    Reads a JSON-patch from args.filename (or stdin when no filename is
    given), validates it, applies it to the environment's model via the
    API client, and prints the updated model.
    """
    # Read the JSON-patch either from the given file or from stdin.
    if not args.filename:
        jp_obj = json.load(sys.stdin)
    else:
        with open(args.filename) as fpatch:
            jp_obj = json.load(fpatch)

    _validate_json_patch(jp_obj)

    session_id = args.session_id
    new_model = mc.environments.update_model(args.id, jp_obj, session_id)
    print(utils.json_formatter(new_model))
Example #23
0
def install_vocabularies():
    """Rebuild the vocabulary tables from the bundled JSON fixtures.

    Deletes every Vocabulary, VocabularyClass and VocabularyProperty row,
    then re-creates them from data/vocabularies.json, data/classes.json and
    data/properties.json (paths relative to this module).

    Returns:
        HttpResponse whose JSON body reports the number of vocabularies,
        classes and properties created (status 200).
    """
    Vocabulary.objects.all().delete()
    VocabularyClass.objects.all().delete()
    VocabularyProperty.objects.all().delete()

    base_dir = os.path.dirname(__file__)

    # load vocabularies
    # 'with' guarantees the fixture handle is closed even if an insert
    # fails (the original left the file open on any exception).
    with open(os.path.join(base_dir, 'data/vocabularies.json')) as vocabularies_fp:
        data = json.load(vocabularies_fp)
    n_of_vocabularies = 0
    with transaction.atomic():  # atomic transactions vastly improve performance
        for v in data:
            fields = v['fields']
            vocabulary = Vocabulary.objects.create(
                pk=v['pk'], category=fields['category'],
                version=fields['version'], votes=fields['votes'],
                originalUrl=fields['originalUrl'],
                description=fields['description'], title=fields['title'],
                downloads=fields['downloads'],
                lodRanking=fields['lodRanking'],
                preferredNamespacePrefix=fields['preferredNamespacePrefix'],
                datePublished=fields['datePublished'],
                downloadUrl=fields['downloadUrl'], score=fields['score'],
                uploader=User.objects.get(pk=fields['uploader']),
                dateModified=fields['dateModified'],
                dateCreated=fields['dateCreated'],
                preferredNamespaceUri=fields['preferredNamespaceUri'],
                example=fields['example'], prevent_default_make=True)
            n_of_vocabularies += 1
            vocabulary.prevent_default_make = False  # reset to false so it can be updated

    # load classes
    with open(os.path.join(base_dir, 'data/classes.json')) as classes_fp:
        data = json.load(classes_fp)
    n_of_classes = 0
    with transaction.atomic():
        for c in data:
            fields = c['fields']
            VocabularyClass.objects.create(
                pk=c['pk'], description=fields['description'],
                uri=fields['uri'], label=fields['label'],
                vocabulary=Vocabulary.objects.get(pk=fields['vocabulary']))
            n_of_classes += 1

    # load properties
    with open(os.path.join(base_dir, 'data/properties.json')) as properties_fp:
        data = json.load(properties_fp)
    n_of_properties = 0
    with transaction.atomic():
        for p in data:
            fields = p['fields']
            VocabularyProperty.objects.create(
                pk=p['pk'], description=fields['description'],
                uri=fields['uri'],
                vocabulary=Vocabulary.objects.get(pk=fields['vocabulary']),
                label=fields['label'], domain=fields['domain'],
                range=fields['range'], parent_uri=fields['parent_uri'])
            n_of_properties += 1

    return HttpResponse(json.dumps({
        'n_of_vocabularies': n_of_vocabularies,
        'n_of_classes': n_of_classes,
        'n_of_properties': n_of_properties
    }), status=200, content_type="application/json")
Example #24
0
    def test_post_works(self):
        """Posting the save_post hook creates the WPPost and its relations.

        Also checks that re-posting after the local author has been cleared
        re-creates the author foreign relation.
        """
        site = WPSiteFactory(url='http://www.foo.com/')
        # assert the post we're about to make doesn't exist
        self.assertFalse(WPPost.objects.filter(wp=site, id=521).exists())
        # sample POST based on a RequestBin[requestb.in] test
        request = self.factory.post('/foo/', {
            'hook': 'save_post',
            'ID': '521',
            'guid': 'http://www.foo.com/?p=521',
        })
        # 'with' closes the fixture handle; json.load(open(...)) leaked it.
        with open(os.path.join(BASE_DIR, 'support', 'posts_521.json')) as fixture:
            data = json.load(fixture)
        with mock.patch('wjordpress.models.WPApi') as MockApi:
            # got to be a better syntax for this
            MockApi.return_value = mock.MagicMock(**{'posts.return_value': data})
            response = self.view.post(request, site.pk)
        self.assertEqual(response.status_code, 200)
        # assert this post now exists
        self.assertTrue(WPPost.objects.filter(wp=site, id=521).exists())
        post = WPPost.objects.filter(wp=site, id=521).get()
        self.assertTrue(post.author)

        # Contrived example: author changes locally, will it get set again?
        post.author = None
        post.save()

        with open(os.path.join(BASE_DIR, 'support', 'posts_521.json')) as fixture:
            data = json.load(fixture)
        with mock.patch('wjordpress.models.WPApi') as MockApi:
            # got to be a better syntax for this
            MockApi.return_value = mock.MagicMock(**{'posts.return_value': data})
            response = self.view.post(request, site.pk)
        # sanity check
        self.assertEqual(response.status_code, 200)
        post = WPPost.objects.filter(wp=site, id=521).get()
        # assert foreign relations were created
        self.assertTrue(post.author)
Example #25
0
    def load_config(self):
        """Load the configuration files.

        Reads conf/config.json first, then merges in the 'entry' items from
        every other *.json file found under conf/; entries already present
        in the main config take precedence.
        """
        config_dir = os.path.join(sys.path[0], 'conf')
        configs = {}

        # Main config file.
        main_path = os.path.join(config_dir, 'config.json')
        try:
            with open(main_path) as fp:
                configs = json.load(fp)
        except Exception as e:
            log.error('main: failed to load config from conf/config.json: %s' % e)
        if 'entry' not in configs:
            configs['entry'] = {}

        # Merge 'entry' items from every other json file under conf/.
        for _root, _dirs, file_names in os.walk(config_dir):
            for file_name in file_names:
                if file_name == 'config.json' or not file_name.endswith('.json'):
                    continue
                sub_path = os.path.join(config_dir, file_name)
                try:
                    with open(sub_path) as fp:
                        sub_configs = json.load(fp)
                    # Insert into the main config; existing names win.
                    for name, config in sorted(sub_configs['entry'].items()):
                        if name not in configs['entry']:
                            configs['entry'][name] = config
                except Exception as e:
                    log.error('main: failed to load config from conf/%s: %s' % (file_name, e))

        return configs
Example #26
0
def main():
    """Render every input intrinsic file through the selected output format."""
    args = parse_args()
    inputs = args.in_
    out = args.out
    renderer = FORMATS[args.format]
    info = args.info

    single_input_is_info = False
    if len(inputs) > 1 and info is None:
        print('error: cannot have multiple inputs without an info header.', file=sys.stderr)
        sys.exit(1)
    elif info is None:
        # With exactly one input and no explicit info header, the input
        # file doubles as the platform-info source.
        info = inputs[0]
        single_input_is_info = True

    info_json = json.load(info)
    platform = PlatformInfo(info_json)

    print(renderer.open(platform), file=out)

    for source in inputs:
        if single_input_is_info:
            data = info_json
        else:
            data = json.load(source)
            data.update(info_json)

        for intr in IntrinsicSet(platform, data).intrinsics():
            for mono in intr.monomorphise():
                print(renderer.render(mono), file=out)

    print(renderer.close(), file=out)
Example #27
0
	def __init__(self, name, require_local=False):
		"""Load <name>.global.json merged with <name>.local.json from conf_dir.

		The global file may be absent; the local file may be absent unless
		require_local is True, in which case ValueError is raised.  The
		merged settings are stored in self.conf.
		"""
		global_path = os.path.join(conf_dir, name + '.global.json')
		local_path = os.path.join(conf_dir, name + '.local.json')

		conf = {}
		try:
			with open(global_path, 'r') as fh:
				conf = json.load(fh)
		except IOError:
			pass  # the global file is allowed to be missing

		local_conf = None
		try:
			with open(local_path, 'r') as fh:
				local_conf = json.load(fh)
		except IOError:
			if require_local:
				raise ValueError('Local settings file does not exist and is required: %s' % name + '.local.json')

		if local_conf is not None:
			merge_dicts(conf, local_conf)

		self.conf = conf
Example #28
0
    def download(self, ud, d):
        """Fetch url.

        If a pre-fetched full-mirror tarball exists and nothing is unpacked
        yet, unpack it and return.  Otherwise resolve the npm dependency
        tree — driven by the NPM_SHRINKWRAP / NPM_LOCKDOWN files when they
        are configured and readable — and write the result to ud.localpath
        as JSON.
        """
        jsondepobj = {}
        shrinkobj = {}
        lockdown = {}

        # Reuse the mirror tarball instead of re-fetching.
        if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror):
            dest = d.getVar("DL_DIR", True)
            bb.utils.mkdirhier(dest)
            runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest)
            return

        shwrf = d.getVar('NPM_SHRINKWRAP', True)
        logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
        try:
            with open(shwrf) as datafile:
                shrinkobj = json.load(datafile)
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # propagate.  TypeError covers NPM_SHRINKWRAP being unset
        # (open(None)); IOError/OSError a missing/unreadable file;
        # ValueError malformed JSON.
        except (IOError, OSError, ValueError, TypeError):
            logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
        lckdf = d.getVar('NPM_LOCKDOWN', True)
        logger.debug(2, "NPM lockdown file is %s" % lckdf)
        try:
            with open(lckdf) as datafile:
                lockdown = json.load(datafile)
        except (IOError, OSError, ValueError, TypeError):
            logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)

        # Without a usable shrinkwrap ('name' key present), walk the
        # registry; otherwise honor the pinned shrinkwrap tree.
        if ('name' not in shrinkobj):
            self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud)
        else:
            self._getshrinkeddependencies(ud.pkgname, shrinkobj, ud.version, d, ud, lockdown, jsondepobj)

        with open(ud.localpath, 'w') as outfile:
            json.dump(jsondepobj, outfile)
 def test_init_manifest_packageid(self):
     """crosswalk-app manifest honors --package-id on create and on update."""
     comm.setUp()
     os.chdir(comm.XwalkPath)
     comm.clear("org.xwalk.test")
     os.mkdir("org.xwalk.test")
     cmd = (
         comm.HOST_PREFIX
         + comm.PackTools
         + "crosswalk-app manifest "
         + comm.XwalkPath
         + "org.xwalk.test --platforms=android --package-id=org.xwalk.test"
     )
     os.system(cmd)
     with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file:
         data = json.load(json_file)
     updatecmd = (
         comm.HOST_PREFIX
         + comm.PackTools
         + "crosswalk-app manifest "
         + comm.XwalkPath
         + "org.xwalk.test --platforms=android --package-id=org.test.foo"
     )
     os.system(updatecmd)
     with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file_update:
         updatedata = json.load(json_file_update)
     comm.clear("org.xwalk.test")
     # assertEquals is a deprecated alias of assertEqual.
     self.assertEqual(data["xwalk_package_id"].strip(os.linesep), "org.xwalk.test")
     self.assertEqual(updatedata["xwalk_package_id"].strip(os.linesep), "org.test.foo")
Example #30
0
    def get_cur(self):
        """Resolve the (version, type, date) tuple for the current build.

        Combines the stored base version/type with command-line overrides
        from self.opts, picks the git branch to track, and — unless this is
        a tagged release — queries the GitHub API to fill in the commit
        hash and/or commit date.  The result is stored in self.cur.

        NOTE(review): Python 2 code (print statement, urllib.urlopen);
        do not copy into Python 3 callers unchanged.
        """
        # Release-type names; the tuple index is the ordering used below
        # (alpha=0 .. rc=3, tagged=4, p=5).
        types = ('alpha','beta','pre','rc',None,'p')
        version = self.base[0]
        if self.opts.version is not None:
            version = self.opts.version

        # NOTE: 'type' shadows the builtin of the same name.
        type = self.base[1]
        if self.opts.type == 'tagged':
            # Tagged releases never pin a specific commit hash.
            type = 4
            self.opts.hash = None
        elif self.opts.type is not None:
            type = types.index(self.opts.type)

        if self.opts.branch:
            self.branch = self.opts.branch
        elif type < 4:
            # Pre-release builds track master.
            self.branch = 'master'
        else:
            # Tagged/post releases track the matching fixes/X.Y branch.
            self.branch = 'fixes/{0}'.format('.'.join(version.split('.')[0:2]))

        if type != 4:
            if self.opts.hash is None:
                # No hash given: take the branch tip from the GitHub API
                # and, if no date was given either, stamp with its date.
                commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" + urllib.quote(self.branch, '')))
                self.opts.hash = commit['sha']
                if self.opts.date is None:
                    self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')
                print "Autoselecting hash: "+self.opts.hash
            elif self.opts.date is None:
                # Hash given but no date: look the commit up to date it.
                commit = json.load(urllib.urlopen("https://api.github.com/repos/mythtv/MythTV/commits/" + self.opts.hash))
                self.opts.date = process_date(commit['commit']['committer']['date']).strftime('%Y%m%d')

        self.cur = (version, type, self.opts.date)
        if self.opts.verbose: print 'New version set to: {0}-{1}'.format(self.name,self.get_version(self.cur))