	def insertStuff(self, pre, after, preTwo, afterTwo, type):
		sels = self.view.sel()
		nSel = len(sels)

		for sel in sels:
			if sel.empty():
				continue
			text = self.view.substr(sel)
			tab = self.insert_start_line(sel)
			opts = jsbeautifier.default_options()
			opts.indent_size = 4
			if nSel == 1:
				text = self.insert_tab_line(text)
				text = text.replace('\n' + tab, '\n')
				text = pre + '\n\t' + text + '\n' + after
				text = self.normalize_line_endings(text)
				text = jsbeautifier.beautify(text, opts)
				self.view.run_command('insert_snippet', {"contents": text})
			else:
				text = self.insert_tab_line(text)
				text = preTwo + '\n\t' + tab + text + '\n' + tab + afterTwo
				text = self.normalize_line_endings(text)
				text = jsbeautifier.beautify(text, opts)
				self.view.replace(self.edit, sel, text)
			sublime.status_message(type + ' added')
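For reference, the core jsbeautifier calls used above can be exercised on their own. A minimal sketch, assuming only that jsbeautifier is installed; the input string is made up for illustration:

import jsbeautifier

# default_options() returns a BeautifierOptions object whose attributes
# (indent_size, indent_char, ...) control the formatter
opts = jsbeautifier.default_options()
opts.indent_size = 4

ugly = "if(x){y=doStuff(x);}else{y=0;}"  # hypothetical one-liner
print(jsbeautifier.beautify(ugly, opts))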
Example #2
from json import dumps, loads
from os.path import dirname, join, realpath

from jsbeautifier import beautify


def load_settings():
    settings_path = join(dirname(realpath(__file__)), "settings.json")
    try:
        settings = loads(open(settings_path).read())
        has_change = False
        for i, item in enumerate(settings["processes"]):
            for key, val in {"process_name": "input_process_name_here", "grep_string": "grep_string", "owner": None,
                             "class": "default"}.items():
                if key not in item:
                    settings["processes"][i][key] = val
                    has_change = True
        for item in settings["processes"]:
            if item["class"] not in settings["classes"]:
                settings[item["class"]] = settings["classes"]["default"].copy()
                has_change = True
        for prio_class, config in settings["classes"].items():
            if "prio_class" not in config or "prio_data" not in config or len(config) != 2:
                settings["classes"][prio_class].clear()
                settings["classes"][prio_class]["prio_class"] = settings["classes"]["default"]["prio_class"]
                settings["classes"][prio_class]["prio_data"] = settings["classes"]["default"]["prio_data"]
                has_change = True
        if has_change:
            open(settings_path, "w+").write(beautify(dumps(settings)))
    except (FileNotFoundError, ValueError):
        settings = IOVeryNice.default_settings.copy()
        open(settings_path, "w+").write(beautify(dumps(settings)))
    return settings
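The beautify(dumps(...)) idiom above is just pretty-printing JSON through jsbeautifier. A minimal sketch of the same pattern in isolation; the settings dict here is a made-up stand-in, and json.dumps(settings, indent=4) from the standard library would give a similar result:

from json import dumps
from jsbeautifier import beautify

settings = {"processes": [], "classes": {"default": {"prio_class": "idle", "prio_data": 7}}}
with open("settings.json", "w") as fp:
    # beautify() reformats the compact JSON string produced by dumps()
    fp.write(beautify(dumps(settings)))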
Example #3
def find_tracts_with_users():
	with open('data/' + my.DATA_FOLDER + 'user_homes.json', 'rb') as fp:
		homes = anyjson.loads(fp.read())
	homes = dict( ( int(h[0]), ogr.CreateGeometryFromWkt('POINT(%s %s)' \
						% (h[1][1], h[1][0])) )  for h in homes.items() )

	with open('data/' + my.DATA_FOLDER + 'city_tracts.json', 'rb') as fp:
		polygons = anyjson.loads(fp.read())

	user_counts = dict( (gid, 0) for gid in polygons.keys())
	users_in_tracts = []
	tracts_with_users = []
	tract_users = dict( (gid, []) for gid in polygons.keys())

	for gid, json_poly in polygons.iteritems():
		json_poly_str = anyjson.dumps(json_poly)
		poly = ogr.CreateGeometryFromJson(json_poly_str)
		
		for user_id, h in homes.items():
			if h.Within(poly):
				user_counts[gid] += 1
				users_in_tracts.append(user_id)
				tract_users[gid].append(user_id)
				del homes[user_id]

	tracts_with_users = [gid for gid, v in user_counts.items() if v != 0]

	with open('data/' + my.DATA_FOLDER + 'user_counts.json', 'wb') as fp:
		fp.write( jsb.beautify( anyjson.dumps(user_counts) ) )
	with open('data/' + my.DATA_FOLDER + 'tract_users.json', 'wb') as fp:
		fp.write( jsb.beautify( anyjson.dumps(tract_users) ) )
	with open('data/' + my.DATA_FOLDER + 'users_in_tracts.json', 'wb') as fp:
		fp.write( anyjson.dumps(users_in_tracts) )
	with open('data/' + my.DATA_FOLDER + 'tracts_with_users.json', 'wb') as fp:
		fp.write( anyjson.dumps(tracts_with_users) )
Example #4
    def do_jsbeautify(self,line):
        try:
            import jsbeautifier
            l = line.split(" ")
            if len(l) < 2:
                self.help_jsbeautify()
            else:
                OPTIONS = ['slice','obj']
                option = l[0]

                if option not in OPTIONS:
                    print "Invalid option"
                    return False

                id = l[1]
                response, size = CTCore.get_response_and_size(id, "all")
                name = CTCore.get_name(id)

                if option == "slice":
                    offset = int(l[2])
                    length = l[3]

                    bytes, length = get_bytes(response,offset,length)
                    js_bytes = bytes
                    res = jsbeautifier.beautify(js_bytes)
                    print res

                if option == "obj":
                    res = jsbeautifier.beautify(response)
                    obj_num = CTCore.add_object("jsbeautify",res,id=id)
                    print " JavaScript Beautify of object {} ({}) successful!".format(str(id), name)
                    print " New object created: {}".format(obj_num) + newLine

        except Exception,e:
            print str(e)
Example #5
    def decodesto(self, input, expectation=None):
        self.assertEqual(
            jsbeautifier.beautify(input, self.options), expectation or input)

        # if the expected is different from input, run it again
        # expected output should be unchanged when run twice.
        if expectation is not None:
            self.assertEqual(
                jsbeautifier.beautify(expectation, self.options), expectation)
Example #6
def show_campaign(campaign_id):
  campaign = CampaignController.get(campaign_id)

  beautified_text = {}

  if campaign is not None:
    beautified_text = {
      'kernel' : jsbeautifier.beautify(campaign.kernel),
      'static_dataset' : jsbeautifier.beautify(campaign.static_dataset),
      'dataset_gen' : jsbeautifier.beautify(campaign.dataset_gen)
    }

  return render_template('/campaign/show_campaign.html', campaign=campaign, beautified_text=beautified_text)
Example #7
def _make_volume_mat(id):
	''''''
	print '\n', id, '\n'
	volume_mat = {}
	with open('data/' + my.DATA_FOLDER + 'sets.json', 'rb') as fp:
		sets = anyjson.loads(fp.read()).items()

	with open('data/' + my.DATA_FOLDER + 'time.json', 'rb') as fp:
		time = anyjson.loads(fp.read())
	ts_start = datetime.strptime(time['ts_start'], my.TS_FORMAT)
	ts_end = datetime.strptime(time['ts_end'], my.TS_FORMAT)
	diff = ts_end - ts_start
	diff = int(diff.total_seconds())
	combine = 120
	x, x_ticks, x_tickslabels = _make_x(ts_start, diff, combine=combine)
	
	for pid, st in sets:
		offsets = _get_offsets(id, st)
		print pid, st, len(offsets)
		y = dict((i, 0) for i in x)
		for o in offsets:
			idx = int(o/combine)
			if idx in y: y[idx] += 1
		y = y.values()
		volume_mat[pid] = y

	path = 'data/' + my.DATA_FOLDER + 'volume_mat/'
	if not os.path.exists(path): os.makedirs(path)
	with open(path + str(id) + '.json', 'wb') as fp:
		fp.write(jsb.beautify(anyjson.dumps(volume_mat)))
Example #8
    def beautifier(self, buffer):
        'clean up the JS'
        try:
            temp = jsbeautifier.beautify(buffer.read())
        except Exception, e:
            logging.error("ERROR: jsbeautifier: {}".format(e))
            return
Example #9
def test_fragment(input, expectation=None):
    global opts
    if expectation is None:
        expectation = input

    res = beautify(input, opts)
    if res != expectation:
        print("""=== TEST FAILED ===
%s
Input:
------
%s

Expected:
---------
%s

Received:
---------
%s
""" % (opts, input, expectation, res))
        sys.exit(-1) # bail out immediately

    global tests_passed
    tests_passed += 1
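Both test helpers above rely on the same property spelled out in Example #5: beautified output should be stable, so running beautify on already-beautified code must not change it. A minimal sketch of that idempotency check, assuming default options and a made-up input:

import jsbeautifier

src = "var a=function(){return 1;};"  # hypothetical input
once = jsbeautifier.beautify(src)
twice = jsbeautifier.beautify(once)
# the second pass should leave the first pass's output untouched
assert once == twice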
Example #10
File: DFT.py Project: chorsley/thug
    def handle_javascript(self, script):
        try:
            log.info(jsbeautifier.beautify(str(script)))
        except:
            log.info(script)

        #if isinstance(script, BeautifulSoap.Tag):
        #   js = ' '.join(script.contents)
        #else:
        #    js = script.text
        js = getattr(script, 'text', None)
        relationship = 'Contained_Inside'

        if not js:
            src = script.get('src', None)
            if not src:
                return
        
            try:
                response, js = self.window._navigator.fetch(src)
            except:
                return
                
            if response.status == 404:
                return

            relationship = 'External'

        if len(js):
            log.ThugLogging.add_code_snippet(js, 'Javascript', relationship)

        self.window.evalScript(js, tag = script)
Example #11
    def run():
        """
        Start program
        """
        options = Options()

        for file in options.get_files():
            if not os.path.isfile(file):
                raise SystemExit(
                    sys.argv[0] + ': Cannot find "' + file + '" file.')
            print('Re-formatting "' + file + '" XML file...')

            lines = []
            try:
                with open(file, errors='replace') as ifile:
                    for line in ifile:
                        lines.append(line.strip())
            except OSError:
                raise SystemExit(
                    sys.argv[0] + ': Cannot read "' + file + '" file.')

            tmpfile = file + '-tmp' + str(os.getpid())
            try:
                with open(tmpfile, 'w', newline='\n') as ofile:
                    print(jsbeautifier.beautify(''.join(lines)), file=ofile)
            except OSError:
                raise SystemExit(
                    sys.argv[0] + ': Cannot create "' + tmpfile + '" file.')
            try:
                shutil.move(tmpfile, file)
            except OSError:
                raise SystemExit(
                    sys.argv[0] + ': Cannot rename "' + tmpfile +
                    '" file to "' + file + '".'
                )
Example #12
def find_user_race():
	with open('data/' + my.DATA_FOLDER + 'user_homes.json', 'rb') as fp:
		homes = anyjson.loads(fp.read())

	global loc_data
	with open('data/' + my.DATA_FOLDER + 'loc_data.json', 'rb') as fp:
		loc_data = anyjson.loads(fp.read())

	#for user in homes.items()[:100]:
	#	_find_race(user)
	pool = Pool(my.PROCESSES)
	user_race = pool.map(_find_race, homes.items())
	user_race = filter(None, user_race)

	races = {
		'w' : [],
		'b' : [],
		'a' : [],
		'h' : [],
		'o' : []
	}
	for user_id, race in user_race:
		races[race].append(user_id)

	with open('data/' + my.DATA_FOLDER + 'user_race.json', 'wb') as fp:
		fp.write(jsb.beautify(anyjson.dumps(races)))
Example #13
def toEntryPoint(pathJs):
    firstLine = True
    newContents = ""
    contents = source.GetFileContents(pathJs)
    source_lines = contents.splitlines()

    for line in source_lines:
        if _REGEX_GOOG.search(line):
            continue

        match = _BASE_REGEX_STRING.match(line)
        if match:
            if not firstLine:
                newContents += ",\n"
            else:
                newContents += '\n_moduleConfig={alias:{'
            newContents += '"' + match.group(2) + '":"' + getAmdFilePath(match.group(1)) + '"'
            firstLine = False
        else:
            newContents += "\n" + line

    newContents += '\n}};'


    # output
    out = open(getEntryPointFilePath(pathJs), 'w')
    beautifier_options = jsbeautifier.BeautifierOptions()
    beautifier_options.indent_with_tabs = True
    out.write(jsbeautifier.beautify(newContents, beautifier_options))
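Example #13 configures the formatter through a BeautifierOptions object rather than default_options(); both expose the same attributes. A minimal sketch of tab-indented output, with a made-up input string:

import jsbeautifier

beautifier_options = jsbeautifier.BeautifierOptions()
beautifier_options.indent_with_tabs = True  # indent with "\t" instead of spaces

print(jsbeautifier.beautify("function f(a){if(a){return a*2;}}", beautifier_options))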
Example #14
File: test.py Project: hex705/tiles
def run():
	base = open('Processing.hub').read()
	# find all the $sections
	sections = re.findall('\$\w+', base)
		
	subs = dict(zip(sections, [""]*len(sections)))
	print subs
	
	xml = ET.parse("Serial-Processing.xml")
	
	# apply the core elements to the template
	core = xml.find('core')
	for element in core:
		section = "$" + element.tag
		# remove extra newlines and whitespace
		content = element.text.strip()
		# set counters and addresses
		content = content.replace("$count", "")
		# add this code to the right section 
		if section in subs:
			subs[section] += content

	# substitute the section labels with the code 			
	for sub, code in subs.iteritems():
		base = base.replace(sub, code) 
	
	# format // prettify 
	result = jsbeautifier.beautify(base)
	print result
Example #15
	def run(self, edit):
		settings = self.view.settings()

		# build beautifier options from the view settings
		opts = jsbeautifier.default_options()
		opts.indent_char = " " if settings.get("translate_tabs_to_spaces") else "\t"
		opts.indent_size = int(settings.get("tab_size")) if opts.indent_char == " " else 1
		opts.max_preserve_newlines = settings.get("max_preserve_newlines") or 3
		opts.preserve_newlines = settings.get("preserve_newlines") or True
		opts.jslint_happy = settings.get("jslint_happy") or False
		opts.brace_style = settings.get("brace_style") or "collapse"
		opts.keep_array_indentation = settings.get("keep_array_indentation") or False
		opts.indent_level = settings.get("indent_level") or 0

		selection = self.view.sel()[0]
		nwsOffset = self.prev_non_whitespace()		

		# do formatting and replacement
		replaceRegion = selection if len(selection) > 0 else sublime.Region(0, self.view.size())
		res = jsbeautifier.beautify(self.view.substr(replaceRegion), opts)
		self.view.replace(edit, replaceRegion, res)

		# re-place cursor
		offset = self.get_nws_offset(nwsOffset, self.view.substr(sublime.Region(0, self.view.size())))
		rc = self.view.rowcol(offset)
		pt = self.view.text_point(rc[0], rc[1])
		sel = self.view.sel()
		sel.clear()
		self.view.sel().add(sublime.Region(pt))

		self.view.show_at_center(pt)
Example #16
def dev_norm():
	with open('data/' + my.DATA_FOLDER + 'X.json', 'rb') as fp:
		X = anyjson.loads(fp.read())

	AVG = [0]*len(X[0])
	for x in X:
		for i in range(len(x)):
			AVG[i] += x[i]
	for i in range(len(AVG)):
		AVG[i] /= len(X)

	X_ = []
	for x in X:
		for i in range(len(x)):
			x[i] -= AVG[i]
		
		mn = min([i for i in x if i < 0])
		if mn != 0:
			mn = mn*(-1)
		for i in range(len(x)):
			if x[i] != 0:
				#x[i] += mn
				x[i] = abs(x[i])

		X_.append(x)

	with open('data/' + my.DATA_FOLDER + 'X.json', 'wb') as fp:
		fp.write(jsb.beautify(anyjson.dumps(X_)))
Example #17
def PLAYVIDEO(url, name):
    progress.create('Play video', 'Searching videofile.')
    progress.update( 10, "", "Loading video page", "" )
    hosts = []
    videosource = getHtml(url, url)
    if re.search('videomega', videosource, re.DOTALL | re.IGNORECASE):
        hosts.append('VideoMega')
    if re.search('openload', videosource, re.DOTALL | re.IGNORECASE):
        hosts.append('OpenLoad')
    if re.search('www.flashx.tv', videosource, re.DOTALL | re.IGNORECASE):
        hosts.append('FlashX')        
    if len(hosts) == 0:
        progress.close()
        dialog.ok('Oh oh','Couldn\'t find any video')
        return
    elif len(hosts) > 1:
        if addon.getSetting("dontask") == "true":
            vidhost = hosts[0]            
        else:
            vh = dialog.select('Videohost:', hosts)
            vidhost = hosts[vh]
    else:
        vidhost = hosts[0]
    
    if vidhost == 'VideoMega':
        progress.update( 40, "", "Loading videomegatv", "" )
        if re.search("http://videomega.tv/iframe.js", videosource, re.DOTALL | re.IGNORECASE):
            hashref = re.compile("""javascript["']>ref=['"]([^'"]+)""", re.DOTALL | re.IGNORECASE).findall(videosource)
        else:
            hashkey = re.compile("""hashkey=([^"']+)""", re.DOTALL | re.IGNORECASE).findall(videosource)
            hashpage = getHtml('http://videomega.tv/validatehash.php?hashkey='+hashkey[0], url)
            hashref = re.compile('ref="([^"]+)', re.DOTALL | re.IGNORECASE).findall(hashpage)
        progress.update( 80, "", "Getting video file", "" )
        videopage = getHtml('http://videomega.tv/view.php?ref='+hashref[0], url)
        videourl = re.compile('<source src="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(videopage)
        videourl = videourl[0]
    elif vidhost == 'OpenLoad':
        progress.update( 40, "", "Loading Openload", "" )
        openloadurl = re.compile('<iframe src="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(videosource)
        openloadsrc = getHtml(openloadurl[0], url)
        videourl = re.compile('<source src="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(openloadsrc)
        progress.update( 80, "", "Getting video file", "" )
        openload302 = getVideoLink(videourl[0],openloadurl[0])
        realurl = openload302.replace('https://','http://')
        videourl = realurl + "|" + openloadurl[0]
    elif vidhost == 'FlashX':
        progress.update( 40, "", "Loading FlashX", "" )
        flashxurl = re.compile('<iframe src="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(videosource)
        flashxsrc = getHtml2(flashxurl[0])
        flashxjs = re.compile("<script type='text/javascript'>([^<]+)</sc", re.DOTALL | re.IGNORECASE).findall(flashxsrc)
        progress.update( 80, "", "Getting video file", "" )
        flashxujs = beautify(flashxjs[0])
        videourl = re.compile(r',.*file: "([^"]+)".*\}\],', re.DOTALL | re.IGNORECASE).findall(flashxujs)
        videourl = videourl[0]
    progress.close()
    iconimage = xbmc.getInfoImage("ListItem.Thumb")
    listitem = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
    listitem.setInfo('video', {'Title': name, 'Genre': 'P**n'})
    xbmc.Player().play(videourl, listitem)
Example #18
def get_code_via_id(root,id):
    selector = ''.join(('//code[@id','=','"',id,'"',']'))
    eles = root.xpath(selector)
    children = eles[0].getchildren()
    comment = children[0]
    rslt_dict = json.loads(comment.text)
    print(jb.beautify(comment.text))
    return(rslt_dict)
Example #19
File: app.py Project: zaibacu/wutu
    def wutu_js():
        if minify:
            from jsmin import jsmin
            jsdata = jsmin(get_data(api.jsstream))
        else:
            from jsbeautifier import beautify
            jsdata = beautify(get_data(api.jsstream))
        return Response(jsdata, mimetype="text/javascript")
Example #20
	def format_whole_file(self, edit, opts):
		view = self.view
		region = sublime.Region(0, view.size())
		code = view.substr(region)
		formatted_code = jsbeautifier.beautify(code, opts)
		_, err = merge_utils.merge_code(view, edit, code, formatted_code)
		if err:
			sublime.error_message("JsFormat: Merge failure: '%s'" % err)
Example #21
    def prettystringjs(self, text):

        opts = jsbeautifier.default_options()
        opts.max_preserve_newlines = 1

        res = jsbeautifier.beautify(text, opts)

        return res
Example #22
def writeJsx(appdef, folder, app, progress):
    layers = appdef["Layers"]
    viewCrs = appdef["Settings"]["App view CRS"]
    mapbounds = bounds(appdef["Settings"]["Extent"] == "Canvas extent", layers, viewCrs)
    mapextent = "extent: %s," % mapbounds if appdef["Settings"]["Restrict to extent"] else ""
    maxZoom = int(appdef["Settings"]["Max zoom level"])
    minZoom = int(appdef["Settings"]["Min zoom level"])

    app.variables.append("var view = new ol.View({%s maxZoom: %d, minZoom: %d, projection: '%s'});" % (mapextent, maxZoom, minZoom, viewCrs))
    app.variables.append("var originalExtent = %s;" % mapbounds)

    permalink = str(appdef["Settings"]["Add permalink functionality"]).lower()

    logoImg = appdef["Settings"]["Logo"].strip()
    logoOption = ""
    if logoImg:
        ext = os.path.splitext(logoImg)[1]
        shutil.copyfile(logoImg, os.path.join(folder, "logo" + ext))
        logoOption = ', iconElementLeft: React.createElement("img", {className:"pull-left", style:{margin:"5px",height:"50px"}, src:"logo%s"})' % ext

    toolbarOptions = '{style:{height: 71}, title:"%s"%s}' % (appdef["Settings"]["Title"], logoOption)

    app.mappanels.append('''React.createElement("div", {id: 'popup', className: 'ol-popup'},
                                    React.createElement(InfoPopup, {map: map, hover: %s})
                                  )''' % str(appdef["Settings"]["Show popups on hover"]).lower())

    variables ="\n".join(app.variables)
    try:
        variables = jsbeautifier.beautify(variables)
    except:
        pass #jsbeautifier gives some random errors sometimes due to imports

    def join(array):
        if array:
            return ",\n" + ",\n".join(array)
        else:
            return ""
    values = {"@IMPORTS@": "\n".join(app.imports),
              "@TABS@": join(app.tabs),
                "@OL3CONTROLS@": ",\n".join(app.ol3controls),
                "@PANELS@": join(app.panels),
                "@MAPPANELS@": join(app.mappanels),
                "@TOOLBAR@": ",\n".join(app.tools),
                "@TOOLBAROPTIONS@": toolbarOptions,
                "@VARIABLES@": variables,
                "@POSTTARGETSET@": "\n".join(app.posttarget),
                "@PERMALINK@": permalink}

    template = os.path.join(os.path.dirname(__file__), "themes",
                            appdef["Settings"]["Theme"], "app.jsx")
    jsx = replaceInTemplate(template, values)

    name = "app.jsx"
    jsxFilepath = os.path.join(folder, name)
    with codecs.open(jsxFilepath, "w", encoding="utf-8") as f:
        f.write(jsx)
Example #23
def test_str(str, expected):
    global fails
    res = jsbeautifier.beautify(str, opts)
    if res == expected:
        print(".")
        return True
    else:
        print("___got:" + res + "\n___expected:" + expected + "\n")
        fails += 1
        return False
Example #24
def spatial_norm():
	with open('data/' + my.DATA_FOLDER + 'X.json', 'rb') as fp:
		X = anyjson.loads(fp.read())

	X_ = []
	for x in X:
		X_.append(_snorm(x))

	with open('data/' + my.DATA_FOLDER + 'X.json', 'wb') as fp:
		fp.write(jsb.beautify(anyjson.dumps(X_)))
Example #25
def plot_grid():
	'''Plot all geo-tagged tweets on map'''
	lat1, lng1, lat2, lng2 = my.BBOX
	xticks = np.arange(lng1, lng2, my.LNG_DELTA).tolist()
	xticks.append(lng2)
	print xticks
	yticks = np.arange(lat1, lat2 + my.LAT_DELTA, my.LAT_DELTA).tolist()

	fig=plt.figure(figsize=(18,13))
	fig.set_tight_layout(True)
	ax=fig.add_subplot(111)
	m = Basemap(llcrnrlon=lng1, llcrnrlat=lat1, 
				urcrnrlon=lng2, urcrnrlat=lat2,
				projection='mill')
	ax.set_xlim(lng1, lng2)
	ax.set_ylim(lat1, lat2)
	ax.set_xticks(xticks)
	ax.set_yticks(yticks)
	#ax.grid(ls='-')
	ax.grid(ls='--', lw=1.25)
	plt.setp(plt.xticks()[1], rotation=90)

	bg = matplotlib.image.imread('data/' + my.DATA_FOLDER + 'map.png')
	ax.imshow(bg, aspect='auto', extent=(lng1, lng2, lat1, lat2), alpha=0.9)

	plt.savefig('data/' + my.DATA_FOLDER + 'grid' + '.png')

	yticks.reverse()

	grid = {
		'bbox' : my.BBOX,
		'lat_delta' : my.LAT_DELTA,
		'lng_delta' : my.LNG_DELTA,
		'xticks' : [round(i, 3) for i in xticks],
		'yticks' : [round(i, 3) for i in yticks],
		'rows' : len(yticks) - 1,
		'columns' : len(xticks) - 1,
		'cells' : (len(yticks) - 1) * (len(xticks) - 1),
		'grid' : {},
		#'grid_lookup' : {}
	}

	i = 0
	for r in range(len(yticks) - 1):
		#grid['grid_lookup'][round(yticks[r+1], 3)] = {}
		for c in range(len(xticks) - 1):
			grid['grid'][i] = (	round(yticks[r+1], 3), 
								round(xticks[c], 3), 
								round(yticks[r], 3), 
								round(xticks[c+1], 3))
			#grid['grid_lookup'][round(yticks[r+1], 3)][round(xticks[c], 3)] = i
			i += 1	

	with open('data/' + my.DATA_FOLDER + 'grid.json', 'wb') as fp:
		fp.write(jsb.beautify(anyjson.dumps(grid)))
Example #26
    def eval(self, script):
        try:
            log.info(jsbeautifier.beautify(script))
        except:
            log.info(script) 
      
        if len(script) > 64: 
            log.warning("[Window] Eval argument length > 64 (%d)" % (len(script), ))

        log.ThugLogging.add_code_snippet(script, 'Javascript', 'Dynamically_Evaluated')
        return self.evalScript(script)
Example #27
	def run(self, edit):
		opts = jsbeautifier.default_options()
		opts.indent_char = "\t"
		opts.indent_size = 1
		opts.max_preserve_newlines = 3
		selection = self.view.sel()[0]
		replaceRegion = selection if len(selection) > 0 else sublime.Region(0, self.view.size())
		res = jsbeautifier.beautify(self.view.substr(replaceRegion), opts)
		prePos = self.view.sel()[0]
		self.view.replace(edit, replaceRegion, res)
		self.view.show_at_center(prePos.begin())
Example #28
def male_X():
	''''''
	with open('data/' + my.DATA_FOLDER + 'dates.json', 'rb') as fp:
		dates = anyjson.loads(fp.read())
	path = 'data/' + my.DATA_FOLDER + 'grid_data/'

	X = []
	index = []

	for date in dates:
		with open(path + date + '.json', 'rb') as fp:
			data = anyjson.loads(fp.read())
			data = list(itertools.chain(*data))
			X.append(data)
			index.append(date)
	
	with open('data/' + my.DATA_FOLDER + 'X.json', 'wb') as fp:
		fp.write(jsb.beautify(anyjson.dumps(X)))
	with open('data/' + my.DATA_FOLDER + 'X_index.json', 'wb') as fp:
		fp.write(jsb.beautify(anyjson.dumps(index)))
Example #29
def jsbeautify(javascript):
    """Beautifies Javascript through jsbeautifier and ignore some messages."""
    with _jsbeautify_lock:
        origout, sys.stdout = sys.stdout, io.StringIO()
        javascript = jsbeautifier.beautify(javascript)

        if sys.stdout.getvalue() not in _jsbeautify_blacklist:
            log.warning("jsbeautifier returned error: %s", sys.stdout.getvalue())

        sys.stdout = origout
    return javascript
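The wrapper above swaps sys.stdout by hand to silence messages jsbeautifier prints, guarded by a lock for thread safety. A hedged alternative sketch using contextlib.redirect_stdout for the same capture; the blacklist check from the original is omitted here:

import io
import threading
from contextlib import redirect_stdout

import jsbeautifier

_lock = threading.Lock()

def jsbeautify_quiet(javascript):
    """Beautify while capturing anything jsbeautifier prints to stdout."""
    with _lock:
        buf = io.StringIO()
        with redirect_stdout(buf):
            result = jsbeautifier.beautify(javascript)
    if buf.getvalue():
        # report captured output once stdout is restored
        print("jsbeautifier printed:", buf.getvalue().strip())
    return result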
Example #30
    def _write(file, script):
        lines = jsbeautifier.beautify(script).splitlines()
        print('Writing "{0:s}" with {1:d} lines...'.format(file, len(lines)))
        try:
            with open(file, 'w') as ofile:
                for line in lines:
                    print(line, file=ofile)
        except OSError:
            raise SystemExit(
                sys.argv[0] + ': Cannot write "' + file +
                '" configuration file.')
Example #31
def general_code_analysis(paths):
    """Static Code Analysis"""
    try:
        scan_results = {}
        scan_rules = read_rules()
        security_issues = []
        good_finding = []
        missing_header = []
        all_files = []
        # Initializing Security Header flag as not present
        header_found = {}
        sec_issues_by_tag = {}
        good_finding_by_tag = {}
        missing_header_by_tag = {}
        vuln_n_count = {}
        count = {}
        # Sort By Tag
        tags = {'rce': 'Remote Command Execution',
                'rci': 'Remote Code Injection',
                'ssrf': 'Server Side Request Forgery',
                'module': 'Vulnerable Node Module',
                'node': 'Application Related',
                'web': 'Web Security',
                'dir': 'Directory Traversal',
                'opr': 'Open Redirect Vulnerability',
                'sqli': 'SQL Injection (SQLi)',
                'xss': 'Cross Site Scripting (XSS)',
                'nosqli': 'NoSQL Injection',
                'hhi': 'HTTP Header Injection',
                }
        for path in paths:
            for header in scan_rules["missing_sec_header"].iterkeys():
                header_found[header] = 0
            print "\n[INFO] Running Static Analyzer Running on - " + path + "\n"
            for root, _, files in os.walk(path):
                for filename in files:
                    full_file_path = os.path.join(root, filename)
                    relative_path = full_file_path.replace(path, "")
                    all_files.append({relative_path.replace(
                        "/", "", 1): full_file_path.replace(settings.UPLOAD_FOLDER, "", 1)})
                    data = is_valid_node(filename, full_file_path)
                    if data is not None:
                        # print relative_path
                        beautified_data = None
                        lines = data.splitlines()
                        if len(lines) <= 2:
                            # Possible Minified Single Line Code
                            try:
                                # Try Because of Possible Bug in JS Beautifier
                                beautified_data = jsbeautifier.beautify(data)
                                lines = beautified_data.splitlines()
                            except:
                                pass
                        for line_no, line in enumerate(lines):
                            # Avoid Comments - Multiline is a problem still
                            if not line.lstrip().startswith("//") and not line.lstrip().startswith("/*"):
                                # Limit the number of characters in a line to 2000
                                line = line[0:2000]
                                # Vulnerability String Match
                                for rule_key in scan_rules["vuln_rules"].iterkeys():
                                    if scan_rules["vuln_rules"][rule_key] in line:
                                        finding = {}
                                        finding["title"] = rule_key
                                        finding["description"] = scan_rules[
                                            "desc"][rule_key]
                                        finding["tag"] = scan_rules[
                                            "tag"][rule_key]
                                        finding["line"] = line_no + 1
                                        finding["lines"] = get_lines(
                                            line_no, lines)
                                        finding["filename"] = filename
                                        finding["path"] = full_file_path.replace(
                                            settings.UPLOAD_FOLDER, "", 1)
                                        finding["sha2"] = utils.gen_sha256_hash(
                                            finding["lines"].encode(encoding="utf-8", errors="replace"))
                                        security_issues.append(finding)
                                # Vulnerability Regex Match
                                for regex in scan_rules["vuln_regex"].iterkeys():
                                    if re.search(scan_rules["vuln_regex"][regex], line):
                                        finding = {}
                                        finding["title"] = regex
                                        finding["description"] = scan_rules[
                                            "desc"][regex]
                                        finding["tag"] = scan_rules[
                                            "tag"][regex]
                                        finding["line"] = line_no + 1
                                        finding["lines"] = get_lines(
                                            line_no, lines)
                                        finding["filename"] = filename
                                        finding["path"] = full_file_path.replace(
                                            settings.UPLOAD_FOLDER, "", 1)
                                        finding["sha2"] = utils.gen_sha256_hash(
                                            finding["lines"].encode(encoding="utf-8", errors="replace"))
                                        security_issues.append(finding)
                                # Vulnerability Multi Regex Match
                                for mulregex in scan_rules["vuln_mul_regex"].iterkeys():
                                    sig_source = scan_rules["vuln_mul_regex"][
                                        mulregex]["sig_source"]
                                    sig_line = scan_rules["vuln_mul_regex"][
                                        mulregex]["sig_line"]
                                    if re.search(sig_source, data):
                                        if re.search(sig_line, line):
                                            finding = {}
                                            finding["title"] = mulregex
                                            finding["description"] = scan_rules[
                                                "desc"][mulregex]
                                            finding["tag"] = scan_rules[
                                                "tag"][mulregex]
                                            finding["line"] = line_no + 1
                                            finding["lines"] = get_lines(
                                                line_no, lines)
                                            finding["filename"] = filename
                                            finding["path"] = full_file_path.replace(
                                                settings.UPLOAD_FOLDER, "", 1)
                                            finding["sha2"] = utils.gen_sha256_hash(
                                                finding["lines"].encode(encoding="utf-8", errors="replace"))
                                            security_issues.append(finding)
                                # Dynamic Regex
                                for dynregex in scan_rules["vuln_dyn_regex"].iterkeys():
                                    signature = scan_rules["vuln_dyn_regex"][
                                        dynregex]["signature"]
                                    dyn_sig = scan_rules["vuln_dyn_regex"][
                                        dynregex]["dyn_sig"]
                                    sig = re.search(signature, line)
                                    if sig:
                                        index = line.find(sig.group())
                                        dyn_variable = line[0:index]
                                        dyn_variable = dyn_variable.replace(
                                            "var", "").replace("=", "").strip()
                                        if re.match(r'^[\w]+$', dyn_variable):
                                            dyn_sig = dyn_variable + dyn_sig
                                            for line_no, nline in enumerate(lines):
                                                if re.search(dyn_sig, nline):
                                                    finding = {}
                                                    finding["title"] = dynregex
                                                    finding["description"] = scan_rules[
                                                        "desc"][dynregex]
                                                    finding["tag"] = scan_rules[
                                                        "tag"][dynregex]
                                                    finding[
                                                        "line"] = line_no + 1
                                                    finding["lines"] = get_lines(
                                                        line_no, lines)
                                                    finding[
                                                        "filename"] = filename
                                                    finding[
                                                        "path"] = full_file_path.replace(settings.UPLOAD_FOLDER, "", 1)
                                                    finding["sha2"] = utils.gen_sha256_hash(
                                                        finding["lines"].encode(encoding="utf-8", errors="replace"))
                                                    security_issues.append(
                                                        finding)
                                # Good Finding String Match
                                for good_find in scan_rules["good_to_have_rgx"].iterkeys():
                                    if re.search(scan_rules["good_to_have_rgx"][good_find], line):
                                        finding = {}
                                        finding["title"] = good_find
                                        finding["description"] = scan_rules[
                                            "desc"][good_find]
                                        finding["tag"] = scan_rules[
                                            "tag"][good_find]
                                        finding["line"] = line_no + 1
                                        finding["lines"] = get_lines(
                                            line_no, lines)
                                        finding["filename"] = filename
                                        finding["path"] = full_file_path.replace(
                                            settings.UPLOAD_FOLDER, "", 1)
                                        finding["sha2"] = utils.gen_sha256_hash(
                                            finding["lines"].encode(encoding="utf-8", errors="replace"))
                                        good_finding.append(finding)
                                # Missing Security Headers String Match
                                for header in scan_rules["missing_sec_header"].iterkeys():
                                    if re.search(scan_rules["missing_sec_header"][header], line, re.I):
                                        # Good Header is present
                                        header_found[header] = 1
                        # Write Beautifed Data If Any
                        if beautified_data is not None:
                            utils.unicode_safe_file_write(
                                full_file_path, beautified_data)
        # After Every Files are done.
        # Check for missing Security Headers
        for header in header_found.iterkeys():
            if header_found[header] == 0:
                finding = {}
                finding["title"] = header
                finding["description"] = scan_rules["desc"][header]
                finding["tag"] = scan_rules["tag"][header]
                missing_header.append(finding)
        #Vulnerability and Count
        for issue in security_issues:
            if not issue["title"] in vuln_n_count:
                vuln_n_count[issue["title"]] = 0
            vuln_n_count[issue["title"]] += 1

        for issue in security_issues:
            if not tags[issue["tag"]] in sec_issues_by_tag:
                sec_issues_by_tag[tags[issue["tag"]]] = []
            sec_issues_by_tag[tags[issue["tag"]]].append(issue)

        for find in good_finding:
            if not tags[find["tag"]] in good_finding_by_tag:
                good_finding_by_tag[tags[find["tag"]]] = []
            good_finding_by_tag[tags[find["tag"]]].append(find)

        for sec_header in missing_header:
            if not tags[sec_header["tag"]] in missing_header_by_tag:
                missing_header_by_tag[tags[sec_header["tag"]]] = []
            missing_header_by_tag[tags[sec_header["tag"]]].append(sec_header)

        count["sec"] = len(security_issues)
        count["mis"] = len(missing_header)
        count["good"] = len(good_finding)

        scan_results["sec_issues"] = sec_issues_by_tag
        scan_results["good_finding"] = good_finding_by_tag
        scan_results["missing_sec_header"] = missing_header_by_tag
        scan_results["total_count"] = count
        scan_results["vuln_count"] = vuln_n_count
        scan_results["files"] = all_files
        return scan_results
    except:
        utils.print_exception("Error Performing Code Analysis")
Example #32
def ternaryToJson(filename,
                  jsonfile="",
                  outputfilepath="",
                  sheetname="Tie lines",
                  output="ZPF"):
    """
    To convert a spreadsheet file with phase information of ternary system to json file 
    
    filename: string, the location path of data file in csv or excel format.
    jsonfile: string, the final desired and saved json file 
    sheetname: string, the sheetname for differetn types of data. It includes "Unary phase", "Tie lines" 
    
    return: none
    """
    file_extension = os.path.splitext(filename)[1]
    if file_extension == '.csv':
        data = pd.read_csv(filename)
    elif file_extension == '.xls' or file_extension == '.xlsx':
        if sheetname == "default":
            sheetname = os.path.splitext(filename)[0]
        data = pd.read_excel(filename, sheet_name=sheetname)
    if "Components" in data.columns:
        components = list(data["Components"].dropna())
    else:
        raise NameError("Components are not in inputfile's columns names")
    #    elements_number = len(components)

    #    refs_content = ""
    comp1 = components[0]
    comp2 = components[1]

    options = jsbeautifier.default_options()
    options.indent_size = 4

    if sheetname == "Tie lines":
        if "Ref" in data.columns and (not data["Ref"].isnull().values.all()):
            Refs = data["Ref"].unique()
            for ref in Refs:
                df_ref = data[data["Ref"] == ref]
                Temps = list(df_ref["T/K"])
                if "Phase 3" in df_ref.columns:
                    phases = list(
                        np.unique(
                            np.concatenate([
                                df_ref["Phase 1"].dropna().unique(),
                                df_ref["Phase 2"].dropna().unique(),
                                df_ref["Phase 3"].dropna().unique()
                            ])))
                else:
                    phases = list(
                        np.unique(
                            np.concatenate([
                                df_ref["Phase 1"].dropna().unique(),
                                df_ref["Phase 2"].dropna().unique()
                            ])))
                if jsonfile == "":
                    jsonfile_list = components + [output
                                                  ] + phases + [ref + ".json"]
                    jsonfile_name = createFileName(jsonfile_list)
                    filepath = os.path.join(outputfilepath, jsonfile_name)
                    if not os.path.exists(filepath):  # create a json file
                        # os.makedirs(path1)
                        open(filepath, 'w')
                else:
                    filepath = jsonfile

                phase1_comp1 = comp1 + "_" + "1"
                phase1_comp2 = comp2 + "_" + "1"
                phase2_comp1 = comp1 + "_" + "2"
                phase2_comp2 = comp2 + "_" + "2"
                phase3_comp1 = comp1 + "_" + "3"
                phase3_comp2 = comp2 + "_" + "3"
                phase_data = []
                for i in df_ref.index:
                    phase1 = [
                        df_ref["Phase 1"][i], [comp1, comp2],
                        [df_ref[phase1_comp1][i], df_ref[phase1_comp2][i]]
                    ]
                    phase2 = [
                        df_ref["Phase 2"][i], [comp1, comp2],
                        [df_ref[phase2_comp1][i], df_ref[phase2_comp2][i]]
                    ]
                    if not pd.isna(df_ref["Phase 3"][i]):
                        phase3 = [
                            df_ref["Phase 3"][i], [comp1, comp2],
                            [df_ref[phase3_comp1][i], df_ref[phase3_comp2][i]]
                        ]
                        phase_data.append([phase1, phase2, phase3])
                    else:
                        phase_data.append([phase1, phase2])
                data_dict = {
                    "components": components,
                    "phases": phases,
                    "broadcast_conditions": False,
                    "conditions": {
                        "T": Temps,
                        "P": [101325]
                    },
                    "output": output,
                    "values": phase_data,
                    "reference": ref,
                    "comment": ""
                }
                #                json_object = json.dumps(data_dict, indent = 4)
                json_object = jsbeautifier.beautify(json.dumps(data_dict),
                                                    options)
                # write jsonfile
                with open(filepath, 'w') as outfile:
                    outfile.write(json_object)
Example #33
    def format(self, text):
        text = text.decode("utf-8")
        opts = self.formatter.settings.get('codeformatter_js_options')

        stderr = ""
        stdout = ""
        options = jsbeautifier.default_options()

        if ("indent_size" in self.opts and self.opts["indent_size"]):
            options.indent_size = self.opts["indent_size"]
        else:
            options.indent_size = 4

        if ("indent_char" in self.opts and self.opts["indent_char"]):
            options.indent_char = str(self.opts["indent_char"])
        else:
            options.indent_char = " "

        if ("indent_with_tabs" in self.opts and self.opts["indent_with_tabs"]):
            options.indent_with_tabs = True
        else:
            options.indent_with_tabs = False

        if ("eol" in self.opts and self.opts["eol"]):
            options.eol = self.opts["eol"]
        else:
            options.eol = "\n"

        if ("preserve_newlines" in self.opts
                and self.opts["preserve_newlines"]):
            options.preserve_newlines = True
        else:
            options.preserve_newlines = False

        if ("max_preserve_newlines" in self.opts
                and self.opts["max_preserve_newlines"]):
            options.max_preserve_newlines = self.opts["max_preserve_newlines"]
        else:
            options.max_preserve_newlines = 10

        if ("space_in_paren" in self.opts and self.opts["space_in_paren"]):
            options.space_in_paren = True
        else:
            options.space_in_paren = False

        if ("space_in_empty_paren" in self.opts
                and self.opts["space_in_empty_paren"]):
            options.space_in_empty_paren = True
        else:
            options.space_in_empty_paren = False

        if ("e4x" in self.opts and self.opts["e4x"]):
            options.e4x = True
        else:
            options.e4x = False

        if ("jslint_happy" in self.opts and self.opts["jslint_happy"]):
            options.jslint_happy = True
        else:
            options.jslint_happy = False

        if ("brace_style" in self.opts and self.opts["brace_style"]):
            options.brace_style = self.opts["brace_style"]
        else:
            options.brace_style = 'collapse'

        if ("keep_array_indentation" in self.opts
                and self.opts["keep_array_indentation"]):
            options.keep_array_indentation = True
        else:
            options.keep_array_indentation = False

        if ("keep_function_indentation" in self.opts
                and self.opts["keep_function_indentation"]):
            options.keep_function_indentation = True
        else:
            options.keep_function_indentation = False

        if ("eval_code" in self.opts and self.opts["eval_code"]):
            options.eval_code = True
        else:
            options.eval_code = False

        if ("unescape_strings" in self.opts and self.opts["unescape_strings"]):
            options.unescape_strings = True
        else:
            options.unescape_strings = False

        if ("wrap_line_length" in self.opts and self.opts["wrap_line_length"]):
            options.wrap_line_length = self.opts["wrap_line_length"]
        else:
            options.wrap_line_length = 0

        if ("break_chained_methods" in self.opts
                and self.opts["break_chained_methods"]):
            options.break_chained_methods = True
        else:
            options.break_chained_methods = False

        if ("end_with_newline" in self.opts and self.opts["end_with_newline"]):
            options.end_with_newline = True
        else:
            options.end_with_newline = False

        if ("comma_first" in self.opts and self.opts["comma_first"]):
            options.comma_first = True
        else:
            options.comma_first = False

        if ("space_after_anon_function" in self.opts
                and self.opts["space_after_anon_function"]):
            options.space_after_anon_function = True
        else:
            options.space_after_anon_function = False

        try:
            stdout = jsbeautifier.beautify(text, options)
        except Exception as e:
            stderr = str(e)

        #return "", ""

        if (not stderr and not stdout):
            stderr = "Formatting error!"

        return stdout, stderr
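The long if/else ladder above copies each setting onto the BeautifierOptions object one attribute at a time. A more compact, data-driven sketch of the same idea, assuming the overrides arrive as a plain dict (attribute names and defaults mirror the ladder above):

import jsbeautifier

def build_js_options(user_opts):
    """Apply user overrides on top of per-attribute defaults via setattr."""
    defaults = {
        "indent_size": 4,
        "indent_char": " ",
        "indent_with_tabs": False,
        "eol": "\n",
        "preserve_newlines": False,
        "max_preserve_newlines": 10,
        "jslint_happy": False,
        "brace_style": "collapse",
        "keep_array_indentation": False,
        "wrap_line_length": 0,
        "end_with_newline": False,
    }
    options = jsbeautifier.default_options()
    for name, fallback in defaults.items():
        # falsy user values fall back to the default, matching the truthy checks above
        setattr(options, name, user_opts.get(name) or fallback)
    return options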
Example #34
    def save(self, *args, **kwargs):
        self.service_js = jsbeautifier.beautify(self.service_js)
        super(DjangularService, self).save(*args, **kwargs)
Example #35
    def save(self, *args, **kwargs):
        self.controller_js = jsbeautifier.beautify(self.controller_js)
        super(DjangularSlaveViewController, self).save(*args, **kwargs)
Example #36
    def run(self, text):
        import jsbeautifier
        return jsbeautifier.beautify(text)
Example #37
# TODO wrap this up nicely

scraper = PokeScrapr()


output = []

with open('pokemon.txt') as f:
	for line in f.readlines()[:]:
		pokemon = line.strip().capitalize()
		print(pokemon)
		
		try:
			js = scraper.get_FSP_JSON(pokemon)
			print(js)
			output.append(js)
			time.sleep(3)

		except:
			print("error - ", pokemon, ". sleeping for 5 sec")
			time.sleep(5)

			scraper = PokeScrapr()
			js = scraper.get_FSP_JSON(pokemon)
			print(js)
			output.append(js)	

with open('pokemon-js.js', 'w') as f:
	json = jsbeautifier.beautify(','.join(output))
	f.write(json)
Example #38
def analyseJS(code, context=None, manualAnalysis=False):
    '''
        Hooks the eval function and search for obfuscated elements in the Javascript code
        
        @param code: The Javascript code (string)
        @return: List with analysis information of the Javascript code: [JSCode,unescapedBytes,urlsFound,errors,context], where 
                JSCode is a list with the several stages Javascript code,
                unescapedBytes is a list with the parameters of unescape functions, 
                urlsFound is a list with the URLs found in the unescaped bytes,
                errors is a list of errors,
                context is the context of execution of the Javascript code.
    '''
    errors = []
    JSCode = []
    unescapedBytes = []
    urlsFound = []

    try:
        code = unescapeHTMLEntities(code)
        scriptElements = re.findall(reJSscript, code,
                                    re.DOTALL | re.IGNORECASE)
        if scriptElements != []:
            code = ''
            for scriptElement in scriptElements:
                code += scriptElement + '\n\n'
        code = jsbeautifier.beautify(code)
        JSCode.append(code)

        if code is not None and JS_MODULE and not manualAnalysis:
            if context is None:
                context = PyV8.JSContext(Global())
            context.enter()
            # Hooking the eval function
            context.eval('eval=evalOverride')
            #context.eval(preDefinedCode)
            while True:
                originalCode = code
                try:
                    context.eval(code)
                    evalCode = context.eval('evalCode')
                    evalCode = jsbeautifier.beautify(evalCode)
                    if evalCode != '' and evalCode != code:
                        code = evalCode
                        JSCode.append(code)
                    else:
                        break
                except:
                    error = str(sys.exc_info()[1])
                    open('jserror.log', 'ab').write(error + newLine)
                    errors.append(error)
                    break

            if False:
                escapedVars = re.findall('(\w*?)\s*?=\s*?(unescape\((.*?)\))',
                                         code, re.DOTALL)
                for var in escapedVars:
                    bytes = var[2]
                    if bytes.find('+') != -1 or bytes.find('%') == -1:
                        varContent = getVarContent(code, bytes)
                        if len(varContent) > 150:
                            ret = unescape(varContent)
                            if ret[0] != -1:
                                bytes = ret[1]
                                urls = re.findall('https?://.*$', bytes,
                                                  re.DOTALL)
                                if bytes not in unescapedBytes:
                                    unescapedBytes.append(bytes)
                                for url in urls:
                                    if url not in urlsFound:
                                        urlsFound.append(url)
                    else:
                        bytes = bytes[1:-1]
                        if len(bytes) > 150:
                            ret = unescape(bytes)
                            if ret[0] != -1:
                                bytes = ret[1]
                                urls = re.findall('https?://.*$', bytes,
                                                  re.DOTALL)
                                if bytes not in unescapedBytes:
                                    unescapedBytes.append(bytes)
                                for url in urls:
                                    if url not in urlsFound:
                                        urlsFound.append(url)
    except:
        traceback.print_exc(file=open(errorsFile, 'a'))
        errors.append('Unexpected error in the JSAnalysis module!!')
    finally:
        for js in JSCode:
            if js == None or js == '':
                JSCode.remove(js)
    return [JSCode, unescapedBytes, urlsFound, errors, context]
Example #39
import json

import jsbeautifier
import requests

# sample_product = 'https://www.unisport.dk/api/products/batch/?list=189214'
products_men = 'https://www.unisport.dk/api/products/batch/?list=189214,188894,188896,187812,187743,174635,189187,188534,193539,189413,189188,188532,194477,187378,187404,193592,192837,193593,189052,189213,187361,193537,187738,187744,187424,187746,185074,184775,191599,187416,189212,186121,187814,193459,187813,181393,184771,187793,187717,181372,187745,181382,187425,181272,188914,193455,187431,188893,187373,187845,189032,187795,187718,187398,189216,187719,187417,188895,188571,189211,188940,193480,187739,188897,187741,187947,187349,188903,187720,187815,188918,187749,187796,188900,185078,187391,188735,188919,188924,184774,189057,181361,188569,187411,181332,187721,187354,187844,188961,188933,189215,187794,187945,188916,193466,181289,189056,181407,188907,187740,187341,188959,187377,188932,187810,187366,193468,187946,187405,187724,188819,179845,193600,187722,188970,187723,187798,188948,107,186046,188909,193573,187809,188926,188908,193446,188965,188930,187978,186043,189047,187452,188904,187750,180206,189054,187375,193465,188530,187799,187830,188734,188902,188928,188535,187453,187785,179854,187797,181378,189034,188921,187433,188832,187811,193536,192799,188574,181295,188901,187426,184744,193541,186045,188528,179351,193577,181265,193574,187933,184737,187808,141313,188934,189217,188527,185069,193599,189045,188548,187932,187470,188915,188823,179841,187367,186054,188830,185072,193575,187756,184769,188905,181399,193443,181271,184847,187800,188581,187342'
producs_kids = 'https://www.unisport.dk/api/products/batch/?list=193974,193976,187411,187452,187412,187426,193595,187427,187434,187457,187428,193596,187413,193594,187447,187437,193589,193587,187430,187414,187449,193585,187439,193588,187429,187415,193586,187448,187441,195867,195869,195877,195873,193541,193540,193542,193479,193473,193476,193469,193477,193472,193474,193471,193475,193470,193478,189217,189033,187341,186087,187933,187830,186092,189034,186090,186076,189035,189037,187777,187772,187774,189056,187847,187453,187939,187470,187755,187473,187400,189046,187776,187941,187947,187845,186116,187749,186118,186117,189047,187376,187454,187471,187843,187944,187751,189048,187474,187773,192056,189059,192052,192508,192344,189060,189061,189062,188554,188930,186089,188926,188931,188927,188835,188932,188933,188836,188934,188928,188552,188935,188929,188936,188553,188937,188837,188950,188969,188951,188972,188952,186075,188948,188970,188973,188971,188949,188954,188974,188925,187851,187932,187936,187849,188828,187832,187771,187935,187937,187753,187945,187942,186086,186072,186103,186091,186088,186115,188587,188596,188598,188588,188599,188589,188597,187852,185079,185081,185080,184777,187850,187931,187342,187848,187841,187934,184780,187831,187770,187840,184778,187846,187938,184779,187754,184781,187775,187837,187940,187747,184760,187844,187946,187750,184758,187838,187842,184755,184757,187943,187752,184759,180230,180187,180190,184714,179351,179355,179457,179453'
products_women = 'https://www.unisport.dk/api/products/batch/?list=193957,193974,193966,193969,193963,193960,193976,196165,196161,186096,186048,187404,187416,193592,187411,187424,187452,187431,193574,193575,187418,187432,187405,186121,187417,187412,187426,193854,193593,193595,187425,193852,187427,187433,187434,187468,193573,187829,187406,187423,193855,193572,193853,187435,193576,187420,187407,193579,187443,187442,193571,193581,187456,187408,193591,187419,187457,187428,187413,193590,187469,193594,187447,187436,193584,193589,187475,193582,193583,187476,187421,187430,187422,193578,187410,193588,187429,187415,187445,193570,193586,187448,187440,187441,195875,195863,195867,193537,193541,187361,193465,193466,193539,182529,193443,189601,193455,193468,193462,193536,193540,193538,187354,193542,193453,193479,193467,193464,193447,193469,193461,193477,193450,193472,193454,193474,193448,193470,193460,193478,188530,189213,189217,189211,187812,189212,189033,187743,187341,189626,187793,189625,186046,187810,186087,187933,186043,189214,187738,186095,187830,189636,189638,194239,186068,186061,186071,189216,186064,186092,189034,186090,186051,186076,189215,187762,187378,189035,189037,187797,187781,187777,187772,187380,189036,187788,187774,189629,189052,189605,189056,182521,189604,182525,189057,182523,182527,187807,187709,187458,187768,187472,187780,187355,187401,187367,187802,187712,187966,187847,187453,187939,187470,189054,187765,187399,189044,187377,187755,187473'

urls = {}
# urls['sample_product'] = sample_product
urls['products_men'] = products_men
urls['producs_kids'] = producs_kids
urls['products_women'] = products_women

for name, url in urls.items():

    print('Getting {products}'.format(products=name))
    response = requests.get(url)

    # serialize the response JSON back to a string so it can be beautified
    data = json.dumps(response.json())
    data = jsbeautifier.beautify(data)

    # storing json data
    filename = 'www/webshop/{name}.json'.format(name=name)
    print('Saving {products} to {filename}'.format(products=name,
                                                   filename=filename))
    with open(filename, "w+") as file:
        file.write(data)

    print('..done!\n')
Example #40
0
import requests
import jsbeautifier
import json

headers = {
    "User-Agent":
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/48.0.2564.82 Chrome/48.0.2564.82 Safari/537.36"
}
"""
from 0 to 10 and when using 10 date is 2015-11-17
So for last 6 months issues use upto 10
"""
base_url = 'http://www.ichangemycity.com/card/get_complaints/2?agency=0&category_id=0&lat=12.9715987&lon=77.59456269999998&status_id=0&sub_category=67'
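
# A hedged sketch of walking the pagination described in the docstring above. It
# assumes (not confirmed by this snippet) that the numeric path segment after
# "get_complaints/" is the page index, so pages 0 through 10 would cover roughly
# the last six months of complaints; the helper name is illustrative.
def fetch_all_pages(max_page=10):
    results = []
    for page in range(max_page + 1):
        page_url = base_url.replace('/get_complaints/2?',
                                    '/get_complaints/{}?'.format(page))
        page_data = json.loads(requests.get(page_url, headers=headers).text)
        results.extend(page_data.get('result', []))
    return results
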
html = requests.get(base_url, headers=headers)
htmltext = html.text
res = jsbeautifier.beautify(htmltext)
data = json.loads(htmltext)
print(data['status'])
length_result = len(data['result'])
print(length_result)
for i in range(0, length_result):
    print(data['result'][i]['complaint_created'])
    print(data['result'][i]['complaint_title'])
    print(data['result'][i]['complaint_description'])
    print(data['result'][i]['complaint_location'])
    print(data['result'][i]['complaint_address_1'])
    print(data['result'][i]['category_name'])
    print(data['result'][i]['ward_name'])
    print(data['result'][i]['complaint_source_id'])
    print(data['result'][i]['civic_agency_name'])
# print(res)
Example #41
0
 def save_file(directory=os.path.join(os.getcwd(),'jutility.js')):
     f = open(directory, 'w+')
     #f.write(str(s))
     f.write(jsbeautifier.beautify(s))
     f.close()
Example #42
0
    async def pre_ast(self):
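        # Drain the target queue and build per-file preprocessing results
        # (language, AST nodes, manifests, extracted scripts) keyed by file path.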

        while not self.target_queue.empty():

            fileext = self.target_queue.get()

            if not self.lan:
                break

            if fileext[0] in ext_dict['php'] and 'php' in self.lan:
                # handling logic for PHP files
                for filepath in fileext[1]['list']:
                    all_nodes = []

                    filepath = self.get_path(filepath)
                    self.pre_result[filepath] = {}
                    self.pre_result[filepath]['language'] = 'php'
                    self.pre_result[filepath]['ast_nodes'] = []

                    fi = codecs.open(filepath, "r", encoding='utf-8', errors='ignore')
                    code_content = fi.read()
                    fi.close()

                    # self.pre_result[filepath]['content'] = code_content

                    try:
                        parser = make_parser()
                        all_nodes = parser.parse(code_content, debug=False, lexer=lexer.clone(), tracking=True)

                        # merge the dictionaries
                        self.pre_result[filepath]['ast_nodes'] = all_nodes

                    except SyntaxError as e:
                        logger.warning('[AST] [ERROR] parser {}: {}'.format(filepath, traceback.format_exc()))

                    except AssertionError as e:
                        logger.warning('[AST] [ERROR] parser {}: {}'.format(filepath, traceback.format_exc()))

                    except:
                        logger.warning('[AST] something error, {}'.format(traceback.format_exc()))

                    # search for all the constants
                    for node in all_nodes:
                        if isinstance(node, php.FunctionCall) and node.name == "define":
                            define_params = node.params
                            logger.debug(
                                "[AST][Pretreatment] new define {}={}".format(define_params[0].node,
                                                                              define_params[1].node))

                            self.define_dict[define_params[0].node] = define_params[1].node

            elif fileext[0] in ext_dict['chromeext'] and 'chromeext' in self.lan:

                # pretreatment for Chrome extensions
                # need to extract the JS and HTML inside it?
                for filepath in fileext[1]['list']:
                    child_files = []
                    child_files_html = []

                    filepath = self.get_path(filepath)
                    self.pre_result[filepath] = {}
                    self.pre_result[filepath]['language'] = 'chromeext'

                    # first, try to unzip the crx
                    try:
                        target_files_path = un_zip(filepath)
                        self.pre_result[filepath]['target_files_path'] = target_files_path

                    except zipfile.BadZipFile:
                        logger.warning("[Pretreatment][Chrome Ext] file {} not zip".format(filepath))
                        continue

                    except OSError:
                        logger.warning("[Pretreatment][Chrome Ext] file {} unzip error".format(filepath))
                        continue

                    # parse manifest.json
                    manifest_path = os.path.join(target_files_path, "manifest.json")

                    # the target may be a single file, which needs special handling here
                    if not (self.target_directory.endswith("/") or self.target_directory.endswith("\\")) and not os.path.isdir(self.target_directory):

                        path_list = re.split(r'[\\|/]', self.target_directory)
                        relative_path = os.path.join(path_list[-1]+"_files")
                    else:
                        relative_path = target_files_path.split(self.target_directory)[-1]

                    if relative_path.startswith('\\') or relative_path.startswith("/"):
                        relative_path = relative_path[1:]

                    if os.path.isfile(manifest_path):
                        fi = codecs.open(manifest_path, "r", encoding='utf-8', errors='ignore')
                        manifest_content = fi.read()
                        fi.close()

                        try:
                            manifest = json.loads(manifest_content)

                        except json.decoder.JSONDecodeError:
                            logger.warning(
                                "[Pretreatment][Chrome Ext] File {} parse error...".format(target_files_path))
                            continue

                        self.pre_result[filepath]["manifest"] = manifest

                        # possible optimisation: skip the deeper JS and HTML analysis if it is not wanted
                        if len(self.lan) and self.lan == 'chromeext':
                            logger.debug("[Pretreatment][Chrome Ext] pass js & html scan...")
                            continue

                        # content scripts
                        if "content_scripts" in manifest:
                            for script in manifest["content_scripts"]:
                                if "js" in script:
                                    child_files.extend([os.path.join(relative_path, js) for js in script['js']])

                        # background js
                        if "background" in manifest:
                            if "scripts" in manifest["background"]:
                                child_files.extend([os.path.join(relative_path, js) for js in manifest["background"]["scripts"]])

                            # background html
                            if "page" in manifest["background"]:
                                child_files_html.append(os.path.join(relative_path, manifest["background"]["page"]))

                        # popup.html
                        if "browser_action" in manifest:
                            if "default_popup" in manifest["browser_action"]:
                                child_files_html.append(os.path.join(relative_path, manifest["browser_action"]["default_popup"]))

                        # web_accessible_resources
                        if "web_accessible_resources" in manifest:
                            for resource in manifest["web_accessible_resources"]:
                                if ".js" in resource:
                                    child_files.append(os.path.join(relative_path, resource))

                                if ".html" in resource:
                                    child_files_html.append(os.path.join(relative_path, resource))

                        # chrome_url_overrides
                        if "chrome_url_overrides" in manifest:
                            for key in manifest["chrome_url_overrides"]:
                                child_files_html.append(os.path.join(relative_path, manifest["chrome_url_overrides"][key]))

                        self.pre_result[filepath]["child_files"] = child_files

                        if len(child_files):
                            # add the content_scripts to the file list
                            self.target_queue.put(('.js', {'count': len(child_files), 'list': child_files}))

                            # operate on the externally passed files via a shallow copy
                            self.file_list.append(('.js', {'count': len(child_files), 'list': child_files}))

                        if len(child_files_html):
                            self.target_queue.put(('.html', {'count': len(child_files_html), 'list': child_files_html}))

                    else:
                        logger.warning("[Pretreatment][Chrome Ext] File {} parse error...".format(target_files_path))
                        continue

            elif fileext[0] in ext_dict['html'] and 'javascript' in self.lan:
                # for HTML files, only look for the JavaScript
                for filepath in fileext[1]['list']:
                    filepath = self.get_path(filepath)
                    script_list = []

                    try:
                        fi = codecs.open(filepath, "r", encoding='utf-8', errors='ignore')
                        code_content = fi.read()
                        fi.close()

                    except FileNotFoundError:
                        continue

                    except OSError:
                        continue

                    # tmp.js collects all of the inline JavaScript code
                    tmp_path = os.path.join(os.path.dirname(filepath), "tmp.js")
                    fi2 = codecs.open(tmp_path, "a", encoding='utf-8', errors='ignore')

                    try:
                        soup = BeautifulSoup(code_content, "html.parser")

                        script_tag_list = soup.find_all('script')

                        for script_tag in script_tag_list:
                            script_attrs = script_tag.attrs

                            if 'src' in script_attrs:
                                parents_path = os.path.normpath("\\".join(re.split(r'[\\|/]', filepath)[:-1]))

                                script_path = os.path.join(parents_path, script_attrs['src'])
                                script_list.append(script_path)

                            else:
                                # no src attribute means inline JS
                                script_content = script_tag.string

                                fi2.write(" \n{}\n ".format(script_content))

                        fi2.close()
                        if tmp_path not in script_list:
                            script_list.append(tmp_path)

                        # add the content scripts to the file list
                        self.target_queue.put(('.js', {'count': len(script_list), 'list': script_list}))

                        # operate on the externally passed files via a shallow copy
                        self.file_list.append(('.js', {'count': len(script_list), 'list': script_list}))

                    except:
                        logger.warning('[AST] something error, {}'.format(traceback.format_exc()))
                        continue

            elif fileext[0] in ext_dict['javascript'] and 'javascript' in self.lan:

                # pretreatment for JavaScript
                # the JS needs semantic analysis
                for filepath in fileext[1]['list']:
                    filepath = self.get_path(filepath)

                    if not filepath.endswith(".js"):
                        continue

                    self.pre_result[filepath] = {}
                    self.pre_result[filepath]['language'] = 'javascript'
                    self.pre_result[filepath]['ast_nodes'] = []

                    try:
                        fi = codecs.open(filepath, "r", encoding='utf-8', errors='ignore')
                        code_content = fi.read()
                        fi.close()

                    except FileNotFoundError:
                        continue

                    except OSError:
                        continue

                    # beautify the code and write it to a new file
                    new_filepath = filepath + ".pretty"

                    try:

                        if not os.path.isfile(new_filepath):
                            fi2 = codecs.open(new_filepath, "w", encoding='utf-8', errors='ignore')
                            code_content = jsbeautifier.beautify(code_content)
                            fi2.write(code_content)
                            fi2.close()

                        # self.pre_result[filepath]['content'] = code_content

                        all_nodes = esprima.parse(code_content, {"loc": True})

                        # merge the dictionaries
                        self.pre_result[filepath]['ast_nodes'] = all_nodes

                    except SyntaxError as e:
                        logger.warning('[AST] [ERROR] parser {}: {}'.format(filepath, traceback.format_exc()))

                    except AssertionError as e:
                        logger.warning('[AST] [ERROR] parser {}: {}'.format(filepath, traceback.format_exc()))

                    except esprima.error_handler.Error:
                        logger.warning('[AST] [ERROR] Invalid regular expression in {}...'.format(filepath))

                    except KeyboardInterrupt:
                        logger.warning('[AST] stop...')
                        exit()

                    except:
                        logger.warning('[AST] something error, {}'.format(traceback.format_exc()))
                        continue

            # manual garbage collection?
            gc.collect()

        return True
Example #43
0
                have_stop = True
        if not have_stop and c in cpnet_vocab:
            prune_qc.append(c)

    ac = item["ac"]
    prune_ac = []
    for c in ac:
        if c[-2:] == "er" and c[:-2] in ac:
            continue
        if c[-1:] == "e" and c[:-1] in ac:
            continue
        all_stop = True
        for t in c.split("_"):
            if t not in nltk_stopwords:
                all_stop = False
        if not all_stop and c in cpnet_vocab:
            prune_ac.append(c)

    item["qc"] = prune_qc
    item["ac"] = prune_ac

    prune_data.append(item)

import jsbeautifier
opts = jsbeautifier.default_options()
opts.indent_size = 2

with open(sys.argv[1], 'w') as fp:
    # json.dump(prune_data, f)
    fp.write(jsbeautifier.beautify(json.dumps(prune_data), opts))
Example #44
0
 def decodesto(self, input, expectation=None):
     self.assertEqual(jsbeautifier.beautify(input, self.options),
                      expectation or input)
Example #45
import jsbeautifier
import clipboard
import pyautogui

target = clipboard.paste()

opts = jsbeautifier.default_options()
opts.type = "html"

res = jsbeautifier.beautify(target, opts)

clipboard.copy(res)
Example #46
0
from framescript.Lexer import Lexer
from framescript.Parser import Parser
from framescript.Transpiler import Transpiler

import sys
import jsbeautifier


if __name__ == '__main__':
    lexer = Lexer(open(sys.argv[1]).read())
    parser = Parser(lexer)
    transpiler = Transpiler()

    tree = parser.parse()

    x = transpiler.visit(tree)
    y = transpiler.finalize()

    print(y)
    print('<script>')
    print(jsbeautifier.beautify(x))
    print('</script>')
Example #47
0
 def save(self, *args, **kwargs):
     self.directive_js = jsbeautifier.beautify(self.directive_js)
     super(DjangularDirective, self).save(*args, **kwargs)
Example #48
0
 def beautifyJS(self, content):
     return jsbeautifier.beautify(content)
Example #49
0
    def save(self, *args, **kwargs):
        self.module_js = jsbeautifier.beautify(self.module_js)
        self.controller_js = jsbeautifier.beautify(self.controller_js)

        super(DjangularMasterViewController, self).save(*args, **kwargs)
Example #50
0
def beautifier_test_underscore():
    jsbeautifier.beautify(data, options)
Example #51
def pret(code):

    res = jsbeautifier.beautify(code)
    return res
Example #52
0
def beautifier_test_underscore_min():
    jsbeautifier.beautify(data_min, options)
Example #53
0
 def generate(self, beautify=True):
     code = self.template + \
         super(JSGenerator, self).generate(self.get_menu_func)
     if beautify:
         code = jsbeautifier.beautify(code)
     return code
Example #54
0
baseUrl = 'http://127.0.0.1:2330/project/public'

log.info(
    " ================================ 收藏测试 ================================ ")
print()

stopwatch = Stopwatch()
stopwatch.start()

try:

    res = requests.post(baseUrl + "/auth/nonce",
                        data={},
                        allow_redirects=False)
    jsonRaw = jsbeautifier.beautify(res.text)
    #log.response(jsonRaw, 1)
    jsonObj = json.loads(jsonRaw)
    email = '*****@*****.**'
    password = '******'
    nonce = jsonObj['data']['nonce']
    sign = sha1(nonce + email + sha1("moeje" + password))
    log.info("sign: " + sign)
    res = requests.post(baseUrl + "/auth/login/email",
                        data={
                            'email': email,
                            'nonce': nonce,
                            'sign': sign
                        },
                        allow_redirects=False)
    jsonRaw = jsbeautifier.beautify(res.text)
Example #55
0
def binaryToJson(filename,
                 jsonfile="",
                 outputfilepath="",
                 sheetname="default",
                 output="ZPF"):
    """
    filename: string
    path: string
    
    return: none
    """
    file_extension = os.path.splitext(filename)[1]
    if file_extension == '.csv':
        data = pd.read_csv(filename)
    elif file_extension == '.xls' or file_extension == '.xlsx':
        if sheetname == "default":
            sheetname = os.path.splitext(filename)[0]
        data = pd.read_excel(filename, sheet_name=sheetname)
    if "Components" in data.columns:
        components = list(data["Components"].dropna())
    else:
        raise NameError("Components are not in inputfile's columns names")

    options = jsbeautifier.default_options()
    options.indent_size = 4

    if sheetname == "Tie lines":
        if "Ref" in data.columns and (not data["Ref"].isnull().values.all()):
            Refs = data["Ref"].unique()
            for ref in Refs:
                df_ref = data[data["Ref"] == ref]
                Temps = list(df_ref["T/K"])
                if "Phase 3" in df_ref.columns:
                    phases = list(
                        np.unique(
                            np.concatenate([
                                df_ref["Phase 1"].dropna().unique(),
                                df_ref["Phase 2"].dropna().unique(),
                                df_ref["Phase 3"].dropna().unique()
                            ])))
                else:
                    phases = list(
                        np.unique(
                            np.concatenate([
                                df_ref["Phase 1"].dropna().unique(),
                                df_ref["Phase 2"].dropna().unique()
                            ])))
                if jsonfile == "":
                    jsonfile_list = components + [output
                                                  ] + phases + [ref + ".json"]
                    jsonfile_name = createFileName(jsonfile_list)
                    filepath = os.path.join(outputfilepath, jsonfile_name)
                    if not os.path.exists(filepath):  # create a json file
                        # os.makedirs(path1)
                        open(filepath, 'w')
                else:
                    filepath = jsonfile
                comp1 = components[0]
                phase1_comp1 = comp1 + "_" + "1"
                phase2_comp1 = comp1 + "_" + "2"

                phase_data = []
                for i in df_ref.index:
                    phase1 = [
                        df_ref["Phase 1"][i], [comp1],
                        [df_ref[phase1_comp1][i]]
                    ]
                    phase2 = [
                        df_ref["Phase 2"][i], [comp1],
                        [df_ref[phase2_comp1][i]]
                    ]
                    phase_data.append([phase1, phase2])
                    data_dict = {
                        "components": components,
                        "phases": phases,
                        "broadcast_conditions": False,
                        "conditions": {
                            "T": Temps,
                            "P": [101325]
                        },
                        "output": output,
                        "values": phase_data,
                        "reference": ref,
                        "comment": ""
                    }

                json_object = jsbeautifier.beautify(json.dumps(data_dict),
                                                    options)
                with open(filepath, 'w') as outfile:
                    outfile.write(json_object)
Example #56
0
import os
import re
from lxml import html
import requests
import jsbeautifier
response = requests.get('http://app.youneedabudget.com')
with open('index.html', 'w', encoding='utf-8') as file_before:
    file_before.write(response.text)
parsed = html.fromstring(response.text)
for src in parsed.xpath('//script/@src'):
    url_src = str(src)
    file = url_src.rsplit('/', 1)[-1]
    if file.startswith('before.'):
        before_response = requests.get(str(src))
        before_script = jsbeautifier.beautify(before_response.text)
        with open(os.path.join('web_app', 'before.js'), 'w+',
                  encoding='utf-8') as file_before:
            file_before.write(before_script)
        # regex1 finds the numeric chunk index labelled "appmain"; regex2 reuses
        # that index to pick out the other, randomised value mapped to it
        regex1 = re.compile(r'\s*(\d):\s"appmain"')

        regex2 = None
        for line in before_script.split('\n'):
            if regex1.match(line):
                idx = regex1.match(line).groups()[0]
                regex2 = re.compile(r'\s*%s:\s"(.*)"' % idx)
            if regex2 is not None and regex2.match(line):
                test = regex2.match(line).groups()[0]
                if test != 'appmain':
                    random_id = test
                    break
        url_appmain = '/'.join(
Example #57
0
def beautifyjs(content):
    try:
        return jsbeautifier.beautify(content)
    except:
        pass
Example #58
 def build_page(self, path):
     opts = jsbeautifier.default_options()
     opts.indent_size = 2
     opts.max_preserve_newlines = 30
     with open(f"{path}/dashboard-routing.module.ts", 'w') as ts_file:
         ts_file.write(jsbeautifier.beautify(self.build_ts(), opts))
Example #59
0
 def __call__(self, data, **metadata):
     opts = jsbeautifier.default_options()
     opts.indent_size = 2
     data = data.decode("utf-8", "replace")
     res = jsbeautifier.beautify(data, opts)
     return "JavaScript", base.format_text(res)
Example #60
0
	def format_selection(self, edit, opts):
		def get_line_indentation_pos(view, point):
			line_region = view.line(point)
			pos = line_region.a
			end = line_region.b
			while pos < end:
				ch = view.substr(pos)
				if ch != ' ' and ch != '\t':
					break
				pos += 1
			return pos

		def get_indentation_count(view, start):
			indent_count = 0
			i = start - 1
			while i > 0:
				ch = view.substr(i)
				scope = view.scope_name(i)
				# Skip preprocessors, strings, characters and comments
				if 'string.quoted' in scope or 'comment' in scope or 'preprocessor' in scope:
					extent = view.extract_scope(i)
					i = extent.a - 1
					continue
				else:
					i -= 1

				if ch == '}':
					indent_count -= 1
				elif ch == '{':
					indent_count += 1
			return indent_count

		view = self.view
		regions = []
		for sel in view.sel():
			start = get_line_indentation_pos(view, min(sel.a, sel.b))
			region = sublime.Region(
				view.line(start).a,  # line start of first line
				view.line(max(sel.a, sel.b)).b)  # line end of last line
			indent_count = get_indentation_count(view, start)
			# Add braces for indentation hack
			code = '{' * indent_count
			if indent_count > 0:
				code += '\n'
			code += view.substr(region)
			# Run jsbeautifier on the extracted code
			formatted_code = jsbeautifier.beautify(code, opts)
			if indent_count > 0:
				for _ in range(indent_count):
					index = formatted_code.find('{') + 1
					formatted_code = formatted_code[index:]
				formatted_code = re.sub(r'[ \t]*\n([^\r\n])', r'\1', formatted_code, 1)
			else:
				# HACK: when there is no indentation, a '{' will generate a blank line, so strip it.
				search = "\n{"
				if search not in code:
					formatted_code = formatted_code.replace(search, '{', 1)
			# Applying formatted code
			view.replace(edit, region, formatted_code)
			# Region for replaced code
			if sel.a <= sel.b:
				regions.append(sublime.Region(region.a, region.a + len(formatted_code)))
			else:
				regions.append(sublime.Region(region.a + len(formatted_code), region.a))
		view.sel().clear()
		# Add regions of formatted code
		[view.sel().add(region) for region in regions]
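
# A minimal standalone sketch of the brace-prefix trick used in format_selection
# above (independent of Sublime Text; the helper name and the sample fragment are
# illustrative): prefixing the code with one '{' per enclosing block makes
# jsbeautifier indent it at its original nesting depth, and the helper braces are
# stripped off again afterwards.
import re

import jsbeautifier


def beautify_nested_fragment(fragment, indent_count):
    # prefix one '{' per enclosing block so the fragment keeps its nesting depth
    code = '{' * indent_count
    if indent_count > 0:
        code += '\n'
    code += fragment
    formatted = jsbeautifier.beautify(code)
    # strip the helper braces again and drop the leading newline they leave behind
    for _ in range(indent_count):
        formatted = formatted[formatted.find('{') + 1:]
    if indent_count > 0:
        formatted = re.sub(r'[ \t]*\n([^\r\n])', r'\1', formatted, count=1)
    return formatted


print(beautify_nested_fragment('if(x){y=1;}', 2))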