Code example #1
File: capture_metrics.py, Project: anibit/sswm
def run_main_job():
    startTime = datetime.datetime.utcnow()

    print ("Anibit website timer, (c) 2014-2015 Anibit Technology");
    print ("Reading config...")
    with open("capture_metrics.config.json") as f:
        minified = minify_json.json_minify(f.read())
        config = json.loads(minified)

    #check to see if there is a valid database already
    dbConnection = setup_database(config['db_name'], config['enable_db_reset'])

    jobRunID = db_log_job_runs(dbConnection)

    #get list of URLs
    for urlJob in config['url_jobs']:
        run_job(dbConnection, config, urlJob['url'], urlJob['nickname'], jobRunID)

    endTime = datetime.datetime.utcnow()

    delta = endTime - startTime

    db_log_main_run_time(dbConnection, jobRunID, delta.total_seconds())

    print ("complete.")
Code example #2
File: keymaps.py, Project: bizoo/KeymapTools
 def run(self):
     pluginspath = sublime.packages_path()
     allkeybindings = []
     for root, dirs, files in os.walk(pluginspath):
         for fpath in (
             os.path.join(root, f)
             for f in files
             if f.lower() in ("default (%s).sublime-keymap" % (sublime.platform(),), "default.sublime-keymap")
         ):
             with open(fpath) as f:
                 content = f.read()
             keybindings = json.loads(minify_json.json_minify(content))
             for e in keybindings:
                 # add package name
                 e["package"] = os.path.split(os.path.split(fpath)[0])[1]
                 # order context list to be able to compare it
                 if "context" in e:
                     e["context"].sort()
                 else:
                     e["context"] = None
                 # normalize command string
                 for i, command in enumerate(e["keys"]):
                     cmod, ckey = [], []
                     comp = command.split("+")
                     for c in comp:
                         if c in MODIFIERS:
                             cmod.append(c)
                         else:
                             ckey.append(c)
                         cmod.sort()
                     e["keys"][i] = "+".join(cmod + ckey)
             allkeybindings.extend(keybindings)
     sublime.set_timeout(functools.partial(self.on_done, allkeybindings), 0)
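
The modifier-sorting loop above is the heart of the comparison: two bindings only collide if their key chords normalize to the same string. A standalone sketch of that normalization, assuming a MODIFIERS set roughly like the one the plugin defines elsewhere:

MODIFIERS = {"ctrl", "alt", "shift", "super"}  # assumed definition

def normalize_key(command):
    """Sort the modifiers in a key chord, e.g. 'shift+ctrl+p' -> 'ctrl+shift+p'."""
    mods, keys = [], []
    for part in command.split("+"):
        (mods if part in MODIFIERS else keys).append(part)
    return "+".join(sorted(mods) + keys)

# normalize_key("shift+ctrl+p") == normalize_key("ctrl+shift+p") == "ctrl+shift+p"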
Code example #3
File: manifest.py, Project: isabella232/bootstrap-vz
	def load_json(self, path):
		"""Loads JSON. Unused and will be removed.
		Use common.tools.load_json instead
		"""
		import json
		from minify_json import json_minify
		with open(path) as stream:
			return json.loads(json_minify(stream.read(), False))
Code example #5
File: timing_results.py, Project: anibit/sswm
def run_server():
    global config
    print ("Anibit website timer results server, (c) 2014-2015 Anibit Technology");
    print ("Reading config...")
    with open("timing_results.config.json") as f:
        minified = minify_json.json_minify(f.read())
        config = json.loads(minified)
    print ("starting server...")
    run(host=config['server_host'], port = config['server_port'], debug=True)
Code example #6
File: __init__.py, Project: maizy/PonyLib
def parse_json_with_comments(path):
    """
    Strip comments and parse JSON

    Based on solution from Damien Riquet:
    http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
    """
    with open(path, 'r') as file:
        content = file.read().decode('utf-8')
        content = minify_json.json_minify(content)

    return json.loads(content)
Code example #7
    def check_for_conflicts(self, package):
        orig_path = os.path.join(PACKAGES_PATH, package)
        self.done = False

        for filename in os.listdir(orig_path):
            if filename.lower() == "default.sublime-keymap" or \
            filename.lower() == "default (%s).sublime-keymap" % (PLATFORM.lower()):
                path = os.path.join(orig_path, filename)

                content = open(path).read()
                try:
                    minified_content = json_minify(content)
                    if self.debug:
                        self.debug_minified[package] = minified_content
                    key_map = json.loads(minified_content)
                except:
                    if not self.prev_error:
                        self.prev_error = True
                        sublime.error_message("Could not parse a keymap file. See console for details")
                    error_path = os.path.join(os.path.basename(orig_path), filename)
                    print "FindKeyConflicts[Warning]: An error " + \
                          "occured while parsing '" + error_path + "'"
                    continue

                for entry in key_map:
                    keys = entry["keys"]
                    if "context" in entry:
                        entry["context"].sort()
                    key_array = []
                    key_string = ""
                    for key in keys:
                        key_array.append(self.order_key_string(key))

                    if self.check_ignore(key_array):
                        continue
                    key_string = ",".join(key_array)

                    if key_string in self.all_key_map:
                        tmp = self.all_key_map.get(key_string)
                        if package not in tmp["packages"]:
                            tmp["packages"].append(package)
                            tmp[package] = [entry]
                        else:
                            tmp[package].append(entry)

                        self.all_key_map[key_string] = tmp
                    else:
                        new_entry = {}
                        new_entry["packages"] = [package]
                        new_entry[package] = [entry]
                        self.all_key_map[key_string] = new_entry
        self.done = True
Code example #8
def find_jshintrc(start_dir):
    """
    Locates the most relevant .jshintrc file. Of the following
    locations, the first to be found will be used:

        1. The file referenced by a jsonHintConfig value in a
           package.json file. The package.json file is found in the
           start_dir or any of its parents.
        2. A .jshintrc file in the start_dir or any of its parents.
        3. ~/.jshintrc

    start_dir is normally set to the directory of the file being
    validated.

    When start_dir is not provided (which happens with files that
    are not saved yet), the first two locations will not be
    available, so ~/.jshintrc is the only candidate that is
    considered.

    If no relevant .jshintrc is found, the return value is None.
    """

    if start_dir:
        # locate the nearest package.json
        pj = find_up_the_tree(start_dir, "package.json")
        if pj:
            # does it have a jsonHintConfig setting?
            try:
                pkg = json.loads(json_minify(open(pj, "r").read()))
                if "jsonHintConfig" in pkg:
                    # does it point to a file that exists?
                    pj_dir = os.path.dirname(pj)
                    full_path = os.path.join(pj_dir, pkg["jsonHintConfig"])
                    full_path = os.path.abspath(full_path)
                    if os.path.isfile(full_path):
                        # success
                        return full_path
            except ValueError:
                # package.json file is invalid JSON! Skip it.
                pass

        # locate the nearest .jshintrc
        jshrc = find_up_the_tree(start_dir, ".jshintrc")
        if jshrc:
            return jshrc

    # last ditch: look for .jshintrc in the user’s home directory
    home_jshrc = os.path.expanduser("~/.jshintrc")
    if os.path.isfile(home_jshrc):
        return home_jshrc

    return None
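
find_up_the_tree is not shown in this excerpt. A plausible implementation (an assumption, not the bundle's actual helper) walks from start_dir toward the filesystem root and returns the first match:

import os

def find_up_the_tree(start_dir, filename):
    """Hypothetical helper: return the path to filename found in start_dir or
    the nearest ancestor directory, or None if no such file exists."""
    current = os.path.abspath(start_dir)
    while True:
        candidate = os.path.join(current, filename)
        if os.path.isfile(candidate):
            return candidate
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root
            return None
        current = parent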
Code example #9
def find_jshintrc(start_dir):
    """
    Locates the most relevant .jshintrc file. Of the following
    locations, the first to be found will be used:

        1. The file referenced by a jsonHintConfig value in a
           package.json file. The package.json file is found in the
           start_dir or any of its parents.
        2. A .jshintrc file in the start_dir or any of its parents.
        3. ~/.jshintrc

    start_dir is normally set to the directory of the file being
    validated.

    When start_dir is not provided (which happens with files that
    are not saved yet), the first two locations will not be
    available, so ~/.jshintrc is the only candidate that is
    considered.

    If no relevant .jshintrc is found, the return value is None.
    """

    if start_dir:
        # locate the nearest package.json
        pj = find_up_the_tree(start_dir, 'package.json')
        if pj:
            # does it have a jsonHintConfig setting?
            try:
                pkg = json.loads(json_minify(open(pj, 'r').read()))
                if 'jsonHintConfig' in pkg:
                    # does it point to a file that exists?
                    pj_dir = os.path.dirname(pj)
                    full_path = os.path.join(pj_dir, pkg['jsonHintConfig'])
                    full_path = os.path.abspath(full_path)
                    if os.path.isfile(full_path):
                        # success
                        return full_path
            except ValueError:
                # package.json file is invalid JSON! Skip it.
                pass

        # locate the nearest .jshintrc
        jshrc = find_up_the_tree(start_dir, '.jshintrc')
        if jshrc:
            return jshrc

    # last ditch: look for .jshintrc in the user’s home directory
    home_jshrc = os.path.expanduser('~/.jshintrc')
    if os.path.isfile(home_jshrc):
        return home_jshrc

    return None
Code example #10
File: __init__.py, Project: EMSL-MSC/pacifica-2.0
def token_gen(pub, priv):
	if priv == None:
		priv = ''
	if len(priv) > 0:
		pub['o'] = len(priv)
	else:
		try:
			del pub['o']
		except:
			pass
	jss = minify_json.json_minify(json.dumps(pub))
	td = jss[1:-1]
	sig = sign.sign(td + priv)
	tok = "%s%s%s%s" %(len(td), td, priv, sig)
	return tok.encode('base64').replace('\n', '').replace('=', '')
Code example #11
def token_gen(pub, priv):
    if priv == None:
        priv = ''
    if len(priv) > 0:
        pub['o'] = len(priv)
    else:
        try:
            del pub['o']
        except:
            pass
    jss = minify_json.json_minify(json.dumps(pub))
    td = jss[1:-1]
    sig = sign.sign(td + priv)
    tok = "%s%s%s%s" % (len(td), td, priv, sig)
    return tok.encode('base64').replace('\n', '').replace('=', '')
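
Note that str.encode('base64') exists only in Python 2. If this helper were ported to Python 3, the base64 module would do the same job; a sketch of just the encoding step, with encode_token as a hypothetical name:

import base64

def encode_token(tok):
    """Base64-encode the token string and drop newlines and '=' padding,
    mirroring the Python 2 encode('base64') call above."""
    encoded = base64.b64encode(tok.encode("utf-8")).decode("ascii")
    return encoded.replace("\n", "").replace("=", "")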
Code example #12
File: main.py, Project: chuy08/renamepics
def main():

   parser = argparse.ArgumentParser(description='File Manipulation')
   parser.add_argument( '-c', '--conf'
                       ,default=__CONFFILE
                       ,help='default config file is params.conf.json'
                      )
   parser.add_argument( '-d', '--directory'
                       ,default=None
                       ,help='The Absolute path of the pictures we are dealing with'
                      )
   args=parser.parse_args()
   rootdir = args.directory
   CONFFILE = args.conf

   # Loading config file
   json_data = open( CONFFILE )
   data = json.loads( json_minify( json_data.read()))
   json_data.close()

   conf = data["config"]

   # Overriding rootdir by command line if set
   if rootdir:
      conf["rootDir"] = rootdir

   ## Logger configuration.
   try:
      logging.config.dictConfig(data["loggerConf"])
   except ValueError:
      print >> sys.stderr, "logger configuration not accepted"
      print >> sys.stderr, data["loggerConf"]
      sys.exit()

   logger = logging.getLogger(LOGNAME)
   logger.info( "Config file used: %s" % ( CONFFILE ))
   logger.info( "Rootdir used: %s" % ( conf["rootDir"] ))

   fm = fileManipulation( conf, LOGNAME )
   fm.files()
   
   logger.info( "Finished..." )
Code example #13
    def manage_package(self, package):
        orig_path = os.path.join(PACKAGES_PATH, package)
        self.done = False

        for filename in os.listdir(orig_path):
            if filename.lower() == "default.sublime-keymap" or \
            filename.lower() == "default (%s).sublime-keymap" % (PLATFORM.lower()):
                path = os.path.join(orig_path, filename)

                content = open(path).read()
                try:
                    minified_content = json_minify(content)
                    if self.debug:
                        self.debug_minified[package] = minified_content
                    key_map = json.loads(minified_content)
                except:
                    if not self.prev_error:
                        self.prev_error = True
                        sublime.error_message("Could not parse a keymap file. See console for details")
                    error_path = os.path.join(os.path.basename(orig_path), filename)
                    logger.warning("FindKeyConflicts[Warning]: An error " + "occured while parsing '" + error_path + "'")
                    continue
                self.handle_key_map(package, key_map)
        self.done = True
Code example #14
def getRampsFromFile(fname):
    """
    Read extra color ramps into our global RAMP dictionary
    from the specified file
    """
    from minify_json import json_minify
    # Read palette file
    palettesFobj = open(fname, "r")
    # Minify file contents
    palsMinified = json_minify(palettesFobj.read())
    # Loads palettes in a dict
    pals = json.loads(palsMinified)

    # For each palette that's been detected
    for pal in pals:
        # Check name is present and unique
        if "name" in pal:
            cur_name = pal["name"]
        else:
            # Quit - invalid colour scheme structure
            msg = 'Invalid colour ramp structure'
            raise viewererrors.ColorRampException(msg)

        if cur_name in RAMP.keys():
            # Quit - invalid colour scheme name
            msg = 'Duplicated colour ramp name'
            raise viewererrors.ColorRampException(msg)

        # Check red, green and blue fields are present
        if "description" in pal:
            # Parsing description of the colour ramp
            descriptionKeys = pal["description"].keys()
            # Is it RGB coded?
            isRgb = set(descriptionKeys).issubset(set(['red', 'green',
                                                       'blue']))
            # Is it hex coded?
            isHex = descriptionKeys == ['hex']
            # If RGB is given it has priority
            if isRgb:
                reds = pal["description"]["red"]
                greens = pal["description"]["green"]
                blues = pal["description"]["blue"]
            else:
                # Else look for HEX
                if isHex:
                    # Convert HEX to RGB
                    colrgb = numpy.array([
                        HEXtoRGB(col)
                        for col in pal["description"]["hex"].split()
                    ])
                    # Add RGB values to palette description
                    reds = " ".join(map(str, colrgb[:, 0]))
                    greens = " ".join(map(str, colrgb[:, 1]))
                    blues = " ".join(map(str, colrgb[:, 2]))
                else:
                    # Quit - invalid colour scheme
                    msg = 'Invalid colour ramp structure'
                    raise viewererrors.ColorRampException(msg)
        else:
            # Quit - invalid colour scheme
            msg = 'Invalid colour ramp structure'
            raise viewererrors.ColorRampException(msg)

        # Other fields optional
        # not sure what we'll be doing with this yet
        if "author" in pal:
            cur_author = pal["author"]
        else:
            cur_author = ''

        if "comments" in pal:
            cur_comments = pal["comments"]
        else:
            cur_comments = ''

        if "type" in pal:
            cur_type = pal["type"]
        else:
            cur_type = ''

        # Assembling dictionary entry
        RAMP[cur_name] = {
            'author': cur_author,
            'comments': cur_comments,
            'type': cur_type,
            'description': {}
        }
        # Add description fields
        RAMP[cur_name]["description"]["red"] = reds
        RAMP[cur_name]["description"]["green"] = greens
        RAMP[cur_name]["description"]["blue"] = blues
Code example #15
File: server.py, Project: hafen/tangelo
    def invoke_service(self, module, *pargs, **kwargs):
        # TODO(choudhury): This method should attempt to load the named module,
        # then invoke it with the given arguments.  However, if the named module
        # is "config" or something similar, the method should instead launch a
        # special "config" app, which lists the available app modules, along
        # with docstrings or similar.  It should also allow the user to
        # add/delete search paths for other modules.
        tangelo.content_type("text/plain")

        # Save the system path (be sure to *make a copy* using the list()
        # function) - it will be modified before invoking the service, and must
        # be restored afterwards.
        origpath = list(sys.path)

        # By default, the result should be a bare response that we will place an
        # error message in if something goes wrong; if nothing goes wrong this
        # will be replaced with some other object.
        result = tangelo.empty_response()

        # Store the modpath in the thread-local storage (tangelo.paths() makes
        # use of this per-thread data, so this is the way to get the data across
        # the "module boundary" properly).
        modpath = os.path.dirname(module)
        cherrypy.thread_data.modulepath = modpath
        cherrypy.thread_data.modulename = module

        # Extend the system path with the module's home path.
        sys.path.insert(0, modpath)

        # Import the module if not already imported previously (or if the module
        # to import, or its configuration file, has been updated since the last
        # import).
        try:
            stamp = self.modules.get(module)
            mtime = os.path.getmtime(module)

            config_file = module[:-2] + "json"
            config_mtime = None
            if os.path.exists(config_file):
                config_mtime = os.path.getmtime(config_file)

            if stamp is None or mtime > stamp["mtime"] or (config_mtime is not None and config_mtime > stamp["mtime"]):
                if stamp is None:
                    tangelo.log("loading new module: " + module)
                else:
                    tangelo.log("reloading module: " + module)

                # Load any configuration the module might carry with it.
                if config_mtime is not None:
                    try:
                        with open(config_file) as f:
                            config = json.loads(json_minify(f.read()))
                            if type(config) != dict:
                                msg = "Service module configuration file does not contain a key-value store (i.e., a JSON Object)"
                                tangelo.log(msg)
                                raise TypeError(msg)
                    except IOError:
                        tangelo.log("Could not open config file %s" % (config_file))
                        raise
                    except ValueError as e:
                        tangelo.log("Error reading config file %s: %s" % (config_file, e))
                        raise
                else:
                    config = {}

                cherrypy.config["module-config"][module] = config

                # Remove .py to get the module name
                name = module[:-3]

                # Load the module.
                service = imp.load_source(name, module)
                self.modules[module] = { "module": service,
                                         "mtime": max(mtime, config_mtime) }
            else:
                service = stamp["module"]
        except:
            bt = traceback.format_exc()

            tangelo.log("Error importing module %s" % (tangelo.request_path()), "SERVICE")
            tangelo.log(bt, "SERVICE")

            result = tangelo.HTTPStatusCode("501 Error in Python Service", "There was an error while trying to import module %s:<br><pre>%s</pre>" % (tangelo.request_path(), bt))
        else:
            # Try to run the service - either it's in a function called "run()",
            # or else it's in a REST API consisting of at least one of "get()",
            # "put()", "post()", or "delete()".
            #
            # Collect the result in a variable - depending on its type, it will be
            # transformed in some way below (by default, to JSON, but may also raise
            # a cherrypy exception, log itself in a streaming table, etc.).
            #
            try:
                if 'run' in dir(service):
                    # Call the module's run() method, passing it the positional and
                    # keyword args that came into this method.
                    result = service.run(*pargs, **kwargs)
                else:
                    # Reaching here means it's a REST API.  Check for the
                    # requested method, ensure that it was marked as being part
                    # of the API, and call it; or give a 405 error.
                    method = cherrypy.request.method
                    restfunc = service.__dict__[method.lower()]
                    if restfunc is not None and hasattr(restfunc, "restful") and restfunc.restful:
                        result = restfunc(*pargs, **kwargs)
                    else:
                        result = tangelo.HTTPStatusCode(405, "Method not allowed")
            except Exception as e:
                bt = traceback.format_exc()

                tangelo.log("Caught exception while executing service %s" % (tangelo.request_path()), "SERVICE")
                tangelo.log(bt, "SERVICE")

                result = tangelo.HTTPStatusCode("501 Error in Python Service", "There was an error executing service %s:<br><pre>%s</pre>" % (tangelo.request_path(), bt))

        # Restore the path to what it was originally.
        sys.path = origpath

        # Check the type of the result to decide what result to finally return:
        #
        # 1. If it is an HTTPStatusCode object, raise a cherrypy HTTPError
        # exception, which will cause the browser to do the right thing.
        #
        # 2. TODO: If it's a Python generator object, log it with the Tangelo
        # streaming API.
        #
        # 3. If it's a Python dictionary, convert it to JSON.
        #
        # 4. If it's a string, don't do anything to it.
        #
        # This allows the services to return a Python object if they wish, or to
        # perform custom serialization (such as for MongoDB results, etc.).
        if isinstance(result, tangelo.HTTPStatusCode):
            if result.msg:
                raise cherrypy.HTTPError(result.code, result.msg)
            else:
                raise cherrypy.HTTPError(result.code)
        elif "next" in dir(result):
            # Generate a key corresponding to this object, using 100 random
            # bytes from the system - ensure the random key is not already in
            # the table (even though it would be crazy to wind up with a
            # collision).
            #
            # TODO(choudhury): replace this with a call to generate_key().
            # Move the comment above into the generate_key() function.
            key = md5.md5(os.urandom(100)).hexdigest()
            while key in self.streams:
                key = md5.md5(os.urandom(100)).hexdigest()

            # Log the object in the streaming table.
            self.streams[key] = result

            # Create an object describing the logging of the generator object.
            result = tangelo.empty_response()
            result["stream_key"] = key

            # Serialize it to JSON.
            result = json.dumps(result)
        elif not isinstance(result, types.StringTypes):
            try:
                result = json.dumps(result)
            except TypeError as e:
                t = e.message.split("<service.")[1].split()[0]
                msg = "Service %s returned an object of type %s that could not be serialized to JSON" % (tangelo.request_path(), t)

                tangelo.log("Error: %s" % (msg), "SERVICE")

                raise cherrypy.HTTPError("501 Error in Python Service", msg)

        return result
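
The decision to (re)load a service module above reduces to comparing file modification times against the cached stamp. A condensed sketch of just that check, using the same {"module": ..., "mtime": ...} stamp shape stored above; a simplification, not the server's actual code:

import os

def needs_reload(stamp, module_path, config_path=None):
    """Return True if the module has never been loaded, or if the module file
    (or its optional JSON config) changed since the cached stamp."""
    if stamp is None:
        return True
    if os.path.getmtime(module_path) > stamp["mtime"]:
        return True
    if config_path and os.path.exists(config_path):
        return os.path.getmtime(config_path) > stamp["mtime"]
    return False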
Code example #16
import argparse
import json
import os
import sys

import minify_json
# add the krak folder to the python path
sys.path.append(os.path.abspath("krak"))
import simulator
from robot_controller import compile_robot

parser = argparse.ArgumentParser()
parser.add_argument("test_file", help="path to a file with tests specification")
parser.add_argument("robot_file", help="path to a RobotController file")
#parser.add_argument("--maps_base_path", help="path to a RobotController file") TODO
args = parser.parse_args()

tests_file = open(args.test_file)
minified = minify_json.json_minify(tests_file.read())

robot_class = compile_robot(args.robot_file)[0]

tests = json.loads(minified)
# todo: run tests in parallel - or not - it's bad for debugging
test_id = 0
num_tests = len(tests)
failed_tests = []
results = []
for test in tests:
    test_id += 1
    print "--+Running test ({},{}) of {}:".format(test_id, test["map"], num_tests)
    test["map"] = str(test["map"]) # because of stupid check in simulator
    sim = simulator.KrakrobotSimulator(robot_controller_class=robot_class, simulation_dt=0.0, visualisation=False, iteration_write_frequency=1000000, **test)
    result = sim.run()
Code example #17
    # z position comes from p1 and assumes face of PMT level with top of UI - may need adjustment in future !!!! FIXME !!!!
    # Check units - values have been increased by an order of magnitude since they appear to be in cm, not mm (detector units require mm)
    if (num_ == 0):
        pos_dir_ = numpy.array([131., 524., 14340.1, 0, 0, 1])
    elif (num_ == 1):
        pos_dir_ = numpy.array([-364., 32., 14340.1, 0, 0, 1])
    elif (num_ == 2):
        pos_dir_ = numpy.array([356., 0., 14340.1, 0, 0, 1])
    elif (num_ == 3):
        pos_dir_ = numpy.array([298., -396., 14340.1, 0, 0, 1])
    # Only expect 4 neck tubes
    return pos_dir_


with open(args[1], "r") as panel_info_file:
    panel_data = yaml.load(json_minify(panel_info_file.read(), False))

f = open(args[0], "rU")
noel_file = csv.reader(f)
# Add dictionary that will map PMT location to a list of PMTs at that location
mapping = {}
# Make the map first
noel_file.next()  # Ignore the first line
for pmt in noel_file:
    # location is pmt[13]
    if pmt[13] not in mapping:
        mapping[pmt[13]] = []
    # Add whatever descriptor here that tells you e.g. LCN and pmt type
    # Here I've added a tuple with ccc and pmt type
    mapping[pmt[13]].append(
        (pmt[0], pmt[1], pmt[2], noel_type_to_rat_type(pmt[9])))
Code example #18
def validate(quiet=False):
    # locate the .jshintrc to use
    jshintrc = find_jshintrc(os.environ.get('TM_DIRECTORY', None))
    jshintrc_valid = False
    if jshintrc:
        try:
            json.loads(json_minify(open(jshintrc, 'r').read()))
            jshintrc_valid = True
        except ValueError:
            jshintrc_valid = False

    # path to our custom jshint reporter
    reporter_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'reporter.js')

    # build jshint args
    args = [
        os.environ.get('TM_JSHINT_EXTERNAL_JSHINT', 'jshint'),
        '--reporter="' + reporter_path + '"'
    ]

    if jshintrc and jshintrc_valid:
        args.append('--config="%s"' % jshintrc)
    args.append('-')

    # Build env for our command: JSHint (and Node) are often
    # installed to /usr/local/bin, which may not be on the
    # bundle’s PATH in a default install of TextMate.
    env = os.environ.copy()
    path_parts = env['PATH'].split(':')
    if '/bin' not in path_parts:
        path_parts.append('/bin')
    if '/usr/bin' not in path_parts:
        path_parts.append('/usr/bin')
    if '/usr/local/bin' not in path_parts:
        path_parts.append('/usr/local/bin')
    env['PATH'] = ':'.join(path_parts)

    try:
        jshint = subprocess.Popen(args,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  env=env)
    except OSError as e:
        msg = [
            'Hi there. This is the “JavaScript JSHint (External)” bundle for '
            + 'TextMate. I validate your code using JSHint.', '',
            'I had the following problem running <code>jshint</code>:', '',
            '<code>%s</code>' % e, '', '<h4>How to fix it</h4>',
            'Make sure the <code>jshint</code> and <code>node</code> ' +
            'commands are on the <code>PATH</code>.', '',
            '<ol>' + '<li>Go to <i>TextMate</i> > <i>Preferences…</i> > ' +
            '<i>Variables</i></li>' +
            '<li>Ensure the <code>PATH</code> is enabled there and that ' +
            'it includes the location of your <code>jshint</code> ' +
            'and <code>node</code> commands.</li>'
            '</ol>', 'The path currently used by TextMate bundles is:', '',
            '<div style="overflow:auto"><code>%s</code></div>' % env['PATH'],
            '<h4>How to disable validation</h4>',
            'If you mistakenly installed this validation tool and want to ' +
            'disable it, you can do so in TextMate:', '',
            '<ol>' + '<li>On the TextMate menu, choose ' +
            '<i>Bundles</i> > <i>Edit Bundles…</i></li>' +
            '<li>Locate “JavaScript JSHint (External)”</li>' +
            '<li>Uncheck “Enable this item”</li>' +
            '<li>Close the Bundle Editor and choose “Save”</li>' + '</ol>'
        ]
        show_error_message('<br>'.join(msg))
        sys.exit()

    # Pipe stdin to the subprocess; if we are validating an HTML
    # file with embedded JavaScript, only pipe content within the
    # <script>…</script> tags to the subprocess.
    if os.environ['TM_SCOPE'].startswith('source.js'):
        for line in sys.stdin:
            jshint.stdin.write(line)
    else:
        start_tag = re.compile('(\<\s*script)[\s\>]', re.IGNORECASE)
        end_tag = re.compile('\<\/\s*script[\s\>]', re.IGNORECASE)
        state = 'IGNORE'
        for line in sys.stdin:
            while line:
                if state == 'IGNORE':
                    match = start_tag.search(line)
                    if match:
                        # found a script tag
                        line = ' ' * match.end(1) + line[match.end(1):]
                        state = 'LOOK_FOR_END_OF_OPENING_TAG'
                    else:
                        jshint.stdin.write('\n')
                        line = None

                elif state == 'LOOK_FOR_END_OF_OPENING_TAG':
                    gt_pos = line.find('>')
                    if gt_pos != -1:
                        line = ' ' * (gt_pos + 1) + line[gt_pos + 1:]
                        state = 'PIPE_TO_OUTPUT'
                    else:
                        jshint.stdin.write('\n')
                        line = None

                elif state == 'PIPE_TO_OUTPUT':
                    match = end_tag.search(line)
                    if match:
                        # found closing </script> tag
                        jshint.stdin.write(line[:match.start()])
                        line = line[match.end():]
                        state = 'IGNORE'
                    else:
                        jshint.stdin.write(line)
                        line = None

    jshint.stdin.close()

    # parse the results
    try:
        issues = json.loads(json_minify(jshint.stdout.read()))
    except ValueError:
        print('could not parse data returned from jshint')
        sys.exit(1)

    # normalize line numbers
    input_start_line = int(os.environ['TM_INPUT_START_LINE']) - 1
    for issue in issues:
        issue['line'] += input_start_line

    # add URLs to the issues
    if 'TM_FILEPATH' in os.environ:
        url_maker = lambda x: \
            'txmt://open?url=file://%s&amp;line=%d&amp;column=%d' % \
            (os.environ['TM_FILEPATH'], x['line'], x['character'])
    else:
        url_maker = lambda x: \
            'txmt://open?line=%d&amp;column=%d' % (x['line'], x['character'])

    for issue in issues:
        issue['url'] = url_maker(issue)

    # context data we will send to JavaScript
    context = {
        'jshintrc': jshintrc,
        'jshintrcValid': jshintrc_valid,
        'issues': issues,
        'timestamp': time.strftime('%c')
    }

    if 'TM_FILEPATH' in os.environ:
        context['fileUrl'] = \
            'txmt://open?url=file://%s' % os.environ['TM_FILEPATH']
        context['targetFilename'] = os.path.basename(os.environ['TM_FILEPATH'])
    else:
        context['fileUrl'] = 'txmt://open?line=1&amp;column=0'
        context['targetFilename'] = '(current unsaved file)'

    # Identify the marker file that we will use to indicate the
    # TM_FILEPATH of the file currently shown in the validation
    # window.
    markerDir = get_marker_directory()
    hash = hashlib.sha224(context['fileUrl']).hexdigest()
    context['markerFile'] = os.path.join(markerDir, hash + '.marker')

    context['errorCount'] = \
        len([_ for _ in context['issues'] if _['code'][0] == 'E'])
    context['warningCount'] = \
        len([_ for _ in context['issues'] if _['code'][0] == 'W'])

    if context['errorCount'] == 0 and context['warningCount'] == 0:
        # There are no errors or warnings. We can bail out if all of
        # the following are True:
        #
        #     * There is no validation window currently open for
        #       this document.
        #     * quiet is True.
        if not os.path.exists(context['markerFile']):
            if quiet:
                return

    # create the marker file
    markerFile = open(context['markerFile'], 'w+')
    markerFile.close()

    # read and prepare the template
    my_dir = os.path.abspath(os.path.dirname(__file__))

    content_ejs_path = os.path.join(my_dir, 'content.ejs')
    content_ejs = open(content_ejs_path, 'r').read()

    template_path = os.path.join(my_dir, 'template.html')
    template = open(template_path, 'r').read()
    template = template.replace('{{ TM_BUNDLE_SUPPORT }}',
                                os.environ['TM_BUNDLE_SUPPORT'])
    template = template.replace('{{ EJS_TEMPLATE }}', json.dumps(content_ejs))
    template = template.replace('{{ CONTEXT }}', json.dumps(context))

    print(template)
Code example #19
def validate(quiet=False):
    # locate the .jshintrc to use
    jshintrc = find_jshintrc(os.environ.get("TM_DIRECTORY", None))
    jshintrc_valid = False
    if jshintrc:
        try:
            json.loads(json_minify(open(jshintrc, "r").read()))
            jshintrc_valid = True
        except ValueError:
            jshintrc_valid = False

    # path to our custom jshint reporter
    reporter_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "reporter.js")

    # build jshint args
    args = [os.environ.get("TM_JSHINT_EXTERNAL_JSHINT", "jshint"), '--reporter="' + reporter_path + '"']

    if jshintrc and jshintrc_valid:
        args.append('--config="%s"' % jshintrc)
    args.append("-")

    # Build env for our command: JSHint (and Node) are often
    # installed to /usr/local/bin, which may not be on the
    # bundle’s PATH in a default install of TextMate.
    env = os.environ.copy()
    path_parts = env["PATH"].split(":")
    if "/bin" not in path_parts:
        path_parts.append("/bin")
    if "/usr/bin" not in path_parts:
        path_parts.append("/usr/bin")
    if "/usr/local/bin" not in path_parts:
        path_parts.append("/usr/local/bin")
    env["PATH"] = ":".join(path_parts)

    try:
        jshint = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
    except OSError as e:
        msg = [
            "Hi there. This is the “JavaScript JSHint (External)” bundle for "
            + "TextMate. I validate your code using JSHint.",
            "",
            "I had the following problem running <code>jshint</code>:",
            "",
            "<code>%s</code>" % e,
            "",
            "<h4>How to fix it</h4>",
            "Make sure the <code>jshint</code> and <code>node</code> " + "commands are on the <code>PATH</code>.",
            "",
            "<ol>"
            + "<li>Go to <i>TextMate</i> > <i>Preferences…</i> > "
            + "<i>Variables</i></li>"
            + "<li>Ensure the <code>PATH</code> is enabled there and that "
            + "it includes the location of your <code>jshint</code> "
            + "and <code>node</code> commands.</li>"
            "</ol>",
            "The path currently used by TextMate bundles is:",
            "",
            '<div style="overflow:auto"><code>%s</code></div>' % env["PATH"],
            "<h4>How to disable validation</h4>",
            "If you mistakenly installed this validation tool and want to " + "disable it, you can do so in TextMate:",
            "",
            "<ol>"
            + "<li>On the TextMate menu, choose "
            + "<i>Bundles</i> > <i>Edit Bundles…</i></li>"
            + "<li>Locate “JavaScript JSHint (External)”</li>"
            + "<li>Uncheck “Enable this item”</li>"
            + "<li>Close the Bundle Editor and choose “Save”</li>"
            + "</ol>",
        ]
        show_error_message("<br>".join(msg))
        sys.exit()

    # Pipe stdin to the subprocess; if we are validating an HTML
    # file with embedded JavaScript, only pipe content within the
    # <script>…</script> tags to the subprocess.
    if os.environ["TM_SCOPE"].startswith("source.js"):
        for line in sys.stdin:
            jshint.stdin.write(line)
    else:
        start_tag = re.compile("(\<\s*script)[\s\>]", re.IGNORECASE)
        end_tag = re.compile("\<\/\s*script[\s\>]", re.IGNORECASE)
        state = "IGNORE"
        for line in sys.stdin:
            while line:
                if state == "IGNORE":
                    match = start_tag.search(line)
                    if match:
                        # found a script tag
                        line = " " * match.end(1) + line[match.end(1) :]
                        state = "LOOK_FOR_END_OF_OPENING_TAG"
                    else:
                        jshint.stdin.write("\n")
                        line = None

                elif state == "LOOK_FOR_END_OF_OPENING_TAG":
                    gt_pos = line.find(">")
                    if gt_pos != -1:
                        line = " " * (gt_pos + 1) + line[gt_pos + 1 :]
                        state = "PIPE_TO_OUTPUT"
                    else:
                        jshint.stdin.write("\n")
                        line = None

                elif state == "PIPE_TO_OUTPUT":
                    match = end_tag.search(line)
                    if match:
                        # found closing </script> tag
                        jshint.stdin.write(line[: match.start()])
                        line = line[match.end() :]
                        state = "IGNORE"
                    else:
                        jshint.stdin.write(line)
                        line = None

    jshint.stdin.close()

    # parse the results
    try:
        issues = json.loads(json_minify(jshint.stdout.read()))
    except ValueError:
        print("could not parse data returned from jshint")
        sys.exit(1)

    # normalize line numbers
    input_start_line = int(os.environ["TM_INPUT_START_LINE"]) - 1
    for issue in issues:
        issue["line"] += input_start_line

    # add URLs to the issues
    if "TM_FILEPATH" in os.environ:
        url_maker = lambda x: "txmt://open?url=file://%s&amp;line=%d&amp;column=%d" % (
            os.environ["TM_FILEPATH"],
            x["line"],
            x["character"],
        )
    else:
        url_maker = lambda x: "txmt://open?line=%d&amp;column=%d" % (x["line"], x["character"])

    for issue in issues:
        issue["url"] = url_maker(issue)

    # context data we will send to JavaScript
    context = {
        "jshintrc": jshintrc,
        "jshintrcValid": jshintrc_valid,
        "issues": issues,
        "timestamp": time.strftime("%c"),
    }

    if "TM_FILEPATH" in os.environ:
        context["fileUrl"] = "txmt://open?url=file://%s" % os.environ["TM_FILEPATH"]
        context["targetFilename"] = os.path.basename(os.environ["TM_FILEPATH"])
    else:
        context["fileUrl"] = "txmt://open?line=1&amp;column=0"
        context["targetFilename"] = "(current unsaved file)"

    # Identify the marker file that we will use to indicate the
    # TM_FILEPATH of the file currently shown in the validation
    # window.
    markerDir = get_marker_directory()
    hash = hashlib.sha224(context["fileUrl"]).hexdigest()
    context["markerFile"] = os.path.join(markerDir, hash + ".marker")

    context["errorCount"] = len([_ for _ in context["issues"] if _["code"][0] == "E"])
    context["warningCount"] = len([_ for _ in context["issues"] if _["code"][0] == "W"])

    if context["errorCount"] == 0 and context["warningCount"] == 0:
        # There are no errors or warnings. We can bail out if all of
        # the following are True:
        #
        #     * There is no validation window currently open for
        #       this document.
        #     * quiet is True.
        if not os.path.exists(context["markerFile"]):
            if quiet:
                return

    # create the marker file
    markerFile = open(context["markerFile"], "w+")
    markerFile.close()

    # read and prepare the template
    my_dir = os.path.abspath(os.path.dirname(__file__))

    content_ejs_path = os.path.join(my_dir, "content.ejs")
    content_ejs = open(content_ejs_path, "r").read()

    template_path = os.path.join(my_dir, "template.html")
    template = open(template_path, "r").read()
    template = template.replace("{{ TM_BUNDLE_SUPPORT }}", os.environ["TM_BUNDLE_SUPPORT"])
    template = template.replace("{{ EJS_TEMPLATE }}", json.dumps(content_ejs))
    template = template.replace("{{ CONTEXT }}", json.dumps(context))

    print(template)
Code example #20
def load_json(path):
    import json
    from minify_json import json_minify
    with open(path) as stream:
        return json.loads(json_minify(stream.read(), False))
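
Several examples on this page pass a second positional argument to json_minify. In the common Python port of JSON.minify that parameter is strip_space, so False removes comments but leaves the whitespace alone (useful when the result is parsed as YAML, as in the .ratdb examples elsewhere on this page); treat the parameter name as an assumption about the vendored library version. A small sketch:

from minify_json import json_minify

raw = '{ "a": 1, // trailing comment\n  "b": 2 }'

compact = json_minify(raw)          # comments and insignificant whitespace removed
readable = json_minify(raw, False)  # assumed strip_space=False: comments removed, spacing kept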
Code example #21
    """ Calculate the T10, T14 axis, choose the furthest PMT from the origin."""
    dists = sorted( [ [numpy.linalg.norm( pmt - origin ), index] for index, pmt in enumerate( pmtPos ) ], key=lambda tup: tup[0] )
    xAxis = pmtPos[dists[-1][1]] - origin
    xAxis = xAxis / numpy.linalg.norm( xAxis )
    return xAxis

def CalculateT21Axis( pmtPos, origin ):
    """ Calculate the T21 axis, use furthest pmts."""
    # Now choose the 2 pmtPos furthest from the origin
    dists = sorted( [ [numpy.linalg.norm( pmt - origin ), index] for index, pmt in enumerate( pmtPos ) ], key=lambda tup: tup[0] )
    xAxis = ( pmtPos[dists[-1][1]] + pmtPos[dists[-2][1]] ) / 2.0 - origin
    return xAxis / numpy.linalg.norm( xAxis )

# Now start parsing the file
pmtInfoFile = open( "data/PMTINFO_rat3.ratdb", "r" )
data = yaml.load( json_minify( pmtInfoFile.read(), False ) )
pmtInfoFile.close()
# Loop over the pmts, and condense data to once per panel
newData = {}
newData["panel_number"] = []
newData["panel_type"] = []
newData["r"] = []
newData["s"] = []
newData["t"] = []
newData["u"] = []
newData["v"] = []
newData["w"] = []
newData["x"] = []
newData["y"] = []
newData["z"] = []
Code example #22
File: manifest.py, Project: boltronics/bootstrap-vz
	def load_json(self, path):
		import json
		from minify_json import json_minify
		with open(path) as stream:
			return json.loads(json_minify(stream.read(), False))
Code example #23
#!/usr/bin/env python
import yaml  # Needs installing
import sys
import numpy
from minify_json import json_minify  # In this directory

with open("data/PMTINFO_rat3.ratdb", "r") as original_file:
    rat_original = yaml.load(json_minify(original_file.read(), False))

with open("data/snoman_phil.ratdb", "r") as snoman_file:
    snoman = yaml.load(json_minify(snoman_file.read(), False))

with open("data/snoman.ratdb", "r") as snoman_file:
    rat_snoman = yaml.load(json_minify(snoman_file.read(), False))

with open("data/airfill2.ratdb", "r") as airfill2_file:
    airfill2 = yaml.load(json_minify(airfill2_file.read(), False))

with open("data/airfill3.ratdb", "r") as airfill3_file:
    airfill3 = yaml.load(json_minify(airfill3_file.read(), False))

# Correct snoman cm to mm
for lcn in range(0, len(snoman["pmt_type"])):
    snoman["x"][lcn] = snoman["x"][lcn] * 10.0
    snoman["y"][lcn] = snoman["y"][lcn] * 10.0
    snoman["z"][lcn] = snoman["z"][lcn] * 10.0


def compare(info1, info2):

    for lcn in range(0, len(info1["pmt_type"])):