Code example #1
    def configure(self):
        if os.path.exists(
            os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
            utils.output("vtktud build already configured.")
            return
        
        if not os.path.exists(self.build_dir):
            os.mkdir(self.build_dir)

        cmake_params = "-DBUILD_SHARED_LIBS=ON " \
                       "-DBUILD_CONTRIB=OFF " \
                       "-DBUILD_TESTING=OFF " \
                       "-DCMAKE_BACKWARDS_COMPATIBILITY=2.6 " \
                       "-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
                       "-DCMAKE_INSTALL_PREFIX=%s " \
                       "-DVTK_DIR=%s" % (self.inst_dir, config.VTK_DIR)

        # we only add this under posix as a work-around to compile the
        # STLib code under g++
        if os.name == 'posix':
            cmake_params = cmake_params + " -DCMAKE_CXX_FLAGS=-fpermissive "

        ret = utils.cmake_command(self.build_dir, self.source_dir,
                cmake_params)

        if ret != 0:
            utils.error("Could not configure vtktud.  Fix and try again.")
Code example #2
def fires(jira, args):
    m = re.match(r'(\w+)?', args)
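    # NOTE: (\w+)? also matches the empty string, so m is always truthy here;
    # the default-project fallback below covers the empty-args case.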

    if not m:
        return utils.not_valid_args(args)

    project_key = m.group(1) or config.get('jira_default_project')

    if not project_key:
        return utils.error('Project name is required')

    if not utils.check_project(jira, project_key):
        return utils.error('Project {} does not exist'.format(project_key))

    try:
        query = 'project={0} and labels in (fire)'.format(project_key)
        issues = jira.search_issues(query)

        if not issues:
            return 'No issues found'

        return '\n'.join([utils.issue_info(issue) for issue in issues])
    except JIRAError as e:
        response = utils.error('{} {}'.format(str(e.status_code), str(e.text)))
        return response
Code example #3
def close(jira, args):
    m = re.match(r'(\w+-\d+) ?(.*)', args)

    if not m:
        return utils.not_valid_args(args)

    issue_key = m.group(1)
    issue_status = 'Closed'
    comment = m.group(2)

    try:
        issue = jira.issue(issue_key)

        transitions = jira.transitions(issue)
        transition_id = utils.get_transition(transitions, issue_status)

        if issue_status == issue.fields.status.name:
            return utils.error('Issue already closed')

        if not transition_id:
            return utils.error('Operation not permitted')

        jira.transition_issue(issue, transition_id, comment=comment)
        issue = jira.issue(issue_key)

        return utils.issue_info(issue)
    except JIRAError as e:
        if e.status_code == 500:
            return utils.error('Operation not permitted')

        response = utils.error('{} {}'.format(str(e.status_code), str(e.text)))
        return response
Code example #4
def checkGbcFilesEq(name, genfile, reffile):
    """check if non-zero bytes in binary files are equal"""
    with open(genfile, "rb") as fg, open(reffile, "rb") as fr:
        genimage = fg.read()
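        # the reference file apparently carries a 6-byte header that the
        # generated image lacks (an assumption, based on the slice below)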
        refimage = fr.read()[6:]
    if genimage != refimage and genimage != refimage[:-1]:
        error("GPL image", "Image mismatch: " + name)
Code example #5
def status(jira, args):
    m = re.match(r'(\w+-\d+) (.*)', args)

    if not m:
        return utils.not_valid_args(args)

    issue_key = m.group(1)
    issue_status = m.group(2)

    try:
        issue = jira.issue(issue_key)
        statuses = jira.statuses()

        if issue_status not in [s.name for s in statuses]:
            return utils.error('Status {} does not exist'.format(issue_status))

        if issue_status == issue.fields.status.name:
            return utils.error('Status {} already set'.format(issue_status))

        transitions = jira.transitions(issue)
        transition_id = utils.get_transition(transitions, issue_status)

        if not transition_id:
            return utils.error('Operation not permitted')

        jira.transition_issue(issue, transition_id)
        issue = jira.issue(issue_key)

        return utils.issue_info(issue)
    except JIRAError as e:
        response = utils.error('{} {}'.format(str(e.status_code), str(e.text)))
        return response
Code example #6
def check_build_sanity():
    if len(e('${BUILD_ROOT}')) > 38:
        error("Current path too long ({0} characters) for nullfs mounts during build",
              len(e('${BUILD_ROOT}')))

    if e('${BE_ROOT}') in sh_str('mount'):
        error("You have dangling mounts inside {0}, did last build crash?", e('${BE_ROOT}'))
Code example #7
def check_port(name, port):
    debug('Checking for "{0}" command', name)
    for i in e('${PATH}').split(':'):
        if os.path.exists(e('${i}/${name}')):
            return

    error('Command {0} not found. Please run "pkg install {1}" or install from ports', name, port)
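
The e() helper used in examples #6 and #7 is not shown in these excerpts; presumably it expands ${...} references against the caller's locals and the environment. A minimal sketch under that assumption:

import inspect
import os
import re

def e(template):
    # hypothetical stand-in for the build system's e() helper: resolve
    # ${name} from the caller's locals first, then from os.environ
    caller_locals = inspect.currentframe().f_back.f_locals
    def resolve(match):
        name = match.group(1)
        return str(caller_locals.get(name, os.environ.get(name, '')))
    return re.sub(r'\$\{(\w+)\}', resolve, template)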
Code example #8
File: utils_math.py  Project: maa8g09/phd_code
def add(u, v):
    a = []
    if len(u) == len(v):
        a = [sum(x) for x in zip(u, v)] 
    else:
        utils.error('Vectors are of different length (utils_math: add)')
    return a
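
For reference, a hypothetical call (assuming utils.error only reports the problem and returns):

print(add([1, 2, 3], [4, 5, 6]))   # -> [5, 7, 9]
print(add([1, 2], [1, 2, 3]))      # reports the length error and returns []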
Code example #9
File: conf.py  Project: apanda/splunk-sdk-python
    def create(self, opts):
        """Create a conf stanza."""

        argv = opts.args
        count = len(argv)

        # unflagged arguments are conf, stanza, key. In this order
        # however, we must have a conf and stanza.
        cpres = count > 0
        spres = count > 1
        kpres = count > 2

        if kpres:
            kvpair = argv[2].split("=")
            if len(kvpair) != 2:
                error("Creating a k/v pair requires key and value", 2)

        if not cpres or not spres:
            error("Conf name and stanza name are required for create", 2)

        name = argv[0]
        stan = argv[1]
        conf = self.service.confs[name]

        if not kpres:
            # create stanza
            conf.create(stan)
            return 

        # create key/value pair under existing stanza
        stanza = conf[stan]
        stanza.submit(argv[2])
Code example #10
File: ip_dcmtk.py  Project: codester2/devide.johannes
    def configure_posix(self):
        os.chdir(self.build_dir)

        if os.path.exists("dcmdata/config.log"):
            utils.output("DCMTK already configured.  Not redoing.")
        else:
            # we need to configure this without zlib, otherwise dcmtk
            # complains (at least on this system) about the symbol
            # inflateEnd not being available.
            ret = os.system("./configure --without-zlib " "--prefix=%s" % (self.inst_dir,))
            if ret != 0:
                utils.error("Could not configure dcmtk.  Fix and try again.")

            # now modify the generated config/Makefile.def to enable
            # building shared libraries as per
            # http://forum.dcmtk.org/viewtopic.php?t=19
            repls = [
                (r"(^CFLAGS\s*=\s*)-O", r"\1-fPIC -O2"),
                (r"(^CXXFLAGS\s*=\s*)-O", r"\1-fPIC -O2"),
                (r"(^AR\s*=\s*)ar", r"\1gcc"),
                (r"(^ARFLAGS\s*=\s*)cruv", r"\1-shared -o"),
                (r"(^LIBEXT\s*=\s*)a", r"\1so"),
                (r"(^RANLIB\s*=\s*)ranlib", r"\1:"),
            ]

            utils.re_sub_filter_file(repls, "config/Makefile.def")
Code example #11
File: utils_math.py  Project: maa8g09/phd_code
def sub(u, v):
    a = []
    if len(u) == len(v):
        a = [ u[i]-v[i] for i in range(len(u)) ]
    else:
        utils.error('Vectors are of different length (utils_math: sub)')
    return a
Code example #12
File: stail.py  Project: apanda/splunk-sdk-python
def main():
    usage = "usage: %prog <search>"
    opts = utils.parse(sys.argv[1:], {}, ".splunkrc", usage=usage)

    if len(opts.args) != 1:
        utils.error("Search expression required", 2)
    search = opts.args[0]

    service = connect(**opts.kwargs)

    try:
        result = service.get(
            "search/jobs/export",
            search=search,
            earliest_time="rt", 
            latest_time="rt", 
            search_mode="realtime")

        reader = results.ResultsReader(result.body)
        while True:
            kind = reader.read()
            if kind is None: break
            if kind == results.RESULT:
                event = reader.value
                pprint(event)

    except KeyboardInterrupt:
        print "\nInterrupted."
Code example #13
File: ip_dcmtk.py  Project: codester2/devide.johannes
    def build_nt(self):
        os.chdir(self.build_dir)
        # do check for some file

        if os.path.exists(os.path.join("dcmdata/libsrc", BUILD_TARGET, "dcmdata.lib")):
            utils.output("dcmtk::dcmdata already built.  Skipping.")

        else:
            # Release buildtype (vs RelWithDebInfo) so we build with
            # /MD and not /MDd
            ret = utils.make_command("dcmtk.sln", install=False, project="dcmdata", win_buildtype=BUILD_TARGET)

            if ret != 0:
                utils.error("Could not build dcmtk::dcmdata.")

        if os.path.exists(os.path.join("ofstd/libsrc", BUILD_TARGET, "ofstd.lib")):
            utils.output("dcmtk::ofstd already built.  Skipping.")

        else:
            # Release buildtype (vs RelWithDebInfo) so we build with
            # /MD and not /MDd
            ret = utils.make_command("dcmtk.sln", install=False, project="ofstd", win_buildtype=BUILD_TARGET)

            if ret != 0:
                utils.error("Could not build dcmtk::ofstd.")
Code example #14
def checkRecordsByLen(infile, fixed=None):
    """check records by encoded length"""
    refline = "*".join(["".join([chr(c) for c in xrange(64, 127)])
                        for _ in xrange(4)])
    if fixed is None:
        with open(infile, "r") as f:
            records = f.readlines()
    else:
        with open(infile, "rb") as f:
            data = f.read()
            records = [data[i:i + fixed]
                       for i in xrange(0, len(data), fixed)]
    for i, line in enumerate(records):
        if fixed:
            l = line
            if len(l) != fixed:
                error("VAR Records",
                      "%s: Record %d length mismatch: %d != %d" % (
                          infile, i, len(l), fixed))
        else:
            l = line[:-1] if line[-1] == "\n" else line
        l = l.rstrip()
        s = "!" + refline[:len(l) - 2] + chr(i + 49) if len(l) > 1 else ""
        if l != s:
            error("VAR Records",
                  "%s: Record %i content mismatch" % (infile, i))
Code example #15
File: convert.py  Project: mkouhei/hatena2rest
def parse_blog_parts(string):
    """Parse and convert blog parts.

    Argument:

        string: blog entry body string.

    """

    ex_ref_char = re.compile(r'&(?!amp;)')
    string = ex_ref_char.sub('&amp;', string)

    string = string.replace('alt="no image"', '')

    try:
        xmltree = xml.etree.ElementTree.fromstring(string)
    except xml.etree.ElementTree.ParseError:
        utils.error(string)
        return None

    if xmltree.get('class') == 'amazlet-box':
        repl_amazon = parse_amazlet(xmltree)
        return repl_amazon
    if xmltree.get('class'):
        if xmltree.get('class').find('bbpBox') == 0:
            repl_twitter = parse_twitter(xmltree)
            return repl_twitter
    if str(xmltree.get('id')).find('__ss_') == 0:
        repl_slideshare = parse_slideshare(xmltree)
        return repl_slideshare
    if str(xmltree.get('href')).find('heyquiz.com') >= 0:
        repl_heyquiz = parse_heyquiz(xmltree)
        return repl_heyquiz
Code example #16
File: tests.py  Project: maa8g09/phd_code
def SVDNorm(U, S, V, A):
    nrm = np.linalg.norm(np.dot(np.dot(U, np.diag(S)), V) - A)
    if nrm >= 1e-10:
        err = 'Something went wrong with the SVD, norm is ' + str(nrm)
        ut.error(err)

    return 0
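
A quick way to exercise this check (a hypothetical driver; numpy's svd already returns the right factor transposed, matching the U·diag(S)·V product above):

import numpy as np

A = np.random.rand(4, 3)
U, S, V = np.linalg.svd(A, full_matrices=False)
SVDNorm(U, S, V, A)   # silent when the reconstruction error is below 1e-10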
Code example #17
File: coq.py  Project: QuanticPotato/vcoq
	def sendXML(self, xml):
		""" First, check wether the coq process is still running.
		Then it send the XML command, and finally it waits for the response """
		if self.coqtop == None:
			utils.error('ERROR: The coqtop process is not running or died !')
			print('Trying to relaunch it ...')
			self.launchCoqtopProcess()
		try:
			self.coqtop.stdin.write(XMLFactory.tostring(xml, 'utf-8'))
		except IOError as e:
			utils.error('Cannot communicate with the coq process : ' + str(e))
			self.coqtop = None
			return None
		response = ''
		file = self.coqtop.stdout.fileno()
		while True:
			try:
				response += os.read(file, 0x4000)
				try:
					t = XMLFactory.fromstring(response)
					return t
				except XMLFactory.ParseError:
					continue
			except OSError:
				return None
Code example #18
File: omg_enum.py  Project: Julow/oh-my-generator
def enum_def(code):
	if len(code) == 0 or len(code[0]) <= 1:
		utils.error("?enum-def need an argument (enum name)")
	name = code[0][:-1]
	if name not in enums:
		utils.error("?enum-def unknown enum: %s" % name)
	_print_def_enum(name, enums[name])
Code example #19
File: ip_pip.py  Project: codester2/devide.johannes
    def install(self):
        # first ez_install ############################################
        os.chdir(config.working_dir) # we need to be elsewhere!
        ret = os.system('%s -c "import setuptools"' % (sys.executable,))
        if ret == 0:
            utils.output('setuptools already installed.  Skipping step.')

        else:
            utils.output('ImportError test shows that setuptools is not '
                         'installed.  Installing...')

            os.chdir(config.archive_dir)
            ret = os.system('%s %s' %
                    (config.PYTHON_EXECUTABLE, EZ_BASENAME))
                    
            if ret != 0:
                utils.error('Error during setuptools install.')

        # then pip # ##############################################

        os.chdir(config.working_dir) # we need to be elsewhere!
        ret = os.system('%s -c "import pip"' % (sys.executable,))
        if ret == 0:
            utils.output('pip already installed.  Skipping step.')

        else:
            utils.output('ImportError test shows that pip is not '
                         'installed.  Installing...')

            os.chdir(config.archive_dir)
            ret = os.system('%s %s' %
                    (config.PYTHON_EXECUTABLE, PIP_BASENAME))
            if ret != 0:
                utils.error('Error during pip install.')
Code example #20
File: third_party_utils.py  Project: rchyena/chapel
def pkgconfig_get_link_args(pkg, ucp='', system=True, static=True):
  havePcFile = pkg.endswith('.pc')
  pcArg = pkg
  if not havePcFile:
    if system:
      # check that pkg-config knows about the package in question
      run_command(['pkg-config', '--exists', pkg])
    else:
      # look for a .pc file
      if ucp == '':
        ucp = default_uniq_cfg_path()
      pcfile = pkg + '.pc' # maybe needs to be an argument later?

      pcArg = os.path.join(get_cfg_install_path(pkg, ucp), 'lib',
                           'pkgconfig', pcfile)

      if not os.access(pcArg, os.R_OK):
        error("Could not find '{0}'".format(pcArg), ValueError)

  static_arg = []
  if static:
    static_arg = ['--static']

  libs_line = run_command(['pkg-config', '--libs'] + static_arg + [pcArg])
  libs = libs_line.split()
  return libs
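
A hypothetical invocation, assuming pkg-config on the build machine knows about zlib:

libs = pkgconfig_get_link_args('zlib')   # e.g. ['-lz'], via `pkg-config --libs --static zlib`
libs = pkgconfig_get_link_args('gmp', system=False, ucp='linux64-gnu')   # reads the bundled gmp.pc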
Code example #21
File: jwlib.py  Project: raphaelamorim/infer
    def run(self):
        if self.args.version:
            return subprocess.call(["javac"] + self.original_arguments)
        else:
            javac_cmd = ["javac", "-verbose", "-g"]

            if self.args.bootclasspath is not None:
                javac_cmd += ["-bootclasspath", self.args.bootclasspath]

            if self.args.classpath is None:
                classpath = utils.ANNOT_PROCESSOR_JAR
            else:
                classpath = os.pathsep.join([utils.ANNOT_PROCESSOR_JAR, self.args.classpath])
            javac_cmd += ["-cp", classpath]

            if self.args.classes_out is not None:
                javac_cmd += ["-d", self.args.classes_out]
            javac_cmd += self.remaining_args
            javac_cmd.append("-J-Duser.language=en")

            with tempfile.NamedTemporaryFile(mode="w", suffix=".out", prefix="annotations_", delete=False) as annot_out:
                self.annotations_out = annot_out.name

            with tempfile.NamedTemporaryFile(mode="w", suffix=".out", prefix="javac_", delete=False) as file_out:
                self.verbose_out = file_out.name
                os.environ["INFER_ANNOTATIONS_OUT"] = self.annotations_out
                try:
                    subprocess.check_call(javac_cmd, stderr=file_out)
                except subprocess.CalledProcessError:
                    error_msg = "Javac compilation error with: \n\n{}\n"
                    failing_cmd = [arg for arg in javac_cmd if arg != "-verbose"]
                    utils.error(error_msg.format(failing_cmd))
                    subprocess.check_call(failing_cmd)

        return os.EX_OK
Code example #22
File: user.py  Project: chmielu/tpm
	def post(self, username):
		username = urllib.unquote(username)
		if username != str(self.user) and not self.is_admin:
			error(self, 403); return

		if not self.is_admin and (self.request.get("is_member") or self.request.get("is_admin")):
			error(self, 403); return

		# NOTE: reconstructed -- the original query was masked in the scrape;
		# presumably it fetches the profile for this user and bails out to the
		# signup page when none exists.
		profile = db.Query(models.Profile).filter("user =", users.User(username)).get()
		if not profile:
			self.redirect("/user/create"); return

		screenname = self.request.get("screenname")
		if screenname:
			if screenname != profile.screenname:
				if db.Query(models.Profile).filter("screenname =", screenname).get():
					self.misc_render("user_profile.html", message="This screen name is taken.", form_profile=profile, username=username); return
			profile.screenname = screenname
		if self.request.get("realname"):
			profile.realname = self.request.get("realname")
		if self.request.get("is_member"):
			profile.is_member = True
		else:
			profile.is_member = False
		if self.request.get("is_admin"):
			profile.is_admin = True
		else:
			profile.is_admin = False

		profile.put()
		self.redirect("/user/%s/profile" % username)
Code example #23
File: input.py  Project: apanda/splunk-sdk-python
def output(record):
    print_record(record)

    for k in sorted(record.keys()):
        if k.endswith("_str"): 
            continue # Ignore

        v = record[k]

        if v is None:
            continue # Ignore

        if isinstance(v, list):
            if len(v) == 0: continue
            v = ','.join([str(item) for item in v])

        # Field renames
        k = { 'source': "status_source" }.get(k, k)

        if isinstance(v, str):
            format = '%s="%s" '
            v = v.replace('"', "'")
        else:
            format = "%s=%r "
        result = format % (k, v)

        ingest.send(result)

    end = "\r\n---end-status---\r\n"
    try: 
        ingest.send(end)
    except Exception:
        error("There was an error with the TCP connection to Splunk.", 2)
Code example #24
File: placescore.py  Project: regan-sarwas/arc2osm
def validate(featureclass, quiet=False):

    """
    Checks if a feature class is suitable for uploading to Places.

    Requires arcpy and ArcGIS 10.x ArcView or better license

        Checks: geodatabase = good, shapefile = ok, others = fail
        geometry: polys, lines, point, no multipoints, patches, etc
        must have a spatial reference system
        geometryid (or alt) column = good, otherwise = ok
        if it has a placesid column it must be empty

    :rtype : basestring
    :param featureclass: The ArcGIS feature class to validate
    :param quiet: Turns off all messages
    :return: 'ok' if the feature class meets minimum requirements for upload
             'good' if the feature class is suitable for syncing
             anything else then the feature class should not be used
    """
    if not featureclass:
        if not quiet:
            utils.error("No feature class provided.")
        return 'no feature class'

    if not arcpy.Exists(featureclass):
        if not quiet:
            utils.error("Feature class not found.")
        return 'feature class not found'

    return 'ok'
Code example #25
File: command.py  Project: mkouhei/hatena2rest
def main():
    try:

        args = parse_options()
        f = args.__dict__.get('infile')
        if f.startswith('~'):
            infile = os.path.expanduser(f)
        else:
            infile = os.path.abspath(f)

        if args.__dict__.get('dstdir'):
            dstdir = args.__dict__.get('dstdir')
        else:
            # default: ~/tmp/hatena2rest/
            dstdir = None

        if args.__dict__.get('retrieve'):
            retrieve_image_flag = True
        else:
            retrieve_image_flag = False

        processing.xml2rest(infile, dstdir, retrieve_image_flag)

    except RuntimeError as e:
        utils.error(e)
        return
    except UnboundLocalError as e:
        utils.error(e)
        return
Code example #26
File: conf.py  Project: malmoore/splunk-sdk-python
    def run(self, command, opts):
        """Dispatch the given command & args."""
        handlers = {"create": self.create, "delete": self.delete, "list": self.list}
        handler = handlers.get(command, None)
        if handler is None:
            error("Unrecognized command: %s" % command, 2)
        handler(opts)
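
Hypothetical dispatch, assuming this method lives on the conf program object:

program.run("list", opts)          # dispatches to self.list(opts)
program.run("frobnicate", opts)    # error("Unrecognized command: frobnicate", 2)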
Code example #27
File: placescore.py  Project: regan-sarwas/arc2osm
def init4places(featureclass, quiet=False):

    """
    Sets up a Geodatabase feature class for syncing with Places.

    Adds a PlacesID column if it doesn't exist,
    turns on archiving if not already on.

    :rtype : bool
    :param featureclass: The ArcGIS feature class to validate
    :param quiet: Turns off all messages
    :return: True if successful, False otherwise

    """

    if not featureclass:
        if not quiet:
            utils.error("No feature class provided.")
        return False

    if not arcpy.Exists(featureclass):
        if not quiet:
            utils.error("Feature class not found.")
        return False

    return True
Code example #28
File: misc.py  Project: chmielu/tpm
	def get(self, key):
		profile = db.get(key)
		if profile.avatar:
			self.response.headers['Content-Type'] = "image/png"
			self.response.out.write(profile.avatar)
		else:
			error(self, 404); return
Code example #29
    def configure(self):
        if os.path.exists(
            os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
            utils.output("vtkdevide build already configured.")
            return
        
        if not os.path.exists(self.build_dir):
            os.mkdir(self.build_dir)

        cmake_params = "-DBUILD_SHARED_LIBS=ON " \
                       "-DBUILD_TESTING=OFF " \
                       "-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
                       "-DCMAKE_INSTALL_PREFIX=%s " \
                       "-DVTK_DIR=%s " \
                       "-DDCMTK_INCLUDE_PATH=%s " \
                       "-DDCMTK_LIB_PATH=%s " \
                       "-DPYTHON_EXECUTABLE=%s " \
                       "-DPYTHON_LIBRARY=%s " \
                       "-DPYTHON_INCLUDE_PATH=%s" % \
                       (self.inst_dir, config.VTK_DIR,
                        config.DCMTK_INCLUDE, config.DCMTK_LIB,
                        config.PYTHON_EXECUTABLE,
                        config.PYTHON_LIBRARY,
                        config.PYTHON_INCLUDE_PATH)

        ret = utils.cmake_command(self.build_dir, self.source_dir,
                cmake_params)

        if ret != 0:
            utils.error("Could not configure vtkdevide.  Fix and try again.")
Code example #30
def checkFileSizes(files):
    for fn, fs in files:
        size = None
        with open(fn, "rb") as f:
            size = len(f.read())
        if fs != size:
            error("Files", "Incorect file size " + fn + ": " + str(size))
Code example #31
    def set_component_outputs(self):
        error("Attempted to set outputs via abstract function.")
Code example #32
File: test.py  Project: fran757/pertes
pca = PCA(n_components=m, whiten=True)

X_train_pca = pca.fit_transform(X_train_s)
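# note: validation/test sets should be projected with transform(), not
# fit_transform(), so the PCA basis is fitted on the training data only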
X_val_pca = pca.transform(X_val_s)
X_test_pca = pca.transform(X_test_s)

# Testing Linear_regression
# With normalized data

print("Linear regression with normalized data")
LR = LinearRegression(lamb=0.1, delta=0.000015)
LR.fit(X_train_n, Y_train, epochs=1000, Visual=True)
Y_pred_1 = LR.predict(X_val_n)
Y_pred_2 = LR.predict(X_test_n)
error(Y_pred_1, Y_val, "val")
error(Y_pred_2, Y_test, "test")
#compare(Y_val, Y_pred)
# With standardized data

print("Linear regression with standardized data")
LR = LinearRegression(lamb=0.1, delta=0.00001)
LR.fit(X_train_s, Y_train, epochs=1000, Visual=True)
Y_pred_1 = LR.predict(X_val_s)
Y_pred_2 = LR.predict(X_test_s)
error(Y_pred_1, Y_val, "val")
error(Y_pred_2, Y_test, "test")
compare(Y_test, Y_pred_2, 2018)

# With standardized and orthogonalized data
Code example #33
def main():

    if len(sys.argv) < 2:
        usage()

    else:
        start_time = time.time()

        rpad = 60
        rpad_char = '+'

        # this is the default list of install packages
        #
        # you can override this by:
        # - specifying packages on the johannes command line
        # - specifying packages in the working dir johannes.py
        # (command line has preference over config file)
        #
        # capitalisation has to match the capitalisation of your
        # install package class, name of install package module is
        # exactly that, but all lower case, so e.g. MyModule will
        # become: install_packages.ip_mymodule.MyModule()
        #
        # johannes will:
        # - attempt to import the ip_name from install_packages
        # - instantiate ip_name.Name
        #
        ip_names = [
            'pip', 'NumPy', 'WXPython', 'matplotlib', 'CMake', 'DCMTK',
            'VTK56', 'IPython', 'VTKTUDOSS', 'ITK', 'SWIG', 'CableSwig',
            'WrapITK', 'ItkVtkGlue', 'itkPyBuffer', 'ITKTUDOSS', 'GDCM',
            'DeVIDE', 'VTKDEVIDE', 'SetupEnvironment'
        ]

        try:
            optlist, args = getopt.getopt(sys.argv[1:], 'hm:p:dw:vt:', [
                'help', 'mode=', 'install-packages=', 'auto-deps',
                'working-dir=', 'profile=', 'no-prereq-check', 'versions',
                'target='
            ])

        except getopt.GetoptError, e:
            usage()
            return

        mode = 'everything'
        #ip_names = None
        working_dir = None
        profile = 'default'
        no_prereq_check = False
        ip_names_cli = False
        auto_deps = False
        target = None

        for o, a in optlist:
            if o in ('-h', '--help'):
                usage()
                return

            elif o in ('-m', '--mode'):
                if a in ('clean', 'clean_build'):
                    mode = 'clean_build'
                else:
                    mode = a

            elif o in ('-p', '--install-packages'):
                # list of package name to perform the action on
                ip_names = [i.strip() for i in a.split(',')]
                # remember that the user has specified ip_names on the command-line
                ip_names_cli = True

            elif o in ('-d', '--auto-deps'):
                auto_deps = True

            elif o in ('-w', '--working-dir'):
                working_dir = a

            elif o in ('--profile',):
                profile = a

            elif o in ('--no-prereq-check',):
                no_prereq_check = True

            elif o in ('-v', '--versions'):
                mode = 'show_versions'

            elif o in ('-t', '--target'):
                target = a

        # we need at LEAST a working directory
        if not working_dir:
            usage()
            return

        # init config (DURR)
        config.init(working_dir, profile)

        # set some variables we'll need to check later depending on
        # the configuration
        ip_dirs = []
        # now try to read johannes config file from the working dir
        cp = ConfigParser.ConfigParser()
        # returns list of filenames successfully parsed
        cfgfns = cp.read(os.path.join(working_dir, 'johannes.cfg'))
        if cfgfns and cp.has_section('default'):
            if not ip_names_cli:
                # first packages that need to be installed
                # we only do this if the user has NOT specified install
                # packages on the command line.
                try:
                    ip_names = [
                        i.strip()
                        for i in cp.get('default', 'packages').split(',')
                    ]
                except NoOptionError:
                    pass

            # also try to read extra install package paths
            # this is also a comma separated list
            try:
                ip_dirs = [
                    i.strip() for i in cp.get('default', 'ip_dirs').split(',')
                ]
            except NoOptionError:
                pass

        # if user is asking for versions, we don't do the
        # prerequisites check as we're not going to build anything
        if mode == 'show_versions':
            no_prereq_check = True

        if os.name == 'nt' and not no_prereq_check:
            if not windows_prereq_check(working_dir):
                utils.output(
                    'Windows prerequisites do not check out.  '
                    'Fix and try again.', 70, '-')
                return
            else:
                utils.output('Windows prerequisites all good.', 70, '-')

        elif os.name == 'posix' and not no_prereq_check:
            if not posix_prereq_check(working_dir):
                utils.output(
                    'Posix prerequisites do not check out.  '
                    'Fix and try again.', 70, '-')
                return
            else:
                utils.output('Posix prerequisites all good.', 70, '-')

        # In case of a target, check whether the target is actually specified
        # in the ip list (does not check dependencies in case of auto-deps)
        if target is not None:
            if target not in ip_names:
                utils.error(
                    "Target '%s' was not found in the install package list." %
                    target)

        # we're going to do some imports, so let's set the sys.path
        # correctly.
        # 1. first the default install packages dir config.ip_dir
        sys.path.insert(0, config.ip_dir)

        # 2. insert the extra specified paths BEFORE that, so they get
        # preference
        for uip_dir in ip_dirs:
            sys.path.insert(0, uip_dir)

        # now import only the specified packages
        ip_instance_list = []
        imported_names = []

        def import_ip(ip_name):
            # don't import more than once
            if ip_name in imported_names:
                return

            # turn Name into ip_name
            ip_name_l = 'ip_' + ip_name.lower()
            # import the module, but don't instantiate the ip class yet
            ip_m = __import__(ip_name_l)

            # import dependencies first if user has specified
            # auto-deps.
            if auto_deps:
                for dep in ip_m.dependencies:
                    import_ip(dep)

            # instantiate ip_name.Name
            ip = getattr(ip_m, ip_name)()
            ip_instance_list.append(ip)
            imported_names.append(ip_name)
            print "%s imported from %s." % \
                    (ip_name, ip_m.__file__)

        # import all ip_names, including dependencies
        for ip_name in ip_names:
            import_ip(ip_name)

        # now check for dependencies and error if necessary
        # (in the case of auto_deps this will obviously be fine)
        deps_errors = []
        for ip in ip_instance_list:
            n = ip.__class__.__name__
            # there must be a more elegant way to get the module instance?
            deps = sys.modules[ip.__module__].dependencies
            for d in deps:
                # remember that if a package asks for "VTK", "VTK561" is also fine
                d_satisfied = False
                for ip_name in imported_names:
                    if ip_name.startswith(d):
                        d_satisfied = True
                        # we don't have to finish more loops
                        break

                    elif ip_name == n:
                        # this means we have reached the module whose deps
                        # we're checking without satisfying dependency d,
                        # which also means dependency problems, so we
                        # can jut break out of the for loop
                        break

                if not d_satisfied:
                    deps_errors.append(
                        '>>>>> Unsatisfied dependency: %s should be specified before %s'
                        % (d, n))

        if deps_errors:
            print "\n".join(deps_errors)
            utils.error("Unsatisfied dependencies. Fix and try again.")

        def get_stage(ip, n):
            utils.output("%s :: get()" % (n, ), rpad, rpad_char)
            ip.get()

        def unpack_stage(ip, n):
            utils.output("%s :: unpack()" % (n, ), rpad, rpad_char)
            ip.unpack()

        def configure_stage(ip, n):
            utils.output("%s :: configure()" % (n, ), rpad, rpad_char)
            ip.configure()

        def build_stage(ip, n):
            utils.output("%s :: build()" % (n, ), rpad, rpad_char)
            ip.build()

        def install_stage(ip, n):
            utils.output("%s :: install()" % (n, ), rpad, rpad_char)
            ip.install()

        def all_stages(ip, n):
            get_stage(ip, n)

            unpack_stage(ip, n)

            configure_stage(ip, n)

            build_stage(ip, n)

            install_stage(ip, n)

        if mode == 'show_versions':
            utils.output('Extracting all install_package versions.')
            print "python: %d.%d.%d (%s)" % \
                    (sys.version_info[0:3] +
                            (config.PYTHON_EXECUTABLE,))

        for ip in ip_instance_list:
            n = ip.__class__.__name__

            if n not in ip_names:
                # n is a dependency, so do everything
                utils.output("%s (DEPENDENCY)" % (n, ), 70, '#')
                all_stages(ip, n)

            elif target is not None and target != n:
                # A target has been specified (but this ip is not it),
                # so do everything for all other install packages we encounter.
                utils.output("%s (NON-TARGET)" % (n, ), 70, '#')
                all_stages(ip, n)

            elif mode == 'get_only':
                utils.output("%s GET_ONLY" % (n, ), 70, '#')
                utils.output("%s" % (n, ), 70, '#')
                get_stage(ip, n)

            elif mode == 'unpack_only':
                utils.output("%s UNPACK_ONLY" % (n, ), 70, '#')
                utils.output("%s" % (n, ), 70, '#')
                unpack_stage(ip, n)

            elif mode == 'configure_only':
                utils.output("%s CONFIGURE_ONLY" % (n, ), 70, '#')
                utils.output("%s" % (n, ), 70, '#')
                configure_stage(ip, n)

            elif mode == 'everything':
                utils.output("%s" % (n, ), 70, '#')
                all_stages(ip, n)

            elif mode == 'clean_build':
                utils.output("%s CLEAN_BUILD" % (n, ), 70, '#')
                ip.clean_build()

            elif mode == 'show_versions':
                print '%s: %s' % (n, ip.get_installed_version())

            elif mode == 'rebuild':
                utils.output("%s REBUILD" % (n, ), 70, '#')
                # clean up
                ip.clean_build()
                # rebuild (executes all stages, as previous
                # stages are required and user will likely
                # need an install also)
                all_stages(ip, n)

            elif mode == 'reinstall':
                utils.output("%s REINSTALL" % (n, ), 70, '#')
                # clean up
                ip.clean_install()
                # reinstall
                all_stages(ip, n)

            else:
                utils.output("%s CUSTOM MODE" % (n, ), 70, '#')
                if hasattr(ip, mode):
                    utils.output("%s :: %s()" % (n, mode), rpad, rpad_char)
                    getattr(ip, mode)()
                else:
                    utils.error("Mode not found: %s" % (mode, ))

        if mode != 'show_versions':
            # Print elapsed time and final message
            t = time.time() - start_time
            utils.output("Execution time (h:mm:ss): %d:%02d:%02d" %
                         (int(t / 3600), int((t % 3600) / 60), t % 60))
            utils.output("Now please read the RESULTS section of README.txt!")
Code example #34
def do_stat_sig_testing(methods,
                        measures,
                        label_aggregations,
                        configs,
                        results,
                        limit_variables=None,
                        run_mode="run"):
    testable_variables = list(configs[0].ddict.keys())
    if limit_variables:
        testable_variables = [
            x for x in testable_variables if x in limit_variables
        ]
    info("Running statistical tests on{} variables: {}".format(
        " all" if limit_variables is None else " specified",
        testable_variables))
    for method, measure, label_aggregation in product(methods, measures,
                                                      label_aggregations):
        info("Running statistical testing via {} on {} {}".format(
            method, label_aggregation, measure))
        # get ids and variable values per configuration
        df_inputs = []
        try:
            for run_id in results:
                # find corresp. configuration
                conf = [c for c in configs if c.id == run_id]
                error(
                    "Num configurations found with id: {} is: {} during stat-testing!"
                    .format(run_id, len(conf)),
                    len(conf) != 1)
                conf = conf[0]
                # get variables
                df_row = {k: v for (k, v) in conf.ddict.items() if k != "id"}
                for score in results[run_id][run_mode][measure][
                        label_aggregation]['folds']:
                    df_row["score"] = score
                    df_inputs.append(deepcopy(df_row))
        except Exception:
            warning("Encountered invalid results accessors: {}".format(
                (run_mode, measure, label_aggregation)))
            continue
        data = pd.DataFrame(df_inputs)
        inst = instantiator.Instantiator()
        stat_test = inst.create(method)

        for v, variable in enumerate(testable_variables):
            info("Experiment variable {}/{}: {}".format(
                v + 1, len(testable_variables), variable))
            if limit_variables is not None:
                if variable not in limit_variables:
                    continue
            if len(data[variable]) == len(set(data[variable])):
                warning(
                    "Skipping testing for parameter [{}] due to having only 1 observation per value"
                    .format(variable))
                continue
            if len(set(data[variable])) == 1:
                warning(
                    "Skipping testing for parameter [{}] due to having only 1 unique parameter value: {}"
                    .format(variable, data[variable].values[0]))
                continue
            stat_result = stat_test.run(data["score"], data[variable])
            stat_test.report()
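
Note that this project calls error() as error(message, condition) and expects it to fire only when the condition holds (see the calls above and in example #35). A minimal sketch of such a helper, purely as an assumption about the unshown utility module:

def error(msg, condition=True):
    # hypothetical reconstruction: raise only when the (optional)
    # condition is truthy
    if condition:
        raise RuntimeError(msg)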
Code example #35
def main(input_path,
         only_report=False,
         force_dir=False,
         no_config_check=False,
         restart=False,
         is_testing_run=False,
         manual_config_tag=None):
    # settable parameters
    ############################################################

    email = "*****@*****.**"
    passw = None

    ############################################################

    # set the experiment parameters
    error("Non-existent input path: {} ".format(input_path),
          not exists(input_path))
    if isdir(input_path):
        # assume a single .yml file in the directory
        ymls = [
            x for x in listdir(input_path) if any(
                x.endswith(suff) for suff in [".yaml", ".yml"])
        ]
        error(
            "Input path {} is a directory with no yaml configuration files.".
            format(input_path), not ymls)
        error(
            "Input path {} is a directory with more than one yaml configuration file."
            .format(input_path),
            len(ymls) > 1)
        config_file = join(input_path, ymls[0])
    else:
        config_file = input_path

    # if input file is existing csv scores, just print them
    if config_file.endswith(".csv"):
        print_existing_csv_results(config_file)
        return

    conf = read_ordered_yaml(config_file)

    try:
        exps = conf[EXPERIMENTS_KEY_NAME]
    except KeyError:
        error(
            f"Need an [{EXPERIMENTS_KEY_NAME}] key for large-scale experiments."
        )

    # folder to run experiments in
    run_dir = exps["run_folder"]
    if force_dir:
        warning(
            "Overriding experiment folder from yml value: {} to current dir: {}, due to force-dir"
            .format(run_dir, dirname(run_dir)))
        run_dir = dirname(input_path)
    if not isabs(run_dir):
        run_dir = join(os.getcwd(), run_dir)

    # dir checks
    # ----------
    # virtualenv folder
    venv_dir = conf[EXPERIMENTS_KEY_NAME]["venv"] if "venv" in conf[
        EXPERIMENTS_KEY_NAME] else None
    # results csv file
    # results_file = conf["experiments"]["results_file"]
    results_file = join(run_dir, "run_results.csv")

    if venv_dir and not exists(venv_dir):
        error("Virtualenv dir {} not found".format(venv_dir))
    if not exists(run_dir):
        info("Run dir {} not found, creating.".format(run_dir))
        makedirs(run_dir)
    else:
        error(
            "Specified a non-dir path as the running directory: {}".format(
                run_dir), not isdir(run_dir))
        if restart:
            warning(
                "Specified restart, and experiment dir {} exists. Deleting!".format(run_dir))
            rmtree(run_dir)
            makedirs(run_dir)

    # logging
    os.makedirs(run_dir, exist_ok=True)
    setup_simple_logging(conf["print"]["log_level"], logging_dir=run_dir)

    info("Generating configurations from source file {}".format(config_file))

    # evaluation measures
    try:
        eval_measures = as_list(exps["measures"]) if "measures" in exps else [
            "f1-score", "accuracy"
        ]
        print(eval_measures)
        aggr_measures = as_list(exps["label_aggregation"]) if "label_aggregation" in exps \
            else ["macro", "micro"]
        stat_functions = as_list(
            exps["fold_aggregation"]) if "fold_aggregation" in exps else [
                "mean"
            ]
        run_types = as_list(
            exps["run_types"]) if "run_types" in exps else ["run"]
        do_sstests = "sstests" in exps
        if not do_sstests:
            warning("No statistical tests specified.")
        else:
            sstests = ["tukeyhsd"
                       ] if "names" not in exps["sstests"] else as_list(
                           exps["sstests"]["names"])
            sstests_measures = [
                "f1-score"
            ] if "measures" not in exps["sstests"] else as_list(
                exps["sstests"]["measures"])
            sstests_aggregations = [
                "macro"
            ] if "aggregations" not in exps["sstests"] else as_list(
                exps["sstests"]["aggregations"])
            sstests_limit_vars = None if "limit_variables" not in exps[
                "sstests"] else as_list(exps["sstests"]["limit_variables"])
    except Exception as ex:
        error(
            "Failed to read evaluation / testing options due to: [{}]".format(
                ex))

    # folder where run scripts are
    sources_dir = exps["sources_dir"] if "sources_dir" in exps else os.getcwd()
    warning("Defaulting sources folder to the current directory: {}".format(
        sources_dir))
    error(
        "Main module: {} not found. Is the sources dir ok?".format(
            join(sources_dir, "main.py")),
        not exists(join(sources_dir, "main.py")))

    configs = make_configs(conf, run_dir, sources_dir)
    # check run id uniqueness
    if len(set([c.id for c in configs])) != len(configs):
        error("Duplicate run folders from the input: {}".format(
            [c.id for c in configs]))
    if len(set([c['folders']['run'] for c in configs])) != len(configs):
        error("Duplicate run folders from the input: {}".format(
            [c["folders"]["run"] for c in configs]))
    # if we're running a testing suite, filter out incompatible configs
    if is_testing_run:
        configs = filter_testing(configs, config_file)

    # mail
    do_send_mail = exps["send_mail"] if "send_mail" in exps else None
    if do_send_mail:
        passw = getpass.getpass()

    # copy the experiments configuration file in the target directory
    experiments_conf_path = join(run_dir, basename(config_file))
    if exists(experiments_conf_path):
        # make sure it's the same effing config, unless check is overriden
        if not no_config_check:
            config_to_copy = OrderedDict(
                {k: v
                 for (k, v) in conf.items() if k != EXPERIMENTS_KEY_NAME})
            existing_exp_conf = read_ordered_yaml(experiments_conf_path)
            existing_exp_conf = OrderedDict({
                k: v
                for (k, v) in existing_exp_conf.items()
                if k != EXPERIMENTS_KEY_NAME
            })
            equal, diff = compare_dicts(config_to_copy, existing_exp_conf)
            if not equal:
                error(
                    "The workflow contents derived from the original config [{}] differ from the ones in the experiment directory: [{}]!\nDifference is: {}"
                    .format(config_file, experiments_conf_path, diff))
    else:
        if not only_report:
            info("Copying experiments configuration at {}".format(
                experiments_conf_path))
            with open(experiments_conf_path, "w") as f:
                write_ordered_dump(OrderedDict(conf), f)
        else:
            info(
                "Only-report run: will not copy experiment configuration at {}"
                .format(experiments_conf_path))

    results, result_paths = {}, {}

    #################################################################################
    skipped_configs = []

    # prelim experiments
    for conf_index, conf in enumerate(configs):
        run_id = conf.id
        # prepend a configuration id tag, if supplied
        if manual_config_tag is not None:
            run_id += manual_config_tag
            experiment_dir = conf["folders"]["run"] + manual_config_tag
        else:
            experiment_dir = conf["folders"]["run"]
        info("Running experimens for configuration {}/{}: {}".format(
            conf_index + 1, len(configs), run_id))
        completed_file = join(experiment_dir, "completed")
        error_file = join(experiment_dir, "error")
        # results to run folders, if not specified otherwise
        respath = join(experiment_dir, "results")
        if not isabs(respath):
            conf["folders"]["results"] = join(experiment_dir, respath)

        if exists(completed_file):
            info("Skipping completed experiment {}".format(run_id))
        elif only_report:
            info("Only-report execution: skipping non-completed experiment {}".
                 format(run_id))
            skipped_configs.append(run_id)
            continue
        else:
            # run it
            if exists(error_file):
                os.remove(error_file)
            makedirs(experiment_dir, exist_ok=True)

            conf_path = join(experiment_dir, "config.yml")
            if exists(conf_path) and not no_config_check:
                warning("Configuration file at {} already exists!".format(
                    conf_path))
                existing = read_ordered_yaml(conf_path)
                equal, diff = compare_dicts(existing, conf)
                if not equal:
                    error(
                        "Different local config encountered: {} \nDifference: {}"
                        .format(conf_path, diff))
                #if not (OrderedDict(conf) == existing):
                #    error("Different local config encountered at {}".format(conf_path))
            else:
                with open(conf_path, "w") as f:
                    write_ordered_dump(OrderedDict(conf), f)
            info("Configuration file: {}".format(conf_path))
            # write the run script file
            script_path = join(experiment_dir, "run.sh")
            with open(script_path, "w") as f:
                if venv_dir:
                    f.write("source \"{}/bin/activate\"".format(venv_dir))
                f.write("cd \"{}\"\n".format(sources_dir))
                f.write(
                    "python3 \"{}\" \"{}\" && touch \"{}\" && exit 0\n".format(
                        join(sources_dir, "main.py"), conf_path,
                        completed_file))
                f.write("touch '{}' && exit 1\n".format(error_file))

            subprocess.run(["/usr/bin/env", "bash", script_path])
            if exists(error_file):
                print("An error has occurred in the run, exiting.")
                info("An error has occurred in the run, exiting.")
                if do_send_mail:
                    sendmail(email, passw, "an error occurred")
                exit(1)
        # read experiment results
        exp_res_file = join(experiment_dir, "results", "results.pkl")
        with open(exp_res_file, "rb") as f:
            res_data = pickle.load(f)
        results[run_id] = res_data
        result_paths[run_id] = exp_res_file

    # messages = []
    total_results = {}

    # show results
    for stat in stat_functions:
        info("Results regarding {} statistic:".format(stat))
        print_vals = {}
        for run_id in results:
            print_vals[run_id] = {}
            for m in eval_measures:
                for run in run_types:
                    for ag in aggr_measures:
                        try:
                            results[run_id][run][m][ag]
                        except KeyError:
                            continue
                        header = "{}.{}.{}.{}".format(run[:3], m[:3], ag[:3],
                                                      stat)

                        if stat in "var mean std".split():
                            val = results[run_id][run][m][ag][stat]
                        if val is None:
                            continue
                        val = round(val, decimals=4)
                        print_vals[run_id][header] = val
        # print'em
        info("SCORES:")
        print_dataframe_results(print_vals)

        total_results[stat] = print_vals
    info("Writing these results to file {}".format(results_file))
    total_df = pd.DataFrame.from_dict(total_results, orient='index')
    if total_df.size == 0:
        info("No results parsed.")
    else:
        total_df.to_csv(results_file)

    if skipped_configs:
        for s, sk in enumerate(skipped_configs):
            info("Skipped incomplete config: {}/{} : {}".format(
                s + 1, len(skipped_configs), sk))

    if do_sstests:
        do_stat_sig_testing(sstests, sstests_measures, sstests_aggregations,
                            configs, results, sstests_limit_vars)

    # [info(msg) for msg in messages]
    if do_send_mail:
        sendmail(email, passw, "run complete.")
Code example #36
def validate(chplLocaleModel, chplComm):
    if chplLocaleModel == 'gpu' and chplComm != "none":
        error("The prototype GPU support does not work when CHPL_COMM is not set to\n\"none\".");
Code example #37
    def produce_outputs(self):
        error("Attempted to produce outputs via abstract function.")
Code example #38
def main():
    global options, device

    # Get the ENV context
    script_dir = os.path.dirname(__file__)
    env = os.environ.copy()

    # Set the input folder
    input_dir = os.path.expanduser(options.input_dir) if options.input_dir \
        else os.path.join(script_dir, '..', 'data')
    vgg_path = os.path.join(input_dir, 'vgg', 'imagenet-vgg-verydeep-19.mat')
    coco_dir = os.path.join(input_dir, 'train')
    if not os.path.isdir(input_dir):
        fail('Failed to find the input folder at ' + input_dir)
    if not os.path.isfile(vgg_path):
        error('Failed to find the VGG model file at ' + vgg_path)
        fail(
            'Please download it from http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
        )
    if not os.path.isdir(coco_dir):
        error('Failed to find the COCO 2014 training images in ' + coco_dir)
        fail(
            'Please download it from http://images.cocodataset.org/zips/train2014.zip'
        )

    # Set the output folder
    output_dir = os.path.expanduser(options.output_dir) if options.output_dir \
        else env.get('OUTPUT_DIR', os.path.join(script_dir, '..', 'output'))
    model_dir = os.path.join(output_dir, 'checkpoint')
    if not os.path.isdir(model_dir):
        info('Creating a folder to store checkpoint at ' + model_dir)
        os.makedirs(model_dir)

    # Set the TensorBoard folder
    log_dir = os.path.expanduser(options.log_dir) if options.log_dir \
        else env.get('LOG_DIR', os.path.join(script_dir, '..', 'log'))
    if not os.path.isdir(log_dir):
        info('Creating a folder to store TensorBoard events at ' + log_dir)
        os.makedirs(log_dir)

    # Set the style image path
    style_path = os.path.expanduser(options.style_image) if os.path.isfile(options.style_image) \
        else os.path.join(input_dir, 'style_images', options.style_image)
    style_name = os.path.basename(os.path.splitext(style_path)[0])
    ckpt_path = os.path.join(model_dir, style_name + '.ckpt')
    if not os.path.isfile(style_path):
        fail('Failed to find the style image at ' + style_path)

    # Set hyper parameters
    batch_size = options.batch_size
    epochs = options.epoch
    lr = options.lr
    lambda_tv = options.lambda_tv
    lambda_feat = options.lambda_feat
    lambda_style = options.lambda_style

    # Print parsed arguments
    info('--------- Training parameters -------->')
    info('Style image path: ' + style_path)
    info('VGG model path: ' + vgg_path)
    info('Training image dir: ' + coco_dir)
    info('Checkpoint path: ' + ckpt_path)
    info('TensorBoard log dir: ' + log_dir)
    info('Training device: ' + device)
    info('Batch size: %d' % batch_size)
    info('Epoch count: %d' % epochs)
    info('Learning rate: ' + str(lr))
    info('Lambda tv: ' + str(lambda_tv))
    info('Lambda feat: ' + str(lambda_feat))
    info('Lambda style: ' + str(lambda_style))
    info('<-------- Training parameters ---------')

    # COCO images to train
    content_targets = list_jpgs(coco_dir)
    if len(content_targets) % batch_size != 0:
        content_targets = content_targets[:-(len(content_targets) %
                                             batch_size)]
    info('Total training data size: %d' % len(content_targets))

    # Image shape
    image_shape = (224, 224, 3)
    batch_shape = (batch_size, ) + image_shape

    # Style target
    style_target = read_img(style_path)
    style_shape = (1, ) + style_target.shape

    with tf.device(device), tf.Session() as sess:
        # Compute gram matrix of style target
        style_image = tf.placeholder(tf.float32,
                                     shape=style_shape,
                                     name='style_image')
        vggstyletarget = vgg.net(vgg_path, vgg.preprocess(style_image))
        style_vgg = vgg.get_style_vgg(vggstyletarget, style_image,
                                      np.array([style_target]))

        # Content target feature
        content_vgg = {}
        inputs = tf.placeholder(tf.float32, shape=batch_shape, name='inputs')
        content_net = vgg.net(vgg_path, vgg.preprocess(inputs))
        content_vgg['relu4_2'] = content_net['relu4_2']

        # Feature after transformation
        outputs = stylenet.net(inputs / 255.0)
        vggoutputs = vgg.net(vgg_path, vgg.preprocess(outputs))

        # Compute feature loss
        loss_f = lambda_feat * vgg.total_content_loss(
            vggoutputs, content_vgg, batch_size)

        # Compute style loss
        loss_s = lambda_style * vgg.total_style_loss(
            vggoutputs, style_vgg, batch_size)

        # Total variation denoising
        loss_tv = lambda_tv * vgg.total_variation_regularization(
            outputs, batch_size, batch_shape)

        # Total loss
        total_loss = loss_f + loss_s + loss_tv
        train_step = tf.train.AdamOptimizer(lr).minimize(total_loss)

        # Create summary
        tf.summary.scalar('loss', total_loss)
        merged = tf.summary.merge_all()

        # Used to save model
        saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore checkpoint if available
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            info('Restoring from ' + ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

        # Write the graph
        writer = tf.summary.FileWriter(log_dir, sess.graph)

        # Start to train
        total_step = 0
        for epoch in range(epochs):
            info('epoch: %d' % epoch)
            step = 0
            while step * batch_size < len(content_targets):
                time_start = time.time()

                # Load one batch
                batch = np.zeros(batch_shape, dtype=np.float32)
                for i, img in enumerate(
                        content_targets[step * batch_size:(step + 1) *
                                        batch_size]):
                    batch[i] = read_img(img, image_shape).astype(
                        np.float32)  # (224,224,3)

                # Proceed one step
                step += 1
                total_step += 1
                _, loss, summary = sess.run([train_step, total_loss, merged],
                                            feed_dict={inputs: batch})

                time_elapse = time.time() - time_start
                if total_step % 5 == 0:
                    info('[step {}] elapse time: {} loss: {}'.format(
                        total_step, time_elapse, loss))
                    writer.add_summary(summary, total_step)

                # Write checkpoint
                if total_step % 2000 == 0:
                    info('Saving checkpoint to ' + ckpt_path)
                    saver.save(sess, ckpt_path, global_step=total_step)

        info('Saving final checkpoint to ' + ckpt_path)
        saver.save(sess, ckpt_path, global_step=total_step)
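
The style loss above is built on Gram matrices of the VGG feature maps (computed inside `vgg.get_style_vgg` and `vgg.total_style_loss`, which are not shown here). As a minimal NumPy sketch of the underlying computation, with the normalization convention as an assumption:

import numpy as np

def gram_matrix(features):
    """Channel co-activation matrix of one (H, W, C) feature map."""
    h, w, c = features.shape
    f = features.reshape(h * w, c)
    # (C, C) Gram matrix; dividing by the map size keeps the style
    # loss scale independent of the image resolution (an assumption
    # about the normalization used in vgg.total_style_loss)
    return f.T @ f / (h * w * c)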
Code example #39
0
def check_eq(value1, value2, msg):
    if value1 != value2:
        error(msg,
              'Count mismatch: expected %d, but got %d' % (value2, value1))
Code example #40
0
 def get_component_inputs(self):
     error("Attempted to get inputs via abstract function.")
Code example #41
0
def main_denoising(wav_files, output_dir, verbose=False, **kwargs):
    """Perform speech enhancement for WAV files in ``wav_dir``.

    Parameters
    ----------
    wav_files : list of str
        Paths to WAV files to enhance.

    output_dir : str
        Path to output directory for enhanced WAV files.

    verbose : bool, optional
        If True, print full stacktrace to STDERR for files with errors.

    kwargs
        Keyword arguments to pass to ``denoise_wav``.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Load global MVN statistics.
    global_mean_var = sio.loadmat(GLOBAL_MEAN_VAR_MATF)
    global_mean = global_mean_var['global_mean']
    global_var = global_mean_var['global_var']

    # Perform speech enhancement.
    for src_wav_file in wav_files:
        # Perform basic checks of input WAV.
        if not os.path.exists(src_wav_file):
            utils.error('File "%s" does not exist. Skipping.' % src_wav_file)
            continue
        if not utils.is_wav(src_wav_file):
            utils.error('File "%s" is not WAV. Skipping.' % src_wav_file)
            continue
        if utils.get_sr(src_wav_file) != SR:
            utils.error('Sample rate of file "%s" is not %d Hz. Skipping.' %
                        (src_wav_file, SR))
            continue
        if utils.get_num_channels(src_wav_file) != NUM_CHANNELS:
            utils.error('File "%s" is not monochannel. Skipping.' %
                        src_wav_file)
            continue
        if utils.get_bitdepth(src_wav_file) != BITDEPTH:
            utils.error('Bitdepth of file "%s" is not %d. Skipping.' %
                        (src_wav_file, BITDEPTH))
            continue

        # Denoise.
        try:
            bn = os.path.basename(src_wav_file)
            dest_wav_file = os.path.join(output_dir, bn)
            denoise_wav(src_wav_file, dest_wav_file, global_mean, global_var,
                        **kwargs)
            print('Finished processing file "%s".' % src_wav_file)
        except Exception as e:
            msg = 'Problem encountered while processing file "%s". Skipping.' % src_wav_file
            if verbose:
                msg = '%s Full error output:\n%s' % (msg, e)
            utils.error(msg)
            continue
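
A hypothetical driver for `main_denoising`; the glob pattern and output directory are illustrative only, and the module-level constants (`SR`, `NUM_CHANNELS`, `BITDEPTH`, `GLOBAL_MEAN_VAR_MATF`) are assumed to be defined as in the source module:

from glob import glob

# Illustrative invocation: enhance every WAV file under raw_audio/,
# writing results to enhanced_audio/ and printing full stack traces.
wav_files = sorted(glob('raw_audio/*.wav'))
main_denoising(wav_files, 'enhanced_audio', verbose=True)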
Code example #42
0
File: as-checkcli.py Project: deicherse/xdt99
def runtest():
    """check command line interface"""

    # input and output files
    source = os.path.join(Dirs.sources, "ashello.asm")
    with open(Files.output, "wb") as f:
        xas(source, "-R", "-o", "-", stdout=f)
    xdm(Disks.asmsrcs, "-e", "ASHELLO-O", "-o", Files.reference)
    check_obj_code_eq(Files.output, Files.reference)

    with open(Files.output, "wb") as f:
        xas(source, "-R", "-i", "-o", "-", stdout=f)
    xdm(Disks.asmsrcs, "-e", "ASHELLO-I", "-o", Files.reference)
    check_image_files_eq(Files.output, Files.reference)

    with open(Files.output, "w") as f:
        xas(source, "-R", "-o", Files.output, "-L", "-", stdout=f)
    xdm(Disks.asmsrcs, "-e", "ASHELLO-L", "-o", Files.reference)
    check_list_files_eq(Files.output, Files.reference)

    source = os.path.join(Dirs.sources, "nonexisting")
    with open(Files.error, "w") as ferr:
        xas(source, "-i", "-R", "-o", Files.output, stderr=ferr, rc=1)
    with open(Files.error, "r") as ferr:
        errs = ferr.readlines()
    if len(errs) != 1 or errs[0][:10] != "File error":
        error("File error", "Incorrect file error message")

    # include path
    source = os.path.join(Dirs.sources, "ascopyi.asm")
    incls = os.path.join(Dirs.sources, "test") + "," + \
        os.path.join(Dirs.sources, "test", "test")
    xas(source, "-i", "-I", incls, "-o", Files.output)
    with open(Files.output, "rb") as f:
        data = f.read()
    if len(data[6:]) != 20:
        error("Include paths", "Incorrect image length")

    # command-line definitions
    source = os.path.join(Dirs.sources, "asdef.asm")
    xas(source, "-b", "-D", "s1=1", "s3=3", "s2=4", "-o", Files.output)
    assert content(Files.output) == "\x01\x03"
    xas(source, "-b", "-D", "s1=2,s2=2,s3=3", "-o", Files.output)
    assert content(Files.output) == "\x02\x03"

    # various parameter combinations
    source = os.path.join(Dirs.sources, "asxbank1.asm")
    remove([Files.reference])
    xas(source, "-b", "-o", Files.output, "-L", Files.reference)
    check_exists([Files.reference])

    # text data output
    source = os.path.join(Dirs.sources, "ascart.asm")
    xas(source, "-b", "-R", "-o", Files.reference)
    xas(source, "-t", "a2", "-R", "-o", Files.output)
    check_bin_text_equal(Files.output, Files.reference)

    source = os.path.join(Dirs.sources, "asmtext.asm")
    xas(source, "-t", "a2", "-R", "-o", Files.output)
    check_instructions(Files.output,
                       [";aorg>1000", "byte", ";aorg>2000", "byte"])

    # symbols
    source = os.path.join(Dirs.sources, "assyms.asm")
    xas(source, "-b", "-R", "-o", Files.reference, "-E", Files.output)
    check_symbols(Files.output, (("START", ">0000"), ("S1", ">0001"),
                                 ("S2", ">0018"), ("VDPWA", ">8C02")))

    # disable warnings
    source = os.path.join(Dirs.sources, "aswarn.asm")
    with open(Files.error, "w") as ferr:
        xas(source, "-b", "-R", "-w", "-o", Files.output, stderr=ferr, rc=0)
    if content_len(Files.error) > 0:
        error("warn", "warnings, even though disabled")

    # cleanup
    os.remove(Files.output)
    os.remove(Files.reference)
    os.remove(Files.error)
Code example #43
0
def get(flag='host'):
    if flag == 'host':
        platform_val = overrides.get('CHPL_HOST_PLATFORM')
    elif flag == 'target':
        platform_val = overrides.get('CHPL_TARGET_PLATFORM')
        if not platform_val:
            platform_val = get('host')
    else:
        raise error("Invalid flag: '{0}'".format(flag), ValueError)

    if not platform_val:
        # Check for cray platform. It is a cray platform if there is a
        # cle-release/CLEinfo config file with a known network value in it.
        cle_info_file = os.path.abspath(
            '/etc/opt/cray/release/cle-release')  # CLE >= 6
        if not os.path.exists(cle_info_file):
            cle_info_file = os.path.abspath(
                '/etc/opt/cray/release/CLEinfo')  # CLE <= 5

        if os.path.exists(cle_info_file):
            with open(cle_info_file, 'r') as fp:
                cle_info = fp.read()
            net_pattern = re.compile('^NETWORK=(?P<net>[a-zA-Z]+)$',
                                     re.MULTILINE)
            net_match = net_pattern.search(cle_info)
            if net_match is not None and len(net_match.groups()) == 1:
                net = net_match.group('net')
                if net.lower() == 'ari':
                    platform_val = 'cray-xc'

    if not platform_val:
        network = os.environ.get('CRAYPE_NETWORK_TARGET', '')
        if network.startswith("slingshot") or network == "ofi":
            platform_val = 'hpe-cray-ex'

    if not platform_val:
        # uname() -> (system, node, release, version, machine, processor)
        uname = platform.uname()
        platform_val = uname[0].lower().replace('_', '')
        machine = uname[4]
        if platform_val == 'linux':
            if 'ppc' in machine:
                endianness = 'le' if 'le' in machine else ''
                bits = '64' if '64' in machine else '32'
                platform_val = 'linux_ppc_{0}{1}'.format(endianness, bits)
            elif machine == 'x86_64':
                build_64_as_32 = os.environ.get('CHPL_BUILD_X86_64_AS_32')
                if build_64_as_32 == "1":
                    platform_val = "linux64_32"
                else:
                    platform_val = "linux64"
            elif machine == 'aarch64':
                platform_val = "linux64"
            else:
                platform_val = "linux32"
        elif platform_val.startswith("cygwin"):
            if machine == 'x86_64':
                platform_val = "cygwin64"
            else:
                platform_val = "cygwin32"
        elif platform_val.startswith('netbsd'):
            if machine == 'amd64':
                platform_val = 'netbsd64'
            else:
                platform_val = 'netbsd32'

    return platform_val
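
A quick illustration of the fallback chain in `get`; the values shown assume an x86_64 Linux machine with no Cray markers and no overrides:

host = get('host')      # 'linux64', resolved via the uname() branch
target = get('target')  # falls back to get('host') when
                        # CHPL_TARGET_PLATFORM is not set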
Code example #44
0
def check_trunc(infile, lines, length):
    """check prefixes of lines"""
    with open(infile, 'r') as f:
        for i, inline in enumerate(f):
            if inline.rstrip() != lines[i].rstrip()[:length]:
                error('Truncated records', 'Record %d mismatch' % i)
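
A hypothetical call site for `check_trunc`, verifying that each line of a generated listing matches the first 40 characters of its reference line:

reference_lines = [
    '0001  AORG >A000        start of program',
    '0002  START LIMI 0      mask interrupts',
]
check_trunc('generated.lst', reference_lines, 40)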
Code example #45
0
 def configure_embedding(self):
     # incorporate embeddings in the neural architecture
     error("Attempted to access abstract embedding configuration function.")
Code example #46
0
def new(message):
    # logging.info('(' + message.qq + '): ' + message.text)

    qq_group_id = int(message.group)
    _, tg_group_id, forward_index = get_forward_index(qq_group_id=qq_group_id)

    text = message.text  # get message text

    # text, _ = cq_image_regex.subn('', text)   # clear CQ:image in text

    # replace special characters
    text = decode_cq_escape(text)

    text = cq_emoji_regex.sub(lambda x: chr(int(x.group(1))),
                              text)  # replace [CQ:emoji,id=*]
    text = cq_face_regex.sub(lambda x: qq_emoji_list[int(x.group(1))]
                             if int(x.group(1)) in qq_emoji_list else '\u2753',
                             text)  # replace [CQ:face,id=*]
    text = cq_bface_regex.sub('\u2753', text)  # replace bface to '?'
    text = cq_sface_regex.sub(
        lambda x: qq_sface_list[int(x.group(1)) & 255]
        if int(x.group(1)) > 100000 and int(x.group(1)) & 255 in qq_sface_list
        else '\u2753', text)  # replace [CQ:sface,id=*], https://cqp.cc/t/26206

    def replace_name(qq_number):  # replace each qq number with preset id
        qq_number = qq_number.group(1)
        if int(qq_number) == QQ_BOT_ID:
            return '@bot'
        result = '@' + get_qq_name(int(qq_number), forward_index)
        result = result.replace(':', ' ')
        return result

    text = CQAt.PATTERN.sub(replace_name, text)  # replace CQAt to @username

    # send pictures to Telegram group
    pic_send_mode = 2
    # mode = 0 -> direct mode: send cqlink to tg server
    # mode = 1 -> (deprecated) download mode: download to local, send local link to tg server
    # mode = 2 -> download mode: download to local, upload from disk to tg server
    message_parts = cq_image_simple_regex.split(text)
    message_parts_count = len(message_parts)
    image_num = message_parts_count - 1
    if image_num == 0:
        # send plain text message with bold group member name
        full_msg_bold = '<b>' + get_qq_name(int(
            message.qq), forward_index) + '</b>: ' + text.strip().replace(
                '<', '&lt;').replace('>', '&gt;')
        global_vars.tg_bot.sendMessage(tg_group_id,
                                       full_msg_bold,
                                       parse_mode='HTML')
    else:
        if message_parts[0]:
            part_msg_bold = '<b>' + get_qq_name(int(message.qq), forward_index) + '</b>: ' +\
                        '(1/' + str(message_parts_count) + ')' + message_parts[0].strip().replace('<', '&lt;').replace('>', '&gt;')
            global_vars.tg_bot.sendMessage(tg_group_id,
                                           part_msg_bold,
                                           parse_mode='HTML')
            part_index = 1
        else:
            message_parts.pop(0)
            message_parts_count -= 1
            part_index = 0
        for matches in CQImage.PATTERN.finditer(message.text):
            # replace QQ number to group member name, get full message text
            if message_parts_count == 1:
                part_msg = get_qq_name(
                    int(message.qq),
                    forward_index) + ': ' + message_parts[part_index].strip()
            else:
                part_msg = get_qq_name(int(
                    message.qq), forward_index) + ': ' + '(' + str(
                        part_index + 1) + '/' + str(
                            message_parts_count
                        ) + ')' + message_parts[part_index].strip()
            part_index += 1
            part_msg = decode_cq_escape(part_msg)
            filename = matches.group(1)
            url = cq_get_pic_url(filename)
            pic = url
            if pic_send_mode == 1:
                cq_download_pic(filename)
                pic = SERVER_PIC_URL + filename
            elif pic_send_mode == 2:
                cq_download_pic(filename)
                pic = open(os.path.join(CQ_IMAGE_ROOT, filename), 'rb')
            # gif pictures send as document
            if filename.lower().endswith('gif'):
                try:
                    global_vars.tg_bot.sendDocument(tg_group_id,
                                                    pic,
                                                    caption=part_msg)
                except BadRequest:
                    # when error occurs, download picture and send link instead
                    error(message)
                    traceback.print_exc()
                    if pic_send_mode == 0:
                        cq_download_pic(filename)
                    pic = get_short_url(SERVER_PIC_URL + filename)
                    global_vars.tg_bot.sendMessage(tg_group_id,
                                                   pic + '\n' + part_msg)

            # jpg/png pictures send as photo
            else:
                try:
                    # the first image in message attach full message text
                    global_vars.tg_bot.sendPhoto(tg_group_id,
                                                 pic,
                                                 caption=part_msg)
                except BadRequest:
                    # when error occurs, download picture and send link instead
                    error(message)
                    traceback.print_exc()
                    if pic_send_mode == 0:
                        cq_download_pic(filename)
                    my_url = get_short_url(SERVER_PIC_URL + filename)
                    pic = my_url
                    global_vars.tg_bot.sendMessage(tg_group_id,
                                                   pic + '\n' + part_msg)
    return True
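
The `cq_emoji_regex` substitution above maps a numeric id directly to a Unicode code point; a self-contained sketch of the same idea, with the regex itself an assumption modeled on the `[CQ:emoji,id=*]` token format:

import re

cq_emoji_regex = re.compile(r'\[CQ:emoji,id=(\d+)\]')

def replace_cq_emoji(text):
    # replace each [CQ:emoji,id=N] token with the character chr(N)
    return cq_emoji_regex.sub(lambda m: chr(int(m.group(1))), text)

print(replace_cq_emoji('hi [CQ:emoji,id=128516]'))  # hi 😄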
Code example #47
0
File: events.py Project: Gaeta/Delta
import discord, re, utils, sqlite3, traceback, sys

from discord.ext import commands
from datetime import datetime

try:
    from pyfiglet import print_figlet

except ImportError:
    utils.error("PyFiglet is not installed, run pip3 install pyfiglet.",
                "Events",
                terminate=True)

SLUR_REGEX = r"nigg(er|a)|f****t|tranny|crack(er|a)"
INVITE_REGEX = r"(https?:\/\/)?(www\.)?((discord|invite)\.(gg|io|me|li)|discordapp\.com\/invite)\/.+[a-z]"
NSFW_REGEX = r"(https?:\/\/)?(www\.)?(pornhub|redtube|youporn|tube8|pornmd|thumbzilla|modelhub)\.com"
LINK_REGEX = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
INV_VALID_REGEX = r"(https?:\/\/)?(www\.)?(discord\.gg|discordapp\.com\/invite)\/(.[^\s]+)"


class BasicEvents(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    def perm_cleanup(self, perm):
        return perm.replace("_", " ").title()

    @commands.Cog.listener()
    async def on_ready(self):
        print_figlet(self.bot.config.figlet)
Code example #48
0
    def produce_outputs(self):
        # get input configuration data
        self.topk = None
        self.messages = []
        self.input_parameters_dict = [dp for dp in self.data_pool.data if type(dp.data) == Dictionary][0].data.instances
        self.input_parameters = to_namedtuple(self.input_parameters_dict, "input_parameters")

        # get reference data by chain name output
        self.label_mapping = []
        for mapping in self.params.label_mappings:
            # read json
            if type(mapping) is str:
                try:
                    with open(mapping) as f:
                        mapping = json.load(f)
                except:
                    error("Requires json labelmapping or literal list")
            mapping_dict = {ix: val for (ix, val) in enumerate(mapping)}
            self.label_mapping.append(mapping_dict)


        # thresholding
        for th in self.params.thresholds:
            if th not in self.input_parameters_dict:
                self.result = {"results": [], "input_params": self.input_parameters_dict, "messages": [f"Threshold {th} missing from input parameters"]}
                return


        datapack = [x for x in self.data_pool.data if x.chain == self.params.data_chain][0]
        predictions, tagged_idx = [], []
        for i, chain_name in enumerate(self.params.pred_chains):
            # predictions
            chain_preds = [x for x in self.data_pool.data if x.chain == chain_name][0]
            predictions.append(chain_preds)


        # for text data, keep just the words
        if type(datapack.data) == Text:
            data = [x["words"] for x in datapack.data.instances]

        res = []
        
        predictions = [x.data.instances for x in predictions]
        num_all_ngrams = len(predictions[0])
        num_steps = len(predictions)

        # compute thresholding values
        thresholding = np.zeros((num_all_ngrams, len(self.params.thresholds)), bool)
        # for i, th in enumerate(self.params.thresholds):
        #     th_val = float(self.input_parameters_dict[th])


        thresholding[:, 0] = predictions[0][:, 1] > float(self.input_parameters_dict[self.params.thresholds[0]])
        thresholding[:, 1] = predictions[1][:, 1] > float(self.input_parameters_dict[self.params.thresholds[1]])
        thresholding[:, 2] = np.any(predictions[2] > float(self.input_parameters_dict[self.params.thresholds[2]]), axis=1)


        ngram_tags = sorted([x for x in datapack.usages[0].tags if x.startswith("ngram_inst")])

        with tictoc("Classification report building", announce=False):
            for n, ngram_tag in enumerate(ngram_tags):
                # indexes of the tokens for the current instance
                # to the entire data container
                original_instance_ix_data = datapack.usages[0].get_tag_instances(ngram_tag)
                inst_obj = {"instance": n, "data": [data[i] for i in original_instance_ix_data], "predictions": []}

                for local_word_idx, ix in enumerate(original_instance_ix_data):
                    word_obj = {"word": data[ix], "word_idx": int(local_word_idx), "overall_predictions": {}}
                    detailed = []
                    
                    # for each step
                    for step_idx in range(num_steps):
                        preds = predictions[step_idx]
                        step_name = self.params.pred_chains[step_idx]
                        step_obj = {"name": step_name, "step_index": step_idx}

                        survives = thresholding[ix, step_idx]
                        step_preds = np.expand_dims(preds[ix, :], axis=0)
                        scores, classes = self.get_topK_preds(step_preds, self.label_mapping[step_idx], self.params.only_report_labels[step_idx])
                        step_preds = {c: round(s, 4) for (c, s) in zip(classes[0], scores[0])}
                        step_obj["step_preds"] = step_preds
                        detailed.append(step_preds)

                    modified, deleted, replaced = thresholding[ix, :]
                    modify_obj = {"modified": int(modified), "prob": detailed[0]["modify"]}
                    word_obj["overall_predictions"]["modify_prediction"] = modify_obj

                    delete_obj = detailed[1]
                    # replaced
                    objs = []
                    for word, prob in detailed[2].items():
                        objs.append({"word": word, "prob": prob})
                    replace_obj = objs

                    if modified:
                        if deleted:
                            # deleted
                            word_obj["overall_predictions"]["delete_prediction"] = delete_obj
                        elif replaced:
                            word_obj["overall_predictions"]["replace_prediction"] = replace_obj

                    if not self.omit_detailed_results():
                        word_obj["detailed_predictions"] = {"modify_prediction": modify_obj, "delete_prediction": delete_obj, "replace_prediction": replace_obj}

                    inst_obj["predictions"].append(word_obj)
                res.append(inst_obj)

        self.result = {"results": res, "input_params": self.input_parameters_dict, "messages": self.messages}
Code example #49
0
File: fliper.py Project: samvidmistry/FLIPER
def runInstruction(instr):
    '''
    Run the given instruction. In the case of a block begin, instructions
    are collected and run when the block end command is encountered.
    '''
    global canvasWidth
    global canvasHeight
    global canvas
    global canvasImage
    global canvasBackground
    global inBlock
    global maxDuration
    global animationQueue

    if instr.data == 'canvas_size':
        if canvasWidth is not None and canvasHeight is not None:
            raise CanvasReadjustError(
                errorAtToken("Canvas size changed after drawing commands.",
                             instr.children[0]))
        width = int(instr.children[0].children[0])
        height = int(instr.children[1].children[0])
        color = (255, 255, 255, 255)
        if len(instr.children) == 6:
            red = int(instr.children[2].children[0])
            if red < 0 or red > 255:
                raise ColorOutOfRangeError(
                    errorAtToken("0 <= color value <= 255", instr.children[2]))
            green = int(instr.children[3].children[0])
            if green < 0 or green > 255:
                raise ColorOutOfRangeError(
                    errorAtToken("0 <= color value <= 255", instr.children[3]))
            blue = int(instr.children[4].children[0])
            if blue < 0 or blue > 255:
                raise ColorOutOfRangeError(
                    errorAtToken("0 <= color value <= 255", instr.children[4]))
            alpha = int(instr.children[5].children[0])
            if alpha < 0 or alpha > 255:
                alphaOutOfRangeError(instr.children[5])
            color = (red, green, blue, alpha)
        canvasWidth = int(width)
        canvasHeight = int(height)
        canvasBackground = color

    elif instr.data == 'declare_image':
        checkCanvas(instr.children[0].children[0])
        children = instr.children
        iPath = children[0].children[0][1:-1]
        id = children[1].children[0][1:-1]

        if id in imageData:
            raise DuplicateIdError(
                errorAtToken(
                    "ID {} is already associated with an image.".format(id),
                    children[1].children[0]))

        x = 0 if len(children) != 4 else int(children[2].children[0])
        y = 0 if len(children) != 4 else int(children[3].children[0])

        if not path.exists(iPath):
            raise ImagePathError(
                errorAtToken("Image at {} does not exist.".format(iPath),
                             children[0].children[0]))

        if not path.isfile(iPath):
            raise ImagePathError(
                errorAtToken("{} is not a file".format(iPath),
                             children[0].children[0]))

        try:
            im = Image.open(iPath).convert("RGBA")
            imageData[id] = im
            imageLocation[id] = (x, y)
            drawFrame()
        except PIL.UnidentifiedImageError:
            raise ImagePathError(
                errorAtToken(
                    "Image at {} cannot be opened or identified.".format(
                        iPath), children[0].children[0]))

    elif instr.data == 'move_object':
        checkCanvas(instr.children[0])
        id = instr.children[0].children[0][1:-1]
        checkId(instr.children[0])

        sx, sy = imageLocation[id]
        dx = int(instr.children[1].children[0])
        dy = int(instr.children[2].children[0])
        duration = instr.children[3].children[0]

        checkDuration(duration)
        duration = int(duration)

        animation = Move(sx, sy, dx, dy, duration)
        applyOrQueue(id, animation, duration)

    elif instr.data == 'rotate_object':
        checkCanvas(instr.children[0])
        id = instr.children[0].children[0][1:-1]
        checkId(instr.children[0])

        degrees = float(instr.children[1].children[0])
        duration = instr.children[2].children[0]
        checkDuration(duration)
        duration = int(duration)

        animation = Rotate(degrees, duration, canvasBackground)
        applyOrQueue(id, animation, duration)

    elif instr.data == 'change_opacity':
        checkCanvas(instr.children[0])
        id = instr.children[0].children[0][1:-1]
        checkId(instr.children[0])

        destAlpha = int(instr.children[1].children[0])
        if destAlpha < 0 or destAlpha > 255:
            alphaOutOfRangeError(instr.children[1])

        duration = instr.children[2].children[0]
        checkDuration(duration)
        duration = int(duration)

        image = imageData[id]
        _, _, _, srcAlpha = image.getpixel((0, 0))
        animation = Alpha(srcAlpha, destAlpha, duration)
        applyOrQueue(id, animation, duration)

    elif instr.data == 'scale_object':
        checkCanvas(instr.children[0])
        id = instr.children[0].children[0][1:-1]
        checkId(instr.children[0])

        sx = float(instr.children[1].children[0])
        sy = float(instr.children[2].children[0])

        duration = instr.children[3].children[0]
        checkDuration(duration)
        duration = int(duration)

        sWidth = imageData[id].width
        sHeight = imageData[id].height

        animation = Scale(sWidth, sHeight, duration, sx, sy)
        applyOrQueue(id, animation, duration)

    elif instr.data == 'wait':
        checkCanvas(instr.children[0])
        duration = instr.children[0].children[0]
        checkDuration(duration)
        duration = int(duration)

        for i in range(duration):
            drawFrame()

    elif instr.data == 'delete_object':
        checkCanvas(instr.children[0])
        id = instr.children[0].children[0][1:-1]
        checkId(instr.children[0])

        del imageData[id]
        del imageLocation[id]

        drawFrame()

    elif instr.data == 'block_begin':
        if inBlock:
            raise NestedBlockError(
                error("Nested blocks are not supported", instr.line, 0))

        inBlock = True

    elif instr.data == 'block_end':
        if not inBlock:
            raise BlockEndWithoutBeginError(
                error("No pairing begin block statement present", instr.line,
                      0))

        applyAnimationsForDuration(animationQueue, maxDuration)
        inBlock = False
        maxDuration = 0
        animationQueue.clear()
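
The canvas_size branch repeats the same 0-255 validation for each color component; a hedged refactoring sketch (the helper name is an assumption) that would collapse the four checks into one:

def color_component(token):
    """Parse one color token, enforcing 0 <= value <= 255."""
    value = int(token.children[0])
    if value < 0 or value > 255:
        raise ColorOutOfRangeError(
            errorAtToken("0 <= color value <= 255", token))
    return value

# e.g. red, green, blue, alpha = (color_component(c)
#                                 for c in instr.children[2:6])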
Code example #50
0
 def forward(self, x):
     """Forward pass function"""
     # forward pass
     error("Attempted to access abstract forward function")
     return None
Code example #51
0
File: apply-tag.py Project: zfstor/build
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################

import os
import sys
from dsl import load_file
from utils import sh, sh_str, info, error, env

dsl = load_file('${BUILD_CONFIG}/repos.pyd', os.environ)


def tag_repo(repo, tag):
    sh("git --git-dir=${repo}/.git tag ${tag}")


if __name__ == '__main__':
    if len(sys.argv) < 2:
        error('Usage: apply-tag.py <tag>')

    tag = sys.argv[1]
    for i in dsl['repository'].values():
        info('Tagging repository: {0}', i['name'])
        tag_repo(i, tag)
Code example #52
0
def checkTrunc(infile, lines, length):
    """check prefixes of lines"""
    with open(infile, "r") as f:
        for i, inline in enumerate(f):
            if inline.rstrip() != lines[i].rstrip()[:length]:
                error("Truncated records", "Record %d mismatch" % i)
Code example #53
0
def usage():
    error('%s [dataset]' % sys.argv[0])
Code example #54
0
File: chpl_atomics.py Project: mstrout/chapel
def get(flag='target'):
    if flag == 'network':
        atomics_val = overrides.get('CHPL_NETWORK_ATOMICS')
        if not atomics_val:
            comm_val = chpl_comm.get()
            if comm_val in ['ofi', 'ugni'] and get('target') != 'locks':
                atomics_val = comm_val
            else:
                atomics_val = 'none'
    elif flag == 'target':
        atomics_val = overrides.get('CHPL_ATOMICS')
        if not atomics_val:
            compiler_val = chpl_compiler.get('target')
            platform_val = chpl_platform.get('target')

            # We default to C standard atomics (cstdlib) for gcc 5 and newer.
            # Some prior versions of gcc look like they support standard
            # atomics, but have buggy or missing parts of the implementation,
            # so we do not try to use cstdlib with gcc < 5. If support is
            # detected for clang (via preprocessor checks) we also default to
            # cstdlib atomics. For llvm-clang we always default to
            # cstdlib atomics. We know the llvm-clang will have compiler
            # support for atomics and llvm requires gcc 4.8 (or a compiler with
            # equivalent features) to be built so we know we'll have system
            # header support too.
            #
            # We support intrinsics for gcc, intel, cray and clang. gcc added
            # initial support in 4.1, and added support for 64-bit atomics on
            # 32-bit platforms with 4.8. clang and intel also support 64-bit
            # atomics on 32-bit platforms and the cray compiler will never run
            # on a 32-bit machine.
            #
            # For pgi or 32-bit platforms with an older gcc, we fall back to
            # locks
            if compiler_val in ['gnu', 'cray-prgenv-gnu', 'mpi-gnu']:
                version = get_compiler_version(flag)
                if version >= CompVersion('5.0'):
                    atomics_val = 'cstdlib'
                elif version >= CompVersion('4.8'):
                    atomics_val = 'intrinsics'
                elif version >= CompVersion(
                        '4.1') and not platform_val.endswith('32'):
                    atomics_val = 'intrinsics'
            elif compiler_val == 'intel' or compiler_val == 'cray-prgenv-intel':
                atomics_val = 'intrinsics'
            elif compiler_val == 'cray-prgenv-cray':
                atomics_val = 'cstdlib'
            elif compiler_val in ['allinea', 'cray-prgenv-allinea']:
                atomics_val = 'cstdlib'
            elif compiler_val == 'clang':
                if has_std_atomics():
                    atomics_val = 'cstdlib'
                else:
                    atomics_val = 'intrinsics'
            elif compiler_val == 'llvm':
                atomics_val = 'cstdlib'

            # we can't use intrinsics, fall back to locks
            if not atomics_val:
                atomics_val = 'locks'
    else:
        error("Invalid flag: '{0}'".format(flag), ValueError)
    return atomics_val
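
For reference, the resolution order the function above encodes, sketched as illustrative calls (the printed values are assumptions about one particular toolchain):

# CHPL_ATOMICS / CHPL_NETWORK_ATOMICS overrides win; otherwise the
# compiler table applies, and 'locks' is the last resort.
print(get('target'))   # e.g. 'cstdlib' with gcc >= 5 or llvm
print(get('network'))  # 'ofi'/'ugni' when supported, else 'none'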
Code example #55
0
File: chpl_llvm.py Project: dlongnecke-cray/chapel
def validate_llvm_config():
    llvm_val = get()
    llvm_config = get_llvm_config()

    if llvm_val == 'system':
        if llvm_config == '' or llvm_config == 'none':
            error("CHPL_LLVM=system but could not find an installed LLVM"
                  " with one of the supported versions: {0}".format(
                  llvm_versions_string()))

    if (llvm_val == 'system' or
        (llvm_val == 'bundled' and os.path.exists(llvm_config))):
        version, config_error = check_llvm_config(llvm_config)
        if config_error:
            error("Problem with llvm-config at {0} -- {1}"
                  .format(llvm_config, config_error))

    if llvm_val == 'system':
        bindir = get_system_llvm_config_bindir()
        if not (bindir and os.path.isdir(bindir)):
            error("llvm-config command {0} provides missing bin dir {1}"
                  .format(llvm_config, bindir))
        clang_c = get_llvm_clang('c')[0]
        clang_cxx = get_llvm_clang('c++')[0]
        if not os.path.exists(clang_c):
            error("Missing clang command at {0}".format(clang_c))
        if not os.path.exists(clang_cxx):
            error("Missing clang++ command at {0}".format(clang_cxx))

        (noPackageErrors, package_err) = check_llvm_packages(llvm_config)
        if not noPackageErrors:
            error(package_err)
Code example #56
0
def api():
    data = request.values.to_dict()
    try:
        command = re.split(r"\s+", data["text"])
        slack_id = data["team_id"]
        team_domain = data["team_domain"]
        channel = data["channel_id"]
    except KeyError:
        abort(400)

    # ensuring that the request comes from slack
    if not valid_slack_request(request):
        return abort(404)

    team = db.session.query(Team).filter_by(slack_id=slack_id).first()
    if not team:
        return error(
            "You are not registered in our proxy server, try removing the app "
            "and adding it to slack again.")

    if command[0] == "help":
        fields = [
            {
                "title": "`/pass` _or_ `/pass list`",
                "value": "To list the available passwords in this channel.",
                "short": True,
            },
            {
                "title":
                "`/pass <secret>` or `/pass show <secret>`",
                "value":
                ("To retrieve a one time use link with the secret content, "
                 "this link expires in 15 minutes."),
                "short":
                True,
            },
            {
                "title":
                "`/pass insert <secret>`",
                "value": ("To retrieve the link with the editor to create the "
                          "secret, this link expires in 15 minutes."),
                "short":
                True,
            },
            {
                "title": "`/pass remove <secret>`",
                "value": ("To remove the secret from the group."),
                "short": True,
            },
            {
                "title":
                "`/pass configure` or `/pass configure <server_url>`",
                "value":
                ("To setup the password storage, it is only necessary "
                 "to execute it once."),
                "short":
                True,
            },
        ]
        return jsonify({
            "attachments": [{
                "fallback":
                ("_Usage:_ https://github.com/talpor/password-scale"),
                "text":
                "*_Usage:_*",
                "fields":
                fields,
            }]
        })

    if command[0] == "configure" and len(command) == 2:
        url = command[1]
        if not validators.url(url):
            return error("Invalid URL format, use: https://<domain>")

        if team.url:
            msg = ("This team is already configured, you want to replace "
                   "the password server?")
            return jsonify({
                "attachments": [{
                    "fallback":
                    "This team already configured",
                    "text":
                    msg,
                    "callback_id":
                    "configure_password_server",
                    "color":
                    "warning",
                    "actions": [
                        {
                            "name": "reconfigure_server",
                            "text": "Yes",
                            "type": "button",
                            "value": url,
                        },
                        {
                            "name": "no_reconfigure",
                            "text": "No",
                            "style": "danger",
                            "type": "button",
                            "value": "no",
                        },
                    ],
                }]
            })

        if not team.register_server(url):
            return error("Unable to retrieve the _public_key_ "
                         "from the server".format(team_domain))

        return success("{} team successfully configured!".format(team_domain))

    if command[0] == "configure" and len(command) == 1 or not team.url:
        color = "warning"
        if team.url:
            msg = (
                "*{}* team already have a server configured, if you want to "
                "swap select some of the options below".format(team.name))
        elif command[0] == "configure":
            color = "good"
            msg = "What type of server do you want to use?"
        else:
            msg = (
                "*{}* team does not have a password server configured, select "
                "one of the options below to start.".format(team_domain))

        warning_msg = (
            "This is a test server, any information stored on this server "
            "can be deleted at any moment without prior notice!")
        return jsonify({
            "attachments": [{
                "fallback":
                msg,
                "text":
                msg,
                "color":
                color,
                "callback_id":
                "configure_password_server",
                "actions": [
                    {
                        "name": "use_demo_server",
                        "text": "Use Test Server",
                        "type": "button",
                        "value": "no",
                        "confirm": {
                            "title": "Confirm",
                            "text": warning_msg,
                            "ok_text": "I understand",
                            "dismiss_text": "No",
                        },
                    },
                    {
                        "text": "Request Private Server",
                        "type": "button",
                        "url": CONFIGURATION_GUIDE_URL,
                    },
                    {
                        "name": "no_configure",
                        "text": "Later",
                        "type": "button",
                        "value": "no",
                    },
                ],
            }]
        })
    if command[0] in ["", "list"]:
        try:
            dir_ls = cmd.list(team, channel)
        except SlashpassError as e:
            return error("_{}_".format(e.message))

        if not dir_ls:
            return warning(
                "You have no passwords created for this channel, use "
                "`/pass insert <secret>` to create the first one!")

        return jsonify({
            "attachments": [{
                "fallback":
                dir_ls,
                "text":
                "Password Store\n{}".format(dir_ls),
                "footer": ("Use the command `/pass <key_name>` to retrieve "
                           "some of the keys"),
            }]
        })

    if command[0] == "insert" and len(command) == 2:
        app = command[1]
        token = cmd.generate_insert_token(team, channel, app)

        msg = "Adding password for *{}* in this channel".format(app)
        return jsonify({
            "attachments": [{
                "fallback":
                msg,
                "text":
                msg,
                "footer":
                "This editor will be valid for 15 minutes",
                "color":
                "good",
                "actions": [{
                    "text": "Open editor",
                    "style": "primary",
                    "type": "button",
                    "url": "{}/insert/{}".format(SITE, token),
                }],
            }]
        })

    if command[0] == "remove" and len(command) == 2:
        app = command[1]
        if cmd.remove(team, channel, app):
            return success(
                "The secret *{}* was removed successfully.".format(app))
        return warning("Looks like the secret *{}* is not in your repository "
                       ":thinking_face: use the command `/pass list` "
                       "to verify your storage.".format(app))

    if command[0] == "show" and len(command) == 2:
        app = command[1]
    else:
        app = command[0]

    onetime_link = cmd.show(team, channel, app)
    if onetime_link:
        return jsonify({
            "attachments": [{
                "fallback":
                "Password: {}".format(onetime_link),
                "text":
                "Password for *{}*".format(app),
                "footer":
                "This secret will be valid for 15 minutes",
                "color":
                "good",
                "actions": [{
                    "text": "Open secret",
                    "style": "primary",
                    "type": "button",
                    "url": onetime_link,
                }],
            }]
        })
    else:
        return warning("*{}* is not in the password store.".format(app))
Code example #57
0
 def make(self):
     # make sure there exist enough labels
     LabelledLearner.make(self)
     error("Dataset supplied to classifier has only one label",
           ill_defined(self.num_labels, cannot_be=1))
Code example #58
0
def mean_squared_error(y, y_mp):
    return 1 / y.shape[1] * np.sum((y_mp - y)**2)


def mean_absolute_error(y, y_mp):
    return 1 / y.shape[1] * np.sum(np.absolute(y_mp - y))


def negative_log_likelihood(y_mp):
    return -1 / y_mp.shape[1] * np.sum(np.log(y_mp + 1e-15))


if __name__ == '__main__':
    if len(sys.argv) != 2:
        usage()
    if not os.path.isfile(sys.argv[1]):
        error('no such file: %s' % sys.argv[1])

    feature_number = 30
    data = mp_train.read_data(sys.argv[1], feature_number)
    x, y = mp_train.split_xy(data)
    layers, neurons, wb, x_min, x_max = load_train_data()
    x = mp_train.scale(x, x_min, x_max)
    try:
        y_mp, za = mp_train.feedforward(wb, x, layers, neurons)
        l = mp_train.loss(y[0], y_mp[0])
    except:
        error("invalid train data")
    print("binary cross entropy: %.4f" % l)
    mse = mean_squared_error(y, y_mp)
    print("mean squared error: %.4f" % mse)
    mae = mean_absolute_error(y, y_mp)
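
A quick sanity check of the three metrics above on toy arrays; the `(classes, samples)` orientation implied by `y.shape[1]` is assumed:

import numpy as np

y = np.array([[1.0, 0.0, 1.0]])     # targets, shape (1, 3)
y_mp = np.array([[0.9, 0.2, 0.6]])  # predictions, same shape

print(mean_squared_error(y, y_mp))    # (0.01 + 0.04 + 0.16) / 3 ≈ 0.0700
print(mean_absolute_error(y, y_mp))   # (0.1 + 0.2 + 0.4) / 3 ≈ 0.2333
print(negative_log_likelihood(y_mp))  # -(ln 0.9 + ln 0.2 + ln 0.6) / 3 ≈ 0.7419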
Code example #59
0
 def make(self):
     error("Cannot apply {} to multilabel data.".format(self.name),
           self.do_multilabel)
     SKLClassifier.make(self)
Code example #60
0
File: histogram.py Project: Git-Math/DSLR
            header = header[6:]
            features = [{
                "Ravenclaw": [],
                "Slytherin": [],
                "Gryffindor": [],
                "Hufflepuff": []
            } for i in range(len(header))]
            is_numeric = [False for i in range(len(header))]
            for line in reader:
                for i, field in enumerate(line):
                    if i == 1:
                        house = field
                    elif i >= 6 and field != "":
                        features[i - 6][house] += [float(field)]
    except:
        error("invalid dataset")

    return (header, features)


if __name__ == '__main__':
    # checks
    if len(sys.argv) != 2:
        usage()
    if not os.path.isfile(sys.argv[1]):
        error('no such file: %s' % sys.argv[1])

    header, features = read_data(sys.argv[1])
    index = 0
    histogram(header[index], features[index])