Exemplo n.º 1
0
	def recorrerCarpeta(self, archivo):
		"""Recursively walk *archivo*; when a plain file matches both the
		target file name and its parent folder name, overwrite it with the
		contents of self.filepath."""
		if os.path.isdir(archivo):
			# Descend into every entry of the directory.
			for entrada in os.listdir(archivo):
				self.recorrerCarpeta(archivo + "/" + entrada)
			return
		mismo_archivo = os.path.basename(archivo) == self.nombreArchivo
		misma_carpeta = os.path.basename(os.path.dirname(archivo)) == self.nombreCarpeta
		if mismo_archivo and misma_carpeta:
			utils.file_write(archivo, utils.file_read(self.filepath))
Exemplo n.º 2
0
 def script(self, dst, overlay, prefix):
     """generate the permissions script which will be applied before the end
     of an installation"""
     # Overlay directory and its companion '<overlay>.permissions' metadata
     # file (tab separated fields: mode, uid, gid, path).
     path = os.path.join(self.cfg['paths']['overlays'], overlay)
     perm_file = path + '.permissions'
     # Shell script that will restore ownership and modes on the target.
     perm_script = os.path.join(dst, 'fix_perms.sh')
     if os.path.isfile(perm_file):
         data = utils.file_read(perm_file).strip()
         lines = data.split('\n')
         # Ignore comment lines in the permissions file.
         lines = [line for line in lines if not line.startswith('#')]
         script = []
         for line in lines:
             try:
                 mode, uid, gid, real_path = line.split('\t')
             except ValueError:
                 # Anything other than exactly four tab separated fields
                 # means the file is corrupt -> abort with a fatal error.
                 err = '%s is corrupt, delete or regenerate it with '\
                     'the "seedbank manage --overlay" command, or fix the '\
                     'file manually, line "%s" contains errors' % \
                     (perm_file, line)
                 raise utils.FatalException(err)
             else:
                 if prefix:
                     # Re-root the path under *prefix* and strip the
                     # '.sb_template' extension from rendered templates.
                     real_path = os.path.join(prefix, real_path[1:])
                     if real_path.endswith('.sb_template'):
                         real_path = os.path.splitext(real_path)[0]
                 script.append('chown %s:%s %s' % (uid, gid, real_path))
                 script.append('chmod %s %s' % (mode, real_path))
         utils.file_write(perm_script, '\n'.join(script))
     else:
         # No permissions file: warn, but still write an (empty) script so
         # the installer can source it unconditionally.
         logging.warning(
             'overlay "%s" has been selected but permission '
             'file "%s" does not exist, so all files will be owned by root '
             'and will keep the current permissons which could lead to '
             'problems', overlay, perm_file)
         utils.file_write(perm_script, '')
Exemplo n.º 3
0
def status(address, msg):
    """write a file with custom status"""
    variables = settings.pxe_variables(cfg, address)
    logging.info('setting state to "%s"', msg)
    # Marker file '<fqdn>_<msg>.state' in the configured status directory.
    state_file = os.path.join(
        cfg['paths']['status'], '%s_%s.state' % (variables['fqdn'], msg))
    utils.file_write(state_file, msg)
Exemplo n.º 4
0
def status(address, msg):
    """write a file with custom status"""
    # Resolve the PXE template variables (fqdn, etc.) for this address.
    pxe_vars = settings.pxe_variables(cfg, address)
    logging.info('setting state to "%s"', msg)
    # State is tracked as a marker file '<fqdn>_<msg>.state' under the
    # configured status directory; the file body repeats *msg*.
    file_name = '%s_%s.state' % (pxe_vars['fqdn'], msg)
    file_name = os.path.join(cfg['paths']['status'], file_name)
    utils.file_write(file_name, msg)
Exemplo n.º 5
0
    def aplicarSonar(ruta):
        """Read *ruta*, remove lone curly braces and write the result back."""
        contenido = quitCurlyAlones(utils.file_read(ruta))
        utils.file_write(ruta, contenido)

                
                
Exemplo n.º 6
0
 def script(self, dst, overlay, prefix):
     """generate the permissions script which will be applied before the end
     of an installation"""
     overlay_path = os.path.join(self.cfg['paths']['overlays'], overlay)
     perm_file = overlay_path + '.permissions'
     perm_script = os.path.join(dst, 'fix_perms.sh')
     if not os.path.isfile(perm_file):
         # No permissions file: warn and write an empty script instead.
         logging.warning('overlay "%s" has been selected but permission '
             'file "%s" does not exist, so all files will be owned by root '
             'and will keep the current permissons which could lead to '
             'problems', overlay, perm_file)
         utils.file_write(perm_script, '')
         return
     # Drop comment lines; each remaining line must carry exactly four
     # tab separated fields: mode, uid, gid, path.
     entries = [entry for entry in
                utils.file_read(perm_file).strip().split('\n')
                if not entry.startswith('#')]
     commands = []
     for entry in entries:
         fields = entry.split('\t')
         if len(fields) != 4:
             err = '%s is corrupt, delete or regenerate it with '\
                 'the "seedbank manage --overlay" command, or fix the '\
                 'file manually, line "%s" contains errors' % \
                 (perm_file, entry)
             raise utils.FatalException(err)
         mode, uid, gid, real_path = fields
         if prefix:
             # Re-root under *prefix* and strip '.sb_template' extensions.
             real_path = os.path.join(prefix, real_path[1:])
             if real_path.endswith('.sb_template'):
                 real_path = os.path.splitext(real_path)[0]
         commands.append('chown %s:%s %s' % (uid, gid, real_path))
         commands.append('chmod %s %s' % (mode, real_path))
     utils.file_write(perm_script, '\n'.join(commands))
Exemplo n.º 7
0
 def crear_archivo(self, nombre):
     """Create an empty file called *nombre* in the current file's folder,
     open it, and start the insertion worker thread."""
     ruta = utils.get_filedir() + os.sep + nombre
     utils.file_write(ruta, "")
     ventana = sublime.active_window()
     ventana.open_file(ruta)
     view = ventana.active_view()
     threading.Thread(target=self.insertar).start()
Exemplo n.º 8
0
 def run(self, edit):
     """Ensure the samples file for the current language exists, then
     prompt for a sample name."""
     lang = utils.get_language()
     ruta = os.path.join(sublime.packages_path(), "..", "samples", lang+".json")
     self.rutaSamples = os.path.normpath(ruta)
     print("la ruta es : "+self.rutaSamples)
     if not os.path.exists(self.rutaSamples):
         # Seed the file with an empty JSON object.
         utils.file_write(self.rutaSamples, "{}")
     sublime.active_window().show_input_panel("sample name", "", self.save, None, None)
Exemplo n.º 9
0
 def write(self, contents):
     """write the pxe boot file"""
     pxe_path = os.path.join(cfg['paths']['tftpboot'], 'pxelinux.cfg',
                             self.address)
     utils.make_dirs(os.path.dirname(pxe_path))
     # Remove a stale '<file>.disabled' marker before (re)writing.
     utils.file_delete('%s.disabled' % pxe_path)
     utils.file_write(pxe_path, contents)
Exemplo n.º 10
0
 def write(self, contents):
     """write the pxe boot file"""
     # The config lives at <tftpboot>/pxelinux.cfg/<address>.
     file_name = os.path.join(cfg['paths']['tftpboot'], 'pxelinux.cfg',
         self.address)
     directory = os.path.dirname(file_name)
     utils.make_dirs(directory)
     # Drop a stale '.disabled' marker before writing the fresh config.
     utils.file_delete('%s.disabled' % file_name)
     utils.file_write(file_name, contents)
Exemplo n.º 11
0
 def guardar(self, nombre):
     """Save the current buffer text as a template named *nombre*."""
     if not nombre:
         return
     # Fall back to the default extension when the file has none.
     ext = utils.get_fileext() or utils.get_ext()
     nombre = nombre.replace(" ", "_")
     # Escape dollar signs so they survive later template expansion.
     text = utils.get_text().replace("$", "\$")
     utils.file_write(TEMPLATES_PATH + nombre + "." + ext, text)
Exemplo n.º 12
0
def crear_archivo_bloqueo(orden):
    """Remove existing lock files and create the one matching *orden*."""
    eliminar_archivos()
    rutas = {
        1: ARCHIVO_BLOQUEO_UNO,
        2: ARCHIVO_BLOQUEO_DOS,
        3: ARCHIVO_BLOQUEO_TRES,
        4: ARCHIVO_BLOQUEO_CUATRO,
    }
    ruta = rutas.get(orden)
    if ruta:
        utils.file_write(ruta, "dani")
Exemplo n.º 13
0
	def on_post_save(self, view):
		"""After saving a file in a JSF Maven project, copy it straight into
		the application server's exploded deployment so the change shows up
		without redeploying.

		NOTE(review): server paths come from the 'server' preference and the
		module-level server_folder_deploy map -- confirm against callers.
		"""
		if utils.get_language()!="jsf":return
		window=sublime.active_window()
		folders=window.folders()
		if not folders:return

		folderProyecto=folders[0]
		# Only act on Maven projects (pom.xml at the workspace root).
		if not os.path.exists(os.path.join(folderProyecto, "pom.xml")):return
		server=utils.get_preference("server")
		
		folderDeploy=server_folder_deploy[server]
		self.folderDeploy=folderDeploy

		filepath=utils.get_filepath()
		self.filepath=filepath
		
		# Weblogic is handled by a dedicated worker thread.
		if server=="weblogic":
			threading.Thread(target=self.reemplazarTodos).start()
			return

		if server!="jboss":
			# Non-jboss servers keep deployments one directory deeper.
			folderDeploy=folderDeploy+os.sep+os.listdir(folderDeploy)[0]
			self.folderDeploy=folderDeploy

			folderDeploy=os.path.normpath(folderDeploy)
			print("the folder deploy is : "+folderDeploy)
		# Project name = first path component below the workspace folder.
		nombreProyecto=filepath.replace(folderProyecto+os.sep, "")
		#print("el nombre del proyceto es : "+nombreProyecto)
		nombreProyecto=nombreProyecto[:nombreProyecto.find(os.sep)]
		#print("el nuevo nombre del proyecto es: "+nombreProyecto)
		#print("el filepath es : "+filepath)
		#print("el folderDeploy es : "+folderDeploy)
		# Path of the saved file relative to its 'webapp' folder.
		fileLocation=filepath[filepath.find("webapp"+os.sep)+7:]
		#print("el fileLocation is: "+fileLocation)
		print(server)
		

		print("el nombre del proyecto es : "+nombreProyecto)
		folders=os.listdir(folderDeploy)

		folders=[os.path.join(folderDeploy, x) for x in folders]
		
		# Sort deployments newest-first so the active one is found first.
		def comparador(x):return os.path.getmtime(x)

		folders=sorted(folders, key=comparador, reverse=True)
		print(folders)
		for folderS in folders:
			for folder in os.listdir(folderS):
				print(folder)
				if folder.find(nombreProyecto)!=-1:
					# Found the exploded app: overwrite the file in place.
					fileLocation=folderS+os.sep+folder+os.sep+fileLocation
					print("la nueva localizacion del archivo es : "+fileLocation)
					utils.file_write(fileLocation, utils.file_read(filepath))
					#print("escrito con exito")
					return
				else:print("no")
Exemplo n.º 14
0
 def run(self, edit):
     """Generate one .bat launcher per known snippet name.

     NOTE(review): output paths are hard-coded to d:/sublime3.
     """
     carpeta_snippets = sublime.packages_path() + os.sep + "snippets"
     nombres = []
     for ruta in utils.get_files({"folder":carpeta_snippets, "ext":"json"}):
         nombres += list(utils.load_json(ruta).keys())
     for snippet in set(nombres):
         # Normalize the snippet name into a valid command/file name.
         snippet = snippet.lower().replace("-", "_").replace(" ", "").replace("?", "_")
         utils.file_write(RUTA_COMANDOS+"code_"+snippet+".bat", "echo code_"+snippet+" > d:/sublime3/comando.txt")
         print(snippet)
Exemplo n.º 15
0
 def phpToHtml(self, projectName):
     """Convert every .php file of the project to an .html file under the
     d:/fromphp/ output root, mirroring the project-relative layout.

     NOTE(review): the output root is a hard-coded machine-specific path.
     """
     self.d = d = utils.get_dict_files({"ext":".php"})
     for archivo in d.keys():
         print(archivo)
         text = self.convertir(d[archivo])
         # Mirror the path below the project folder under the output root.
         newRuta = "d:/fromphp/" + archivo[archivo.find(projectName):]
         # Create the destination directory if missing, instead of the
         # original bare 'except: pass' which swallowed every error.
         os.makedirs(os.path.dirname(newRuta), exist_ok=True)
         newRuta = newRuta.replace(".php", ".html")
         # Pass the contents positionally, consistent with every other
         # utils.file_write call site.
         utils.file_write(newRuta, text)
Exemplo n.º 16
0
 def generar_comando(self, comando):
     """Write a .bat launcher whose name is the snake_case form of the
     CamelCase command class name *comando*."""
     if comando.endswith("Command"):
         comando = comando[:-7]
     # Build the snake_case name: underscore before each upper-case letter.
     partes = [comando[0]]
     for letra in comando[1:]:
         if letra.isupper():
             partes.append("_")
         partes.append(letra)
     comando_sublime = "".join(partes).lower()
     utils.file_write(CARPETA_COMANDOS+comando_sublime+".bat", "echo "+comando_sublime+" > "+ARCHIVO_COMANDO)
Exemplo n.º 17
0
def fuzz(fuzz_input):
  """Feed *fuzz_input* to the converter through a temp file and save a
  crash report when its stderr contains the '*' error marker."""
  tmp_file = CRASH_PATH + '/tmp_input.img'
  utils.file_write(tmp_file, fuzz_input)
  result = utils.run(tmp_file)
  stderr_text = result.stderr.decode('ascii').replace('\n', '. ')
  # The converter flags errors with '*' in stderr.
  if '*' in stderr_text:
    utils.save(CRASH_PATH, tmp_file, result.returncode, stderr_text, True)
Exemplo n.º 18
0
    def generate(self, path):
        """generate an overlay permissions file"""
        perm_file = path + '.permissions'

        # Gather every directory and file under the overlay tree, except
        # .gitignore placeholders.
        overlay_contents = []
        for root, dirs, files in os.walk(path):
            for sub_dir in dirs:
                overlay_contents.append(os.path.join(root, sub_dir))
            for file_name in files:
                if not file_name == '.gitignore':
                    overlay_contents.append(os.path.join(root, file_name))

        # Map overlay-relative path -> (mode, uid, gid).  A few well known
        # locations get fixed modes; everything else keeps its current one.
        perm_list = {}
        for entry in overlay_contents:
            stat = os.stat(entry)
            # Octal permission bits without the file-type prefix.
            mode = int(oct(stat.st_mode)[3:])
            entry = entry.split(path, 1)[1]
            if entry == '/root':
                perm_list[entry] = ('0700', 0, 0)
            elif entry.endswith('.ssh'):
                perm_list[entry] = ('0700', 0, 0)
            elif entry.endswith('authorized_keys'):
                perm_list[entry] = ('0700', 0, 0)
            elif entry.startswith('/usr/local/bin'):
                perm_list[entry] = ('0755', 0, 0)
            elif entry == '/etc/rc.local':
                perm_list[entry] = ('0755', 0, 0)
            else:
                perm_list[entry] = ('%04d' % mode, 0, 0)

        # Entries already present in an existing permissions file take
        # precedence over the detected ones (malformed lines are skipped).
        if os.path.isfile(perm_file):
            data = utils.file_read(perm_file).split('\n')
            defined_list = {}
            for line in data:
                try:
                    mode, uid, gid, real_path = line.split('\t')
                except ValueError:
                    pass
                else:
                    defined_list[real_path] = (mode, uid, gid)
            for real_path in perm_list:
                if real_path in defined_list:
                    perm_list[real_path] = defined_list[real_path]

        # Output: header template, then one tab separated line per entry
        # (mode, uid, gid, path).
        data = []
        header_file = os.path.join(self.cfg['paths']['templates'],
            self.cfg['templates']['permission_script'])
        header = utils.file_read(header_file)
        data.append(header.strip())
        for key, value in perm_list.items():
            data.append('%s\t%s\t%s\t%s' % (value[0], value[1], value[2], key))
        utils.file_write(perm_file, '\n'.join(data) + '\n')
Exemplo n.º 19
0
    def generate(self, path):
        """generate an overlay permissions file"""
        perm_file = path + '.permissions'

        # Collect every directory and file under the overlay tree,
        # skipping .gitignore placeholders.
        entries = []
        for root, dirs, files in os.walk(path):
            entries.extend(os.path.join(root, name) for name in dirs)
            entries.extend(os.path.join(root, name) for name in files
                           if name != '.gitignore')

        # Map overlay-relative path -> (mode, uid, gid); a few well known
        # locations get fixed modes, everything else keeps its current one.
        perm_list = {}
        for full_path in entries:
            mode = int(oct(os.stat(full_path).st_mode)[3:])
            rel = full_path.split(path, 1)[1]
            if (rel == '/root' or rel.endswith('.ssh')
                    or rel.endswith('authorized_keys')):
                perm_list[rel] = ('0700', 0, 0)
            elif rel.startswith('/usr/local/bin') or rel == '/etc/rc.local':
                perm_list[rel] = ('0755', 0, 0)
            else:
                perm_list[rel] = ('%04d' % mode, 0, 0)

        # Entries already present in an existing permissions file win;
        # malformed lines are ignored.
        if os.path.isfile(perm_file):
            defined = {}
            for line in utils.file_read(perm_file).split('\n'):
                fields = line.split('\t')
                if len(fields) == 4:
                    mode, uid, gid, real_path = fields
                    defined[real_path] = (mode, uid, gid)
            for real_path in perm_list:
                if real_path in defined:
                    perm_list[real_path] = defined[real_path]

        # Header template first, then one tab separated line per entry.
        header_file = os.path.join(self.cfg['paths']['templates'],
                                   self.cfg['templates']['permission_script'])
        data = [utils.file_read(header_file).strip()]
        for key, value in perm_list.items():
            data.append('%s\t%s\t%s\t%s' % (value[0], value[1], value[2], key))
        utils.file_write(perm_file, '\n'.join(data) + '\n')
Exemplo n.º 20
0
 def escribirArchivo(self):
     """Substitute every ~param~ placeholder in self.text with its value,
     write the result to self.rutaNewFile and open that file.

     Supported placeholder variants per parameter p with value v:
         ~p~     -> v unchanged
         ~lf:p~  -> v with its first letter lower-cased
         ~uf:p~  -> v with its first letter upper-cased
         ~lo:p~  -> v fully lower-cased
         ~up:p~  -> v fully upper-cased
     """
     # Iterate parameters and values in lockstep instead of indexing
     # through range(len(...)).
     for i, (param, value) in enumerate(zip(self.parametros, self.values)):
         print(i)
         self.text = self.text.replace("~"+param+"~", value)
         self.text = self.text.replace("~lf:"+param+"~", value[0].lower()+value[1:])
         self.text = self.text.replace("~uf:"+param+"~", value[0].upper()+value[1:])
         self.text = self.text.replace("~lo:"+param+"~", value.lower())
         self.text = self.text.replace("~up:"+param+"~", value.upper())
     utils.file_write(self.rutaNewFile, self.text)
     window = sublime.active_window()
     window.open_file(self.rutaNewFile)
Exemplo n.º 21
0
def fuzz(input_seed, n_tests, n_bytes):
    """Run *n_tests* fuzzing iterations: mutate *n_bytes* bytes of
    *input_seed*, feed the result to the converter through a temp file,
    and save any input whose stderr contains the '*' error marker."""
    tmp_file = CRASH_PATH + '/tmp_input.img'
    for _ in range(n_tests):
        mutated = fuzz_bytes(input_seed, n_bytes)
        utils.file_write(tmp_file, mutated)

        result = utils.run(tmp_file)
        stderr_text = result.stderr.decode('ascii').replace('\n', '. ')

        # The converter flags errors with '*' in its output.
        if '*' in stderr_text:
            utils.save(CRASH_PATH, tmp_file, result.returncode, stderr_text)
Exemplo n.º 22
0
def args_gn_write(config):
    """Append the product build arguments to <out>/args.gn."""
    product_args_path = os.path.join(config.get_out_path(), 'args.gn')

    file_write(product_args_path, 'at',
               'product = "{}"\n'.format(config.product))
    # Add import to the file header
    if config.ndk is not None:
        file_write(product_args_path, 'at',
                   'ohos_build_ndk = true\n'
                   'ohos_build_ndk_target_host = "{}"'
                   '\n'.format(config.ndk))
    if config.test_args:
        file_write(product_args_path, 'at',
                   'ohos_xts_test_args = "{}"\n'.format(config.test_args[1]))
    file_write(product_args_path, 'at',
               'ohos_build_type = "{}"\n'.format(config.build_type))
    # One line per registered extra feature.
    for feature in CallbackDict.args_list:
        file_write(product_args_path, 'at', '{}\n'.format(feature))
Exemplo n.º 23
0
    def add_preseed(self, contents):
        """add the seed file to the intrd image"""
        dst = os.path.join(self.work_initrd, 'preseed.cfg')
        utils.file_write(dst, contents)

        # Prefer a Debian amd64 tree, then i386, then Ubuntu's plain
        # 'install' layout; fall back to the ISO root.
        candidates = (
            ('install.amd', 'amd'),
            ('install.386', '386'),
            ('install', None),
        )
        path = self.work_iso
        for sub_dir, arch in candidates:
            candidate = os.path.join(self.work_iso, sub_dir)
            if os.path.isdir(candidate):
                if arch:
                    self.data['architecture'] = arch
                path = candidate
                break

        initrd = os.path.join(path, 'initrd.gz')
        utils.initrd_extract(self.work_initrd, initrd)
        utils.initrd_create(self.work_initrd, initrd)
Exemplo n.º 24
0
def main():
    """Build positive/negative sample pairs for every regex and dump
    them, tab separated, to data/pair_data.txt."""
    regex_set = (get_dataset('data/regex/star0.txt') + get_dataset('data/regex/star1.txt')
                 + get_dataset('data/regex/star2.txt') + get_dataset('data/regex/star3.txt'))
    dataset = []
    num_samples = 50
    for idx, regex in enumerate(regex_set):
        print(idx, regex)
        positive_samples = get_positive_samples(regex, num_samples)
        # Pad with resampled entries until we have exactly num_samples.
        shortfall = num_samples - len(positive_samples)
        if shortfall > 0:
            positive_samples += [random.choice(positive_samples) for _ in range(shortfall)]
        negative_samples = get_negative_samples(regex, regex_set, num_samples)
        # There is not negative samples in case of \sigma * -> skip.
        if not negative_samples or None in negative_samples:
            continue
        shortfall = num_samples - len(negative_samples)
        if shortfall > 0:
            negative_samples += [random.choice(negative_samples) for _ in range(shortfall)]
        positive_samples = preprocess_source(positive_samples)
        negative_samples = preprocess_source(negative_samples)
        regex = preprocess_target(regex)
        dataset.append('\t'.join(positive_samples) + '\t<sep>\t'
                       + '\t'.join(negative_samples) + '\t<sep>\t' + regex)
    file_write('data/pair_data.txt', dataset)
Exemplo n.º 25
0
    def run(self, edit):
        """Regenerate the .bat command launchers and the global command list
        consumed by the external assistant ('jarvis') executable.

        NOTE(review): paths are hard-coded to d:/sublime3 -- machine
        specific.
        """
        paquete_snippets=sublime.packages_path()+os.sep+"snippets"
        lista=[]
        comandos=[]
        # Collect every snippet name from the json snippet files.
        for archivo in utils.get_files({"folder":paquete_snippets, "ext":"json"}):
            snip=utils.load_json(archivo)
            lista=lista + list(snip.keys())
        lista=list(set(lista))
        for snippet in lista:
            # Normalize the snippet name into a valid command/file name.
            snippet=snippet.lower().replace("-", "_").replace("(", "").replace(")", "").replace(" ", "").replace("?", "").replace(":", "")
            utils.file_write(RUTA_COMANDOS+"code_"+snippet+".bat", "echo code_"+snippet+" > d:/sublime3/comando.txt")
            comandos.append("code_"+snippet)
        # One 'make_<name>' command per template file.
        archivos_plantillas=utils.get_files({"folder":RUTA_PLANTILLAS})
        for plantilla in archivos_plantillas:
            plantilla=os.path.basename(plantilla)
            if plantilla.rfind(".")!=-1:plantilla=plantilla[:plantilla.rfind(".")]
            plantilla=plantilla.replace(" ", "_").lower()
            utils.file_write(RUTA_COMANDOS+"make_"+plantilla+".bat", "echo make_"+plantilla+" > d:/sublime3/comando.txt")
            comandos.append("make_"+plantilla)
        # Harvest TextCommand class names from plugin sources and convert
        # them from CamelCase to snake_case command names.
        archivos_python=utils.get_files({"folder":sublime.packages_path(), "ext":".py"})
        for programa in archivos_python:
            rutaPrograma=programa
            try:programa=utils.file_read(programa)
            except:
                print("saco error al leer : "+rutaPrograma)
                continue
            comandosPython=re.findall("class ([\w]+)\(sublime_plugin.TextCommand\)",programa, re.IGNORECASE)
            for comandoPython in comandosPython:
                comandoPython=comandoPython[0].lower()+comandoPython[1:]
                cp=""
                for c in comandoPython:
                    if c.isupper():cp+="_"
                    cp+=c.lower()
                if cp.endswith("_command"):cp=cp.replace("_command", "")
                comandos.append(cp)
        # Also include the commands bound in the user keymap file.
        comandosInternos=utils.file_read("D:/sublime3/Data/Packages/User/Default (Windows).sublime-keymap")
        comandosInternos=re.findall('"command": *"(\w+)" *\}', comandosInternos, re.IGNORECASE)
        for comandoInterno in comandosInternos:comandos.append(comandoInterno)
        comandos=sorted(list(set(comandos)))
        strComandos=""
        for comando in comandos:strComandos+=comando+"\n"

        window=sublime.active_window()
        view=window.active_view()
        utils.file_write("d:/sublime3/comandos.txt", strComandos)
        # Restart the assistant so it picks up the new command list.
        view.run_command("ejecutar_comando", {"comando":"taskkill /f /im CustomizeableJarvis.exe\njarvis\nexit"})
        
Exemplo n.º 26
0
def main() -> None:
    """Render the static gh-pages site: an index page plus one page of
    cleaned module data per study regulation."""
    prefix   = "cache/"
    now      = datetime.datetime.today()
    time_ym  = now.strftime("%Y-%m")
    time_dmy = now.strftime("%d. %b %Y")
    semester = utils.json_read(prefix + "current_semester.json", None)
    semester = semester[0] +" "+ semester[1]
    folder   = "gh-pages/"

    # Pre-scraped course data used to classify modules.
    pflicht: List[Tuple[str, str]] = []
    fields: Dict[str, Dict[str, Tuple[str, str]]] = {}
    pflicht = utils.json_read(prefix + "pre-tucan-pflicht.json", pflicht)
    fields = utils.json_read(prefix + "pre-inferno.json", fields)

    #nebenfach = utils.json_read("nebenfach.json")
#    back = utils.groupby(((course, major +" · "+ category)
#            for major,v in nebenfach.items()
#            for category,v in v.items()
#            for module in v
#            for course in module), key=lambda x:x[0])
#    back = {k:["Y Nebenfach · " + " &<br> ".join(i[1] for i in v),""] for k,v in back}
#    fields = [back] + list(fields.values())
#    print(json.dumps(fields, indent=2))

    # dist/main.js with npm; code.orig.js without npm
    if os.path.exists("dist/main.js"):
      CODE_FILE = "dist/main.js"
    else:
      CODE_FILE = "code.orig.js"

    page_tmpl  = utils.file_read("page.html")
    index_tmpl = utils.file_read("index.html")
    code_tmpl  = utils.file_read(CODE_FILE)
    style_tmpl = utils.file_read("style.css")

    # Keep only alphanumerics so a regulation name is a safe file name.
    def filename(reg: str) -> str:
      return "".join(c for c in reg if c.isalnum())

    # (raw name, display name, output html file) per regulation.
    regulations = [
      (k,
       k.replace("B.Sc.", "Bachelor")
        .replace("M.Sc.", "Master")
        .replace(" (2015)", ""),
       filename(k) + ".html")
      for k in fields.keys()
      if k.endswith(" (2015)")
     ] + [
      # other FBs?
      ("BauUmwelt", "FB 13 Bau, Umwelt", "BauUmwelt.html")
    ]

    # Partition the index links: CS degrees, experimental, other
    # departments ("FB ...").
    listy = [
      {'href': href, 'title': semester +" "+ display_regulation}
      for regulation, display_regulation, href in regulations
      if display_regulation.endswith(" Informatik")
      if not display_regulation.startswith("FB ")
    ]
    experimentallist = [
      {'href': href, 'title': semester +" "+ display_regulation}
      for regulation, display_regulation, href in regulations
      if not display_regulation.endswith(" Informatik")
      if not display_regulation.startswith("FB ")
    ]
    speciallist = [
      {'href': href, 'title': semester +" "+ display_regulation}
      for regulation, display_regulation, href in regulations
      if display_regulation.startswith("FB ")
    ]
    index_data = {
      "list": listy,
      "experimentallist": experimentallist,
      "speciallist": speciallist,
    }
    utils.file_write(folder + "/index.html", stache(index_tmpl, index_data))
    utils.file_write(folder + "/main.js", code_tmpl)
    utils.file_write(folder + "/style.css", style_tmpl)

    print(regulations)
    for regulation, display_regulation, href in regulations:
        print(prefix + "-" + filename(regulation) + ".json")
        modules: Dict[str, Module] = {}
        modules = utils.json_read(prefix + "-" + filename(regulation) + ".json", modules)
        if modules == []: continue # if file exists

        # Normalize each module entry for rendering.
        data = [clean(module_id, module, fields, regulation)
                for module_id, module in modules.items()]

        data.sort(key=lambda x: (x['category'], x['id'])) # -int(x['credits'])
        js_data = json.dumps(data, indent=1)

        page_data = {
          "today":      time_dmy,
          "semester":   semester,
          "regulation": display_regulation,
          "js_data":    js_data,
          "content":    generate_page(data)
        }
        utils.file_write(folder + "/" + href, stache(page_tmpl, page_data))

    print("finished")
Exemplo n.º 27
0
def main(argv):
    """Entry point of the feed adapter.

    Parses the config, optionally daemonizes, handles the 'stop'/'status'
    actions against a running instance (tracked through a PID file) and
    otherwise runs the adapter loop until the PID file disappears.
    """
    if len(argv) == 0:
        print ("""\nUsage: python3 -m adapter config-file.ini ( stop | daemon )

""" + adapter_app +""", version """ + adapter_version +"""

  config-file               Configuration file, see mcfeedadapter_example.conf for examples.
  action                    Optional, one of the following:
      stop                  Stop running adapter
      daemon                Start adapter as daemon
""")
        return 0

    print (adapter_app + """, version """ + adapter_version + "\n")
    print ("")

    args=readconf.parse_argv(argv)
    if cfg.action is not None:
        if cfg.action == 'daemon':
            utils.become_daemon();

    if not readconf.read_conf(args):
        return 1;

    if not initialize_outputs():
        close_outputs()
        return 1;

    # PID of a previously started adapter instance, if any.
    current_pid=utils.file_read(cfg.pid_file)

    if cfg.action is not None:
        if (cfg.action == 'stop') or (cfg.action == 'status'):
            if current_pid is None:
                print("Adapter is not running\n")
                return 0
            process_id=int(current_pid)
            print("Adapter found, PID " + str(process_id))
            if cfg.action == 'stop':
                # Removing the PID file signals the running loop to exit;
                # wait for the process to actually die.
                utils.remove_file(cfg.pid_file)
                while utils.is_process_running(process_id):
                    time.sleep(0.05)
                print("Adapter stopped\n")
            return 0

    if current_pid is not None:
        utils.print_error("Adapter for this feed is already running")
        return 1


    # Claim the PID file for this instance.
    utils.file_write(cfg.pid_file,utils.get_pid())

    signal.signal(signal.SIGINT,term_signal_handler)
    signal.signal(signal.SIGTERM,term_signal_handler)

    current_pid=utils.file_read(cfg.pid_file)
    utils.log_write("Adapter started, PID: " + str(current_pid))
    # Main loop: iterate until the PID file is removed ('stop' action,
    # 'term' action or a processing error).
    while current_pid is not None:
        iteration_result=adapter_iteration()
        if cfg.action is not None:
            if cfg.action == 'term':
                utils.remove_file(cfg.pid_file)
                current_pid = None

        if current_pid is not None:
            if not iteration_result[0]:
                utils.print_error("Adapter encountered error when processing feed records and will be stopped")
                utils.remove_file(cfg.pid_file)
                return 1

            # No new records processed -> back off briefly.
            if not iteration_result[1]:
                time.sleep(0.1)

        current_pid=utils.file_read(cfg.pid_file)

    close_outputs()
    utils.log_write("Adapter stopped")
Exemplo n.º 28
0
def update_local_props(app_root_dst):
    """Write local.properties pointing at the configured SDK and NDK."""
    lines = ('sdk.dir=' + ANDROID_HOME + '\n'
             + 'ndk.dir=' + ANDROID_NDK_HOME + '\n')
    file_write(join_path(app_root_dst, 'local.properties'), lines)
Exemplo n.º 29
0
regex_set3 = set()
set_size = 55000


# Keep sampling random regex strings until each of the four buckets
# (grouped by the number of '*' operators) holds set_size unique entries.
# NOTE(review): regex_set0/1/2, operations and rstr are defined earlier in
# the file (outside this chunk) -- confirm before reuse.
while len(regex_set0) < set_size or len(regex_set1) < set_size or len(regex_set2) < set_size \
        or len(regex_set3) < set_size:

    regex_instance = rstr.rstr([rstr.rstr('0123*', 0, 4)
                                for _ in range(30)] + operations, 7)
    star_cnt = regex_instance.count('*')

    # Discard blank candidates and syntactically invalid star usage.
    if regex_instance.strip() == '':
        continue
    if '**' in regex_instance or regex_instance[0] == '*':
        continue

    if star_cnt == 0 and len(regex_set0) < set_size:
        regex_set0.add(regex_instance)
    elif star_cnt == 1 and len(regex_set1) < set_size:
        regex_set1.add(regex_instance)
    elif star_cnt == 2 and len(regex_set2) < set_size:
        regex_set2.add(regex_instance)
    elif star_cnt == 3 and len(regex_set3) < set_size:
        regex_set3.add(regex_instance)
    print(len(regex_set0), len(regex_set1), len(regex_set2), len(regex_set3))

# Persist each bucket to its own file.
file_write('data/alphabet4/regex/star0.txt', regex_set0)
file_write('data/alphabet4/regex/star1.txt', regex_set1)
file_write('data/alphabet4/regex/star2.txt', regex_set2)
file_write('data/alphabet4/regex/star3.txt', regex_set3)
Exemplo n.º 30
0
                        seq2seq_train.extend(get_dataset(f_name))
                    elif 'valid' in f_name:
                        seq2seq_valid.extend(get_dataset(f_name))
                    elif 'test' in f_name:
                        seq2seq_test.extend(get_dataset(f_name))

            # Same split logic for the set2regex model's data files.
            elif model_type == 'set2regex':
                for f_name in os.listdir(path):
                    f_name = path + '/' + f_name
                    if 'train' in f_name:
                        set2regex_train.extend(get_dataset(f_name))
                    elif 'valid' in f_name:
                        set2regex_valid.extend(get_dataset(f_name))
                    elif 'test' in f_name:
                        set2regex_test.extend(get_dataset(f_name))

    # Write the merged train/valid/test splits for both model types.
    # NOTE(review): max_set_val and the accumulator lists are defined
    # earlier in the file (outside this chunk).
    file_write('../total/{}/seq2seq/train.txt'.format(max_set_val),
               seq2seq_train)
    file_write('../total/{}/set2regex/train.txt'.format(max_set_val),
               set2regex_train)

    file_write('../total/{}/seq2seq/valid.txt'.format(max_set_val),
               seq2seq_valid)
    file_write('../total/{}/set2regex/valid.txt'.format(max_set_val),
               set2regex_valid)

    file_write('../total/{}/seq2seq/test.txt'.format(max_set_val),
               seq2seq_test)
    file_write('../total/{}/set2regex/test.txt'.format(max_set_val),
               set2regex_test)
Exemplo n.º 31
0
model_file_root = "saved_models/"
# Ensure the log and model output directories exist.
# NOTE(review): log_file_root, hyper_params and file_name are defined
# earlier in the file (outside this chunk).
if not os.path.isdir(log_file_root):
    os.mkdir(log_file_root)
if not os.path.isdir(model_file_root):
    os.mkdir(model_file_root)
hyper_params['log_file'] = log_file_root + hyper_params[
    'project_name'] + '_log' + file_name + '.txt'
hyper_params['model_file_name'] = model_file_root + hyper_params[
    'project_name'] + '_model' + file_name + '.pt'

# Load the processed data and get the reader classes for training, test, and validation sets
train_reader, val_reader, test_reader, total_items = load_data(hyper_params)
hyper_params['total_items'] = total_items
hyper_params['testing_batch_limit'] = test_reader.num_b

# Record the run configuration and dataset statistics in the log file.
file_write(hyper_params['log_file'],
           "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
file_write(hyper_params['log_file'], "Data reading complete!")
file_write(hyper_params['log_file'],
           "Number of train batches: {:4d}".format(train_reader.num_b))
file_write(hyper_params['log_file'],
           "Number of validation batches: {:4d}".format(val_reader.num_b))
file_write(hyper_params['log_file'],
           "Number of test batches: {:4d}".format(test_reader.num_b))
file_write(hyper_params['log_file'], "Total Items: " + str(total_items) + "\n")

# Instantiate the model
model = Model(hyper_params)
# Loss function is the VAE loss
criterion = VAELoss(hyper_params)

# Different options for the optimizer
Exemplo n.º 32
0
from utils import get_dataset, file_write

# Carve each star-level regex file into consecutive 5000-example chunks,
# writing one chunk per set directory (set5, set10, set20, ..., set100).
set_list = ['set5'] + ['set{}'.format(i) for i in range(10, 110, 10)]
regex_files = ['star0.txt', 'star1.txt', 'star2.txt', 'star3.txt']
num = 5000

for regex_file in regex_files:
    dataset = get_dataset(regex_file)

    for idx, set_dir in enumerate(set_list):
        # Chunk idx covers dataset[idx*num : (idx+1)*num].
        offset = idx * num
        chunk = dataset[offset:offset + num]
        file_write(set_dir + '/' + regex_file, chunk)
Exemplo n.º 33
0
 def add_preseed(self, contents):
     """Write *contents* as ``preseed.cfg`` inside the initrd work directory."""
     target = os.path.join(self.work_initrd, 'preseed.cfg')
     utils.file_write(target, contents)
Exemplo n.º 34
0
def train_complete(hyper_params,
                   Model,
                   train_reader,
                   val_reader,
                   user_count,
                   item_count,
                   model,
                   review=True):
    """Train *model*, checkpointing on validation MSE, and return the best model.

    Runs for ``hyper_params['epochs']`` epochs.  After each epoch the model is
    evaluated on the validation set and saved to ``hyper_params['model_path']``
    whenever validation MSE improves.  Training can be interrupted with Ctrl-C;
    the best checkpoint is then reloaded into a fresh ``Model`` instance which
    is returned in eval mode.

    Args:
        hyper_params: settings dict; this function reads 'log_file',
            'model_type', 'lr', 'weight_decay', 'epochs', 'dataset' and
            'model_path'.
        Model: model class, used to re-instantiate when loading the best
            checkpoint at the end.
        train_reader: batched reader for the training set.
        val_reader: batched reader for the validation set.
        user_count: forwarded to evaluate() — presumably the number of users;
            confirm against eval.evaluate.
        item_count: forwarded to evaluate() — presumably the number of items.
        model: the (already constructed) model instance to train.
        review: forwarded to evaluate(); looks like it toggles review-text
            based evaluation — TODO confirm.

    Returns:
        The best-on-validation model, weights loaded, in eval mode.
    """
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from torch.autograd import Variable

    from loss import MSELoss
    from eval import evaluate, eval_ranking
    from utils import file_write, is_cuda_available, load_obj, log_end_epoch, init_transnet_optim

    # Log run metadata and dataset statistics.  `dt` and `time` are assumed to
    # be module-level imports (datetime / time) — TODO confirm.
    file_write(hyper_params['log_file'],
               "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
    file_write(hyper_params['log_file'], "Data reading complete!")
    file_write(hyper_params['log_file'],
               "Number of train batches: {:4d}".format(len(train_reader)))
    file_write(hyper_params['log_file'],
               "Number of validation batches: {:4d}".format(len(val_reader)))

    criterion = MSELoss(hyper_params)

    # TransNet variants use a specialised multi-optimizer setup; everything
    # else trains with plain Adam.
    if hyper_params['model_type'] in ['transnet', 'transnet++']:
        optimizer = init_transnet_optim(hyper_params, model)

    else:
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=hyper_params['lr'],
                                     weight_decay=hyper_params['weight_decay'])

    file_write(hyper_params['log_file'], str(model))
    file_write(hyper_params['log_file'],
               "\nModel Built!\nStarting Training...\n")

    try:
        # NOTE(review): INF is expected to be a module-level constant
        # convertible by float() (e.g. 'inf' or a large number) — confirm it
        # is defined in this module.
        best_MSE = float(INF)

        for epoch in range(1, hyper_params['epochs'] + 1):
            epoch_start_time = time.time()

            # Training for one epoch
            metrics = train(model, criterion, optimizer, train_reader,
                            hyper_params)
            metrics['dataset'] = hyper_params['dataset']
            # log_end_epoch(hyper_params, metrics, epoch, time.time() - epoch_start_time, metrics_on = '(TRAIN)')

            # Calulating the metrics on the validation set
            metrics, _, _ = evaluate(model,
                                     criterion,
                                     val_reader,
                                     hyper_params,
                                     user_count,
                                     item_count,
                                     review=review)
            metrics['dataset'] = hyper_params['dataset']
            log_end_epoch(hyper_params,
                          metrics,
                          epoch,
                          time.time() - epoch_start_time,
                          metrics_on='(VAL)')

            # Save best model on validation set
            if metrics['MSE'] < best_MSE:
                print("Saving model...")
                torch.save(model.state_dict(), hyper_params['model_path'])
                best_MSE = metrics['MSE']

    except KeyboardInterrupt:
        print('Exiting from training early')

    # Load best model and return it for evaluation on test-set
    model = Model(hyper_params)
    if is_cuda_available: model = model.cuda()
    model.load_state_dict(torch.load(hyper_params['model_path']))
    model.eval()

    return model
Exemplo n.º 35
0
def _split_and_write(data, n_test, path, maximum_set, model_dir, star_idx):
    """Split *data* 10/10/80 into test/valid/train and write the three files.

    The first ``n_test`` examples become the test set, the next ``n_test`` the
    validation set, and the remainder the training set.  Files are written to
    ``<path>/max_set<maximum_set>/<model_dir>/star<star_idx>_<split>.txt``.
    """
    base = '{}/max_set{}/{}/star{}_'.format(path, maximum_set, model_dir,
                                            star_idx)
    file_write(base + 'train.txt', data[2 * n_test:])
    file_write(base + 'test.txt', data[:n_test])
    file_write(base + 'valid.txt', data[n_test:2 * n_test])


def main():
    """Build seq2seq and set2regex train/valid/test files for every
    (set size, star level, maximum set size) combination.

    For each regex in each ``star<k>.txt`` file, collects up to ``set_num``
    example strings (padded with the literal token 'none'), tab-joins them
    with the processed regex, pads everything up to ``maximum_set`` strings
    via fill_none_token, and writes 10% test / 10% valid / 80% train splits.
    """
    set_list = ['set5'] + ['set{}'.format(i) for i in range(10, 110, 10)]
    prefix_path = 'data/alphabet4/regex/'
    star_num = [0, 1, 2, 3]
    max_set = [20, 50, 100]

    for set_dir in set_list:
        path = prefix_path + set_dir
        set_num = int(set_dir.replace('set', '').strip())

        for star_idx in star_num:
            file_path = path + '/star{}.txt'.format(star_idx)

            regex_set = get_dataset(file_path)
            pairs_data = []
            for regex in regex_set:
                # Up to set_num example strings per regex, padded with the
                # 'none' token when fewer instances are generated.
                strings = process_source(get_instance_list(regex, set_num))
                strings.extend(['none'] * max(0, set_num - len(strings)))
                strings.append(process_target(regex))
                pairs_data.append('\t'.join(strings))

            for maximum_set in max_set:
                # A set5 dataset can feed max_set20/50/100, but e.g. set50
                # cannot feed max_set20 — skip impossible combinations.
                if set_num > maximum_set:
                    continue
                diff = maximum_set - set_num

                seq2seq_data, set2regex_data = fill_none_token(
                    pairs_data, diff, maximum_set)
                # Both formats are split with the SAME index, computed from
                # the seq2seq data (matches the original behaviour).
                test_ratio = int(len(seq2seq_data) * 0.1)
                _split_and_write(seq2seq_data, test_ratio, path, maximum_set,
                                 'seq2seq', star_idx)
                _split_and_write(set2regex_data, test_ratio, path,
                                 maximum_set, 'set2regex', star_idx)
Exemplo n.º 36
0
 def guardar(self, name):
     """Save the text from utils.get_text() as template *name* under the
     current language folder inside TEMPLATES_PATH."""
     template_path = os.path.join(TEMPLATES_PATH, self.langFolder, name)
     utils.file_write(template_path, utils.get_text())
Exemplo n.º 37
0
	def run(self, edit):
		"""Run the file in the active view with the tool for its language.

		Builds a shell command from a per-language template filled with the
		'archivo' dict, opens JS/HTML files in a browser instead, and on
		macOS writes the command to a ``run.command`` file and opens it;
		otherwise delegates execution to the ``ejecutar_comando`` command.
		"""
		esLinux=False
		esMac=False
		view=sublime.active_window().active_view()
		tipo=utils.get_language()
		comando=""
		archivo=self.extraerArchivo(view.file_name(), tipo)
		print("el archivo es :"+archivo["ruta"])
		print("la ruta es : "+archivo["ruta"])
		# Run from the file's directory so relative paths in the program work.
		os.chdir(archivo["ruta"])
		# Keeps the terminal open after the program exits (Windows syntax by default).
		pausa=" & pause>nul"
		plataforma=platform.system().lower().strip()

		if plataforma == "linux":
			esLinux=True
			pausa=' && read - p ""'
		elif plataforma == "darwin":
			esMac=True

		print(archivo)
		# Language -> command template; %-formatted from the 'archivo' dict, so
		# each entry assumes the dict carries the keys it references (nombre,
		# path, host, username, ...) — populated by extraerArchivo.
		commands={
		"java":"java %(nombre)s"%archivo,
		"c":"%(nombre)s"%archivo,
		"batch file":"%(nombre)s"%archivo,
		"ruby":'ruby %(path)s'%archivo,
		"r":'rscript %(path)s'%archivo,
		"python":'python %(path)s'%archivo,
		"python3":'python3 %(path)s'%archivo,
		"dart":'dart %(nombreCompleto)s'%archivo,
		"plsql":'sqlplus %(username)s/%(password)s~%(host)s:%(port)s/%(service)s @ %(path)s'%archivo,
		"sqlite":'sqlite -cmd ".read %(nombreCompleto)s"'%archivo,
		"mysql":'mysql --bind-address=%(host)s --port=%(port)s --user=%(username)s --password=%(password)s --database=%(db)s -e "source %(path)s"'%archivo,
		"nodejs":'node %(path)s'%archivo,
		"perl":'perl %(nombreCompleto)s'%archivo,
		"mongodb":"mongo %(nombreCompleto)s"%archivo,
		"source.postgre ":"psql -f %(nombreCompleto)s"%archivo,
		"sqlserver":'sqlcmd -S "ANTIN\SQLEXPRESS" -i "%(nombreCompleto)s"'%archivo,
		"scala":"scala %(nombreCompleto)s"%archivo,
		"groovy":"groovy %(path)s"%archivo,
		"go":"go run %(path)s"%archivo
		}

		print("el tipo es : "+tipo)
		if commands.get(tipo):
			print("el tipo es: "+commands.get(tipo))
			comando=commands.get(tipo)
		elif tipo.startswith("source.js ") or tipo.startswith("source.basic.html"):
			# JS / HTML files are opened in the default browser, not executed.
			webbrowser.open_new_tab(archivo["path"])
			return
		else:
			print("no es ninguno")

		if comando.strip():
			print("el comando es : "+comando)
			if esMac:
				print("es mac")
				# macOS: write the command to a .command file and `open` it so
				# it runs in a Terminal window.
				rutaArchivo = os.path.join(sublime.packages_path(), "run.command")
				utils.file_write(rutaArchivo, comando)
				comando='open "'+rutaArchivo+'"'
				print("el comando final es : "+comando)
				os.system(comando)
			else:
				view.run_command("ejecutar_comando", {"comando":comando})
Exemplo n.º 38
0
 def add_preseed(self, contents):
     """Write *contents* to ``preseed.cfg`` inside the initrd working
     directory (``self.work_initrd``)."""
     dst = os.path.join(self.work_initrd, 'preseed.cfg')
     utils.file_write(dst, contents)
Exemplo n.º 39
0
def train(model, criterion, reader, optimizer, epoch, hyper_params,
          experiment):
    """
    Function to train the model for one epoch (VAE with KL annealing)
    :param model: The model choice
    :param criterion: The loss function choice (takes decoder output,
        z_mean, z_log_sigma, targets and the anneal factor)
    :param reader: The Data Reader class
    :param optimizer: The optimizer choice
    :param epoch: The given epoch
    :param hyper_params: The hyper-parameter dictionary
    :param experiment: CometML experiment to log metric
    """
    # Step into training mode
    model.train()

    total_loss = 0
    start_time = time.time()
    batch = 0
    batch_limit = int(reader.num_b)
    # KL-annealing schedule: anneal ramps linearly from 0 towards anneal_cap
    # over total_anneal_steps updates.
    total_anneal_steps = 200000
    anneal = 0.0
    update_count = 0.0
    anneal_cap = 0.2

    for x, y_s in reader.iter():
        batch += 1

        # Empty the gradients
        model.zero_grad()
        optimizer.zero_grad()

        # Forward pass
        decoder_output, z_mean, z_log_sigma = model(x)

        # Backward pass.  Note: the loss uses the anneal value computed in the
        # PREVIOUS iteration (0.0 for the first batch); anneal is updated below.
        loss = criterion(decoder_output, z_mean, z_log_sigma, y_s, anneal)
        loss.backward()
        optimizer.step()

        total_loss += loss.data

        # Anneal logic
        if total_anneal_steps > 0:
            anneal = min(anneal_cap, 1. * update_count / total_anneal_steps)
        else:
            anneal = anneal_cap
        update_count += 1.0

        # Logging mechanism: log every batch_log_interval batches and at the
        # final batch; total_loss is reset after each log line, so cur_loss is
        # the mean loss since the previous log.
        if (batch % hyper_params['batch_log_interval'] == 0
                and batch > 0) or batch == batch_limit:
            div = hyper_params['batch_log_interval']
            if batch == batch_limit:
                # NOTE(review): the final partial window uses
                # (batch_limit % interval) - 1 as its divisor — the -1 looks
                # suspicious but is guarded against non-positive values below.
                div = (batch_limit % hyper_params['batch_log_interval']) - 1
            if div <= 0:
                div = 1

            cur_loss = (total_loss / div)
            elapsed = time.time() - start_time

            ss = '| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | loss {:5.4f}'.format(
                epoch, batch, batch_limit, (elapsed * 1000) / div, cur_loss)

            file_write(hyper_params['log_file'], ss)

            total_loss = 0
            start_time = time.time()

    # Log loss to CometML where step is each epoch
    experiment.log_metric("loss", total_loss, step=epoch)