Example #1
    def remove(self, ignore_errors=False):
        # TODO: honor ignore_errors (see the sketch after this example)
        stream_logger.info("   - %s" % self.name)

        # If the archive has not been extracted yet
        if not os.path.exists("%s/%s" % (conf.get("settings", "cache"), self.name)):
            self.unarchive()

        self.import_control()
        # Pre Remove
        stream_logger.info("     | Pre Remove")
        self.control.pre_remove()

        # Remove
        stream_logger.info("     | Remove")
        files_list = open(os.path.join(conf.get("settings", "cache"), self.name, "files.lst")).readlines()
        for _file in files_list:
            try:
                os.remove(os.path.join(conf.get("settings", "packages"), _file.replace("\n", "")))
            except OSError:
                pass
        # Post Remove
        stream_logger.info("     | Post Remove")
        self.control.post_remove()

        stream_logger.info("     | Clean")
        shutil.rmtree(os.path.join(conf.get("settings", "cache"), self.name))
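The ignore_errors TODO above is never actually honored by the method. One hedged way to wire the flag into the deletion loop (illustrative only; names as in the example):

        for _file in files_list:
            target = os.path.join(conf.get("settings", "packages"), _file.strip())
            try:
                os.remove(target)
            except OSError:
                if not ignore_errors:
                    raise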
Example #2
def parse(parseFileName):
    global resultFile
    resultFileName = "Conf-Nodes.txt"

    # Start from a clean output file.
    try:
        os.remove(resultFileName)
    except OSError:
        pass

    tree = ET.parse(parseFileName)
    root = tree.getroot()
    #print(root)

    resultFile = open(resultFileName, 'a')

    for child in root:
        allAuthors = child.findall('author')
        pubYear = child.find('year')
        journal = child.find('journal')
        booktitle = child.find('booktitle')

        pubName = None

        if journal is not None:
            pubName = journal
        elif booktitle is not None:
            pubName = booktitle

        if pubName is not None:  # variable was reassigned
            processRecord(allAuthors, pubYear, pubName)

    resultFile.close()
Example #3
    def stop(self):
        """
                Stop the daemon
                """
        # Get the pid from the pidfile
        try:
            pf = open(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process
        try:
            while True:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError as err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print(str(err))
                sys.exit(1)
Example #4
  def __WritePickled(self, obj, filename):
    """Pickles the object and writes it to the given file.
    """
    if not filename or filename == '/dev/null' or not obj:
      return

    # Write the pickle to a temp file in the same directory first.
    descriptor, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename))
    tmpfile = os.fdopen(descriptor, 'wb')

    pickler = pickle.Pickler(tmpfile, protocol=1)
    pickler.fast = True
    pickler.dump(obj)

    tmpfile.close()

    self.__file_lock.acquire()
    try:
      try:
        # Atomic on POSIX; on Windows rename cannot replace an existing file.
        os.rename(tmp_filename, filename)
      except OSError:
        # Fall back to remove-then-rename when the target already exists.
        try:
          os.remove(filename)
        except OSError:
          pass
        os.rename(tmp_filename, filename)
    finally:
      self.__file_lock.release()
Example #5
def rrmdir(directory):
    for root, dirs, files in os.walk(directory, topdown=False):
        for name in files:
            os.remove(os.path.join(root, name))
        for name in dirs:
            os.rmdir(os.path.join(root, name))
    os.rmdir(directory)
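For a tree of ordinary files and directories, the standard library performs the same bottom-up removal in one call; a minimal equivalent sketch:

import shutil

shutil.rmtree(directory)  # removes files, then subdirectories, then the root, like rrmdir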
Example #6
    def __del__(self):
        """Destructor. Removes the pidfile, if it was created by ourselfes."""

        if not self.created:
            return

        if not os.path.exists(self.filename):
            if self.verbose > 3:
                log.debug(
                    _("Pidfile '%s' doesn't exists, not removing."), self.filename)
            return

        if not self.auto_remove:
            if self.verbose > 3:
                log.debug(
                    _("Auto removing disabled, don't deleting '%s'."),
                    self.filename)
            return

        if self.verbose > 1:
            log.debug(_("Removing pidfile '%s' ..."), self.filename)
        if self.simulate:
            if self.verbose > 1:
                log.debug(_("Just kidding .."))
            return
        try:
            os.remove(self.filename)
        except OSError as e:
            log.err(
                _("Could not delete pidfile %(file)r: %(err)s"),
                self.filename, str(e))
        except Exception as e:
            self.handle_error(str(e), e.__class__.__name__, True)
Example #7
def page_extract(start, end, SUBSECTION):

    PDF_IN = PdfFileReader(open(PDF_DIR, 'rb'))

#    for i in xrange(PDF_IN.numPages): # for all pages
    for i in range(int(start) - 1, int(end)):

        output = PdfFileWriter()
        output.addPage(PDF_IN.getPage(i))
        
        base, name_ext = os.path.split(PDF_DIR)
        name, ext      = os.path.splitext(name_ext)
        PDF_OUT        = '{}{}'.format(TMP_DIR, '{}-{}{}'.format(name, str(i).zfill(6), ext))
        
        with open(PDF_OUT, 'wb') as outputStream:
            output.write(outputStream)
        
        gs_pdf_to_png(PDF_OUT)
        os.remove(PDF_OUT)
    
    png_list = group(os.listdir(TMP_DIR), 2)
    for tup in png_list:
        print(tup)
        card_front = os.path.join(TMP_DIR, tup[0])
        card_back  = os.path.join(TMP_DIR, tup[1])
        make_cards(card_front, card_back, SUBSECTION)
Example #8
    def _setup_region_dict(self, name, reg, outsubdir, systematic, ntuple,
                           basedir):
        regdic = reg.get_config_dict()
        modedir = regdic['hists'].lower()
        regdic['name'] = name
        if outsubdir:
            systdir = outsubdir
        else:
            if systematic == 'NONE':
                systdir = 'baseline'
            else:
                systdir = systematic.lower()
        full_out_dir = join(basedir, modedir, systdir)
        if not isdir(full_out_dir):
            if self.make_dirs:
                make_dir_if_none(full_out_dir)
            else:
                raise IOError(99, "no dir", full_out_dir)
        histname = '{}.h5'.format(basename(splitext(ntuple)[0]))
        full_out_path = join(full_out_dir, histname)
        if isfile(full_out_path):
            if self.rerun:
                os.remove(full_out_path)
            else:
                return None
        regdic['output_name'] = full_out_path
        regdic['systematic'] = systematic

        return regdic
Example #9
def sqlite_new(path):
    global SQLite
    if SQLite is not None:
        sqlite_close()
    if os.path.isfile(path):
        os.remove(path)
    SQLite = sqlite_init(path)
Example #10
    def _delete(self):

        # Gets root before deleting all files #
        self._root()

        # Deletes the file if it exists #
        if self.written:
            os.remove(self.filename)
            self.written = False

        # Deletes created folders until a non-empty folder is encountered #
        while self.roots:

            # Build the current candidate directory from the accumulated roots #
            current_dir = ''
            for folder in self.roots:
                current_dir += '%s/' % folder

            # Stop if other files or folders are in the way #
            if not os.listdir(current_dir):
                os.rmdir(current_dir)
                del self.roots[-1]
            else:
                break
Example #11
    def sync(self, args):
        """ Synchronize rtc/repository.yaml file and each rtc repository version hash. """
        options, argv = self.parse_args(args[:], self._print_alternative_rtcs)
        verbose = options.verbose_flag
        sys.stdout.write('# Writing repository.yaml for package distribution\n')

        sys.stdout.write('## Parsing RTC directory\n')
        package = admin.package.get_package_from_path(os.getcwd())
        repos = []
        for rtc in admin.rtc.get_rtcs_from_package(package, verbose=verbose):
            sys.stdout.write('### RTC %s\n' % rtc.rtcprofile.basicInfo.name)
            repo = admin.repository.get_repository_from_path(rtc.path, description=rtc.rtcprofile.basicInfo.description)

            repos.append(repo)

        repo_file = os.path.join(package.get_rtcpath(), 'repository.yaml')

        bak_file = repo_file + wasanbon.timestampstr()
        if os.path.isfile(bak_file):
            os.remove(bak_file)
        import shutil, yaml
        shutil.copy(repo_file, bak_file)
        dic = yaml.safe_load(open(bak_file, 'r'))  # safe_load avoids executing arbitrary YAML tags
        if not dic:
            dic = {}
        for repo in repos:
            if getattr(repo, 'url', None) is not None:
                url = repo.url.strip()
            else:
                url = ''
            dic[repo.name] = {'repo_name' : repo.name, 'git': url, 'description':repo.description, 'hash':repo.hash}

        yaml.dump(dic, open(repo_file, 'w'), encoding='utf8', allow_unicode=True, default_flow_style=False)
Example #12
    def __update_hotkey(self, command, hotkey):
        """Update the hotkey for 'command' to 'hotkey'.

        If 'command' is not found, add it with the new 'hotkey'.
        Return 'True' on success, 'False' otherwise.
        """
        self.__touch_config_file()
        oldfile = open(XBINDKEYS_CONFIG_FILE, "r")
        newfile = open(XBINDKEYS_CONFIG_FILE + ".new", "w")
        # Search for command
        commandfound = False
        skipnextline = False
        for line in oldfile:
            if not skipnextline:
                newfile.write(line)
            else:
                skipnextline = False
            if line == '"' + command + '"\n':
                newfile.write("  " + hotkey + "\n")  # update hotkey
                commandfound = True
                skipnextline = True
        if not commandfound:
            # command not found, add it
            newfile.write('"' + command + '"\n')
            newfile.write("  " + hotkey + "\n")
        oldfile.close()
        newfile.close()
        try:
            os.remove(XBINDKEYS_CONFIG_FILE)
        except OSError:
            sessionlog.write("ERROR: 'Hotkeys.__update_hotkey()' - Cannot replace '" + XBINDKEYS_CONFIG_FILE + "'.")
            os.remove(XBINDKEYS_CONFIG_FILE + ".new")
            return False
        shutil.move(XBINDKEYS_CONFIG_FILE + ".new", XBINDKEYS_CONFIG_FILE)
        return True
Example #13
    def removeOld(self, path, dontDelete = [], newSize = 0):

        files = []
        oldSize = 0
        for root, subfiles, filenames in os.walk(path):
            log.debug(subfiles)

            for filename in filenames:
                ext = os.path.splitext(filename)[1].lower()[1:]
                fullPath = os.path.join(root, filename)
                oldSize += os.path.getsize(fullPath)

                # iso's are huge, but the same size as 720p, so remove some filesize for better comparison
                if ext == 'iso':
                    oldSize -= (os.path.getsize(fullPath) / 1.6)

                if fullPath not in dontDelete:

                    # Only delete media files and subtitles
                    if ('*.' + ext in self.extensions['movie'] or '*.' + ext in self.extensions['subtitle']) and not '-trailer' in filename:
                        files.append(fullPath)

        log.info('Quality Old: %d, New %d.' % (long(oldSize) / 1024 / 1024, long(newSize) / 1024 / 1024))
        if long(oldSize) < long(newSize):
            for file in files:
                try:
                    os.remove(file)
                    log.info('Removed old file: %s' % file)
                except OSError:
                    log.info('Couldn\'t delete file: %s' % file)
            return True
        else:
            log.info('New file(s) are smaller than the old ones, not overwriting')
            return False
Example #14
def docss():
    """ Compresses the  CSS files """

    listCSS = []

    theme = settings.get_theme()
    print "Using theme %s" % theme
    css_cfg = os.path.join("..", "..", "..", "private", "templates", theme, "css.cfg")
    f = open(css_cfg, "r")
    files = f.readlines()
    f.close()
    for file in files[:-1]:
        p = re.compile("(\n|\r|\t|\f|\v)+")
        file = p.sub("", file)
        listCSS.append("../../styles/%s" % file)

    outputFilenameCSS = "eden.min.css"

    # Merge CSS files
    print "Merging Core styles."
    mergedCSS = mergeCSS(listCSS, outputFilenameCSS)

    # Compress CSS files
    print "Writing to %s." % outputFilenameCSS
    compressCSS(mergedCSS, outputFilenameCSS)

    # Move files to correct locations
    print "Deleting %s." % outputFilenameCSS
    try:
        os.remove("../../themes/%s/%s" % (theme, outputFilenameCSS))
    except OSError:
        pass
    print "Moving new %s." % outputFilenameCSS
    shutil.move(outputFilenameCSS, "../../themes/%s" % theme)
Example #15
def send_data(save_path):
    """
    Sends all the data files that are present in the specified path to the Qbike server.
    :param save_path: Requires the path in which the trips are saved.
    :return: Nothing. The data is sent to the Server and the txt files are removed from the path's directory.
    """
    end = False
    Trip_nb = 100
    while not end:
        if not os.path.isfile(r'C:\Users\Joren\Documents\Ir 1B\P&O\P&O 3\Tryouts\Trips\Trip1.txt'):
            end = True

        else:
            for nb in reversed(range(0, 100)):
                Trip = os.path.join(save_path, "Trip" + str(nb) + ".txt")
                Trip_nb = str(nb)
                if os.path.isfile(Trip):
                    break

            Trip_path = os.path.join(save_path, r"Trip" + Trip_nb + r".txt")

            with open(Trip_path, "r") as Trip:
                batch = json.load(Trip)

            info = {'purpose': 'batch-sender', 'groupID': "cwa2", 'userID': ID}
            socketIO = SocketIO('dali.cs.kuleuven.be', 8080)
            socketIO.on('server_message', on_response)
            socketIO.emit('start', json.dumps(info), on_response)
            socketIO.wait(2)
            socketIO.emit('batch-tripdata', json.dumps(batch), on_response)
            socketIO.wait(5)

            os.remove(Trip_path)

    print "Sent Data"
Example #16
def _maybe_extract(fpath, dirname, descend=True):
    path = os.path.dirname(fpath)
    untar_fpath = os.path.join(path, dirname)
    if not os.path.exists(untar_fpath):
        print('Extracting contents of "{}"...'.format(dirname))
        tfile = zipfile.ZipFile(fpath, 'r')
        try:
            tfile.extractall(untar_fpath)
        except (Exception, KeyboardInterrupt) as e:
            if os.path.exists(untar_fpath):
                if os.path.isfile(untar_fpath):
                    os.remove(untar_fpath)
                else:
                    shutil.rmtree(untar_fpath)
            raise
        tfile.close()
    if descend:
        dirs = [os.path.join(untar_fpath, o)
                for o in os.listdir(untar_fpath)
                if os.path.isdir(os.path.join(untar_fpath, o))]
        if len(dirs) != 1:
            print("Error, found not exactly one dir: {}".format(dirs))
            sys.exit(-1)
        return dirs[0]
    else:
        return untar_fpath
Example #17
    def symlink(env, target, source):
        trgt = str(target[0])
        src = str(source[0])

        if os.path.islink(trgt) or os.path.exists(trgt):
            os.remove(trgt)
        os.symlink(os.path.basename(src), trgt)
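A hedged variant of the same helper that avoids the window between os.remove and os.symlink: create the link under a temporary name, then rename it over the target, which is atomic on POSIX (the helper name and temp suffix are illustrative, not part of the original):

import os

def symlink_atomic(src, trgt):
    tmp = trgt + '.tmp'  # hypothetical temporary name next to the target
    os.symlink(os.path.basename(src), tmp)
    os.replace(tmp, trgt)  # atomically replaces an existing file or symlink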
Example #18
def save_tmp_file(fileobj, filename, ext):
    if ext in IMAGES_EXT:
        f = open("/tmp/" + filename + ext, 'wb')
        shutil.copyfileobj(fileobj, f)
        f.close()
        helpers.resize_image(filename, ext)
        if ext != '.jpg':
            os.remove("/tmp/" + filename + ext)
        return '.jpg'
    if ext in ['.txt']:
        f = open("/tmp/" + filename + ext, 'w')
        shutil.copyfileobj(fileobj, f)
        f.close()
        return ext
    if ext in ['.dcm', '.dicom']:
        f = open("/tmp/" + filename + ext, 'w')
        shutil.copyfileobj(fileobj, f)
        f.close()
        try:
            main_helpers.dicom.saveDicomAsImage("/tmp/" + filename + ext,
                                                "/tmp/" + filename + ext + ".thumbnail.jpg")
        except Exception:
            shutil.copy(
                os.path.join(settings.BASE_DIR, 'static/img/files/dicom.png'),
                "/tmp/" + filename + ext + ".thumbnail.png"
            )
        return ext
    f = open("/tmp/" + filename + ext, 'wb')
    shutil.copyfileobj(fileobj, f)
    f.close()
    return ext
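A portability note on the example above: the hard-coded '/tmp/' prefix only exists on POSIX-like systems. A hedged sketch that derives the directory from the standard tempfile module instead (filename and ext as in the function):

import os
import tempfile

tmp_path = os.path.join(tempfile.gettempdir(), filename + ext)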
Example #19
    def test_is_same_output(self):
        fd1 = sys.stderr.fileno()
        fd2 = os.dup(fd1)
        try:
            self.assertTrue(ConfigureOutputHandler._is_same_output(fd1, fd2))
        finally:
            os.close(fd2)

        fd2, path = tempfile.mkstemp()
        try:
            self.assertFalse(ConfigureOutputHandler._is_same_output(fd1, fd2))

            fd3 = os.dup(fd2)
            try:
                self.assertTrue(ConfigureOutputHandler._is_same_output(fd2, fd3))
            finally:
                os.close(fd3)

            with open(path, 'a') as fh:
                fd3 = fh.fileno()
                self.assertTrue(
                    ConfigureOutputHandler._is_same_output(fd2, fd3))

        finally:
            os.close(fd2)
            os.remove(path)
Example #20
    def test_io(self):

        import h5py

        # Cleanup directories
        fname = 'testdset.hdf5'

        if mpiutil.rank0 and os.path.exists(fname):
            os.remove(fname)

        mpiutil.barrier()

        gshape = (19, 17)

        ds = mpiarray.MPIArray(gshape, dtype=np.int64)

        ga = np.arange(np.prod(gshape)).reshape(gshape)

        l0, s0, e0 = mpiutil.split_local(gshape[0])
        ds[:] = ga[s0:e0]

        ds.redistribute(axis=1).to_hdf5(fname, 'testds', create=True)

        if mpiutil.rank0:

            with h5py.File(fname, 'r') as f:

                h5ds = f['testds'][:]

                assert (h5ds == ga).all()

        ds2 = mpiarray.MPIArray.from_hdf5(fname, 'testds')

        assert (ds2 == ds).all()
Example #21
    def testMakeDirs(self):
        d = path(self.tempdir)

        # Placeholder file so that when removedirs() is called,
        # it doesn't remove the temporary directory itself.
        tempf = d / 'temp.txt'
        tempf.touch()
        try:
            foo = d / 'foo'
            boz = foo / 'bar' / 'baz' / 'boz'
            boz.makedirs()
            try:
                self.assert_(boz.isdir())
            finally:
                boz.removedirs()
            self.failIf(foo.exists())
            self.assert_(d.exists())

            foo.mkdir(0o750)
            boz.makedirs(0o700)
            try:
                self.assert_(boz.isdir())
            finally:
                boz.removedirs()
            self.failIf(foo.exists())
            self.assert_(d.exists())
        finally:
            os.remove(tempf)
Example #22
    def make_new_version_message(self, path):
        """Make a new version message for the repo at the given path."""

        try:
            cwd = os.getcwd()
            os.chdir(path)

            version = self.get_current_tag()

            if version[0] is None:
                return

            messages_path = os.path.join(path, 'messages.json')
            message_path = self.rewrite_messages_json(messages_path, version)

            if os.path.exists(message_path):
                os.remove(message_path)

            with open(message_path, mode='w', encoding='utf-8') as f:
                header = '{} {}'.format(
                    os.path.basename(path),
                    os.path.splitext(os.path.basename(message_path))[0])
                f.write('{}\n{}\n'.format(header, '-' * (len(header) + 1)))
                f.write(self.get_commit_messages_since(version))

            self.window.run_command('open_file', args={'file': message_path})

        except Exception:
            import traceback
            traceback.print_exc()
        finally:
            os.chdir(cwd)
Example #23
    def write(self, cr, uid, ids, vals, context=None):
        if not isinstance(ids, list):
            ids = [ids]
        if vals.get("filename") and not vals.get("extension"):
            vals["filename"], vals["extension"] = os.path.splitext(vals["filename"])
        upd_ids = ids[:]
        if vals.get("filename") or vals.get("extension"):
            images = self.browse(cr, uid, upd_ids, context=context)
            for image in images:
                old_full_path = self._image_path(cr, uid, image, context=context)
                if not old_full_path:
                    continue
                # all the stuff below is there to manage the files on the filesystem
                if (
                    vals.get("filename")
                    and (image.name != vals["filename"])
                    or vals.get("extension")
                    and (image.extension != vals["extension"])
                ):
                    super(product_images, self).write(cr, uid, image.id, vals, context=context)
                    upd_ids.remove(image.id)
                    if "file" in vals:
                        # a new image has been loaded; we should remove the old image
                        # TODO it looks like there is something wrong with the function
                        # field in OpenERP: the preview is always added in the write :(
                        if os.path.isfile(old_full_path):
                            os.remove(old_full_path)
                    else:
                        new_image = self.browse(cr, uid, image.id, context=context)
                        new_full_path = self._image_path(cr, uid, new_image, context=context)
                        # we have to rename the image on the file system
                        if os.path.isfile(old_full_path):
                            os.rename(old_full_path, new_full_path)
        return super(product_images, self).write(cr, uid, upd_ids, vals, context=context)
Example #24
    def test_delete_mid_read(self):
        self.viewer.extract()
        self.viewer.select('install.js')
        os.remove(os.path.join(self.viewer.dest, 'install.js'))
        res = self.viewer.read_file()
        eq_(res, '')
        assert self.viewer.selected['msg'].startswith('That file no')
Example #25
    def remove_logfile(self):
        from os import remove, path
        try:
            remove(path.join(path.split(__file__)[0], self.logfile_name))
        except OSError:
            # nothing to do if the file does not exist
            pass
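On Python 3.4+ the same ignore-if-missing deletion reads more directly with contextlib.suppress; a minimal sketch, assuming logfile_path stands for the joined path built in the method above:

from contextlib import suppress
from os import remove

with suppress(OSError):
    remove(logfile_path)  # no-op if the file is already gone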
Example #26
@contextlib.contextmanager  # assumed decorator: the generator below is documented and used as a context manager
def open_tempfile_with_atomic_write_to(path, **kwargs):
    """
    Open a temporary file object that atomically moves to the specified
    path upon exiting the context manager.

    Supports the same function signature as `open`.

    The parent directory must exist and be user-writable.

    WARNING: This is just like 'mv', it will clobber files!
    """
    parent_directory = os.path.dirname(path)
    _tempfile = tempfile.NamedTemporaryFile(delete=False, dir=parent_directory)
    _tempfile.close()
    tempfile_path = _tempfile.name
    try:
        with open(tempfile_path, **kwargs) as file:
            yield file
            file.flush()
            os.fsync(file.fileno())
        os.rename(tempfile_path, path)
    finally:
        try:
            os.remove(tempfile_path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                pass
            else:
                raise e
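Typical usage of the context manager above, assuming the contextlib.contextmanager decorator noted in the snippet (the path and payload are illustrative):

with open_tempfile_with_atomic_write_to('/tmp/settings.json', mode='w') as f:
    f.write('{"key": "value"}')
# On a clean exit the file is flushed, fsynced, and renamed over the target path.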
Example #27
    def test_bom(self):
        dest = os.path.join(settings.TMP_PATH, 'test_bom')
        open(dest, 'w').write('foo'.encode('utf-16'))
        self.viewer.select('foo')
        self.viewer.selected = {'full': dest, 'size': 1}
        eq_(self.viewer.read_file(), u'foo')
        os.remove(dest)
Example #28
    def scrape_rollcall(self, vote, vurl):
        (path, resp) = self.urlretrieve(vurl)
        pdflines = convert_pdf(path, 'text')
        os.remove(path)

        current_vfunc = None

        for line in pdflines.split('\n'):
            line = line.strip()

            # change what is being recorded
            if line.startswith('YEAS') or line.startswith('AYES'):
                current_vfunc = vote.yes
            elif line.startswith('NAYS'):
                current_vfunc = vote.no
            elif (line.startswith('EXCUSED') or
                  line.startswith('NOT VOTING') or
                  line.startswith('ABSTAIN')):
                current_vfunc = vote.other
            # skip these
            elif not line or line.startswith('Page '):
                continue

            # if a vfunc is active
            elif current_vfunc:
                # split names apart by 3 or more spaces
                names = re.split(r'\s{3,}', line)
                for name in names:
                    if name:
                        current_vfunc(name.strip())
Example #29
    def save(self, force_insert=False, force_update=False):
        if getattr(settings, 'DPP_IE_COMPATIBLE_PDF_VIEWER', True) and self.old_document != self.document:
            self.pdf_images_generated = False
        else:
            self.pdf_images_generated = True
        super(Document, self).save(force_insert, force_update)
        #print "pdf_images_generated set to: " + str(self.pdf_images_generated)

        # Delete old document
        if self.old_document and self.old_document != self.document:
            if os.path.exists(self.old_document.path):
                os.remove(self.old_document.path)
                #print "Old document deleted from path: " + self.old_document.path
        
        if self.old_document != self.document:
            cmd = u"python manage.py createpages " + str(self.id) + " --settings=" + settings.SETTINGS_MODULE
            subprocess.Popen(cmd, shell=True)
            #print "New page creation process started..."
        
        # Creating images when DPP_IE_COMPATIBLE_PDF_VIEWER=True in settings.py    
        if getattr(settings, 'DPP_IE_COMPATIBLE_PDF_VIEWER', True) and self.old_document != self.document:
            cmd = u"python manage.py generatepdfimages " + str(self.id) + " --settings=" + settings.SETTINGS_MODULE
            subprocess.Popen(cmd, shell=True)
            #print "Image generation process started..."
        
        self.old_document = self.document
Example #30
    def runThread(self,conn,addr):
        while True:
            #print  ('Connected from', addr)
            #data = conn.recv(bsize)
            #print ('Data received from client', repr(data.decode()))

            dirs = os.listdir(fpath)
            time.sleep(10)
            for fl in dirs:
                msg = '{0}{1}'.format("Sending file: ",fl)
                conn.send(msg.encode())
                if "ok" in conn.recv(bsize).decode(): # client ready to receive
                    selfl = '{0}{1}'.format(fpath,fl)
                    f = open(selfl,'rb')
                    payload = f.read(bsize)

                    while payload:
                        conn.send(payload)
                        print('........')
                        if "ok" in conn.recv(bsize).decode():
                            payload = f.read(bsize)
                    conn.send("eof".encode())
                    f.close()
                    # once the file is sent, it must be removed
                    os.remove(selfl)
Example #31
os.mkdir(output)

with open(config) as f:
    json_data = json.load(f)

university_id = json_data['universityID']
pages = json_data['pages']
modules = json_data['modules']

tex_settings = [f"\\userid{{{university_id}}}\n", f"\\pages{{{pages}}}"]

with open(settings_file, "w+") as f:
    f.writelines(tex_settings)

pdflatex_result = subprocess.run(["pdflatex", "-interaction=nonstopmode", "-jobname=out", template],
                                 stderr=subprocess.STDOUT, stdout=subprocess.PIPE)

if pdflatex_result.returncode != 0:
    print("Error: pdflatex exited with a non-zero return code. Run with -v to show the output of pdflatex")
    if verbose:
        print(pdflatex_result.stdout.decode("utf-8"))

for module in modules:
    module_pdf = os.path.join(output, f"{module}_{university_id}.pdf")
    shutil.copyfile(out_pdf, module_pdf)

os.remove("out.aux")
os.remove("out.log")
os.remove("out.pdf")
Example #32
#folder='slt20190822'
dir_year='slt'+year

till_month=till_month[0:4]+'-'+till_month[4:6]+'-'
print(till_month)
#date=folder[3:11]
#print(dir_month)
#dir_master=dir_month+'_master/'
#print(dir_master)
#dir_calib_sci=folder+'_calib_sci/'
#print(dir_calib_sci)

file_info='gasp_target_fitsheader_info_exclude_baddata_from201902.txt'
if os.path.exists(file_info):
    os.remove(file_info)
f_info=open(file_info,'w')

file_log='gasp_target_fitsheader_info_exclude_baddata_from201902.log'
if os.path.exists(file_log):
    os.remove(file_log)
f_log=open(file_log,'w')

#time_calib_start=strftime("%Y-%m-%d %H:%M:%S", gmtime())
#time_calib_start=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
time_calib_start=str(datetime.now())  
info_time=('File generated by An-Li Tsai at '+time_calib_start+' UTC')
print(info_time)
f_log.write(info_time+'\n')

print(sys.argv)
Example #33
def Xanes2Min(params, x, data, input, config, output):
    from .controls import generateAndRunWorkflow
    import copy
    # Xanes2Min.count and Xanes2Min.lastParams are function attributes
    # initialized outside this function; bump the call counter.
    Xanes2Min.count += 1
    input2 = copy.deepcopy(input)

    energy_shift = 0.0
    atoms = input['cluster']
    amp = 1.0
    input2['feff.corrections'] = [[0.0, 0.0]]
    # Set controls based on what has changed since last call
    # to function.
    if Xanes2Min.count == 1:
        control = [1, 1, 1, 1, 1, 1]
    else:
        control = [0, 0, 0, 0, 0, 0]
    ipar = 0
    for param in list(params.values()):
        if Xanes2Min.lastParams is not None:
            diff = param != list(Xanes2Min.lastParams.values())[ipar]
        else:
            diff = True

        # Use case insensitive equal.
        if param.name.lower() == 'expansion':
            # Uniform expansion of coordinates in cluster.
            if diff:
                control = [1, 1, 1, 1, 1, 1]

            expansion = param.value

            atoms = [[
                f[0], expansion * f[1], expansion * f[2], expansion * f[3]
            ] for f in atoms]

        elif param.name.lower() == 'broadening':
            # Lorentzian broadening applied to spectrum.
            if diff:
                control[5] = 1

            broadening = param.value
            #input2['spectral_broadening'] = [[broadening]]
            input2['feff.corrections'][0][1] = broadening

        elif param.name.lower() == 'delta_e0':
            # Shift in absolute edge energy (shift of energy grid of spectrum).
            energy_shift = param.value
        elif param.name.lower() == 'bond':
            # Move a set of atoms away from absorber along a bond.
            # Find vector to move along (r_2 - r_1)/r12
            # Get the two atoms defining the bond vector.
            if diff:
                control = [1, 1, 1, 1, 1, 1]

            bond = param.value
            bond_atoms = [
                item - 1 for sublist in input2['fit.bond'] for item in sublist
            ]
            vec = [
                input2['cluster'][bond_atoms[1]][i] -
                input2['cluster'][bond_atoms[0]][i] for i in [1, 2, 3]
            ]
            vecSquared = [vec[i]**2 for i in [0, 1, 2]]
            norm = math.sqrt(sum(vecSquared))
            vec = [vec[i] / norm * bond for i in [0, 1, 2]]
            for atom in bond_atoms[1:]:
                for i in [1, 2, 3]:
                    atoms[atom][i] += vec[i - 1]

        elif param.name.lower() == 'delta_efermi':
            #input2['fermi_shift'] = [[param.value]]
            input2['feff.corrections'][0][0] = param.value
            if diff:
                control[5] = 1
        elif param.name.lower() == 'amplitude':
            amp = param.value
        else:
            print('WARNING: UNKNOWN PARAMETER ' + param.name + '!')
            print('STOPPING NOW!!!')
            exit()

        ipar += 1

    input2['cluster'] = atoms
    input2['feff.control'] = [control]
    # Need a copy of config to start wf over
    config2 = copy.deepcopy(config)

    # Set current working directory to xCDir, so that internal wf
    # will run inside of outer wf directory.
    config2['cwd'] = config['xcDir']

    if False:  # Save all runs of underlying handler in separate directories.
        config2['xcIndexStart'] = Xanes2Min.count
    else:
        config2['xcIndexStart'] = 1

    dir = config['xcDir']
    # Loop over targets in output. Not sure if there will ever be more than one output target here.
    # Set output and error files
    for target in output:
        with open(os.path.join(dir, 'corvus.fit.stdout'),
                  'w') as out, open(os.path.join(dir, 'corvus.fit.stderr'),
                                    'w') as err:

            # Set the targetList according to fit.target
            #xanes2MinIterator += 1
            targetList = input['fit.target']

            # generate and run the workflow for target, unless no run is necessary.
            generateAndRunWorkflow(config2, input2, targetList)

            x0, y = np.array(input2[input['fit.target'][0][0]])
            y = y * amp
            # If there is an energy shift, shift the x-axis before
            # interpolating onto data grid
            x0 = x0 + energy_shift
            # On first call, check if experimental data is outside calculated
            # data, and redefine experimental data within range.
            global firstcall
            global fitconvfile
            if firstcall:
                print('Opening convergence file')
                try:
                    os.remove('fitconvergence.dat')
                except OSError:
                    pass
                fitconvfile = open('fitconvergence.dat', 'a')
                np.savetxt(fitconvfile, np.array([x, data]).transpose())
                fitconvfile.write('\n')
                firstcall = False

            yterp = np.interp(x, x0, y, left=0.0, right=0.0)

            np.savetxt(fitconvfile, np.array([x, yterp]).transpose())
            fitconvfile.write('\n')

            i = 0
            residual = np.zeros(yterp.size)
            for yi in yterp:
                if (x[i] >= x0[0]) and (x[i] <= x0[-1]):
                    residual[i] = yi - data[i]
                else:
                    residual[i] = 0.0
                i = i + 1

            Xanes2Min.lastParams = copy.copy(params)

            return residual
Example #34
import subprocess, os, sys, shutil, glob, timeit

tic = timeit.default_timer()

# Wipe contents of output folder 
# This script is running inside the docker container, so this should be allowed
for g in glob.glob('/output/*'):
    if os.path.isdir(g):
        shutil.rmtree(g)
    else:
        os.remove(g)

# Collect the list of tags to be run
all_tags = []
if os.path.exists('/REFPROP/.just_these_tags'):
    all_tags = [line.strip() for line in open('/REFPROP/.just_these_tags').readlines() if line]
elif '--catchargs' in sys.argv:
    iargs = sys.argv.index('--catchargs')
    args = sys.argv[iargs+1]
    all_tags = args.split(',')
else:
    output = subprocess.run('/REFPROP-tests/build/main -t', shell = True, stdout = subprocess.PIPE).stdout.decode('utf-8')
    for il, line in enumerate(output.split('\n')[1::]):
        if not line or '[' not in line: continue
        tag = '[' + line.split('[')[1]
        if 'veryslow' in tag: continue # Don't run the veryslow tests
    #    if 'predef_mix' not in tag: continue
        all_tags.append(tag)

tag_times = {}
for tag in all_tags:
Example #35
def test_single_image():
    """Test the simple case of one image and one catalog.
    """
    if __name__ == '__main__':
        logger = piff.config.setup_logger(verbose=2)
    else:
        logger = piff.config.setup_logger(log_file='output/test_single_image.log')

    # Make the image
    image = galsim.Image(2048, 2048, scale=0.26)

    # Where to put the stars.  Include some flagged and not used locations.
    x_list = [ 123.12, 345.98, 567.25, 1094.94, 924.15, 1532.74, 1743.11, 888.39, 1033.29, 1409.31 ]
    y_list = [ 345.43, 567.45, 1094.32, 924.29, 1532.92, 1743.83, 888.83, 1033.19, 1409.20, 123.11 ]
    flag_list = [ 1, 1, 13, 1, 1, 4, 1, 1, 0, 1 ]

    # Draw a Gaussian PSF at each location on the image.
    sigma = 1.3
    g1 = 0.23
    g2 = -0.17
    psf = galsim.Gaussian(sigma=sigma).shear(g1=g1, g2=g2)
    for x,y,flag in zip(x_list, y_list, flag_list):
        bounds = galsim.BoundsI(int(x-31), int(x+32), int(y-31), int(y+32))
        offset = galsim.PositionD( x-int(x)-0.5 , y-int(y)-0.5 )
        psf.drawImage(image=image[bounds], method='no_pixel', offset=offset)
        # corrupt the ones that are marked as flagged
        if flag & 4:
            print('corrupting star at ',x,y)
            ar = image[bounds].array
            im_max = np.max(ar) * 0.2
            ar[ar > im_max] = im_max
    image.addNoise(galsim.GaussianNoise(rng=galsim.BaseDeviate(1234), sigma=1e-6))

    # Write out the image to a file
    image_file = os.path.join('output','simple_image.fits')
    image.write(image_file)

    # Write out the catalog to a file
    dtype = [ ('x','f8'), ('y','f8'), ('flag','i2') ]
    data = np.empty(len(x_list), dtype=dtype)
    data['x'] = x_list
    data['y'] = y_list
    data['flag'] = flag_list
    cat_file = os.path.join('output','simple_cat.fits')
    fitsio.write(cat_file, data, clobber=True)

    # Use InputFiles to read these back in
    config = { 'image_file_name' : image_file,
               'cat_file_name': cat_file }
    input = piff.InputFiles(config, logger=logger)
    assert input.image_file_name == [ image_file ]
    assert input.cat_file_name == [ cat_file ]

    # Check image
    assert input.nimages == 1
    image1, _, image_pos, _ = input.getRawImageData(0)
    np.testing.assert_equal(image1.array, image.array)

    # Check catalog
    np.testing.assert_equal([pos.x for pos in image_pos], x_list)
    np.testing.assert_equal([pos.y for pos in image_pos], y_list)

    # Repeat, using flag columns this time.
    config = { 'image_file_name' : image_file,
               'cat_file_name': cat_file,
               'flag_col': 'flag',
               'use_flag': '1',
               'skip_flag': '4',
               'stamp_size': 48 }
    input = piff.InputFiles(config, logger=logger)
    assert input.nimages == 1
    _, _, image_pos, _ = input.getRawImageData(0)
    assert len(image_pos) == 7

    # Make star data
    orig_stars = input.makeStars()
    assert len(orig_stars) == 7
    assert orig_stars[0].image.array.shape == (48,48)

    # Process the star data
    # can only compare to truth if include_pixel=False
    model = piff.Gaussian(fastfit=True, include_pixel=False)
    interp = piff.Mean()
    fitted_stars = [ model.fit(model.initialize(star)) for star in orig_stars ]
    interp.solve(fitted_stars)
    print('mean = ',interp.mean)

    # Check that the interpolation is what it should be
    # Any position would work here.
    chipnum = 0
    x = 1024
    y = 123
    orig_wcs = input.getWCS()[chipnum]
    orig_pointing = input.getPointing()
    image_pos = galsim.PositionD(x,y)
    world_pos = piff.StarData.calculateFieldPos(image_pos, orig_wcs, orig_pointing)
    u,v = world_pos.x, world_pos.y
    stamp_size = config['stamp_size']

    target = piff.Star.makeTarget(x=x, y=y, u=u, v=v, wcs=orig_wcs, stamp_size=stamp_size,
                                  pointing=orig_pointing)
    true_params = [ sigma, g1, g2 ]
    test_star = interp.interpolate(target)
    np.testing.assert_almost_equal(test_star.fit.params, true_params, decimal=4)

    # Check default values of options
    psf = piff.SimplePSF(model, interp)
    assert psf.chisq_thresh == 0.1
    assert psf.max_iter == 30
    assert psf.outliers is None
    assert psf.interp_property_names == ('u','v')

    # Now test running it via the config parser
    psf_file = os.path.join('output','simple_psf.fits')
    config = {
        'input' : {
            'image_file_name' : image_file,
            'cat_file_name' : cat_file,
            'flag_col' : 'flag',
            'use_flag' : 1,
            'skip_flag' : 4,
            'stamp_size' : stamp_size
        },
        'psf' : {
            'model' : { 'type' : 'Gaussian',
                        'fastfit': True,
                        'include_pixel': False},
            'interp' : { 'type' : 'Mean' },
            'max_iter' : 10,
            'chisq_thresh' : 0.2,
        },
        'output' : { 'file_name' : psf_file },
    }
    orig_stars, wcs, pointing = piff.Input.process(config['input'], logger)

    # Use a SimplePSF to process the stars data this time.
    interp = piff.Mean()
    psf = piff.SimplePSF(model, interp, max_iter=10, chisq_thresh=0.2)
    assert psf.chisq_thresh == 0.2
    assert psf.max_iter == 10

    # Error if input has no stars
    with np.testing.assert_raises(RuntimeError):
        psf.fit([], wcs, pointing, logger=logger)

    # Do the fit
    psf.fit(orig_stars, wcs, pointing, logger=logger)
    test_star = psf.interp.interpolate(target)
    np.testing.assert_almost_equal(test_star.fit.params, true_params, decimal=4)

    # test that drawStar and drawStarList work
    test_star = psf.drawStar(target)
    test_star_list = psf.drawStarList([target])[0]
    np.testing.assert_equal(test_star.fit.params, test_star_list.fit.params)
    np.testing.assert_equal(test_star.image.array, test_star_list.image.array)

    # test copy_image property of drawStar and draw
    for draw in [psf.drawStar, psf.model.draw]:
        target_star_copy = psf.interp.interpolate(piff.Star(target.data.copy(), target.fit.copy()))
        # interp is so that when we do psf.model.draw we have fit.params to work with

        test_star_copy = draw(target_star_copy, copy_image=True)
        test_star_nocopy = draw(target_star_copy, copy_image=False)
        # if we modify target_star_copy, then test_star_nocopy should be modified,
        # but not test_star_copy
        target_star_copy.image.array[0,0] = 23456
        assert test_star_nocopy.image.array[0,0] == target_star_copy.image.array[0,0]
        assert test_star_copy.image.array[0,0] != target_star_copy.image.array[0,0]
        # however the other pixels SHOULD still be all the same value
        assert test_star_nocopy.image.array[1,1] == target_star_copy.image.array[1,1]
        assert test_star_copy.image.array[1,1] == target_star_copy.image.array[1,1]

        test_star_center = draw(test_star_copy, copy_image=True, center=(x+1,y+1))
        np.testing.assert_almost_equal(test_star_center.image.array[1:,1:],
                                       test_star_copy.image.array[:-1,:-1])

    # test that draw works
    test_image = psf.draw(x=target['x'], y=target['y'], stamp_size=config['input']['stamp_size'],
                          flux=target.fit.flux, offset=target.fit.center)
    # this image should be the same values as test_star
    assert test_image == test_star.image
    # test that draw does not copy the image
    image_ref = psf.draw(x=target['x'], y=target['y'], stamp_size=config['input']['stamp_size'],
                         flux=target.fit.flux, offset=target.fit.center, image=test_image)
    image_ref.array[0,0] = 123456789
    assert test_image.array[0,0] == image_ref.array[0,0]
    assert test_star.image.array[0,0] != test_image.array[0,0]
    assert test_star.image.array[1,1] == test_image.array[1,1]

    # Round trip to a file
    psf.write(psf_file, logger)
    psf2 = piff.read(psf_file, logger)
    assert type(psf2.model) is piff.Gaussian
    assert type(psf2.interp) is piff.Mean
    assert psf2.chisq == psf.chisq
    assert psf2.last_delta_chisq == psf.last_delta_chisq
    assert psf2.chisq_thresh == psf.chisq_thresh
    assert psf2.max_iter == psf.max_iter
    assert psf2.dof == psf.dof
    assert psf2.nremoved == psf.nremoved
    test_star = psf2.interp.interpolate(target)
    np.testing.assert_almost_equal(test_star.fit.params, true_params, decimal=4)

    # Do the whole thing with the config parser
    os.remove(psf_file)

    piff.piffify(config, logger)
    psf3 = piff.read(psf_file)
    assert type(psf3.model) is piff.Gaussian
    assert type(psf3.interp) is piff.Mean
    assert psf3.chisq == psf.chisq
    assert psf3.last_delta_chisq == psf.last_delta_chisq
    assert psf3.chisq_thresh == psf.chisq_thresh
    assert psf3.max_iter == psf.max_iter
    assert psf3.dof == psf.dof
    assert psf3.nremoved == psf.nremoved
    test_star = psf3.interp.interpolate(target)
    np.testing.assert_almost_equal(test_star.fit.params, true_params, decimal=4)

    # Test using the piffify executable
    os.remove(psf_file)
    # This would be simpler as a direct assignment, but this once, test the way you would set
    # this from the command line, which would call parse_variables.
    piff.config.parse_variables(config, ['verbose=0'], logger=logger)
    #config['verbose'] = 0
    with open('simple.yaml','w') as f:
        f.write(yaml.dump(config, default_flow_style=False))
    config2 = piff.config.read_config('simple.yaml')
    assert config == config2
    piffify_exe = get_script_name('piffify')
    p = subprocess.Popen( [piffify_exe, 'simple.yaml'] )
    p.communicate()
    psf4 = piff.read(psf_file)
    assert type(psf4.model) is piff.Gaussian
    assert type(psf4.interp) is piff.Mean
    assert psf4.chisq == psf.chisq
    assert psf4.last_delta_chisq == psf.last_delta_chisq
    assert psf4.chisq_thresh == psf.chisq_thresh
    assert psf4.max_iter == psf.max_iter
    assert psf4.dof == psf.dof
    assert psf4.nremoved == psf.nremoved
    test_star = psf4.interp.interpolate(target)
    np.testing.assert_almost_equal(test_star.fit.params, true_params, decimal=4)

    # With very low max_iter, we hit the warning about non-convergence
    config['psf']['max_iter'] = 1
    with CaptureLog(level=1) as cl:
        piff.piffify(config, cl.logger)
    assert 'PSF fit did not converge' in cl.output
Example #36
import os

for root, dirs, files in os.walk('results/'):
    for f in files:
        if str(f).endswith('.txt'):
            os.remove(f'{root}/{f}')
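An equivalent with pathlib (Python 3.5+), matching the same recursively-found *.txt files:

from pathlib import Path

for p in Path('results').rglob('*.txt'):
    p.unlink()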
Example #37
    def reset(self, verbose=False):
        os.remove(self.filename)
Example #38
    def connect(self):
        """
        建立WebSocket连接,并实例化SSHBridge类,在这个对象中建立SSH连接,放在 self.ssh_channel 通道中
        :return:
        """
        # pri00nt('【Web  --websocket-->  WS】建立WebSocket通道,当前连接用户:', self.simple_user)

        self.accept()

        # After the WebSocket connection succeeds, connect over SSH
        query_string = self.scope.get('query_string')
        ws_args = QueryDict(query_string=query_string, encoding='utf-8')
        # print(ws_args)
        # <QueryDict: {'user': ['admin'], 'host': ['192.168.96.20'], 'port': ['22'], 'auth': ['pwd'], 'pwd': ['ZGphbmdvYWRtaW4='], 'key': [''], 'width': ['113'], 'height': ['43']}>
        # Decide from the query parameters whether this is a collaborative (team) session
        team = ws_args.get('team')
        if team:
            self.is_team = True
            self.team_name = "team_{}".format(self.host_id)  # join this channel group
            async_to_sync(self.channel_layer.group_add)(
                self.team_name,
                self.channel_name
            )
            # When a user connects, notify everyone in the same group
            self.send_message_or_team(json.dumps({'flag': 'user', 'message': 'User {} has connected to this terminal'.format(self.simple_user)}))

        width = ws_args.get('width')
        height = ws_args.get('height')
        width = int(500)  # note: the parsed width/height above are overridden with a fixed size
        height = int(500)  # the SSH connection requires int: required argument is an integer

        ssh_connect_dict = {}

        if self.host_id:
            # 指定连接
            # print('Server id to connect to:', self.host_id)
            if int(self.host_id) == 1:
                ssh_connect_dict = {
                    'host': '120.26.175.79',
                    'user': '******',
                    'port': 22,
                    'timeout': 30,
                    'pty_width': width,
                    'pty_height': height,
                    'pwd': 'AAAAaaaa0'
                }
            else:
                self.close()
                return

        else:
            user = ws_args.get('user')
            host = ws_args.get('host')
            port = ws_args.get('port')
            port = int(port)
            auth = ws_args.get('auth')
            pwd = ws_args.get('pwd')
            if pwd:
                pwd = base64.b64decode(pwd).decode('utf-8')
            sshkey_filename = ws_args.get('sshkey_filename')

            ssh_connect_dict = {
                'host': host,
                'user': user,
                'port': port,
                'timeout': 30,
                'pty_width': width,
                'pty_height': height,
                'pwd': pwd
            }

            if auth == 'key':
                sshkey_file = os.path.join(settings.MEDIA_ROOT, 'sshkey', sshkey_filename)
                if not os.path.exists(sshkey_file):
                    self.send(json.dumps({'flag': 'error', 'message': 'Key file does not exist'}))

                else:
                    try:
                        f = open(sshkey_file, 'r', encoding='utf-8')
                        key = f.read()
                        string_io = StringIO()
                        string_io.write(key)
                        string_io.flush()
                        string_io.seek(0)
                        ssh_connect_dict['key'] = string_io

                        os.remove(sshkey_file)  # delete the key file after use
                    except BaseException as e:
                        # print('Error opening the key file', e)
                        pass

        # Establish the SSH connection
        self.ssh = SSHBridge(websocket=self, simpleuser=self.simple_user)
        # print('[WS --SSHBridge--> SSH] SSH connection parameters:', ssh_connect_dict)
        self.ssh.connect(**ssh_connect_dict)
Example #39
    def _delete_plain(self, entry):
        path = entry.fs_path(self.plain_folder)
        os.remove(path)
Example #40
    def test_sparse_tfidf_retriever(self):
        try:
            from parlai.agents.tfidf_retriever.tfidf_retriever import TfidfRetrieverAgent  # noqa: F401
        except ImportError as e:
            if 'pip install' in e.msg or 'pytorch' in e.msg:
                print(
                    'Skipping TestTfidfRetriever, missing optional pip packages or pytorch.'
                )
                return

        MODEL_FILE = '/tmp/tmp_test_babi'
        DB_PATH = '/tmp/tmp_test_babi.db'
        TFIDF_PATH = '/tmp/tmp_test_babi.tfidf'
        try:
            parser = ParlaiParser(True, True)
            parser.set_defaults(model='tfidf_retriever',
                                task='babi:task1k:1',
                                model_file=MODEL_FILE,
                                retriever_numworkers=4,
                                retriever_hashsize=2**8,
                                datatype='train:ordered',
                                num_epochs=1)
            opt = parser.parse_args(print_args=False)
            agent = create_agent(opt)
            train_world = create_task(opt, agent)
            # pass examples to dictionary
            while not train_world.epoch_done():
                train_world.parley()

            obs = {
                'text':
                'Mary moved to the bathroom. John went to the hallway. Where is Mary?',
                'episode_done': True
            }
            agent.observe(obs)
            reply = agent.act()
            assert reply['text'] == 'bathroom'

            ANS = 'The one true label.'
            new_example = {
                'text':
                'A bunch of new words that are not in the other task, '
                'which the model should be able to use to identify '
                'this label.',
                'labels': [ANS],
                'episode_done':
                True
            }
            agent.observe(new_example)
            reply = agent.act()
            assert 'text' in reply and reply['text'] == ANS

            new_example.pop('labels')
            agent.observe(new_example)
            reply = agent.act()
            assert reply['text'] == ANS
        finally:
            # clean up files
            if os.path.exists(DB_PATH):
                os.remove(DB_PATH)
            if os.path.exists(TFIDF_PATH + '.npz'):
                os.remove(TFIDF_PATH + '.npz')
Example #41
    if pred != '':
        pred = pred.rstrip(',')
        pred = prefix + 'detected:' + pred
        g.logger.info('Prediction string:{}'.format(pred))
       # g.logger.error (f"Returning THIS IS {obj_json}")
        jos = json.dumps(obj_json)
        g.logger.debug('Prediction string JSON:{}'.format(jos))
        print(pred + '--SPLIT--' + jos)

    # end of matched_file

if g.config['delete_after_analyze'] == 'yes':
    try:
        if filename1:
            os.remove(filename1)
        if filename2:
            os.remove(filename2)
    except Exception as e:
        g.logger.error (f'Could not delete file(s):{e}')

if args.get('notes') and pred:
    # We want to update our DB notes with the detection string
    g.logger.debug ('Updating notes for EID:{}'.format(args.get('eventid')))
    import pyzm.api as zmapi
    api_options = {
            'apiurl': g.config['api_portal'],
            'portalurl': g.config['portal'],
            'user': g.config['user'],
            'password': g.config['password'],
            'logger': g.logger # We connect the API to zmlog 
Example #42
async def handle_ytdl_file_download(e: MessageLike):
    # ytdldfile | format_id | sender_id | suid | is_audio

    data = e.data.decode("UTF-8")
    data = data.split("|")

    if data[2] != str(e.sender_id):
        await e.answer("Not valid user, Dont touch.")
        return
    else:
        await e.answer("Crunching Data.....")

    await e.edit(buttons=None)

    is_audio = False

    path = os.path.join(os.getcwd(), 'userdata', data[3] + ".json")
    if os.path.exists(path):
        with open(path, encoding="UTF-8") as file:
            ytdata = json.loads(file.read())
            yt_url = ytdata.get("webpage_url")
            thumb_path = await get_max_thumb(ytdata, data[3])

            op_dir = os.path.join(os.getcwd(), 'userdata', data[3])
            if not os.path.exists(op_dir):
                os.mkdir(op_dir)
            if data[1].startswith("xxother"):
                data[1] = data[1].replace("xxother", "")
                data[1] = int(data[1])
                j = 0
                for i in ytdata.get("formats"):
                    if j == data[1]:
                        data[1] = i.get("format_id")
                    j += 1
            else:
                for i in ytdata.get("formats"):
                    if i.get("format_id") == data[1]:
                        if i.get("acodec") is not None:
                            is_audio = True

            if data[1].endswith("K"):
                cmd = f"youtube-dl -i --extract-audio --add-metadata --audio-format mp3 --audio-quality {data[1]} -o '{op_dir}/%(title)s.%(ext)s' {yt_url}"

            else:
                if is_audio:
                    cmd = f"youtube-dl --continue --embed-subs --no-warnings --hls-prefer-ffmpeg --prefer-ffmpeg -f {data[1]} -o {op_dir}/%(title)s.%(ext)s {yt_url}"
                else:
                    cmd = f"youtube-dl --continue --embed-subs --no-warnings --hls-prefer-ffmpeg --prefer-ffmpeg -f {data[1]}+bestaudio[ext=m4a]/best -o {op_dir}/%(title)s.%(ext)s {yt_url}"

            out, err = await cli_call(cmd)

            if not err:

                # TODO Fix the original thumbnail
                # rdict = await upload_handel(op_dir,await e.get_message(),e.sender_id,dict(),thumb_path=thumb_path)

                rdict = await upload_handel(op_dir,
                                            await e.get_message(),
                                            e.sender_id,
                                            dict(),
                                            user_msg=e)
                await print_files(e, rdict)

                shutil.rmtree(op_dir)
                os.remove(thumb_path)
                os.remove(path)
            else:
                torlog.error(err)
                omess = await e.get_message()
                omess1 = await omess.get_reply_message()
                if "HTTP Error 429" in err:
                    emsg = "HTTP Error 429: Too many requests try after a while."
                else:
                    emsg = "An error has occured trying to upload any files that are found here."
                await omess.edit(emsg)
                if omess1 is None:
                    await omess.respond(emsg)
                else:
                    await omess1.reply(emsg)

                rdict = await upload_handel(op_dir,
                                            await e.get_message(),
                                            e.sender_id,
                                            dict(),
                                            user_msg=e)
                await print_files(e, rdict)

                try:
                    shutil.rmtree(op_dir)
                    os.remove(thumb_path)
                    os.remove(path)
                except:
                    pass

    else:
        await e.delete()
        await e.answer("Try again something went wrong.", alert=True)
        await e.delete()
Ejemplo n.º 43
0
def remove_package_lock(path):
    package_lock = Path(path, 'package-lock.json')
    print('Checking for package lock')
    if package_lock.exists():
        print(f'removing {package_lock}')
        os.remove(package_lock)
Ejemplo n.º 44
0
import os
from os.path import join

import numpy as np

from camcan.datasets import load_camcan_timeseries_rest
from camcan.preprocessing import extract_connectivity

# load connectivity matrices
ATLASES = ['modl256', 'basc197']
CONNECTIVITY_KINDS = ('correlation', 'tangent')
# path for the different kind of connectivity matrices
CAMCAN_TIMESERIES = '/storage/tompouce/okozynet/camcan/timeseries'
OUT_DIR = '/storage/tompouce/okozynet/camcan/connectivity'

for connect_kind in CONNECTIVITY_KINDS:
    out_file = join(OUT_DIR, f'connect_data_{connect_kind}.h5')

    # remove the output file if it exists
    if os.path.exists(out_file):
        os.remove(out_file)

    for sel_atlas in ATLASES:
        print('**************************************************************')
        print(f'Reading timeseries files for {sel_atlas}')

        dataset = load_camcan_timeseries_rest(data_dir=CAMCAN_TIMESERIES,
                                              atlas=sel_atlas)
        connectivities = extract_connectivity(dataset.timeseries,
                                              kind=connect_kind)
        connect_data = None
        subjects = tuple(s[4:] for s in dataset.subject_id)

        for i, s in enumerate(subjects):
            if connect_data is None:
                columns = np.arange(start=0, stop=len(connectivities[i]))
Ejemplo n.º 45
0
      max_iter = iter

pretrain_model = os.path.abspath(pretrain_model)
train_src_param = '--weights="{}" '.format(pretrain_model)
if resume_training:
  if max_iter > 0:
    train_src_param = '--snapshot="{}_iter_{}.solverstate" '.format(snapshot_prefix, max_iter)

if remove_old_models:
  # Remove any snapshots smaller than max_iter.
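  # Snapshot files are named "{model_name}_iter_{iter}.solverstate" (and
  # ".caffemodel"), so the iteration number is parsed out of the basename.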
  for file in os.listdir(snapshot_dir):
    if file.endswith(".solverstate"):
      basename = os.path.splitext(file)[0]
      iter = int(basename.split("{}_iter_".format(model_name))[1])
      if max_iter > iter:
        os.remove("{}/{}".format(snapshot_dir, file))
    if file.endswith(".caffemodel"):
      basename = os.path.splitext(file)[0]
      iter = int(basename.split("{}_iter_".format(model_name))[1])
      if max_iter > iter:
        os.remove("{}/{}".format(snapshot_dir, file))

# Create job file.
with open(job_file, 'w') as f:
  job_dir = os.path.abspath(job_dir)
  f.write('cd {}\n'.format(caffe_root))
  #f.write('Start-Transcript -Path {}\\{}.txt\n'.format(job_dir, model_name))
  f.write('.\\build\\tools\\Release\\caffe.exe train ')
  solver_file = os.path.abspath(solver_file)
  print(solver_file)
  f.write('--solver="{}" '.format(solver_file))
Ejemplo n.º 46
0
import os
from glob import glob

from radical.entk import Pipeline, Stage, Task, AppManager

def test_rp_da_scheduler_bw():

    """
    **Purpose**: Run an EnTK application on localhost
    """

    p1 = Pipeline()
    p1.name = 'p1'

    n = 10

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s'%x
        t.executable = '/bin/hostname'
        t.arguments = ['>','hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 10
        t.download_output_data = ['hostname.txt > s1_t%s_hostname.txt'%(x)]

        s1.add_tasks(t)

    p1.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = '/bin/hostname'
        t.arguments = ['>','hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.download_output_data = ['hostname.txt > s2_t%s_hostname.txt'%(x)]
        t.tag = 't%s'%x

        s2.add_tasks(t)


    p1.add_stages(s2)

    res_dict = {
                'resource'      : 'ncsa.bw_aprun',
                'walltime'      : 10,
                'cpus'          : 128,
                'project'       : 'gk4',
                'queue'         : 'high'
            }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

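    # s2 tasks are tagged with the names of s1 tasks, so the data-aware
    # scheduler should co-locate each pair on the same node; comparing the
    # recorded hostnames verifies that placement.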
    for i in range(n):
        assert open('s1_t%s_hostname.txt'%i,'r').readline().strip() == open('s2_t%s_hostname.txt'%i,'r').readline().strip()


    txts = glob('%s/*.txt' % os.getcwd())
    for f in txts:
        os.remove(f)
Ejemplo n.º 47
0
async def telegraphs(graph):
    """For telegraph command, upload media & text to telegraph site."""
    await graph.edit("`Processing...`")
    if not graph.text[0].isalpha() and graph.text[0] not in ("/", "#", "@", "!"):
        if graph.fwd_from:
            return
        if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
            os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
        if graph.reply_to_msg_id:
            start = datetime.now()
            r_message = await graph.get_reply_message()
            input_str = graph.pattern_match.group(1)
            if input_str == "m":
                downloaded_file_name = await bot.download_media(
                    r_message, TEMP_DOWNLOAD_DIRECTORY
                )
                end = datetime.now()
                ms = (end - start).seconds
                await graph.edit(
                    f"**Downloaded to** `{downloaded_file_name}` **in** `{ms}` **seconds.**"
                )
                if downloaded_file_name.endswith(".webp"):
                    resize_image(downloaded_file_name)
                try:
                    media_urls = upload_file(downloaded_file_name)
                except exceptions.TelegraphException as exc:
                    await graph.edit("**ERROR:** " + str(exc))
                    os.remove(downloaded_file_name)
                else:
                    os.remove(downloaded_file_name)
                    await graph.edit(
                        f"**Successfully uploaded to** [telegra.ph](https://telegra.ph{media_urls[0]})",
                        link_preview=True,
                    )
            elif input_str == "t":
                user_object = await bot.get_entity(r_message.sender_id)
                title_of_page = user_object.first_name  # + " " + user_object.last_name
                # apparently, all Users do not have last_name field
                page_content = r_message.message
                if r_message.media:
                    if page_content != "":
                        title_of_page = page_content
                    downloaded_file_name = await bot.download_media(
                        r_message, TEMP_DOWNLOAD_DIRECTORY
                    )
                    m_list = None
                    with open(downloaded_file_name, "rb") as fd:
                        m_list = fd.readlines()
                    for m in m_list:
                        page_content += m.decode("UTF-8") + "\n"
                    os.remove(downloaded_file_name)
                page_content = page_content.replace("\n", "<br>")
                response = telegraph.create_page(
                    title_of_page, html_content=page_content
                )
                await graph.edit(
                    f'**Successfully uploaded to** [telegra.ph](https://telegra.ph/{response["path"]})',
                    link_preview=True,
                )
        else:
            await graph.edit(
                "`Please reply to a message to get a permanent Telegraph link.`"
            )
Ejemplo n.º 48
0
    def run_test(self):
        self.log.info(
            "-includeconf works from config file. subversion should end with 'main; relative)/'"
        )

        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")

        self.log.info("-includeconf cannot be used as command-line arg")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(
            extra_args=["-includeconf=relative2.conf"],
            expected_msg=
            "Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf"
        )

        self.log.info(
            "-includeconf cannot be used recursively. subversion should end with 'main; relative)/'"
        )
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"),
                  "a",
                  encoding="utf8") as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)

        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.stop_node(
            0,
            expected_stderr=
            "warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf"
        )

        self.log.info("-includeconf cannot contain invalid arg")

        # Commented out as long as we ignore invalid arguments in configuration files
        #with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
        #    f.write("foo=bar\n")
        #self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo")

        self.log.info("-includeconf cannot be invalid path")
        os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
        self.nodes[0].assert_start_raises_init_error(
            expected_msg=
            "Error reading configuration file: Failed to include configuration file relative.conf"
        )

        self.log.info(
            "multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'"
        )
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"),
                  "w",
                  encoding="utf8") as f:
            # Restore initial file contents
            f.write("uacomment=relative\n")

        with open(os.path.join(self.options.tmpdir, "node0",
                               "muncatascoin.conf"),
                  "a",
                  encoding='utf8') as f:
            f.write("includeconf=relative2.conf\n")

        self.start_node(0)

        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative; relative2)/")
Ejemplo n.º 49
0
def index(request):
    if request.method == "POST":
        package = request.POST.get("package", "")
        timeout = min(force_int(request.POST.get("timeout")), 60 * 60 * 24)
        options = request.POST.get("options", "")
        priority = force_int(request.POST.get("priority"))
        machine = request.POST.get("machine", "")
        gateway = request.POST.get("gateway", None)
        clock = request.POST.get("clock", None)
        custom = request.POST.get("custom", "")
        memory = bool(request.POST.get("memory", False))
        enforce_timeout = bool(request.POST.get("enforce_timeout", False))
        referrer = validate_referrer(request.POST.get("referrer", None))
        tags = request.POST.get("tags", None)

        task_gateways = []
        ipaddy_re = re.compile(
            r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
        )

        if referrer:
            if options:
                options += ","
            options += "referrer=%s" % (referrer)

        if request.POST.get("free"):
            if options:
                options += ","
            options += "free=yes"

        if request.POST.get("nohuman"):
            if options:
                options += ","
            options += "nohuman=yes"

        if request.POST.get("tor"):
            if options:
                options += ","
            options += "tor=yes"

        if request.POST.get("process_memory"):
            if options:
                options += ","
            options += "procmemdump=yes"

        if request.POST.get("kernel_analysis"):
            if options:
                options += ","
            options += "kernel_analysis=yes"

        orig_options = options

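        # Resolve the requested gateway(s): "all" expands to every configured
        # gateway, while a comma-separated group either expands fully
        # (all_gw_in_group) or contributes one randomly chosen member.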
        if gateway and gateway.lower() == "all":
            for e in settings.GATEWAYS:
                if ipaddy_re.match(settings.GATEWAYS[e]):
                    task_gateways.append(settings.GATEWAYS[e])
        elif gateway and gateway in settings.GATEWAYS:
            if "," in settings.GATEWAYS[gateway]:
                if request.POST.get("all_gw_in_group"):
                    tgateway = settings.GATEWAYS[gateway].split(",")
                    for e in tgateway:
                        task_gateways.append(settings.GATEWAYS[e])
                else:
                    tgateway = random.choice(
                        settings.GATEWAYS[gateway].split(","))
                    task_gateways.append(settings.GATEWAYS[tgateway])
            else:
                task_gateways.append(settings.GATEWAYS[gateway])

        if not task_gateways:
            # To reduce to the default case
            task_gateways = [None]

        db = Database()
        task_ids = []
        task_machines = []

        if machine.lower() == "all":
            for entry in db.list_machines():
                task_machines.append(entry.label)
        else:
            task_machines.append(machine)

        if "sample" in request.FILES:
            samples = request.FILES.getlist("sample")
            for sample in samples:
                # Error if there was only one submitted sample and it's empty.
                # But if there are multiple and one was empty, just ignore it.
                if not sample.size:
                    if len(samples) != 1:
                        continue

                    return render(request, "error.html",
                                  {"error": "You uploaded an empty file."})
                elif sample.size > settings.MAX_UPLOAD_SIZE:
                    return render(
                        request, "error.html", {
                            "error":
                            "You uploaded a file that exceeds the maximum allowed upload size specified in web/web/local_settings.py."
                        })

                # Move the sample from the Django temporary file to Cuckoo's
                # temporary storage so it persists across reboots (if the user
                # has configured it that way).
                path = store_temp_file(sample.read(), sample.name)

                for gw in task_gateways:
                    options = update_options(gw, orig_options)

                    for entry in task_machines:
                        task_ids_new = db.demux_sample_and_add_to_db(
                            file_path=path,
                            package=package,
                            timeout=timeout,
                            options=options,
                            priority=priority,
                            machine=entry,
                            custom=custom,
                            memory=memory,
                            enforce_timeout=enforce_timeout,
                            tags=tags,
                            clock=clock)
                        task_ids.extend(task_ids_new)
        elif "quarantine" in request.FILES:
            samples = request.FILES.getlist("quarantine")
            for sample in samples:
                # Error if there was only one submitted sample and it's empty.
                # But if there are multiple and one was empty, just ignore it.
                if not sample.size:
                    if len(samples) != 1:
                        continue

                    return render(
                        request, "error.html",
                        {"error": "You uploaded an empty quarantine file."})
                elif sample.size > settings.MAX_UPLOAD_SIZE:
                    return render(
                        request, "error.html", {
                            "error":
                            "You uploaded a quarantine file that exceeds the maximum allowed upload size specified in web/web/local_settings.py."
                        })

                # Move the sample from the Django temporary file to Cuckoo's
                # temporary storage so it persists across reboots (if the user
                # has configured it that way).
                tmp_path = store_temp_file(sample.read(), sample.name)

                path = unquarantine(tmp_path)
                try:
                    os.remove(tmp_path)
                except:
                    pass

                if not path:
                    return render(request, "error.html", {
                        "error":
                        "You uploaded an unsupported quarantine file."
                    })

                for gw in task_gateways:
                    options = update_options(gw, orig_options)

                    for entry in task_machines:
                        task_ids_new = db.demux_sample_and_add_to_db(
                            file_path=path,
                            package=package,
                            timeout=timeout,
                            options=options,
                            priority=priority,
                            machine=entry,
                            custom=custom,
                            memory=memory,
                            enforce_timeout=enforce_timeout,
                            tags=tags,
                            clock=clock)
                        task_ids.extend(task_ids_new)
        elif "pcap" in request.FILES:
            samples = request.FILES.getlist("pcap")
            for sample in samples:
                if not sample.size:
                    if len(samples) != 1:
                        continue

                    return render(
                        request, "error.html",
                        {"error": "You uploaded an empty PCAP file."})
                elif sample.size > settings.MAX_UPLOAD_SIZE:
                    return render(
                        request, "error.html", {
                            "error":
                            "You uploaded a PCAP file that exceeds the maximum allowed upload size specified in web/web/local_settings.py."
                        })

                # Move the sample from the Django temporary file to Cuckoo's
                # temporary storage so it persists across reboots (if the user
                # has configured it that way).
                path = store_temp_file(sample.read(), sample.name)

                if sample.name.lower().endswith(".saz"):
                    saz = saz_to_pcap(path)
                    if saz:
                        try:
                            os.remove(path)
                        except:
                            pass
                        path = saz
                    else:
                        return render(
                            request, "error.html",
                            {"error": "Conversion from SAZ to PCAP failed."})

                task_id = db.add_pcap(file_path=path, priority=priority)
                task_ids.append(task_id)

        elif "url" in request.POST and request.POST.get("url").strip():
            url = request.POST.get("url").strip()
            if not url:
                return render(request, "error.html",
                              {"error": "You specified an invalid URL!"})

            url = url.replace("hxxps://", "https://").replace(
                "hxxp://", "http://").replace("[.]", ".")
            for gw in task_gateways:
                options = update_options(gw, orig_options)

                for entry in task_machines:
                    task_id = db.add_url(url=url,
                                         package=package,
                                         timeout=timeout,
                                         options=options,
                                         priority=priority,
                                         machine=entry,
                                         custom=custom,
                                         memory=memory,
                                         enforce_timeout=enforce_timeout,
                                         tags=tags,
                                         clock=clock)
                    if task_id:
                        task_ids.append(task_id)
        elif settings.VTDL_ENABLED and "vtdl" in request.POST:
            vtdl = request.POST.get("vtdl").strip()
            if (not settings.VTDL_PRIV_KEY
                    and not settings.VTDL_INTEL_KEY) or not settings.VTDL_PATH:
                return render(
                    request, "error.html", {
                        "error":
                        "You specified VirusTotal, but you must edit the settings file and set your VTDL_PRIV_KEY or VTDL_INTEL_KEY variable and the VTDL_PATH base directory"
                    })
            else:
                base_dir = tempfile.mkdtemp(prefix='cuckoovtdl',
                                            dir=settings.VTDL_PATH)
                hashlist = []
                if "," in vtdl:
                    hashlist = vtdl.split(",")
                else:
                    hashlist.append(vtdl)
                onesuccess = False

                for h in hashlist:
                    filename = base_dir + "/" + h
                    if settings.VTDL_PRIV_KEY:
                        url = 'https://www.virustotal.com/vtapi/v2/file/download'
                        params = {'apikey': settings.VTDL_PRIV_KEY, 'hash': h}
                    else:
                        url = 'https://www.virustotal.com/intelligence/download/'
                        params = {'apikey': settings.VTDL_INTEL_KEY, 'hash': h}

                    try:
                        r = requests.get(url, params=params, verify=True)
                    except requests.exceptions.RequestException as e:
                        return render(
                            request, "error.html", {
                                "error":
                                "Error completing connection to VirusTotal: {0}"
                                .format(e)
                            })
                    if r.status_code == 200:
                        try:
                            f = open(filename, 'wb')
                            f.write(r.content)
                            f.close()
                        except:
                            return render(
                                request, "error.html", {
                                    "error":
                                    "Error writing VirusTotal download file to temporary path"
                                })

                        onesuccess = True

                        for gw in task_gateways:
                            options = update_options(gw, orig_options)

                            for entry in task_machines:
                                task_ids_new = db.demux_sample_and_add_to_db(
                                    file_path=filename,
                                    package=package,
                                    timeout=timeout,
                                    options=options,
                                    priority=priority,
                                    machine=entry,
                                    custom=custom,
                                    memory=memory,
                                    enforce_timeout=enforce_timeout,
                                    tags=tags,
                                    clock=clock)
                                task_ids.extend(task_ids_new)
                    elif r.status_code == 403:
                        return render(
                            request, "error.html", {
                                "error":
                                "API key provided is not a valid VirusTotal key or is not authorized for VirusTotal downloads"
                            })

                if not onesuccess:
                    return render(
                        request, "error.html",
                        {"error": "Provided hash not found on VirusTotal"})

        tasks_count = len(task_ids)
        if tasks_count > 0:
            return render(request, "submission/complete.html", {
                "tasks": task_ids,
                "tasks_count": tasks_count
            })
        else:
            return render(request, "error.html",
                          {"error": "Error adding task to Cuckoo's database."})
    else:
        enabledconf = dict()
        enabledconf["vt"] = settings.VTDL_ENABLED
        enabledconf["kernel"] = settings.OPT_ZER0M0N
        enabledconf["memory"] = Config("processing").memory.get("enabled")
        enabledconf["procmemory"] = Config("processing").procmemory.get(
            "enabled")
        enabledconf["tor"] = Config("auxiliary").tor.get("enabled")
        if Config("auxiliary").gateways:
            enabledconf["gateways"] = True
        else:
            enabledconf["gateways"] = False
        enabledconf["tags"] = False
        # Get enabled machinery
        machinery = Config("cuckoo").cuckoo.get("machinery")
        # Get VM names for machinery config elements
        vms = [
            x.strip() for x in getattr(Config(machinery), machinery).get(
                "machines").split(",")
        ]
        # Check each VM config element for tags
        for vmtag in vms:
            if "tags" in getattr(Config(machinery), vmtag).keys():
                enabledconf["tags"] = True

        files = os.listdir(
            os.path.join(settings.CUCKOO_PATH, "analyzer", "windows",
                         "modules", "packages"))

        packages = []
        for name in files:
            name = os.path.splitext(name)[0]
            if name == "__init__":
                continue

            packages.append(name)

        # Prepare a list of VM names, description label based on tags.
        machines = []
        for machine in Database().list_machines():
            tags = []
            for tag in machine.tags:
                tags.append(tag.name)

            if tags:
                label = machine.label + ": " + ", ".join(tags)
            else:
                label = machine.label

            machines.append((machine.label, label))

        # Prepend ALL/ANY options.
        machines.insert(0, ("all", "All"))
        machines.insert(1, ("", "First available"))

        return render(
            request, "submission/index.html", {
                "packages": sorted(packages),
                "machines": machines,
                "gateways": settings.GATEWAYS,
                "config": enabledconf
            })
Ejemplo n.º 50
0
def main(args):
    teont_table = pd.read_csv(args.table, sep='\t', header=0, index_col=0)

    if args.teont_dir is None:
        teont_dir = '.'.join(args.table.split('.')[:-2])
    else:
        teont_dir = args.teont_dir

    samples = []

    for sample_calls in teont_table['SampleReads']:
        for sample_c in sample_calls.split(','):
            samples.append(sample_c.split('|')[0])

    samples = sorted(list(set(samples)))

    meth_table = dd(dict)
    meth_output = dd(dict)

    assert os.path.exists(teont_dir)

    for uuid in teont_table.index:
        ins = teont_table.loc[uuid]

        cons_fa = teont_dir + '/' + uuid + '.cons.ref.fa'

        if not os.path.exists(cons_fa):
            continue

        cons_dict = load_falib(cons_fa)
        cons_seq = cons_dict[uuid]

        for sample in samples:
            bam_fn = teont_dir + '/' + sample + '.' + uuid + '.te.bam'
            meth_fn = teont_dir + '/' + sample + '.' + uuid + '.te.meth.tsv.gz'

            meth_table[uuid][sample] = [0, 0, 0]  # meth, unmeth, no_call

            if not os.path.exists(bam_fn):
                continue

            if not os.path.exists(meth_fn):
                continue

            chrom = uuid

            # defines TE start / end positions in contig
            h_start, h_end = sorted_unmapped_segments(cons_seq)[0]

            # get relevant genome chunk to tmp tsv

            meth_tbx = pysam.Tabixfile(meth_fn)

            tmp_methdata = uuid + '.' + sample + '.tmp.methdata.tsv'

            with open(tmp_methdata, 'w') as meth_out:
                # header
                with gzip.open(meth_fn, 'rt') as _:
                    for line in _:
                        assert line.startswith('chromosome')
                        meth_out.write(line)
                        break

                assert chrom in meth_tbx.contigs

                for rec in meth_tbx.fetch(chrom, h_start, h_end):
                    meth_out.write(str(rec) + '\n')

            # index by read_name
            methdata = pd.read_csv(tmp_methdata,
                                   sep='\t',
                                   header=0,
                                   index_col=4)

            if not args.keep_tmp_table:
                os.remove(tmp_methdata)

            # get list of relevant reads
            reads = get_reads(bam_fn,
                              tag_untagged=args.tag_untagged,
                              ignore_tags=args.ignore_tags)

            readnames = []
            for r in reads.keys():
                if r in methdata.index:
                    readnames.append(r)

            methdata = methdata.loc[readnames]

            methreads = {}

            for index, row in methdata.iterrows():
                r_start = row['start']
                r_end = row['end']
                llr = row['log_lik_ratio']
                seq = row['sequence']

                # get per-CG position (nanopolish/calculate_methylation_frequency.py)
                cg_pos = seq.find("CG")
                first_cg_pos = cg_pos
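                # seq.find offsets are relative to the first CG in the read,
                # so anchor at r_start and subtract first_cg_pos to recover
                # contig coordinates for each CpG site.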
                while cg_pos != -1:
                    cg_start = r_start + cg_pos - first_cg_pos
                    cg_pos = seq.find("CG", cg_pos + 1)

                    cg_elt_start = cg_start - h_start

                    if cg_start >= h_start and cg_start <= h_end:
                        #print (cg_start, cg_elt_start, llr, index)
                        if index not in methreads:
                            methreads[index] = Read(index,
                                                    cg_elt_start,
                                                    llr,
                                                    phase=reads[index],
                                                    cutoff=float(args.cutoff))
                        else:
                            methreads[index].add_cpg(cg_elt_start,
                                                     llr,
                                                     cutoff=float(args.cutoff))

            for name, read in methreads.items():
                for loc in read.llrs.keys():
                    if read.meth_calls[loc] == 1:
                        meth_table[uuid][sample][0] += 1

                    if read.meth_calls[loc] == -1:
                        meth_table[uuid][sample][1] += 1

                    if read.meth_calls[loc] == 0:
                        meth_table[uuid][sample][2] += 1

        meth_output[uuid]['seg_id'] = uuid
        meth_output[uuid]['seg_chrom'] = ins['Chrom']
        meth_output[uuid]['seg_start'] = ins['Start']
        meth_output[uuid]['seg_end'] = ins['End']
        meth_output[uuid]['seg_name'] = ins['Subfamily']
        meth_output[uuid]['seg_strand'] = ins['Strand']

        for sample in samples:
            meth_output[uuid][sample +
                              '_meth_calls'] = meth_table[uuid][sample][0]
            meth_output[uuid][sample +
                              '_unmeth_calls'] = meth_table[uuid][sample][1]
            meth_output[uuid][sample +
                              '_no_calls'] = meth_table[uuid][sample][2]

    col_order = [
        'seg_id', 'seg_chrom', 'seg_start', 'seg_end', 'seg_name', 'seg_strand'
    ]

    for sample in samples:
        col_order += [
            sample + '_meth_calls', sample + '_unmeth_calls',
            sample + '_no_calls'
        ]

    meth_output = pd.DataFrame.from_dict(meth_output).T
    meth_output = meth_output[col_order]

    out_fn = '.'.join(args.table.split('.')[:-1]) + '.nr.segmeth.table.txt'

    meth_output.to_csv(out_fn, sep='\t', index=False)
Ejemplo n.º 51
0
                url += "&heading=" + args.heading
            if args.pitch:
                url += "&pitch=" + args.pitch
            try:
                urlretrieve(url, outfile)
            except KeyboardInterrupt:
                sys.exit("exit")
            if os.path.isfile(outfile):
                print(lat_lon)
                # get_color returns the main color of image
                color = getcolor.get_color(outfile)
                print(color)
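                # These two hex values are the flat grey tile Street View
                # returns when no imagery exists for the location.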
                if color[0] == "#e3e2dd" or color[0] == "#e3e2de":
                    print("    No imagery")
                    imagery_misses += 1
                    os.remove(outfile)
                else:
                    print("    ========== Got one! ==========")
                    imagery_hits += 1
                    if imagery_hits == args.images_wanted:
                        break
            if country_hits == MAX_URLS:
                break
except KeyboardInterrupt:
    print("Keyboard interrupt")

print("Attempts:\t", attempts)
print("Country hits:\t", country_hits)
print("Imagery misses:\t", imagery_misses)
print("Imagery hits:\t", imagery_hits)
Ejemplo n.º 52
0
def MINIMUM_FREQUENCY_CHECKING_NEW_MESSAGES():
  return 300

def MAXIMUM_FREQUENCY_CHECKING_NEW_MESSAGES():
  return 3600

try:
  automatic_checking_is_on_int = int (settings_config[0])
  how_often_to_check_intvar = int (settings_config[1])
  counter = how_often_to_check_intvar 
  how_often_to_check_int = how_often_to_check_intvar
  reset_time_after_manually_check_int = int (settings_config[2])
  confirm_close_application_int = int (settings_config[3])
except:
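  # Settings file is corrupt or incomplete: delete it and restart the app.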
  try:
    os.remove(FILE_SETTINGS)
    os.execv(sys.executable, ['python'] + sys.argv)
  except:
    os.execv(__file__, sys.argv)
  
found_message_today = False
check_manually_new_communicates = False
last_check_date_and_time = ''
new_communicates = 'No new messages'
def counter_label(label):
  def count():
    global counter
    global check_manually_new_communicates
    counter -= 1
    label.after(1000, count)
    if automatic_checking_is_on_int:
Ejemplo n.º 53
0
    def __spawn_instance(self):
        """
        Create and configure a new KRA instance using pkispawn.
        Creates a configuration file with IPA-specific
        parameters and passes it to the base class to call pkispawn
        """

        self.tmp_agent_db = tempfile.mkdtemp(
                prefix="tmp-", dir=paths.VAR_LIB_IPA)
        tmp_agent_pwd = ipautil.ipa_generate_password()

        # Create a temporary file for the admin PKCS #12 file
        (admin_p12_fd, admin_p12_file) = tempfile.mkstemp()
        os.close(admin_p12_fd)

        cfg = dict(
            pki_issuing_ca_uri="https://{}".format(
                ipautil.format_netloc(self.fqdn, 443)),
            # Client security database
            pki_client_database_dir=self.tmp_agent_db,
            pki_client_database_password=tmp_agent_pwd,
            pki_client_database_purge=True,
            pki_client_pkcs12_password=self.admin_password,
            pki_import_admin_cert=False,
            pki_client_admin_cert_p12=admin_p12_file,
        )

        if not (os.path.isdir(paths.PKI_TOMCAT_ALIAS_DIR) and
                os.path.isfile(paths.PKI_TOMCAT_PASSWORD_CONF)):
            # generate pin which we know can be used for FIPS NSS database
            pki_pin = ipautil.ipa_generate_password()
            cfg['pki_server_database_password'] = pki_pin
        else:
            pki_pin = None

        _p12_tmpfile_handle, p12_tmpfile_name = tempfile.mkstemp(dir=paths.TMP)

        if self.clone:
            krafile = self.pkcs12_info[0]
            shutil.copy(krafile, p12_tmpfile_name)
            pent = pwd.getpwnam(self.service_user)
            os.chown(p12_tmpfile_name, pent.pw_uid, pent.pw_gid)

            self._configure_clone(
                cfg,
                security_domain_hostname=self.fqdn,
                clone_pkcs12_path=p12_tmpfile_name,
            )
            cfg.update(
                pki_clone_setup_replication=False,
            )
        else:
            # the admin cert file is needed for the first instance of KRA
            cert = self.get_admin_cert()
            # First make sure that the directory exists
            parentdir = os.path.dirname(paths.ADMIN_CERT_PATH)
            if not os.path.exists(parentdir):
                os.makedirs(parentdir)
            with open(paths.ADMIN_CERT_PATH, "wb") as admin_path:
                admin_path.write(
                    base64.b64encode(cert.public_bytes(x509.Encoding.DER))
                )

        # Generate configuration file
        pent = pwd.getpwnam(self.service_user)
        config = self._create_spawn_config(cfg)
        with tempfile.NamedTemporaryFile('w', delete=False) as f:
            config.write(f)
            os.fchown(f.fileno(), pent.pw_uid, pent.pw_gid)
            cfg_file = f.name

        nolog_list = [
            self.dm_password, self.admin_password, pki_pin, tmp_agent_pwd
        ]

        try:
            DogtagInstance.spawn_instance(
                self, cfg_file,
                nolog_list=nolog_list
            )
        finally:
            os.remove(p12_tmpfile_name)
            os.remove(cfg_file)
            os.remove(admin_p12_file)

        shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12)
        logger.debug("completed creating KRA instance")
Ejemplo n.º 54
0
async def download_video(v_url):  # sourcery skip: avoid-builtin-shadow
    """ For .ytdl command, download media from YouTube and many other sites. """
    url = v_url.pattern_match.group(2)
    type = v_url.pattern_match.group(1).lower()
    out_folder = Config.TMP_DOWNLOAD_DIRECTORY + "youtubedl/"

    if not os.path.isdir(out_folder):
        os.makedirs(out_folder)

    await v_url.edit("`Preparing to download...`")

    if type == "a":
        opts = {
            'format': 'bestaudio',
            'addmetadata': True,
            'key': 'FFmpegMetadata',
            'writethumbnail': True,
            'embedthumbnail': True,
            'audioquality': 0,
            'audioformat': 'mp3',
            'prefer_ffmpeg': True,
            'geo_bypass': True,
            'nocheckcertificate': True,
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '320',
            }],
            'outtmpl': out_folder+'%(title)s.mp3',
            'quiet': True,
            'logtostderr': False
        }
        video = False
        song = True

    elif type == "v":
        opts = {
            'format': 'best',
            'addmetadata': True,
            'key': 'FFmpegMetadata',
            'writethumbnail': True,
            'write_all_thumbnails': True,
            'embedthumbnail': True,
            'prefer_ffmpeg': True,
            'hls_prefer_native': True,
            'geo_bypass': True,
            'nocheckcertificate': True,
            'postprocessors': [{
                'key': 'FFmpegVideoConvertor',
                'preferedformat': 'mp4'
            }],
            'outtmpl': out_folder+'%(title)s.mp4',
            'logtostderr': False,
            'quiet': True
        }
        song = False
        video = True

    try:
        await v_url.edit("`Fetching data, please wait..`")
        with YoutubeDL(opts) as ytdl:
            ytdl_data = await loop.run_in_executor(None, ytdl.extract_info, url)
        filename = sorted(get_lst_of_files(out_folder, []))
    except DownloadError as DE:
        await v_url.edit(f"`{str(DE)}`")
        return
    except ContentTooShortError:
        await v_url.edit("`The download content was too short.`")
        return
    except GeoRestrictedError:
        await v_url.edit(
            "`Video is not available from your geographic location due to geographic restrictions imposed by a website.`"
        )
        return
    except MaxDownloadsReached:
        await v_url.edit("`Max-downloads limit has been reached.`")
        return
    except PostProcessingError:
        await v_url.edit("`There was an error during post processing.`")
        return
    except UnavailableVideoError:
        await v_url.edit("`Media is not available in the requested format.`")
        return
    except XAttrMetadataError as XAME:
        await v_url.edit(f"`{XAME.code}: {XAME.msg}\n{XAME.reason}`")
        return
    except ExtractorError:
        await v_url.edit("`There was an error during info extraction.`")
        return
    except Exception as e:
        await v_url.edit(f"{str(type(e)): {str(e)}}")
        return
    c_time = time.time()

    # cover_url = f"https://img.youtube.com/vi/{ytdl_data['id']}/0.jpg"
    # thumb_path = wget.download(cover_url, out_folder + "cover.jpg")

    # relevant_path = "./DOWNLOADS/youtubedl"
    # included_extensions = ["mp4","mp3"]
    # file_names = [fn for fn in os.listdir(relevant_path)
    #             if any(fn.endswith(ext) for ext in included_extensions)]

    if song:
        relevant_path = "./DOWNLOADS/youtubedl"
        included_extensions = ["mp3"]
        file_names = [fn for fn in os.listdir(relevant_path)
                      if any(fn.endswith(ext) for ext in included_extensions)]
        img_extensions = ["webp", "jpg", "jpeg"]
        img_filenames = [fn_img for fn_img in os.listdir(relevant_path) if any(
            fn_img.endswith(ext_img) for ext_img in img_extensions)]
        thumb_image = out_folder + img_filenames[0]

        # thumb = out_folder + "cover.jpg"
        file_path = out_folder + file_names[0]
        song_size = file_size(file_path)
        j = await v_url.edit(f"`Preparing to upload song:`\
        \n**{ytdl_data['title']}**\
        \nby *{ytdl_data['uploader']}*")
        await v_url.client.send_file(
            v_url.chat_id,
            file_path,
            caption=ytdl_data['title'] + "\n" + f"`{song_size}`",
            supports_streaming=True,
            thumb=thumb_image,
            attributes=[
                DocumentAttributeAudio(duration=int(ytdl_data['duration']),
                                       title=str(ytdl_data['title']),
                                       performer=str(ytdl_data['uploader']))
            ],
            progress_callback=lambda d, t: asyncio.get_event_loop(
            ).create_task(
                progress(d, t, v_url, c_time, "Uploading..",
                         f"{ytdl_data['title']}.mp3")))
        # os.remove(file_path)
        await asyncio.sleep(DELETE_TIMEOUT)
        os.remove(thumb_image)
        await j.delete()

    elif video:
        relevant_path = "./DOWNLOADS/youtubedl/"
        included_extensions = ["mp4"]
        file_names = [fn for fn in os.listdir(relevant_path)
                      if any(fn.endswith(ext) for ext in included_extensions)]
        img_extensions = ["webp", "jpg", "jpeg"]
        img_filenames = [fn_img for fn_img in os.listdir(relevant_path) if any(
            fn_img.endswith(ext_img) for ext_img in img_extensions)]
        thumb_image = out_folder + img_filenames[0]

        file_path = out_folder + file_names[0]
        video_size = file_size(file_path)
        # thumb = out_folder + "cover.jpg"

        j = await v_url.edit(f"`Preparing to upload video:`\
        \n**{ytdl_data['title']}**\
        \nby *{ytdl_data['uploader']}*")
        await v_url.client.send_file(
            v_url.chat_id,
            file_path,
            supports_streaming=True,
            caption=ytdl_data['title'] + "\n" + f"`{video_size}`",
            thumb=thumb_image,
            progress_callback=lambda d, t: asyncio.get_event_loop(
            ).create_task(
                progress(d, t, v_url, c_time, "Uploading..",
                         f"{ytdl_data['title']}.mp4")))
        os.remove(file_path)
        await asyncio.sleep(DELETE_TIMEOUT)
        os.remove(thumb_image)
        await v_url.delete()
        await j.delete()
    shutil.rmtree(out_folder)
Ejemplo n.º 55
0
def update():
    '''
    Execute an svn update on all of the repos
    '''
    # data for the fileserver event
    data = {'changed': False,
            'backend': 'svnfs'}
    pid = os.getpid()
    data['changed'] = purge_cache()
    for repo in init():
        lk_fn = os.path.join(repo['repo'], 'update.lk')
        with salt.utils.fopen(lk_fn, 'w+') as fp_:
            fp_.write(str(pid))
        old_rev = _rev(repo)
        try:
            CLIENT.update(repo['repo'])
        except pysvn._pysvn.ClientError as exc:
            log.error(
                'Error updating svnfs remote {0} (cachedir: {1}): {2}'
                .format(repo['url'], repo['cachedir'], exc)
            )
        try:
            os.remove(lk_fn)
        except (OSError, IOError):
            pass

        new_rev = _rev(repo)
        if any((x is None for x in (old_rev, new_rev))):
            # There were problems getting the revision ID
            continue
        if new_rev != old_rev:
            data['changed'] = True

    env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
    if data.get('changed', False) is True or not os.path.isfile(env_cache):
        env_cachedir = os.path.dirname(env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = envs(ignore_cache=True)
        serial = salt.payload.Serial(__opts__)
        with salt.utils.fopen(env_cache, 'w+') as fp_:
            fp_.write(serial.dumps(new_envs))
            log.trace('Wrote env cache data to {0}'.format(env_cache))

    # if there is a change, fire an event
    if __opts__.get('fileserver_events', False):
        event = salt.utils.event.get_event(
                'master',
                __opts__['sock_dir'],
                __opts__['transport'],
                opts=__opts__,
                listen=False)
        event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver'))
    try:
        salt.fileserver.reap_fileserver_cache_dir(
            os.path.join(__opts__['cachedir'], 'svnfs/hash'),
            find_file
        )
    except (IOError, OSError):
        # Hash file won't exist if no files have yet been served up
        pass
Ejemplo n.º 56
0
    def do_GET(self):
        logger.debug("do_GET: path=%s", self.path)
        if self.path == '/':
            if not self.server.conn.is_authorized():
                logger.debug("Not authorized yet, redir to HV")
                # Start by redirecting user to HealthVault to authorize us
                record_id = None
                if os.path.exists("RECORD_ID"):
                    with open("RECORD_ID", "r") as f:
                        record_id = f.read()
                url = self.server.conn.authorization_url(
                    '%s/authtoken' % BASE_URL, record_id)
                self.send_response(307)
                self.send_header("Location", url)
                self.end_headers()
                return
            self.show_data()
            return

        if self.path == '/submit':
            self.send_response(200)
            self.end_headers()
            return

        if self.path.startswith('/authtoken?'):
            # This is the redirect after the user has authed us
            # the params include the wctoken we'll be using from here on for this user's data
            logger.debug("Handling /authtoken...")
            o = urlparse(self.path)
            query = parse_qs(o.query)
            target = query['target'][0]
            if target == 'AppAuthReject':
                logger.debug('reject')
                self.send_response(200)
                self.end_headers()
                self.wfile.write("Auth was rejected (by the user?)")
                return
            if target not in ('AppAuthSuccess', 'SelectedRecordChanged'):
                logger.debug('no idea')
                self.send_response(200)
                self.end_headers()
                self.wfile.write("Unexpected authtoken target=%s\n" % target)
                self.wfile.write(self.path)
                return
            if 'wctoken' not in query:
                logger.debug('no wctoken given')
                self.send_response(200)
                self.end_headers()
                self.wfile.write("No WCTOKEN in query: %s" % self.path)
                self.wfile.close()
                return
            logger.debug("looks like we got a wctoken to use")
            try:
                self.set_wctoken(query['wctoken'][0])
            except HealthVaultException:
                logger.exception(
                    "Something went wrong trying to use the token")
                if os.path.exists("WCTOKEN"):
                    os.remove("WCTOKEN")
                self.send_response(307)
                self.send_header("Location", "/")
                self.end_headers()
                return

            logger.debug("Got token okay, redir to /")
            # Now redirect to / again
            self.send_response(307)
            self.send_header("Location", "/")
            self.end_headers()
            return

        # Tired of seeing errors for this one
        if self.path == '/favicon.ico':
            self.send_response(200)
            self.end_headers()
            return

        # We get here for any URL we don't recognize
        # Let's do an actual 404
        self.send_response(404)
        self.end_headers()
        self.wfile.write("Unknown URL: %r" % self.path)
        return
Ejemplo n.º 57
0
def main(argv):
    parser = argparse.ArgumentParser()
    required = parser.add_argument_group('required arguments')
    required.add_argument('-a', '--arch', type=str, 
            default=None, help='architecture to run jit-format on')
    required.add_argument('-o', '--os', type=str,
            default=None, help='operating system')
    required.add_argument('-c', '--coreclr', type=str,
            default=None, help='full path to coreclr')

    args, unknown = parser.parse_known_args(argv)

    if unknown:
        print('Ignoring argument(s): ', ','.join(unknown))

    if args.coreclr is None:
        print('Specify --coreclr')
        return -1
    if args.os is None:
        print('Specify --os')
        return -1
    if args.arch is None:
        print('Specify --arch')
        return -1

    if not os.path.isdir(expandPath(args.coreclr)):
        print('Bad path to coreclr')
        return -1

    coreclr = args.coreclr
    platform = args.os
    arch = args.arch

    my_env = os.environ

    # Download .Net CLI

    dotnetcliUrl = ""
    dotnetcliFilename = ""

    # build.cmd removes the Tools directory, so we need to put our version of jitutils
    # outside of the Tools directory

    dotnetcliPath = os.path.join(coreclr, 'dotnetcli-jitutils')

    # Try to make the dotnetcli-jitutils directory if it doesn't exist

    try:
        os.makedirs(dotnetcliPath)
    except OSError:
        if not os.path.isdir(dotnetcliPath):
            raise

    print("Downloading .Net CLI")
    if platform == 'Linux':
        dotnetcliUrl = "https://go.microsoft.com/fwlink/?LinkID=809129"
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
    elif platform == 'OSX':
        dotnetcliUrl = "https://go.microsoft.com/fwlink/?LinkID=809128"
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
    elif platform == 'Windows_NT':
        dotnetcliUrl = "https://go.microsoft.com/fwlink/?LinkID=809126"
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.zip')
    else:
        print('Unknown os ', platform)
        return -1

    response = urllib2.urlopen(dotnetcliUrl)
    request_url = response.geturl()
    testfile = urllib.URLopener()
    testfile.retrieve(request_url, dotnetcliFilename)

    if not os.path.isfile(dotnetcliFilename):
        print("Did not download .Net CLI!")
        return -1

    # Install .Net CLI

    if platform == 'Linux' or platform == 'OSX':
        tar = tarfile.open(dotnetcliFilename)
        tar.extractall(dotnetcliPath)
        tar.close()
    elif platform == 'Windows_NT':
        with zipfile.ZipFile(dotnetcliFilename, "r") as z:
            z.extractall(dotnetcliPath)

    dotnet = ""
    if platform == 'Linux' or platform == 'OSX':
        dotnet = "dotnet"
    elif platform == 'Windows_NT':
        dotnet = "dotnet.exe"


    if not os.path.isfile(os.path.join(dotnetcliPath, dotnet)):
        print("Did not extract .Net CLI from download")
        return -1

    # Download bootstrap

    bootstrapFilename = ""

    jitUtilsPath = os.path.join(coreclr, "jitutils")

    if os.path.isdir(jitUtilsPath):
        print("Deleting " + jitUtilsPath)
        shutil.rmtree(jitUtilsPath, onerror=del_rw)

    if platform == 'Linux' or platform == 'OSX':
        bootstrapFilename = "bootstrap.sh"
    elif platform == 'Windows_NT':
        bootstrapFilename = "bootstrap.cmd"

    bootstrapUrl = "https://raw.githubusercontent.com/dotnet/jitutils/master/" + bootstrapFilename

    bootstrapPath = os.path.join(coreclr, bootstrapFilename)
    testfile.retrieve(bootstrapUrl, bootstrapPath)

    if not os.path.isfile(bootstrapPath):
        print("Did not download bootstrap!")
        return -1

    # On *nix platforms, we need to make the bootstrap file executable

    if platform == 'Linux' or platform == 'OSX':
        print("Making bootstrap executable")
        os.chmod(bootstrapPath, 0751)

    print(bootstrapPath)

    # Run bootstrap

    my_env["PATH"] += os.pathsep + dotnetcliPath
    if platform == 'Linux' or platform == 'OSX':
        print("Running bootstrap")
        proc = subprocess.Popen(['bash', bootstrapPath], env=my_env)
        output,error = proc.communicate()
    elif platform == 'Windows_NT':
        proc = subprocess.Popen([bootstrapPath], env=my_env)
        output,error = proc.communicate()

    # Run jit-format

    returncode = 0
    jitutilsBin = os.path.join(coreclr, "jitutils", "bin")
    my_env["PATH"] += os.pathsep + jitutilsBin
    current_dir = os.getcwd()

    if not os.path.isdir(jitutilsBin):
        print("Jitutils not built!")
        return -1

    jitformat = jitutilsBin

    if platform == 'Linux' or platform == 'OSX':
        jitformat = os.path.join(jitformat, "jit-format")
    elif platform == 'Windows_NT':
        jitformat = os.path.join(jitformat,"jit-format.bat")
    errorMessage = ""

    builds = ["Checked", "Debug", "Release"]
    projects = ["dll", "standalone", "crossgen"]

    for build in builds:
        for project in projects:
            proc = subprocess.Popen([jitformat, "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env)
            output,error = proc.communicate()
            errorcode = proc.returncode

            if errorcode != 0:
                errorMessage += "\tjit-format -a " + arch + " -b " + build + " -o " + platform
                errorMessage += " -c <absolute-path-to-coreclr> --verbose --fix --projects " + project +"\n"
                returncode = errorcode

                # Fix mode doesn't return an error, so we have to run the build, then run with
                # --fix to generate the patch. This means that it is likely only the first run
                # of jit-format will return a formatting failure.
                if errorcode == -2:
                    # If errorcode was -2, no need to run clang-tidy again
                    proc = subprocess.Popen([jitformat, "--fix", "--untidy", "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env)
                    output,error = proc.communicate()
                else:
                    # Otherwise, must run both
                    proc = subprocess.Popen([jitformat, "--fix", "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env)
                    output,error = proc.communicate()

    os.chdir(current_dir)

    if returncode != 0:
        # Create a patch file; the with block closes it so the diff is fully flushed
        with open("format.patch", "w") as patchFile:
            proc = subprocess.Popen(["git", "diff", "--patch", "-U20"], env=my_env, stdout=patchFile)
            output, error = proc.communicate()

    if os.path.isdir(jitUtilsPath):
        print("Deleting " + jitUtilsPath)
        shutil.rmtree(jitUtilsPath, onerror=del_rw)

    if os.path.isdir(dotnetcliPath):
        print("Deleting " + dotnetcliPath)
        shutil.rmtree(dotnetcliPath, onerror=del_rw)

    if os.path.isfile(bootstrapPath):
        print("Deleting " + bootstrapPath)
        os.remove(bootstrapPath)

    if returncode != 0:
        buildUrl = my_env["BUILD_URL"]
        print("There were errors in formatting. Please run jit-format locally with: \n")
        print(errorMessage)
        print("\nOr download and apply generated patch:")
        print("wget " + buildUrl + "artifact/format.patch")
        print("git apply format.patch")

    return returncode
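
The example above follows a common CI formatting pattern: run the tool in check mode, and if it fails, rerun it in fix mode and publish the working-tree diff as a patch. A minimal sketch of that pattern, assuming a hypothetical formatter command cmd that accepts a --fix flag:

import subprocess

def check_and_patch(cmd, patch_name="format.patch"):
    # First run: check mode; a nonzero exit code means formatting problems.
    if subprocess.call(cmd) == 0:
        return 0
    # Second run: fix mode, so the corrections land in the working tree.
    subprocess.call(cmd + ["--fix"])
    # Capture the working-tree changes as a patch artifact for developers.
    with open(patch_name, "w") as patch_file:
        subprocess.call(["git", "diff", "--patch", "-U20"], stdout=patch_file)
    return 1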
Ejemplo n.º 58
def _BuildStagingDirectory(source_dir, staging_dir, bucket_ref,
                           excluded_regexes):
    """Creates a staging directory to be uploaded to Google Cloud Storage.

  The staging directory will contain a symlink for each file in the original
  directory. Each symlink is named with the sha1 hash of the original file's
  contents and points back to the original file.

  Consider the following original structure:
    app/
      main.py
      tools/
        foo.py
  Assume main.py has SHA1 hash 123 and foo.py has SHA1 hash 456. The resultant
  staging directory will look like:
    /tmp/staging/
      123 -> app/main.py
      456 -> app/tools/foo.py
  (Note: "->" denotes a symlink)

  If the staging directory is then copied to a GCS bucket at
  gs://staging-bucket/ then the resulting manifest will be:
    {
      "app/main.py": {
        "sourceUrl": "https://storage.googleapis.com/staging-bucket/123",
        "sha1Sum": "123"
      },
      "app/tools/foo.py": {
        "sourceUrl": "https://storage.googleapis.com/staging-bucket/456",
        "sha1Sum": "456"
      }
    }

  Args:
    source_dir: The original directory containing the application's source
      code.
    staging_dir: The directory where the staged files will be created.
    bucket_ref: A reference to the GCS bucket where the files will be uploaded.
    excluded_regexes: List of file patterns to skip while building the staging
      directory.

  Raises:
    LargeFileError: if one of the files to upload exceeds the maximum App Engine
    file size.

  Returns:
    A dictionary which represents the file manifest.
  """
    manifest = {}
    bucket_url = bucket_ref.GetPublicUrl()

    def AddFileToManifest(manifest_path, input_path):
        """Adds the given file to the current manifest.

    Args:
      manifest_path: The path to the file as it will be stored in the manifest.
      input_path: The location of the file to be added to the manifest.
    Returns:
      If the target was already in the manifest with different contents,
      returns None. In all other cases, returns a target location to which the
      caller must copy, move, or link the file.
    """
        file_ext = os.path.splitext(input_path)[1]
        sha1_hash = file_utils.Checksum().AddFileContents(
            input_path).HexDigest()

        target_filename = sha1_hash + file_ext
        target_path = os.path.join(staging_dir, target_filename)

        dest_path = '/'.join([bucket_url, target_filename])
        old_url = manifest.get(manifest_path, {}).get('sourceUrl', '')
        if old_url and old_url != dest_path:
            return None
        manifest[manifest_path] = {
            'sourceUrl': dest_path,
            'sha1Sum': sha1_hash,
        }
        return target_path

    for relative_path in util.FileIterator(source_dir, excluded_regexes):
        local_path = os.path.join(source_dir, relative_path)
        size = os.path.getsize(local_path)
        if size > _MAX_FILE_SIZE:
            raise LargeFileError(local_path, size, _MAX_FILE_SIZE)
        target_path = AddFileToManifest(relative_path, local_path)
        if not os.path.exists(target_path):
            _CopyOrSymlink(local_path, target_path)

    context_files = context_util.CreateContextFiles(staging_dir,
                                                    None,
                                                    overwrite=True,
                                                    source_dir=source_dir)
    for context_file in context_files:
        manifest_path = os.path.basename(context_file)
        target_path = AddFileToManifest(manifest_path, context_file)
        if not target_path:
            log.status.Print(
                'Not generating {0} because a user-generated '
                'file with the same name exists.'.format(manifest_path))
        if not target_path or os.path.exists(target_path):
            # If we get here, it probably means that the user already generated the
            # context file manually and put it either in the top directory or in some
            # subdirectory. The new context file is useless and may confuse later
            # stages of the upload (it is in the staging directory with a
            # nonconformant name), so delete it. The entry in the manifest will point
            # at the existing file.
            os.remove(context_file)
        else:
            # Rename the source-context*.json file (which is in the staging directory)
            # to the hash-based name in the same directory.
            os.rename(context_file, target_path)

    log.debug('Generated deployment manifest: "{0}"'.format(
        json.dumps(manifest, indent=2, sort_keys=True)))
    return manifest
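
A minimal sketch of the content-addressed staging idea described in the docstring above, written without the SDK helpers (the names sha1_of_file and stage_file are illustrative, not part of the SDK):

import hashlib
import os

def sha1_of_file(path, chunk_size=8192):
    # Hash the file contents, not the name, so identical files share one entry.
    digest = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

def stage_file(local_path, staging_dir, bucket_url, manifest, manifest_path):
    # Name the staged entry after its content hash and record it in the manifest.
    sha1_hash = sha1_of_file(local_path)
    target_path = os.path.join(staging_dir, sha1_hash)
    if not os.path.exists(target_path):
        os.symlink(os.path.abspath(local_path), target_path)
    manifest[manifest_path] = {
        "sourceUrl": bucket_url + "/" + sha1_hash,
        "sha1Sum": sha1_hash,
    }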
Ejemplo n.º 59
def del_rw(action, name, exc):
    # onerror handler for shutil.rmtree: make the path writable, then retry the delete
    os.chmod(name, 0o651)
    os.remove(name)
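
As in the jit-format example above, this function is intended as an onerror handler for shutil.rmtree, which would otherwise abort on read-only files (common on Windows). A usage sketch, assuming a jitutils directory to delete:

import shutil

shutil.rmtree("jitutils", onerror=del_rw)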
Ejemplo n.º 60
def delete(path):
    # Thin wrapper around os.remove; "path" avoids shadowing the builtin name "file"
    return os.remove(path)
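
A slightly more defensive variant of this wrapper, assuming missing files should be ignored (the same try/except pattern several of the examples above use) while other failures still surface:

import errno
import os

def delete_quietly(path):
    # Swallow only "file not found"; re-raise anything else (e.g. permission errors).
    try:
        os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise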