Example #1
0
    def test_download_tarball(self):
        """ download_tarball should download the tarball found at
        get_tarball_url, to get_target_directory """

        # Anonymous mocks standing in for the urlopen() response, the
        # output file object, the HTTP header message and the payload.
        mock_resource = self.mox.CreateMockAnything()
        mock_file = self.mox.CreateMockAnything()
        mock_http_message = self.mox.CreateMockAnything()
        mock_contents = self.mox.CreateMockAnything()

        # Stub out every collaborator download_tarball() touches.
        self.mox.StubOutWithMock(Updater, "get_tarball_url")
        self.mox.StubOutWithMock(urllib2, "urlopen")
        self.mox.StubOutWithMock(__builtin__, "open")
        self.mox.StubOutWithMock(os.path, "isdir")

        # Target directory already exists, so no mkdir call is expected.
        os.path.isdir(self.__update_dir).AndReturn(True)

        Updater.get_tarball_url(self.__repo).AndReturn(self.__tarball_url)
        urllib2.urlopen(self.__tarball_url).AndReturn(mock_resource)

        # The local filename is derived from the Content-Disposition header.
        mock_resource.info().AndReturn(mock_http_message)
        mock_http_message.get("Content-Disposition").AndReturn(self.__content_string)

        # The tarball is written via a ``with`` block, hence the expected
        # __enter__/__exit__ calls around the write.
        __builtin__.open(self.__target_filename, "wb").AndReturn(mock_file)
        mock_file.__enter__().AndReturn(mock_file)
        mock_file.write(mock_resource.read().AndReturn(mock_contents))
        mock_file.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.mox.ReplayAll()

        # Replay phase: exercise the code under test and check the returned path.
        updater = Updater(repo = self.__repo, update_dir = self.__update_dir)
        self.assertEquals(self.__target_filename, updater.download_tarball())
Example #2
0
    def test_download_tarball_download_directory_does_not_exist(self):
        """ if the folder in which the tarball is to be stored does not exist,
        it should be created. """

        # Anonymous mocks standing in for the urlopen() response, the
        # output file object, the HTTP header message and the payload.
        mock_resource = self.mox.CreateMockAnything()
        mock_file = self.mox.CreateMockAnything()
        mock_http_message = self.mox.CreateMockAnything()
        mock_contents = self.mox.CreateMockAnything()

        self.mox.StubOutWithMock(Updater, "get_tarball_url")
        self.mox.StubOutWithMock(urllib2, "urlopen")
        self.mox.StubOutWithMock(__builtin__, "open")
        self.mox.StubOutWithMock(os.path, "isdir")
        self.mox.StubOutWithMock(os, "mkdir")

        # Key difference from test_download_tarball: isdir() reports the
        # directory as missing, so an os.mkdir() call is expected.
        os.path.isdir(self.__update_dir).AndReturn(False)
        os.mkdir(self.__update_dir)

        Updater.get_tarball_url(self.__repo).AndReturn(self.__tarball_url)
        urllib2.urlopen(self.__tarball_url).AndReturn(mock_resource)

        # The local filename is derived from the Content-Disposition header.
        mock_resource.info().AndReturn(mock_http_message)
        mock_http_message.get("Content-Disposition").AndReturn(self.__content_string)

        # The tarball is written via a ``with`` block.
        __builtin__.open(self.__target_filename, "wb").AndReturn(mock_file)
        mock_file.__enter__().AndReturn(mock_file)
        mock_file.write(mock_resource.read().AndReturn(mock_contents))
        mock_file.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.mox.ReplayAll()

        updater = Updater(repo = self.__repo, update_dir = self.__update_dir)
        self.assertEquals(self.__target_filename, updater.download_tarball())
Example #3
0
    def test_read_modified_cached_file(self):
        """read_cached_file should re-read the file and invoke reload_func
        when the on-disk mtime is newer than the cached one."""
        self.mox.StubOutWithMock(os.path, "getmtime")
        self.mox.StubOutWithMock(__builtin__, 'open')
        # On-disk mtime (2) is newer than the cached mtime (1) set below,
        # which forces a reload.
        os.path.getmtime(mox.IgnoreArg()).AndReturn(2)

        fake_contents = "lorem ipsum"
        fake_file = self.mox.CreateMockAnything()
        fake_file.read().AndReturn(fake_contents)
        # open() is used as a context manager, so the mock must provide
        # __enter__/__exit__ around the file object.
        fake_context_manager = self.mox.CreateMockAnything()
        fake_context_manager.__enter__().AndReturn(fake_file)
        fake_context_manager.__exit__(mox.IgnoreArg(),
                                      mox.IgnoreArg(),
                                      mox.IgnoreArg())

        __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)

        self.mox.ReplayAll()
        cache_data = {"data": 1123, "mtime": 1}
        self.reload_called = False

        def test_reload(reloaded_data):
            # The reload callback must receive the freshly read contents.
            self.assertEqual(reloaded_data, fake_contents)
            self.reload_called = True

        data = utils.read_cached_file("/this/is/a/fake", cache_data,
                                                reload_func=test_reload)
        self.mox.UnsetStubs()
        self.assertEqual(data, fake_contents)
        self.assertTrue(self.reload_called)
Example #4
0
def main():
    """Command-line entry point: compress (default) or decompress (-d) a
    file, mimicking the lzop tool."""
    import argparse
    import os

    arg_parser = argparse.ArgumentParser(description='Compress or decompress like lzop')
    arg_parser.add_argument('-d', '--decompress', dest='decompress', action='store_true')
    arg_parser.add_argument('path')
    options = arg_parser.parse_args()

    base = os.path.basename(options.path)
    if not options.decompress:
        # Compress: read the raw input and stream it into a new .lzo file.
        with __builtin__.open(options.path, 'rb') as raw_in:
            with LzoFile(filename = options.path + ".lzo", mode = 'wb') as lzo_out:
                lzo_out.write(raw_in.read())
    else:
        # Decompress: strip a trailing .lzo, otherwise tag the output name.
        with LzoFile(filename = options.path) as lzo_in:
            stem, suffix = os.path.splitext(base)
            out_name = stem if suffix == '.lzo' else base + '.uncompressed'
            with __builtin__.open(out_name, 'wb') as raw_out:
                raw_out.write(lzo_in.read())
def create_crossvalidation_sets():
    """Build leave-one-out cross-validation sets from ./train partitions.

    For each i in 1..7 creates ./cross_validation/cv_set_i containing:
      * test.tsv  -- partition i, rewritten as token<TAB><TAB>tag lines
      * train.tsv -- the remaining six partitions, copied verbatim

    Fixes: removed the duplicated Python-2 debug ``print type(...)``
    statements and renamed the handle variable that shadowed the builtin
    ``file``.
    """
    if not os.path.exists('./cross_validation'):
        os.makedirs('./cross_validation')
    for i in range(1, 8):
        list_train_cv = []
        list_test_cv = []
        if not os.path.exists('./cross_validation/cv_set_' + str(i)):
            os.makedirs('./cross_validation/cv_set_' + str(i))
        for j in range(1, 8):
            if i == j:
                # Held-out partition: re-emit with a double tab between
                # token and tag (matches the downstream test-file format).
                with open('./train/train_partition_' + str(i) + '.tsv', 'r') as part_file:
                    for line in part_file:
                        list_test_cv.append(line)
                with open('./cross_validation/cv_set_' + str(i) + '/test.tsv', 'w') as test_file:
                    for line in list_test_cv:
                        list_line = line.strip().split('\t')
                        test_file.write(list_line[0] + '\t' + '\t' + list_line[1] + '\n')
            else:
                # All other partitions form the training set, in order.
                with open('./train/train_partition_' + str(j) + '.tsv', 'r') as part_file:
                    for line in part_file:
                        list_train_cv.append(line)
        with open('./cross_validation/cv_set_' + str(i) + '/train.tsv', 'w') as train_file:
            for line in list_train_cv:
                train_file.write(line)
 def test_write_to_file_failure(self):
     """write() should propagate FileNotFound raised while opening the file."""
     self.mox.StubOutWithMock(__builtin__, 'open')
     # Opening any path for writing fails with the canned error message.
     __builtin__.open(mox.IgnoreArg(), 'w').AndRaise(
         exception.FileNotFound(fakes.ERRORMSG))
     self.mox.ReplayAll()
     fh = ovzfile.OVZFile(fakes.TEMPFILE, 755)
     self.assertRaises(exception.FileNotFound, fh.write)
Example #7
0
 def test_file_handle(self):
     """load_audio_file must accept both open and already-closed file
     handles, for both the wave loader and the ffmpeg loader."""
     # test wave loader
     f = DATA_PATH + '/sample.wav'
     # open file handle
     file_handle = __builtin__.open(f)
     signal, sample_rate = load_audio_file(file_handle)
     self.assertIsInstance(signal, np.ndarray)
     self.assertTrue(signal.dtype == np.int16)
     self.assertTrue(type(sample_rate) == int)
     file_handle.close()
     # closed file handle: loading must still succeed
     signal, sample_rate = load_audio_file(file_handle)
     self.assertIsInstance(signal, np.ndarray)
     self.assertTrue(signal.dtype == np.int16)
     self.assertTrue(type(sample_rate) == int)
     # test ffmpeg loader
     f = DATA_PATH + '/stereo_sample.flac'
     # open file handle
     file_handle = __builtin__.open(f)
     signal, sample_rate = load_audio_file(file_handle)
     self.assertIsInstance(signal, np.ndarray)
     self.assertTrue(signal.dtype == np.int16)
     # NOTE: the ffmpeg path reports the sample rate as float,
     # the wave path above as int.
     self.assertTrue(type(sample_rate) == float)
     file_handle.close()
     # closed file handle
     signal, sample_rate = load_audio_file(file_handle)
     self.assertIsInstance(signal, np.ndarray)
     self.assertTrue(signal.dtype == np.int16)
     self.assertTrue(type(sample_rate) == float)
def write_test_file(list_test_data):
    """Write the test sentences to ./test in two forms.

    list_test_data is a list of sentences, each a list of (word, tag)
    pairs.  Produces:
      * testing_file_with_tags.tsv    -- word<TAB><TAB>tag per line
      * testing_file_without_tags.tsv -- word per line

    Fixes: the try/finally pairs became ``with`` blocks, the Python-2
    debug ``print`` statements were removed, and the final read-back
    ``print open(...).read()`` (which leaked an open handle) was dropped.
    """
    if not os.path.exists('./test'):
        os.makedirs('./test')

    with open('./test/testing_file_with_tags.tsv', 'w') as f:
        for list_sentence in list_test_data:
            for word_tag in list_sentence:
                f.write(word_tag[0] + '\t' + '\t' + word_tag[1] + '\n')

    with open('./test/testing_file_without_tags.tsv', 'w') as f:
        for list_sentence in list_test_data:
            for word_tag in list_sentence:
                f.write(word_tag[0] + '\n')
Example #9
0
def _test():
    """Minimal gzip/gunzip CLI: with -d decompress each argument (or
    stdin when the argument is '-'), otherwise compress it to <arg>.gz."""
    args = sys.argv[1:]
    decompress = args and args[0] == '-d'
    if decompress:
        args = args[1:]
    if not args:
        args = ['-']  # default: act as a stdin-to-stdout filter
    for arg in args:
        if decompress:
            if arg == '-':
                # gunzip stdin -> stdout
                f = GzipFile(filename='', mode='rb', fileobj=sys.stdin)
                g = sys.stdout
            else:
                if arg[-3:] != '.gz':
                    print "filename doesn't end in .gz:", repr(arg)
                    continue
                # NOTE(review): plain ``open`` here presumably resolves to
                # this module's gzip-aware open, while __builtin__.open is
                # the real builtin -- confirm against the enclosing module.
                f = open(arg, 'rb')
                g = __builtin__.open(arg[:-3], 'wb')
        elif arg == '-':
            # gzip stdin -> stdout
            f = sys.stdin
            g = GzipFile(filename='', mode='wb', fileobj=sys.stdout)
        else:
            f = __builtin__.open(arg, 'rb')
            g = open(arg + '.gz', 'wb')
        # Copy in 1 KiB chunks.
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            g.write(chunk)

        # Never close the shared standard streams.
        if g is not sys.stdout:
            g.close()
        if f is not sys.stdin:
            f.close()
Example #10
0
    def update_config(self, cwd=None):
        """
        Update the configuration files according to the current
        in-memory SExtractor configuration.

        Writes the convolution filter, the output-parameter list, the
        NNW data and the main configuration file into *cwd*
        ('.' by default).
        """
        if cwd is None:
            cwd = '.'

        # -- Write filter configuration file

        # First check the filter itself (renamed from ``filter`` so the
        # builtin is no longer shadowed).
        conv_mask = self.config['FILTER_MASK']
        rows = len(conv_mask)
        cols = len(conv_mask[0])   # May raise ValueError, OK

        # ``with`` blocks guarantee every handle closes even if a write fails.
        with __builtin__.open(os.path.join(cwd, self.config['FILTER_NAME']), 'w') as filter_f:
            filter_f.write("CONV NORM\n")
            filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
                           (rows, cols))
            for row in conv_mask:
                filter_f.write(" ".join(map(repr, row)))
                filter_f.write("\n")

        # -- Write parameter list file

        with __builtin__.open(os.path.join(cwd, self.config['PARAMETERS_NAME']), 'w') as parameters_f:
            for parameter in self.config['PARAMETERS_LIST']:
                # Equivalent to ``print >>parameters_f`` but Py3-safe.
                parameters_f.write("%s\n" % (parameter,))

        # -- Write NNW configuration file

        with __builtin__.open(os.path.join(cwd, self.config['STARNNW_NAME']), 'w') as nnw_f:
            nnw_f.write(nnw_config)

        # -- Write main configuration file

        with __builtin__.open(os.path.join(cwd, self.config['CONFIG_FILE']), 'w') as main_f:
            for key in self.config.keys():
                if key in SExtractor._SE_config_special_keys:
                    continue
                value = self.config[key]
                # Sequences and arrays are emitted comma-separated.
                if isinstance(value, (list, tuple)):
                    value = ", ".join(map(str, value))
                elif isinstance(value, np.ndarray):
                    value = ", ".join(map(str, value.tolist()))
                else:
                    value = str(value)

                main_f.write("%-16s       %-16s # %s\n" %
                             (key, value, SExtractor._SE_config[key]['comment']))
Example #11
0
def _test():
    # Act like gzip; with -d, act like gunzip; with -D, act like dictzip
    # The input file is not deleted, however, nor are any other gzip
    # options or features supported.
    args = sys.argv[1:]
    decompress = args and args[0] == "-d"
    dictzip = args and args[0] == "-D"
    if decompress or dictzip:
        args = args[1:]
    if not args:
        args = ["-"]  # default: act as a stdin-to-stdout filter
    for arg in args:
        if decompress:
            if arg == "-":
                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
                g = sys.stdout
            else:
                if arg[-3:] != ".gz" and arg[-3:] != ".dz":
                    print "filename doesn't end in .gz or .dz:", repr(arg)
                    continue
                # NOTE(review): plain ``open`` here presumably resolves to
                # this module's gzip-aware open, while __builtin__.open is
                # the real builtin -- confirm against the enclosing module.
                f = open(arg, "rb")
                g = __builtin__.open(arg[:-3], "wb")
        else:
            if dictzip:
                # Explicit buffersize for dictzip output -- presumably this
                # bounds each compressed member for random access; confirm
                # against the GzipFile implementation.
                buffersize = 1000000
                ext = ".dz"
            else:
                buffersize = None
                ext = ".gz"
            if arg == "-":
                f = sys.stdin
                g = GzipFile(
                    filename="", mode="wb", fileobj=sys.stdout, chunksize=1000, buffersize=buffersize)
            else:
                f = __builtin__.open(arg, "rb")
                g = open(
                    arg + ext, "wb", chunksize=1000, buffersize=buffersize)
        blocksize = 1024
        if False:
            # Plain sequential copy (disabled in favour of the seek test).
            while True:
                chunk = f.read(blocksize)
                if not chunk:
                    break
                g.write(chunk)
        else:
            # test the random access code
            ptr = 0
            while True:
                # Rewind then re-seek each block to exercise seeking.
                f.seek(0)
                f.seek(ptr)
                chunk = f.read(blocksize)
                if not chunk:
                    break
                g.write(chunk)
                ptr += blocksize
        # Never close the shared standard streams.
        if g is not sys.stdout:
            g.close()
        if f is not sys.stdin:
            f.close()
Example #12
0
def mv_content(fro, to):
    """Copy the contents of file *fro* into directory *to*.

    The destination file keeps the source's basename; the source file is
    left in place.  Returns the destination path.

    Fixes: the source handle was previously opened without being closed;
    both files now use ``with`` blocks, and the path is joined with
    os.path.join instead of manual '/'-formatting.
    """
    with open(fro) as src:
        txt = src.read()
    name = os.path.join(to, os.path.basename(fro))
    with open(name, 'w') as dst:
        dst.write(txt)
    return name
Example #13
0
    def update_config(self):
        """
        Update the configuration files according to the current
        in-memory SExtractor configuration.

        Writes the convolution filter, the output-parameter list, the
        NNW data and the main configuration file into the current
        directory.
        """

        # -- Write filter configuration file

        # First check the filter itself (renamed from ``filter`` so the
        # builtin is no longer shadowed).
        conv_mask = self.config['FILTER_MASK']
        rows = len(conv_mask)
        cols = len(conv_mask[0])   # May raise ValueError, OK

        # ``with`` blocks guarantee every handle closes even if a write fails.
        with __builtin__.open(self.config['FILTER_NAME'], 'w') as filter_f:
            filter_f.write("CONV NORM\n")
            filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
                           (rows, cols))
            for row in conv_mask:
                filter_f.write(" ".join(map(repr, row)))
                filter_f.write("\n")

        # -- Write parameter list file

        with __builtin__.open(self.config['PARAMETERS_NAME'], 'w') as parameters_f:
            for parameter in self.config['PARAMETERS_LIST']:
                # Equivalent to ``print >>parameters_f`` but Py3-safe.
                parameters_f.write("%s\n" % (parameter,))

        # -- Write NNW configuration file

        with __builtin__.open(self.config['STARNNW_NAME'], 'w') as nnw_f:
            nnw_f.write(nnw_config)

        # -- Write main configuration file

        with __builtin__.open(self.config['CONFIG_FILE'], 'w') as main_f:
            for key in self.config.keys():
                if key in SExtractor._SE_config_special_keys:
                    continue

                if key == "PHOT_AUTOPARAMS":  # tuple instead of a single value
                    value = ",".join(map(str, self.config[key]))
                else:
                    value = str(self.config[key])

                main_f.write("%-16s       %-16s # %s\n" %
                             (key, value, SExtractor._SE_config[key]['comment']))
Example #14
0
    def __init__(self, directory):

        """Loads a new test case stored in the given directory."""        

        # Module-level configuration shared with the test driver.
        global cmdLineArchitectures
        global extraCompileFlags

        self.testExtraCompileFlags = extraCompileFlags
        self.description = ""
        self.architectures = []
        self.directory = directory
        # Title is the directory path without a leading './'.
        if directory.startswith('./'):
            self.title = directory[2:]
        else:
            self.title = directory

        # Mandatory human-readable description of the test case.
        descriptionFile = __builtin__.open(directory + '/description.txt', 'r')
        self.description = descriptionFile.read().strip()
        descriptionFile.close()

        # Architecture list priority: per-test architectures.lst file,
        # then the command line, then the global default.
        if os.access(directory + '/architectures.lst', R_OK):
            archFile = __builtin__.open(directory + '/architectures.lst')
            for line in archFile.read().splitlines():
                arch = line.strip()
                if arch != '':
                    self.architectures.append(arch)
            archFile.close()
        elif len(cmdLineArchitectures) > 0:
            self.architectures = cmdLineArchitectures            
        else:
            self.architectures = allArchitectures

        # Optional extra compiler flags, appended space-separated.
        # NOTE(review): flagsFile is never closed here -- confirm the leak
        # is acceptable for this short-lived script.
        if os.access(directory + '/extraCompileFlags', R_OK):
            flagsFile = __builtin__.open(directory + '/extraCompileFlags')
            for line in flagsFile.read().splitlines():
                param = line.strip()
                if param != '':
                    self.testExtraCompileFlags += ' ';
                    self.testExtraCompileFlags += param;
                    self.testExtraCompileFlags += ' ';

        self.improvedRuns = False
        # Simulation results for each architecture (the verification data and the 
        # cycle count).
        self.results = {}      
        # Simulation stats for each architecture.
        self.stats = {}  
        self.loadOldResults()
        self.setupExecuted = False
        self.parallelPrograms = []
        
        # If requested. stores the results for each architecture in LaTeX format
        self.latexResults = {}
        self.latexColumnCount = 0

        self.seqCycleCount = -1
Example #15
0
def open_bam(input):
    """Stream *input* through ``samtools view -h`` and return its stdout pipe.

    *input* may be a path (string) or an already-open file-like object,
    which is then fed to samtools' stdin.
    """
    # NOTE(review): the /dev/null handle opened for stderr is never closed
    # explicitly; presumably it is reclaimed with the child process --
    # confirm this is acceptable for long-running callers.
    if isinstance(input, basestring):
       return subprocess.Popen(["samtools", "view", "-h", input],
            stdout=subprocess.PIPE,
            stderr=__builtin__.open("/dev/null", 'w'),
            close_fds=True).stdout
    return subprocess.Popen(["samtools", "view", "-h", "-"],
            stdout=subprocess.PIPE,
            stdin=input,
            stderr=__builtin__.open("/dev/null", 'w'),
            close_fds=True).stdout
Example #16
0
 def test_ovz_network_interfaces_add_success(self):
     """add() should append/write the network file and wire every
     interface through _add_netif."""
     self.mox.StubOutWithMock(openvz_conn.OVZNetworkFile, "append")
     openvz_conn.OVZNetworkFile.append(mox.IgnoreArg()).MultipleTimes()
     self.mox.StubOutWithMock(openvz_conn.OVZNetworkFile, "write")
     openvz_conn.OVZNetworkFile.write().MultipleTimes()
     # The network template is served from the stubbed builtin open().
     self.mox.StubOutWithMock(__builtin__, "open")
     __builtin__.open(mox.IgnoreArg()).AndReturn(StringIO(NETTEMPLATE))
     ifaces = openvz_conn.OVZNetworkInterfaces(INTERFACEINFO)
     self.mox.StubOutWithMock(ifaces, "_add_netif")
     ifaces._add_netif(INTERFACEINFO[0]["id"], mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
     self.mox.ReplayAll()
     ifaces.add()
Example #17
0
File: sgzip.py Project: kball/ambry
    def compress():
        """Compress uc_fn into cpm_fn via GzipFile, attaching an extra
        field and a comment to the gzip header."""

        with  __builtin__.open (uc_fn,'rb') as f_in:
            with  __builtin__.open (cpm_fn,'wb') as f_out:
                gz_out = GzipFile(f_out, mode='wb', 
                        extra = ['this is extra',{'foo':'bar'}], comment='This is the comment')
                # Deliberately tiny 10-byte chunks -- this is exercise code.
                while True:
                    chunk=f_in.read(10)
                    if not chunk:
                        break
                    gz_out.write(chunk)
                gz_out.close()
Example #18
0
 def znajdzBiblioteke(self, library, package):
     """Locate *package* of *library* in the configured library paths.

     Returns the parsed footprint (layout) JSON, or False when the
     package cannot be found or parsed in any path.

     Fixes: file handles are now closed via ``with`` (they were leaked),
     and the bare ``except`` is narrowed to the errors this lookup can
     legitimately produce (missing file, bad JSON, missing key).
     """
     libPath = str(self.dialogMAIN.razenBiblioteki.text()).split(';')

     for lib in libPath:
         base = os.path.join(lib, library, package)
         try:
             # main lib file: names the footprints file via its 'layout' key
             with __builtin__.open(os.path.join(base, 'design.rzp'), "r") as main_file:
                 design = json.load(main_file)
             # footprints file
             with __builtin__.open(os.path.join(base, design['layout']), "r") as layout_file:
                 return json.load(layout_file)
         except (EnvironmentError, ValueError, KeyError):
             # Not present / unreadable / corrupt in this path: try the next.
             continue
     return False
Example #19
0
 def put_source(self, localpath, source):
     """Put the source obtained from the given url to cache.

        Use a custom makedirs() to overcome dev_appserver restrictions.

     Fixes: the write handle was previously leaked
     (``open(...).write(...)``); it is now closed via ``with``.  The bare
     ``except`` around makedirs() is narrowed to OSError.
     """
     localpath = os.path.abspath(localpath)
     dirname = os.path.dirname(localpath)
     log.debug("put_source: trying to create %s" % dirname)
     try:
         os.makedirs(dirname)
     except OSError:
         # Directory may already exist (or creation is restricted);
         # any real problem will surface in the open() below.
         pass
     log.debug("put_source: trying to write %s" % localpath)
     with __builtin__.open(localpath, "w") as cache_file:
         cache_file.write(source)
     log.debug("put_source: wrote %s" % localpath)
Example #20
0
def isfile(path):
    """Mock `isfile` to check existence of test files.

    :Parameters:
        path : `str`
            File to check for existence
    :rtype: `bool`
    :return: `True` if file exists, `False` otherwise
    """
    # Existence is probed by attempting to open the file in the test data
    # directory; only the basename of *path* is considered.
    # NOTE(review): the probe handle is never closed on success -- confirm
    # the leak is acceptable in this test helper.
    filename = os.path.basename(path)
    try:
        __builtin__.open(os.path.join(BASEDIR, 'data', filename))
    except IOError:
        return False
    return True
Example #21
0
    def _shelve_safe_open(self, filename, flag='c', protocol=None, writeback=False, block=True, lckfilename=None):
        """Open the shelve file, creating a lockfile at filename.lck.

        If *block* is False an IOError is raised when the lock cannot be
        acquired immediately.
        """
        if lckfilename is None:
            lckfilename = filename + ".lck"
        # The lock lives on a sidecar file so the shelf file itself is free.
        lckfile = __builtin__.open(lckfilename, 'w')

        # Acquire the lock: shared for read-only access, exclusive otherwise.
        if flag == 'r':
            lockflags = LOCK_SH
        else:
            lockflags = LOCK_EX
        if not block:
            # Fix: OR LOCK_NB into the flags instead of overwriting them,
            # so a non-blocking request still carries the SH/EX lock type.
            lockflags |= LOCK_NB
        fcntl.flock(lckfile.fileno(), lockflags)

        # Open the shelf
        shelf = shelve.open(filename, flag, protocol, writeback)

        # Override close so that closing the shelf also releases the lock.
        shelf.close = new.instancemethod(self._shelve_safe_close, shelf, shelve.Shelf)
        shelf.lckfile = lckfile

        # And return it
        return shelf
Example #22
0
 def __init__(self, filename=None, mode="r", fileobj=None, max_cache=100):
     """Open a BGZF file for reading, from *filename* or *fileobj*.

     *max_cache* bounds the number of decompressed blocks kept in memory.
     """
     #TODO - Assuming we can seek, check for 28 bytes EOF empty block
     #and if missing warn about possible truncation (as in samtools)?
     if max_cache < 1:
         raise ValueError("Use max_cache with a minimum of 1")
     #Must open the BGZF file in binary mode, but we may want to
     #treat the contents as either text or binary (unicode or
     #bytes under Python 3)
     if fileobj:
         # Caller-supplied handle must already be binary.
         assert filename is None
         handle = fileobj
         assert "b" in handle.mode.lower()
     else:
         if "w" in mode.lower() \
         or "a" in mode.lower():
             raise ValueError("Must use read mode (default), not write or append mode")
         handle = __builtin__.open(filename, "rb")
     # Text mode only affects which newline string the reader uses.
     self._text = "b" not in mode.lower()
     if self._text:
         self._newline = "\n"
     else:
         self._newline = b"\n"
     self._handle = handle
     self.max_cache = max_cache
     # Cache of decompressed blocks, keyed by start offset.
     self._buffers = {}
     self._block_start_offset = None
     self._block_raw_length = None
     # Prime the reader by loading the first block.
     self._load_block(handle.tell())
Example #23
0
def open(filename):
    """Open a Quake2 WAL texture and return it as a "P"-mode Image.

    *filename* may be a path or an object with a ``read`` method.
    """
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?

    if hasattr(filename, "read"):
        fp = filename
    else:
        # Local import: this module-level ``open`` shadows the builtin.
        import __builtin__
        fp = __builtin__.open(filename, "rb")

    # read header fields
    header = fp.read(32+24+32+12)
    size = i32(header, 32), i32(header, 36)
    offset = i32(header, 40)

    # load pixel data
    fp.seek(offset)

    im = Image.fromstring("P", size, fp.read(size[0] * size[1]))
    im.putpalette(quake2palette)

    im.format = "WAL"
    im.format_description = "Quake2 Texture"

    # strings are null-terminated
    im.info["name"] = header[:32].split("\0", 1)[0]
    next_name = header[56:56+32].split("\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name

    return im
Example #24
0
    def __init__(self, f):
        """Spawn an ffmpeg child process that encodes raw 16-bit 44.1 kHz
        stereo PCM fed through its stdin into the output file *f*."""
        self.pipe = None

        # Platform-specific filename encoding and console suppression.
        if sys.platform == 'win32':
            f = f.encode('CP932')
            startupinfo = sp.STARTUPINFO()
            startupinfo.dwFlags |= sp.STARTF_USESHOWWINDOW
        else:
            f = f.encode('UTF-8')
            startupinfo = None

        # Sink for ffmpeg's diagnostic output (closed with the object,
        # presumably -- kept as an attribute so it outlives this call).
        self.nul_file = __builtin__.open(os.devnull, 'w')

        # NOTE(review): ext is computed but unused by the active code path
        # below (only the disabled acodec variant used it) -- confirm.
        ext = get_ext(f)

        '''
        acodec = get_encoder(ext)

        if not acodec:
            raise Exception('Not Found Encoder')

        args = [ffmpeg_bin, '-y', '-f', 's16le', '-acodec', 'pcm_s16le',
            '-ar', '44100', '-ac','2', '-i', '-',
            '-vn', '-acodec', acodec, '-ab', '128k', f]
        '''

        # Codec selection is left to ffmpeg (inferred from the extension).
        args = [ffmpeg_bin, '-y', '-f', 's16le', '-acodec', 'pcm_s16le',
            '-ar', '44100', '-ac','2', '-i', '-',
            '-vn', '-ab', '128k', f]

        self.pipe = sp.Popen(args,
            stdin=sp.PIPE, stdout=sp.PIPE, stderr=self.nul_file,
            startupinfo=startupinfo)
Example #25
0
File: sgzip.py Project: kball/ambry
    def decompress():
        """Decompress cpm_fn into dcpm_fn via GzipFile, then print the
        extra field and comment recovered from the gzip header."""

        with  __builtin__.open (cpm_fn,'rb') as f_in:
            with  __builtin__.open (dcpm_fn,'wb') as f_out:
                gz_in = GzipFile( flo(f_in), mode='rb')

                # Deliberately tiny 10-byte chunks -- this is exercise code.
                while True:
                    chunk=gz_in.read(10)
                    if not chunk:
                        break
                    f_out.write(chunk)
                gz_in.close()

                print 'Extra:', gz_in.extra
                print 'Extra:', gz_in.extra[1]['foo']
                print 'Comment:', gz_in.comment
Example #26
0
def setProjectFile(filename):
    """Split a LISP-style project file into '[start]...[stop]' chunks.

    Reads *filename* (skipping its first character) and scans for
    top-level parenthesised expressions, ignoring parentheses that occur
    inside quoted strings.  Each complete expression is wrapped in
    '[start]'/'[stop]' markers and the concatenation is returned.

    Fix: the file handle was previously leaked; it is now closed via
    ``with``.  The parsing logic itself is preserved verbatim.
    """
    with __builtin__.open(filename, "r") as project_file:
        projektBRD = project_file.read()[1:]
    wynik = ''
    licznik = 0   # current parenthesis nesting depth
    txt = ''      # text of the expression being accumulated
    start = 0     # 1 while inside an expression
    # txt_1: 1 while inside a quoted string.
    # NOTE(review): a string opened with " can be closed by ' (and vice
    # versa) -- behaviour preserved as-is; confirm whether that matters
    # for the project files being parsed.
    txt_1 = 0

    for i in projektBRD:
        if i in ['"', "'"] and txt_1 == 0:
            txt_1 = 1
        elif i in ['"', "'"] and txt_1 == 1:
            txt_1 = 0
        
        if txt_1 == 0:
            if i == '(':
                licznik += 1
                start = 1
            elif i == ')':
                licznik -= 1
        
        txt += i
        
        # Depth back to zero: one complete top-level expression collected.
        if licznik == 0 and start == 1:
            wynik += '[start]' + txt.strip() + '[stop]'
            txt = ''
            start = 0
    
    return wynik
Example #27
0
 def __init__(self, filename = None, mode = None, compresslevel = 9, fileobj = None, mtime = None):
     """Open a gzip stream over *fileobj*, or over *filename* (opened here).

     *mode* defaults to the fileobj's mode, or 'rb'; a 'b' is forced into
     any explicit mode.  *compresslevel* and *mtime* apply when writing.
     """
     if mode and 'b' not in mode:
         mode += 'b'   # gzip data is always binary
     if fileobj is None:
         fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
     if filename is None:
         # Best effort: recover a name from the file object for the header.
         if hasattr(fileobj, 'name'):
             filename = fileobj.name
         else:
             filename = ''
     if mode is None:
         if hasattr(fileobj, 'mode'):
             mode = fileobj.mode
         else:
             mode = 'rb'
     if mode[0:1] == 'r':
         self.mode = READ
         self._new_member = True
         # Buffer of decompressed-but-unread data.
         self.extrabuf = ''
         self.extrasize = 0
         self.extrastart = 0
         self.name = filename
         # Presumably the initial readline() buffer size, as in the
         # stdlib gzip module -- confirm against the rest of this class.
         self.min_readsize = 100
     elif mode[0:1] == 'w' or mode[0:1] == 'a':
         self.mode = WRITE
         self._init_write(filename)
         # Raw deflate stream (negative wbits): the gzip header/trailer
         # are produced by this class, not by zlib.
         self.compress = zlib.compressobj(compresslevel, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
     else:
         raise IOError, 'Mode ' + mode + ' not supported'
     self.fileobj = fileobj
     self.offset = 0
     self.mtime = mtime
     if self.mode == WRITE:
         self._write_gzip_header()
Example #28
0
 def __init__(self, f):
     """Wrap *f*: a filename (opened here in binary mode, and closed by
     this object later) or an already-open file object."""
     self._i_opened_the_file = None
     # isinstance is the idiomatic type test (was: type(f) == type('')).
     if isinstance(f, str):
         f = __builtin__.open(f, 'rb')
         self._i_opened_the_file = f
     # else, assume it is an open file object already
     self.initfp(f)
Example #29
0
 def __init__(self, filename=None, mode="w", fileobj=None, compresslevel=6):
     """Open a BGZF writer over *fileobj* or *filename*.

     Only write ('w') and append ('a') modes are accepted; the
     underlying handle is always opened in binary mode.
     """
     if fileobj:
         # Caller-supplied handle wins; a filename may not also be given.
         assert filename is None
         handle = fileobj
     else:
         if "w" not in mode.lower() \
         and "a" not in mode.lower():
             raise ValueError("Must use write or append mode, not %r" % mode)
         if "a" in mode.lower():
             handle = __builtin__.open(filename, "ab")
         else:
             handle = __builtin__.open(filename, "wb")
     self._text = "b" not in mode.lower()
     self._handle = handle
     # Uncompressed data pending in the current (unflushed) block.
     self._buffer = b""
     self.compresslevel = compresslevel
Example #30
0
 def _isinstalled(browser):
     """Return True if AppleScript can report *browser*'s version, i.e.
     the application is installed (macOS only).

     Fix: the os.devnull handle used to silence osascript was previously
     leaked; it is now closed via ``with``.
     """
     script = '''
            get version of application "%s"
            ''' % browser
     # NOTE(review): *browser* is interpolated into a shell command --
     # fine for trusted, hard-coded browser names; never pass user input.
     with __builtin__.open(os.devnull, 'w') as devnull:
         rc = subprocess.call("osascript -e '%s'" % script, shell=True,
                              stdout=devnull, stderr=devnull)
     return not rc
Example #31
0
 def setProject(self, filename):
     '''Load project from file.

     Reads the board file with normalized line endings into
     self.projektBRD and extracts the layer names (the L= field of each
     SIGNAL record) into self.layers.

     Fix: the file handle was previously leaked; ``with`` now closes it.
     '''
     with __builtin__.open(filename, "r") as project_file:
         self.projektBRD = project_file.read().replace('\r\n', '\n')
     self.layers = re.findall(r'\(SIGNAL T=.+? P=.+? L=(.+?)\)',
                              self.projektBRD)
Example #32
0
def open( fp, mode = 'rb', start = None, shutdown = None ):
  """Parse a Paradox database file and return it as a CDatabase.

  fp        -- path of the .DB file; the whole file is read into memory.
  mode      -- must be 'rb' (asserted below).
  start     -- incremental mode: requires the first field to be an
               autoincrement; reading stops (returning the partial
               database) once a record's first field drops below this
               value.
  shutdown  -- optional object with an is_set() method (e.g. a
               threading.Event); when set, Shutdown is raised to abort
               a long conversion.

  Raises Error for malformed or encrypted files.
  """
  assert 'rb' == mode
  with __builtin__.open( fp, mode ) as oFile:
    oReader = CReaderParadox( oFile.read() )
  oDb = CDatabase()

  ##  Common header.
  oDb.recordSize = oReader.read( '<H' )
  oDb.headerSize = oReader.read( '<H' )
  oDb.fileType = oReader.read( '<B' )
  ##  Only data files (types 0 and 2) are supported.
  if oDb.fileType not in [ 0, 2 ]:
    raise Error( MSG_ERR_FILE.format( fp ) )
  oDb.maxTableSize = oReader.read( '<B' )
  if oDb.maxTableSize not in range( 1, 32 + 1 ):
    raise Error( MSG_ERR_FILE.format( fp ) )
  oDb.recordsCount = oReader.read( '<I' )
  oReader.read( '<H' ) # Next block.
  oReader.read( '<H' ) # File blocks.
  oReader.read( '<H' ) # First block.
  oReader.read( '<H' ) # Last block.
  oReader.read( '<H' ) # Unknown.
  oReader.read( '<B' ) # Rebuild flag.
  oReader.read( '<B' ) # Index field number.
  oReader.read( '<I' ) # Primary index pointer.
  oReader.read( '<I' ) # Unknown.
  oReader.readArray( 3 ) # Unknown.
  oDb.fieldsCount = oReader.read( '<H' )
  oReader.read( '<H' ) # Primary key fields.
  oReader.read( '<I' ) # Encryption.
  oDb.sortOrder = oReader.read( '<B' )
  oReader.read( '<B' ) # Rebuild flag.
  oReader.read( '<H' ) # Unknown.
  oReader.read( '<B' ) # Change count.
  oReader.read( '<B' ) # Unknown.
  oReader.read( '<B' ) # Unknown.
  oReader.read( '<I' ) # ** table name.
  oReader.read( '<I' ) # * list of field identifiers.
  ##  Write-protection byte must be exactly 0 or 1.
  ABOUT = { 0: False, 1: True }
  nData = oReader.read( '<B' )
  if nData not in ABOUT:
    raise Error( MSG_ERR_FILE.format( fp ) )
  oDb.writeProtected = ABOUT[ nData ]
  oDb.versionCommon = oReader.read( '<B' )
  oReader.read( '<H' ) # Unknown.
  oReader.read( '<B' ) # Unknown.
  ##  Any auxiliary passwords or crypt info mean the file is encrypted,
  ##  which this reader does not support.
  nAuxiliaryPassCount = oReader.read( '<B' )
  if 0 != nAuxiliaryPassCount:
    raise Error( MSG_ERR_ENCRYPTION )
  oReader.read( '<H' ) # Unknown.
  nCryptInfoFieldPtr = oReader.read( '<I' )
  if 0 != nCryptInfoFieldPtr:
    raise Error( MSG_ERR_ENCRYPTION )
  oReader.read( '<I' ) # * crypt info field end.
  oReader.read( '<B' ) # Unknown.
  oDb.nextAutoInc = oReader.read( '<I' )
  oReader.read( '<H' ) # Unknown.
  oReader.read( '<B' ) # Index update flag.
  oReader.readArray( 5 ) # Unknown.
  oReader.read( '<B' ) # Unknown.
  oReader.read( '<H' ) # Unknown.

  ##  4+ data file header (and pyparadox reads only data files).
  oDb.versionData = oReader.read( '<H' )
  nData = oReader.read( '<H' )
  ##  The data version appears twice and must agree.
  if nData != oDb.versionData:
    raise Error( MSG_ERR_FILE.format( fp ) )
  oReader.read( '<I' ) # Unknown.
  oReader.read( '<I' ) # Unknown.
  oReader.read( '<H' ) # Unknown.
  oReader.read( '<H' ) # Unknown.
  oReader.read( '<H' ) # Unknown.
  oDb.codepage = oReader.read( '<H' )
  oReader.read( '<I' ) # Unknown.
  oReader.read( '<H' ) # Unknown.
  oReader.readArray( 6 ) # Unknown.

  ##  Fields
  for i in range( oDb.fieldsCount ):
    oField = CField()
    oField.type = oReader.read( '<B' )
    oField.size = oReader.read( '<B' )
    oDb.fields.append( oField )

  oReader.read( '<I' ) # Table name pointer.
  oReader.readArray( oDb.fieldsCount * 4 ) # Field name pointers.

  ##  Table name as original file name with extension. Padded with zeroes.
  sTableName = ""
  while True:
    nChar = oReader.read( '<B', f_dontmove = True )
    if 0 == nChar:
      break
    sTableName += chr( oReader.read( '<B' ) )
  ##  Skip the zero padding that follows the table name.
  while True:
    nChar = oReader.read( '<B', f_dontmove = True )
    if 0 != nChar:
      break
    oReader.read( '<B' )
  oDb.tableName = sTableName

  ##  Field names.
  for oField in oDb.fields:
    oField.name = oReader.readStr()
  if len( oDb.fields ) != oDb.fieldsCount:
    raise Error( MSG_ERR_FILE.format( fp ) )

  oReader.readArray( oDb.fieldsCount * 2 ) # Field numbers.
  oDb.sortOrderTxt = oReader.readStr()

  ##  Data blocks starts at |header_size| offset.
  oReader.push( oDb.headerSize )

  if start != None and oDb.fields[ 0 ].type != CField.AUTOINCREMENT:
    raise Error( MSG_ERR_INCREMENTAL )

  ##  Records.
  nRemaining = oReader.size() - oReader.offset()
  nBlockSize = oDb.maxTableSize * 1024
  nBlocks = nRemaining // nBlockSize
  nOffsetStart = oReader.offset()
  ##  The data area must be an exact number of blocks.
  if 0 != nRemaining % nBlockSize:
    raise Error( MSG_ERR_FILE.format( fp ) )
  ##  Read blocks from end so we can pick new autoincrement fields fast.
  for nBlock in range( nBlocks - 1, -1, -1 ):
    oReader.push( nOffsetStart + nBlock * nBlockSize )
    oReader.read( '<H' ) # Unknown.
    oReader.read( '<H' ) # Block number.
    ##  Amount of data in additional to one record.
    nAddDataSize = oReader.read( '<h' )
    ##  Negative if block don't have records.
    if nAddDataSize >= 0:
      ##  Python 2 integer division: both operands are ints.
      nRecords = nAddDataSize / oDb.recordSize + 1
      ##  Read records in block from end so we pick newest first.
      for nRecord in range( nRecords - 1, -1, -1 ):
        oReader.push( oReader.offset() + nRecord * oDb.recordSize )
        oRecord = CRecord()
        for i, oField in enumerate( oDb.fields ):
          ##  Converting big database from start may take long time, external
          ##  shutdown can abort this process.
          if hasattr( shutdown, 'is_set' ) and shutdown.is_set():
            raise Shutdown()
          uVal = oReader.readField( oField )
          ##  Incremental mode, first field is autoincrement.
          if start != None and 0 == i:
            ##  All done while reading from the end?
            if uVal < start:
              return oDb
          oRecord.fields.append( uVal )
        ##  Insert at the front to restore on-disk order.
        oDb.records.insert( 0, oRecord )
        oReader.pop()
    oReader.pop()
  if len( oDb.records ) != oDb.recordsCount:
    raise Error( MSG_ERR_FILE.format( fp ) )

  return oDb
Example #33
0
    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, mtime=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may includes the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 0 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. 0 is no compression
        at all. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing.  All gzip compressed streams
        are required to contain a timestamp.  If omitted or None, the
        current time is used.  This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it.  The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().

        """

        # Make sure we don't inadvertently enable universal newlines on the
        # underlying file object - in read mode, this causes data corruption.
        if mode:
            mode = mode.replace('U', '')
        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            # Issue #13781: os.fdopen() creates a fileobj with a bogus name
            # attribute. Avoid saving this in the gzip header's filename field.
            if hasattr(fileobj, 'name') and fileobj.name != '<fdopen>':
                filename = fileobj.name
            else:
                filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'

        # Only the first character of the (cleaned) mode decides the
        # direction; 'r' selects READ, 'w'/'a' select WRITE.
        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = ""
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            # Negative wbits selects a raw deflate stream; the gzip
            # framing (header/trailer) is written by this class itself.
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise IOError, "Mode " + mode + " not supported"

        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime

        # For write mode the gzip member header can be emitted immediately.
        if self.mode == WRITE:
            self._write_gzip_header()
Example #34
0
    def __init__(self,
                 filename=None,
                 mode=None,
                 fileobj=None,
                 version=None,
                 file_headers=None,
                 compress=None):
        """
        Initialises a file like object that can be used to read or
        write Arc files. Works for both version 1 or version 2.

        This can be called similar to the builtin `file` constructor. 

        It can also just be given a fileobj which is a file like
        object that it will use directly for its work.

        The file_headers should contain the following fields used to
        create the header for the file. The exact fields used depends
        on whether v1 or v2 files are being created. If a read is
        done, the headers will be autopopulated from the first record.

           * ip_address - IP address of the machine doing the Archiving
           * date - Date of archival
           * org - Organisation that's doing the Archiving. 

        The version parameter tries to work intuitively as follows

            * If version is set to 'n' (where n is 1 or 2), the
              library configures itself to read and write version n
              ARC files.

                  * When we try to write a record, it will generate
                    and write a version n record.

                  * When we try to read a record, it will attempt to
                    parse it as a version n record and will error out
                    if the format is different.

            * If the version is unspecified, the library will
              configures itself as follows

                  * When we try to write a record, it will generate
                    and write a version 2 record.

                  * When we try to read a record, it will read out one
                    record and try to guess the version from it (for
                    the first read).
        
        """
        # file_headers previously defaulted to a shared mutable dict
        # ({}), so mutations leaked between instances; create a fresh
        # dict per instance instead.
        if file_headers is None:
            file_headers = {}

        if fileobj is None:
            fileobj = __builtin__.open(filename, mode or "rb")

        # A .gz filename implies compression unless the caller said
        # otherwise.
        if compress is None and filename and filename.endswith(".gz"):
            compress = True

        if compress:
            fileobj = gzip2.GzipFile(fileobj=fileobj, mode=mode)

        self.fileobj = fileobj

        if version is not None and int(version) not in (1, 2):
            raise TypeError("ARC version has to be 1 or 2")
        self.version = version
        self.file_headers = file_headers
        self.header_written = False
        self.header_read = False
Example #35
0
def open(file, new=2):
    import __builtin__
    f = __builtin__.open(file, 'r')
    print f.read()
    f.close()
Example #36
0
 def __init__(self, f):
     """Initialise from *f*: a filename (opened 'rb') or an open file."""
     if type(f) == type(''):
         self.initfp(__builtin__.open(f, 'rb'))
     else:
         # Assume *f* is already an open file object.
         self.initfp(f)
Example #37
0
def open(path):
    """Open *path* for text reading and wrap it in an UpperCaser."""
    raw = __builtin__.open(path, 'r')
    return UpperCaser(raw)
Example #38
0
 def __init__(self, path):
     """
     Opens the blockfile at *path* for reading (default text mode)
     and keeps the handle in self.f for later use.
     """
     self.f = __builtin__.open(path)
Example #39
0
"""Stuff to parse AIFF-C and AIFF files.
Example #40
0
 def __init__(self, f):
     """Initialise the writer from *f*, a filename or open file object.

     A string is opened in 'wb' mode and remembered in
     _i_opened_the_file so close() can dispose of it later.
     """
     if type(f) == type(''):
         opened = __builtin__.open(f, 'wb')
         self._i_opened_the_file = opened
         self.initfp(opened)
     else:
         self._i_opened_the_file = None
         self.initfp(f)
Example #41
0
 def open(self, name, mode):
     """Open *name* with *mode* and pass the handle through fileopen()."""
     import __builtin__
     handle = __builtin__.open(name, mode)
     return self.fileopen(handle)
Example #42
0
def open(name, mode):
    """Resolve *name* through path() and open it with the real builtin."""
    resolved = path(name)
    return builtins.open(resolved, mode)
Example #43
0
def open(file = None):
    """Open *file*, which may be an http(s) URL or a local path.

    When *file* is None it is obtained from getURL().  URLs are fetched
    with urllib2; anything else is opened as a local file.
    """
    # 'is None' instead of '== None': identity, not equality, is the idiom.
    if file is None:
        file = getURL()
    # startswith accepts a tuple of prefixes -- one call instead of two.
    if file.startswith(("http://", "https://")):
        return urllib2.urlopen(file)
    return __builtin__.open(file)
Example #44
0
def open(filename):
    """Search every sys.path entry for *filename* and open the first hit.

    Raises IOError when the file is not found on any sys.path entry.
    """
    for directory in sys.path:
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return __builtin__.open(candidate)
    raise IOError('File not found: ' + filename)
Example #45
0
def python2_style():
    """CodeQL points-to test case: Python 2 style `__builtin__` import.

    The trailing `#$` comments are CodeQL inline test expectations and
    must be left exactly as written.
    """
    # In Python 3, `__builtin__` has no special meaning.
    from __builtin__ import open  #$ use=moduleImport("__builtin__").getMember("open")
    open("hello.txt"
         )  #$ use=moduleImport("__builtin__").getMember("open").getReturn()
Example #46
0
def save(elf, file):

    fp = __builtin__.open(file, 'wb')
    fp.write(elf.save())
    fp.close()
    os.chmod(file, 0755)
Example #47
0
    def simulate(self, archFilename, progFilename):

        global compiledSimulation

        # Create the simulation script.
        simulationScript = ""

        if not archFilename == "":
            simulationScript += "mach " + archFilename + "\n"

        simulationScript = simulationScript + "prog " + progFilename + "\n"

        if access("simulate.ttasim", R_OK):
            script = __builtin__.open("simulate.ttasim", "r")
            simulationScript = simulationScript + script.read()
        else:
            simulationScript = simulationScript + "until 0\n"

        tryRemove("cyclecount")
        tryRemove('operations_executed')
        tryRemove('registers_written')
        tryRemove('registers_read')
        # Create a script that creates the stat files. Do not overwrite
        # possible existing files to allow the custom simulation
        # script to create a stats of its own (for example, in case of
        # wanting to include only a part of the simulated program in the stats)

        simulationScript = simulationScript + '''
if ![file exists cyclecount] {
set cycle_file [open cyclecount w] ; puts $cycle_file "[info proc cycles]"
flush $cycle_file
close $cycle_file
}
'''
        if not compiledSimulation:
            simulationScript += '''
		if ![file exists operations_executed] {
		set f [open operations_executed w] ; puts $f "[info stats executed_operations]"
		flush $f
		close $f
		}
		
		if ![file exists registers_written] {
		set f [open registers_written w] ; puts $f "[info stats register_writes]"
		flush $f
		close $f
		}
		
		if ![file exists registers_read] {
		set f [open registers_read w] ; puts $f "[info stats register_reads]"
		flush $f
		close $f
		}
		'''
        simulationScript = simulationScript + "quit\n"

        simulationCommand = simulatorExe

        if compiledSimulation:
            simulationCommand += " -q"

        exitOk, stdoutContents, stderrContents = runWithTimeout(
            simulationCommand, simulationTimeoutSec, simulationScript)

        if not exitOk:
            self.testFailed("simulation timeout")
            return False

        self.simStdOut = stdoutContents
        self.simStdErr = stderrContents

        verbose = ""
        if len(self.simStdOut) > 0:
            verbose = verbose + "stdout: "
            verbose = verbose + self.simStdOut

        if len(self.simStdErr) > 0:
            verbose = verbose + "stderr: "
            verbose = verbose + self.simStdErr
            self.testFailed("simulation error, stderr: " + self.simStdErr)
            return False

        def getStat(fileName):
            if access(fileName, R_OK):
                f = __builtin__.open(fileName, "r")
                try:
                    return float(f.read().strip())
                except:
                    pass
            else:
                return None

        self.lastStats = SimulationStats()
        self.lastStats.cycleCount = getStat('cyclecount')

        if self.lastStats.cycleCount is None:
            self.testFailed("simulation",
                            "failed to get cycle count " + verbose)
            return False

        if not exitOk:

            gotOutput = len(self.simStdOut) > 0 or len(self.simStdErr) > 0

            self.testFailed("simulation", verbose)
            return False

        self.lastStats.operationExecutions = getStat('operations_executed')
        self.lastStats.registerReads = getStat('registers_read')
        self.lastStats.registerWrites = getStat('registers_written')

        self.stats[archFilename] = self.lastStats
        return True
Example #48
0
File: fits.py Project: jbroll/imrpn
    def __init__(self, fp, primary=True, cards=None):
        """Build a FITS header from *fp*.

        fp      -- a filename or open file (the header cards are parsed
                   from it) or a numpy.ndarray (a header describing the
                   array is synthesised).
        primary -- when synthesising from an array: emit SIMPLE (primary
                   HDU) if True, otherwise an XTENSION IMAGE card.
        cards   -- extra cards (header, list, or dict) merged in without
                   overwriting existing keys.
        """

        self.card = []
        self.file = fp
        self.shape = []
        self.bitpix = 0
        # Defaults so BSCALE/BZERO/GCOUNT/PCOUNT lookups below never fail.
        self.head = {
            "GCOUNT": [0, 1],
            "PCOUNT": [0, 0],
            "BSCALE": [0, 1],
            "BZERO": [0, 1]
        }

        if type(fp) == str:
            fp = __builtin__.open(fp, "rb")

        if hasattr(fp, 'read'):
            try:
                self.hoff = fp.tell()
            except:
                self.hoff = 0  # unseekable stream; assume header at 0

            card = fp.read(80)

            if len(card) == 0: raise EOF
            if len(card) != 80: raise BadEOF

            if card[0:8] == "SIMPLE  ": pass
            elif card[0:8] == "XTENSION": pass
            else: raise Exception("This doesn't appear to be a FITS file")

            self.card.append(card)

            while 1:
                card = fp.read(80)

                if card[0:8] == "END     ": break

                self.card.append(card)

                if card[8] == "=":
                    name = card[0:8].strip()
                    valu = card[10:]

            while self.card[-1] == "":  # Delete blank cards after END
                # BUG FIX: was `self.card.pop[-1]`, which subscripts the
                # bound method and raises TypeError if ever executed.
                self.card.pop()

            self.ncard = len(self.card) + 1
            self.headbloks = ((self.ncard * 80) + (2880 - 1)) / 2880
            self.headbytes = self.headbloks * 2880

            if len(self.card) % 80 != 0:
                try:
                    fp.seek(self.hoff + self.headbytes, 0)
                except:
                    # Unseekable stream: consume the padding instead.
                    fp.read(self.headbytes - (len(self.card) + 1) * 80)

            try:
                self.doff = fp.tell()
            except:
                self.doff = 0

        elif isinstance(fp, numpy.ndarray):

            if primary:
                self.card.append(fmtcard("SIMPLE", True))
            else:
                self.card.append(fmtcard("XTENSION", "IMAGE"))

            self.card.append(fmtcard("BITPIX", dtype2bitpix[str(fp.dtype)]))

            naxis = fp.shape
            if len(naxis) == 1:  # Force NAXIS >= 2
                naxis = [naxis[0], 1]

            self.card.append(fmtcard("NAXIS", len(naxis)))

            #for i, j in enumerate(range(len(naxis)-1, -1, -1)) :
            for i in range(0, len(naxis)):
                axis = "NAXIS" + str(i + 1)
                self.card.append(fmtcard(axis, naxis[i]))

            self.ncard = len(self.card) + 1
            self.headbloks = ((self.ncard * 80) + (2880 - 1)) / 2880
            self.headbytes = self.headbloks * 2880

        else:
            raise Huh

        for i, card in enumerate(
                self.card):  # Hash card in head for easy lookup
            try:
                name, value, comment = parcard(card)
                self.head[name] = (i, value)
            except:
                pass  # non key=value cards (COMMENT/HISTORY) are skipped

        if cards != None:  # Mix in extra cards from user.
            if isinstance(cards, header):
                cards = cards.card

            if type(cards) == list:
                for card in cards:
                    name, value, comment = parcard(card)

                    if not name in self.head:
                        self.head[name] = (len(self.card), value)
                        self.card.append(card)

            elif type(cards) == dict:
                for name in cards.keys():
                    if not name in self.head:
                        self.head[name] = (len(self.card), cards[name])
                        self.card.append(fmtcard(name, cards[name], None))

            self.ncard = len(self.card) + 1
            self.headbloks = ((self.ncard * 80) + (2880 - 1)) / 2880
            self.headbytes = self.headbloks * 2880

        self.bscale = float(self.head["BSCALE"][1])  # Cache for easy use.
        self.bzero = float(self.head["BZERO"][1])
        self.bitpix = int(self.head["BITPIX"][1])
        self.pixlbytes = abs(self.bitpix / 8)

        if int(self.head["NAXIS"][1]) == 0 or int(self.head["GCOUNT"][1]) == 0:
            self.datapixls = 0
        else:
            self.datapixls = 1  # Compute data sizes

            for i in range(1, int(self.head["NAXIS"][1]) + 1):
                axis = int(self.head["NAXIS" + str(i)][1])
                setattr(self, "NAXIS" + str(i), axis)
                self.datapixls = self.datapixls * axis

                self.shape.insert(0, axis)

            self.datapixls = int(self.head["GCOUNT"][1]) * (
                int(self.head["PCOUNT"][1]) + self.datapixls)

        self.databytes = self.datapixls * self.pixlbytes
        self.databloks = (self.databytes + (2880 - 1)) / 2880
Example #49
0
# Derive per-review features from the token lists: TF-IDF scores and a
# space-joined token string for the vectorizer below.
reviews_df['tfidf'] = reviews_df.tokens.apply(lambda x: get_tfidf(x))
reviews_df['text_tokens'] = reviews_df.tokens.apply(lambda x: ' '.join(x))

print('[Info] Tokenize , pos and what not !', 'time from start', (time.time() - start_time))
print(reviews_df.head())

print('[Info] Ngram and hot encoding .. reducing features .. this step will take time', 'time from start',
      (time.time() - start_time))

# # build the feature matrices
# Word n-grams of length 1..5 over the joined token strings.
ngram_counter = CountVectorizer(ngram_range=(1, 5), analyzer='word')
ngram_counter.fit(reviews_df.text_tokens)

print('[Info] Ngram and hot encoding done ...saving ...', (time.time() - start_time))

# Persist the fitted vectorizer (cPickle: this is Python 2 code).
with open('ngram_counter' + city + '.pkl', 'wb') as fid:
    cPickle.dump(ngram_counter, fid)


print('[Info] Computing features and writing ...saving ...', (time.time() - start_time))
feature_names = ngram_counter.get_feature_names()

# Persist the feature-name vocabulary alongside the vectorizer.
with open('feature_names' + city + '.pkl', 'wb') as fid:
    cPickle.dump(feature_names, fid)


print('[Info] Computing SVC models ...', (time.time() - start_time))
# Training matrix and labels; `sentiment` is presumably the target
# column -- TODO confirm against the upstream dataframe construction.
X_train = ngram_counter.transform(reviews_df.text_tokens)
y_train = list(reviews_df.sentiment)

print('[Info] Total Features', len(feature_names), 'time from start', (time.time() - start_time))
Example #50
0
 def open(self, path, mode):
     """Open *path* with *mode*.

     Thin wrapper over __builtin__.open so unit tests can stub it out.
     """
     import __builtin__
     handle = __builtin__.open(path, mode)
     return handle
Example #51
0
    def __init__(self,
                 filename=None,
                 mode=None,
                 compresslevel=9,
                 fileobj=None,
                 buffersize=None,
                 chunksize=58315):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may includes the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 1 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression.  The default is 9.

        A nonzero buffersize argument instructs GZip to do buffered compression,
        allowing it to include a dictzip field in the header with flush points
        for random access.  The chunksize argument determines the distance between
        flush points; smaller values means faster random access but lower
        compression.  The default value is close to maximum compression.

        """

        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'

        # The first character of the mode decides read vs. write.
        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Set flag indicating normal gzip format
            self.dictzip = False
            self.extrabuf = ""
            self.extrasize = 0
            self.filename = filename

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            # A buffersize request turns on dictzip (random-access) output.
            if buffersize: self.dictzip = True
            else: self.dictzip = False
            self.compresslevel = compresslevel
            self.chunksize = chunksize
            self.buffersize = buffersize
            # dictzip's default chunk size of 58315 is too conservative
            if chunksize > 65400:
                raise IOError, "Chunk size " + str(
                    chunksize) + " is too large; maximum is 65400"
            if self.dictzip and buffersize // chunksize > 32764:
                raise IOError, "Buffer size " + str(
                    buffersize
                ) + " is too large; may result in too many chunks"

            self._init_write(filename)

        else:
            raise IOError, "Mode " + mode + " not supported"

        self.fileobj = fileobj
        self.offset = 0

        if self.mode == WRITE:
            if self.dictzip:
                # intialize write buffer
                self.writebuf = ''
            else:
                # for ordinary gzip files, write header now
                self._write_gzip_header()

        if self.mode == READ:
            # read the headers of all dictzip members, to build database of flush points
            self.memberoffset = [
            ]  # offset of member within uncompressed stream
            self.memberchlen = []  # chunk length
            self.memberflushpoints = [
            ]  # absolute flush points within this member
            self.dictzip = True
            # Remember the stream position; the scan below consumes it and
            # we seek back when done.
            pos = self.fileobj.tell()
            try:
                offset = 0
                while True:
                    self._iseof()
                    dictzipdata = self._read_gzip_header()
                    if dictzipdata:
                        chlen, flushpoints = dictzipdata
                        self.memberoffset.append(offset)
                        self.memberchlen.append(chlen)
                        # Flush points in the header are relative; rebase
                        # them to absolute file offsets.
                        for idx in range(len(flushpoints)):
                            flushpoints[idx] += self.fileobj.tell()
                        # keep flushpoints, including the one pointing beyond the data stream
                        self.memberflushpoints.append(flushpoints)
                        # point to length field at end of this member
                        # Add 4 to skip over the CRC32 field
                        # (I don't understand the "+2" -- header bytes?)
                        newpos = flushpoints[-1] + 2 + 4
                        self.fileobj.seek(newpos)
                        isize = U32(read32(self.fileobj)
                                    )  # will not exceed 2 Gb for dictzip files
                        offset += isize
                    else:
                        # First member without dictzip data: treat the
                        # whole stream as a plain gzip file.
                        self.dictzip = False
                        break
            except EOFError:
                pass
            self.uncompressed_length = offset
            self.fileobj.seek(pos)
Example #52
0
 def open(self, name, mode='r', bufsize=-1):
     """Open *name* (mode/bufsize as the builtin) via fileopen()."""
     import __builtin__
     raw = __builtin__.open(name, mode, bufsize)
     return self.fileopen(raw)
 def __init__(self, f):
     """Initialise from *f*, accepting a filename or an open file object."""
     if type(f) == type(''):
         import __builtin__
         handle = __builtin__.open(f, 'rb')
     else:
         handle = f
     self.initfp(handle)
    def update_config(self):
        """
        Update the configuration files according to the current
        in-memory SExtractor configuration.

        Writes four files named by the config: the convolution filter
        (FILTER_NAME), the output parameter list (PARAMETERS_NAME), the
        neural-network star/galaxy file (STARNNW_NAME) and the main
        configuration file (CONFIG_FILE).
        """

        # -- Write filter configuration file

        # First check the filter itself

        filter = self.config['FILTER_MASK']
        rows = len(filter)
        cols = len(filter[0])  # May raise ValueError, OK

        if PY3:
            filter_f = builtins.open(self.config['FILTER_NAME'], 'w')
        else:
            filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')

        filter_f.write("CONV NORM\n")
        filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
                       (rows, cols))
        for row in filter:
            filter_f.write(" ".join(map(repr, row)))
            filter_f.write("\n")

        filter_f.close()

        # -- Write parameter list file

        if PY3:
            parameters_f = builtins.open(self.config['PARAMETERS_NAME'], 'w')
        else:
            parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'],
                                            'w')
        for parameter in self.config['PARAMETERS_LIST']:
            # BUG FIX: this used `print >> parameters_f, parameter`, a
            # Python-2-only statement that cannot even parse alongside the
            # py3-style print(..., file=...) used below.  write() with a
            # trailing newline is equivalent on both.
            parameters_f.write("%s\n" % parameter)

        parameters_f.close()

        # -- Write NNW configuration file

        if PY3:
            nnw_f = builtins.open(self.config['STARNNW_NAME'], 'w')
        else:
            nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
        nnw_f.write(nnw_config)
        nnw_f.close()

        # -- Write main configuration file

        if PY3:
            main_f = builtins.open(self.config['CONFIG_FILE'], 'w')
        else:
            main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')

        for key in self.config.keys():
            # Special keys drive the writer itself and are not SExtractor
            # configuration parameters.
            if (key in SExtractor._SE_config_special_keys):
                continue

            if (key == "PHOT_AUTOPARAMS"):  # tuple instead of a single value
                value = " ".join(map(str, self.config[key]))
            else:
                value = str(self.config[key])

            print(("%-16s       %-16s # %s" %
                   (key, value, SExtractor._SE_config[key]['comment'])),
                  file=main_f)

        main_f.close()
Example #55
0
def _test():
    """Command-line smoke test for this module.

    Act like gzip; with -d, act like gunzip; with -D, act like dictzip.
    The input file is not deleted, however, nor are any other gzip
    options or features supported.
    """
    args = sys.argv[1:]
    decompress = args and args[0] == "-d"
    dictzip = args and args[0] == "-D"
    if decompress or dictzip:
        args = args[1:]
    if not args:
        # No file arguments: filter stdin to stdout.
        args = ["-"]
    for arg in args:
        if decompress:
            if arg == "-":
                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
                g = sys.stdout
            else:
                # Only .gz / .dz inputs are accepted for decompression.
                if arg[-3:] != ".gz" and arg[-3:] != ".dz":
                    print "filename doesn't end in .gz or .dz:", repr(arg)
                    continue
                f = open(arg, "rb")
                g = __builtin__.open(arg[:-3], "wb")
        else:
            # Compression: -D (dictzip) uses a large buffer and a .dz suffix.
            if dictzip:
                buffersize = 1000000
                ext = ".dz"
            else:
                buffersize = None
                ext = ".gz"
            if arg == "-":
                f = sys.stdin
                g = GzipFile(filename="",
                             mode="wb",
                             fileobj=sys.stdout,
                             chunksize=1000,
                             buffersize=buffersize)
            else:
                # Presumably the bare 'open' is this module's GzipFile-based
                # open (it accepts chunksize/buffersize) while __builtin__.open
                # is the real built-in; the module-level 'open' is outside
                # this view, so confirm.
                f = __builtin__.open(arg, "rb")
                g = open(arg + ext,
                         "wb",
                         chunksize=1000,
                         buffersize=buffersize)
        blocksize = 1024
        if False:
            # Plain streaming copy (deliberately disabled; kept for reference).
            while True:
                chunk = f.read(blocksize)
                if not chunk:
                    break
                g.write(chunk)
        else:
            # test the random access code
            ptr = 0
            while True:
                # The extra seek(0) exercises rewinding before each block read.
                f.seek(0)
                f.seek(ptr)
                chunk = f.read(blocksize)
                if not chunk:
                    break
                g.write(chunk)
                ptr += blocksize
        if g is not sys.stdout:
            g.close()
        if f is not sys.stdin:
            f.close()
    def __init__(self,
                 filename=None,
                 mode=None,
                 compresslevel=9,
                 fileobj=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may include the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 1 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression.  The default is 9.

        """

        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            # No file object supplied: open 'filename' ourselves and remember
            # the handle (myfileobj) so it can be disposed of later.
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            # Fall back to the underlying file's name for the gzip header.
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            # Inherit the mode from the file object when possible.
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'

        if mode[0:1] == 'r':
            # READ/WRITE are module-level mode constants (outside this view).
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            self.extrabuf = ""
            self.extrasize = 0
            self.filename = filename
            # Starts small, scales exponentially
            self.min_readsize = 100

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            # Raw deflate stream (negative wbits): the gzip header/trailer
            # are produced separately by this class.
            self.compress = zlib.compressobj(compresslevel, zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL, 0)
        else:
            # Python-2-only raise syntax (consistent with the py2 print
            # statements used elsewhere in this module).
            raise IOError, "Mode " + mode + " not supported"

        self.fileobj = fileobj
        self.offset = 0

        if self.mode == WRITE:
            self._write_gzip_header()
    # Per-business feature columns derived from the tokenized review text:
    # 3-gram sequences plus 2- and 3-item frequent itemsets.
    # NOTE(review): this indented chunk is the body of a loop whose header is
    # outside this view; gp_cy/count/total/lis/word_dictionary/business_id are
    # all defined there.
    gp_cy['sequence_3'] = gp_cy.split_text.apply(lambda x: get_sequences(x, 3))
    gp_cy['frequent_item_2'] = gp_cy.split_text.apply(lambda x: frequent_itemset(x, 2))
    gp_cy['frequent_item_3'] = gp_cy.split_text.apply(lambda x: frequent_itemset(x, 3))

    # gp_cy['association_rules'] = gp_cy.sequence_3.apply(lambda x: get_association_rules(x))

    # gp_cy['frequent_item_2'] = gp_cy.frequent_item_2.apply(lambda x: from_set_(x))
    # gp_cy['frequent_item_3'] = gp_cy.frequent_item_3.apply(lambda x: from_set_(x))
    #
    # gp_cy['sequence_3'] = gp_cy.sequence_3.apply(lambda x: list(x))
    # gp_cy['sequence_2'] = gp_cy.sequence_2.apply(lambda x: list(x))
    gp_cy['business_id'] = business_id
    # Every ~1000 iterations: print progress and checkpoint the word
    # dictionary to disk.
    if count % 1000 == 1:
        print("[Info] count = {count} stage = {stage}".format(count=count, stage='ALL ') + 'Total ' + str(
            total - count) + " done " + str(len(lis)))

        # NOTE(review): the file handle is never closed explicitly; this
        # relies on CPython refcounting to flush and close it.
        open("data/word_dict.json", "w+").write(json.dumps(word_dictionary))

    # Accumulate processed chunks into one DataFrame.
    if df is None:
        df = gp_cy
    else:
        df = pd.concat([gp_cy, df])
    count += 1

    # Flush accumulated rows to disk in >1000-row pickles (despite the
    # '.json' suffix these are pandas pickles, not JSON).
    if len(df) > 1000:
        df.to_pickle('data/review_proceesed_'+str(count)+".json")
        df = None

# Final flush after the loop.
# NOTE(review): if the last in-loop flush left df = None, this raises
# AttributeError (None has no to_pickle) -- confirm intended behavior.
df.to_pickle('data/review_proceesed_'+str(count)+".json")

open("data/word_dict.json", "w+").write(json.dumps(word_dictionary))
Example #58
0
def exportObjectToPOVRAY(fileName, objectName, projectObjects):
    """Export meshed project objects as a POV-Ray include (.inc) file.

    Parameters:
        fileName: output path; an '.inc' suffix is appended when missing.
        objectName: package name looked up in the parts database to obtain
            the placement (x/y/z offsets and rotations) baked into the macro.
        projectObjects: objects handed to meshObjects() for tessellation.

    Side effects: writes fileName. On any error a warning is printed to the
    FreeCAD console and the function returns None.
    """
    if len(projectObjects) == 0:
        FreeCAD.Console.PrintWarning("No objects found!\n")

    outPutString = meshObjects(projectObjects)
    if outPutString == "":
        # Nothing could be meshed -- no file is written.
        return

    if not fileName.lower().endswith('inc'):
        fileName = fileName + '.inc'
    #
    try:
        partsManagingC = partsManaging()
        partsManagingC.setDatabase()
        packageData = partsManagingC.__SQL__.findPackage(objectName, "*")

        if packageData[0]:
            # Placement from the parts database: x, y, z then rx, ry, rz.
            newX = packageData[2][2]
            newY = packageData[2][3]
            newZ = packageData[2][4]
            newRX = packageData[2][5] + 90  # extra 90 deg about X
            newRY = packageData[2][6]
            newRZ = packageData[2][7]
        else:
            # Unknown package: no offset, no rotation.
            newX = newY = newZ = 0
            newRX = newRY = newRZ = 0
        #
        # POV-Ray macro identifiers cannot contain '-'.
        objectNameFormat = objectName.replace('-', '')
        #
        # 'with' guarantees the handle is closed even when a write raises
        # (the original leaked the open file on any exception after open()).
        with __builtin__.open(fileName, "w") as plik:
            plik.write(
                '''// ////////////////////////////////////////////////////////////
// 
// Add to file e3d_tools.inc
// #include "{0}"
// 
// ////////////////////////////////////////////////////////////

// ////////////////////////////////////////////////////////////
// 
// Add to file 3dusrpac.dat
// {1}:0:1:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:FC_obj_{2}(::
//
// ////////////////////////////////////////////////////////////

'''.format(fileName, objectName, objectNameFormat))

            plik.write('''
#macro FC_obj_%s(value)
union {
''' % objectNameFormat)

            plik.write(outPutString + "\n")
            plik.write('''}''')
            # NOTE(review): translate order is <x, z, y> ({3},{5},{4}) --
            # presumably the FreeCAD Z-up -> POV-Ray Y-up axis swap; confirm.
            plik.write('''
    rotate<{0},{1},{2}>
    translate<{3},{5},{4}>
#end'''.format(newRX, newRY, newRZ, newX, newY, newZ))
    except Exception as e:
        FreeCAD.Console.PrintWarning("{0} \n".format(e))
        return
Example #59
0
# Hex digest string -> list of 2-char byte strings -> each group of four
# bytes reversed (little-endian), joined, parsed base-16 -> hex strings.
# (Coconut-compiled code: _coconut_partial(int, {1: 16}, 2) is int(_, 16).)
hashes = '17ca2b6ea517007c7ed8e2731665fa10a6efe132cbedfc3becf9aa60a41970e9c96a5f4d6e0b2f49'
hashes = [hashes[i * 2:(i + 1) * 2] for i in range(len(hashes) // 2)]
hashes = [(_coconut_partial(int, {1: 16},
                            2))((''.join)(hashes[i * 4:(i + 1) * 4][::-1]))
          for i in range(len(hashes) // 4)]
hashes = (list)(map(hex, hashes))

# Flatten the line lists of the five API_Hash_*.txt files into one list of
# known API-hash entries (each file read whole, then split on newlines).
# NOTE(review): the opened files are never closed explicitly.
api_hashes = reduce(
    _coconut.operator.add,
    map(
        _coconut_forward_compose(_coconut.operator.methodcaller("read"),
                                 _coconut.operator.methodcaller("split",
                                                                '\n')),
        [
            open('./WindowsAPIhash-master/API_Hash_{_coconut_format_0}.txt'.
                 format(_coconut_format_0=(i + 1))) for i in range(5)
        ]))


def find_hash(h):
    """Return the first api_hashes entry whose lowercased text contains h.

    Returns None when no entry matches.
    """
    matches = (entry for entry in api_hashes if h in entry.lower())
    return next(matches, None)


# Keep only the API names whose hash was recognized; note that on Python 3
# 'filter' is lazy, so 'funcs' would be a one-shot iterator there.
funcs = filter(lambda x: x is not None, map(find_hash, hashes))

for idx, func in enumerate(funcs):
    print(
        '0x46d + {_coconut_format_0} | {_coconut_format_1}:\t{_coconut_format_2}'
Example #60
0
    def __init__(self, dev=None):
        '''
        Searches for EYES hardware on USB-to-Serial adapters. Presence
        of the device is done by reading the version string. Timeout
        set to 4 sec TODO : Supporting more than one EYES on a PC to
        be done. The question is how to find out whether a port is
        already open or not, without doing any transactions to it.
        '''

        if os.name == 'nt':  # for Windows machines, search COM1 to COM255
            device_list = []
            for k in range(1, 255):
                s = 'COM%d' % k
                device_list.append(s)
            for k in range(1, 11):
                device_list.append(k)

        # NOTE(review): 'device_list' built above is never used -- the scan
        # below always iterates 'linux_list' (defined outside this view), so
        # the Windows branch appears to be dead code and would likely raise
        # NameError on nt if linux_list is not defined there; confirm.
        for dev in linux_list:
            try:
                handle = serial.Serial(dev, BAUDRATE, stopbits=1,
                                       timeout=0.3)  # 8, 1, no parity
                # print 'OP ', dev
            except:
                # Port does not exist / cannot be opened: try the next one.
                continue

            self.msg = 'Port %s is existing ' % dev
            if handle.isOpen() != True:
                print 'but could not open'
                continue
            self.msg += 'and opened. '
            # Drain any stale input before probing for the version string.
            handle.flush()
            time.sleep(0.5)
            while handle.inWaiting() > 0:
                handle.flushInput()
            handle.write(chr(GETVERSION))
            res = handle.read(1)
            ver = handle.read(5)  # 5 character version number
            if ver[:2] == 'ej':
                # Found an EYES Junior: keep the handle and port name.
                self.device = dev
                self.fd = handle
                self.version = ver
                handle.timeout = 4.0  # r2rtime on .7 Hz require this
                self.msg += 'Found EYES Junior version ' + ver
                try:
                    # Optional calibration file: one line with two
                    # slope/intercept pairs (m1 c1 m2 c2).
                    f = __builtin__.open('eyesj.cal', 'r')
                    ss = f.readline().split()
                    m1 = float(ss[0])
                    c1 = float(ss[1])
                    m2 = float(ss[2])
                    c2 = float(ss[3])
                    f.close()
                    # Nominal 12-bit ADC mapping: counts 0..4095 -> -5..+5 V.
                    m = 10.0 / 4095
                    c = -5.0
                    dm = m * 0.02  # maximum 2% deviation
                    dc = 5 * 0.02
                    # print m1, c1, m2, c2, dm, dc
                    # Accept the file only when both channels are within 2%
                    # of the nominal slope and offset.
                    if abs(m1 - m) < dm and abs(m2 - m) < dm and \
                       abs(c1 - c) < dc and abs(c2 - c) < dc:
                        self.m12[1] = m1
                        self.c[1] = c1
                        self.m12[2] = m2
                        self.c[2] = c2
                        self.m8[1] = m1 * 4095. / 255
                        self.m8[2] = m2 * 4095. / 255
                        # print 'Loaded Calibration from File', m1, c1, m2, c2
                except:
                    print 'Calibration data NOT found. \
You may run Calibrate program'

                return  # Successful return
            else:  # If it is not our device, so close the file
                handle.close()
        # NOTE(review): self.msg is only assigned inside the loop; if no port
        # opened at all, this print raises AttributeError -- confirm.
        print self.msg
        print 'No EYES Junior hardware detected'
        self.fd = None