Example #1
0
def like_recs():
    counter = 0
    try:
        while counter < 20:
            results = get_recs()
            liked = utils.read_file("liked")
            instagrams = utils.read_file("/Instagram/instagrams")
            for i in results:
                time.sleep(1)
                link = 'https://api.gotinder.com/like/{0}'.format(i["_id"])
                liking_header = {'X-Auth-Token': tinder_token,
                                 'Authorization': 'Token token="{0}"'.format(tinder_token).encode('ascii', 'ignore'),
                                 'firstPhotoID': ''+str(i['photos'][0]['id'])
                                 }
                likereq = requests.get(link, headers = liking_header)
                print i['name'] + ' - ' +  i['_id']
                print 'status: ' + str(likereq.status_code) + ' text: ' + str(likereq.text)
                liked += str(i['name']) + ' - ' + str(i['_id']) + ' - ' + str(i['photos'][0]['url']) + '\n'
                try:
                    if 'instagram' in i:
                        instagrams += str(i['instagram']['username'] + " ")
                    else:
                        print "no instagram mate soz"
                except KeyError as ex:
                    print 'nah mate'
                #print "photoid " + str(i['photos'][0]['id'])
            utils.write_file("liked", liked)
            utils.write_file("/Instagram/instagrams", instagrams)
            counter += 1

    except Exception as ex:
        print "hit an exception i guess"
        print ex
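Most of these snippets lean on small read_file/write_file helpers from a project-local utils module, and the helpers differ from project to project (some return a string, others a list of lines or a generator). A minimal string-based sketch, offered purely as an assumption and not taken from any of the projects shown here:

import os

def read_file(path):
    # Return the whole file as a string, or '' if it does not exist yet.
    if not os.path.exists(path):
        return ''
    with open(path) as f:
        return f.read()

def write_file(path, content):
    # Overwrite the file with the given content.
    with open(path, 'w') as f:
        f.write(content)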
Example #2
0
    def test_bcache_status(self):
        succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None)
        self.assertTrue(succ)
        self.assertTrue(dev)
        self.bcache_dev = dev

        _wait_for_bcache_setup(dev)

        # should work with both "bcacheX" and "/dev/bcacheX"
        status = BlockDev.kbd_bcache_status(self.bcache_dev)
        self.assertTrue(status)
        status = BlockDev.kbd_bcache_status("/dev/" + self.bcache_dev)
        self.assertTrue(status)

        # check some basic values
        self.assertTrue(status.state)
        sys_state = read_file("/sys/block/%s/bcache/state" % self.bcache_dev).strip()
        self.assertEqual(status.state, sys_state)
        sys_block = read_file("/sys/block/%s/bcache/cache/block_size" % self.bcache_dev).strip()
        self.assertEqual(status.block_size, int(bytesize.Size(sys_block)))
        sys_size = self._get_size(self.bcache_dev)
        self.assertGreater(status.cache_size, sys_size)

        succ = BlockDev.kbd_bcache_destroy(self.bcache_dev)
        self.assertTrue(succ)
        self.bcache_dev = None
        time.sleep(1)

        wipe_all(self.loop_dev, self.loop_dev2)
Example #3
0
 def __init__(self):
     '''
     Constructor
     '''
     self.head_name = read_file(HEAD_PATH).strip('\n').rsplit('/', 1)[-1]
     self.head_path = os.path.join(REF_HEADS_DIR, self.head_name)
     self.head_commit = read_file(self.head_path).strip() if os.path.exists(self.head_path) else None
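For context, a .git/HEAD file normally holds a single line such as "ref: refs/heads/master", so the rsplit('/', 1)[-1] above reduces it to the bare branch name. A quick illustration with an assumed HEAD value:

head_contents = 'ref: refs/heads/master\n'
branch = head_contents.strip('\n').rsplit('/', 1)[-1]
print(branch)  # -> master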
Example #4
0
    def commit(self, msg, ref="HEAD"):
        cur_tree = self.index.do_commit(self.workspace)
        branch_name = read_file(os.path.join(self.workspace, ".git", "HEAD")).strip("\n").rsplit("/", 1)[-1]
        ref_path = os.path.join(self.workspace, ".git", "refs", "heads", branch_name)
        parent_sha1 = None
        if os.path.exists(ref_path):
            parent_sha1 = read_file(ref_path)
        committer_name = self.config.config_dict["user"]["name"]
        committer_email = "<%s>" % (self.config.config_dict["user"]["email"])
        commit_time = int(time.time())

        # TO FIX
        commit_timezone = time.strftime("%z", time.gmtime())

        commit = Commit(
            self.workspace,
            tree_sha1=cur_tree.sha1,
            parent_sha1=parent_sha1,
            name=committer_name,
            email=committer_email,
            timestamp=commit_time,
            timezone=commit_timezone,
            msg=msg,
        )
        write_object_to_file(commit.path, commit.content)
        write_to_file(ref_path, commit.sha1)
Example #5
0
def process(arg):
    global not_deleted_list, update_time
    curs = _connect.cursor()
    res = curs.execute("SELECT BookId FROM libbook WHERE NOT (Deleted&1) and FileType = 'fb2' ")
    not_deleted_list = curs.fetchall()
    not_deleted_list = set([i[0] for i in not_deleted_list])
    curs.execute('SELECT * FROM librusec')
    update_time = curs.fetchone()[0]
    for fn in walk(arg):
        for ftype, z_filename, data in read_file(fn, zip_charset='utf-8'):
            process_file(fn, ftype, z_filename, data)
    if options.search_deleted:
        deleted = set()
        for fn in walk(options.search_deleted):
            bookid = base_name(fn, '.fb2.zip')
            try:
                bookid = int(bookid)
            except ValueError:
                continue
            if bookid in not_deleted_list:
                deleted.add(fn)
        for fn in deleted:
            for ftype, z_filename, data in read_file(fn, zip_charset='utf-8'):
                ret = process_file(fn, ftype, z_filename, data)
                if ret:
                    print_log('restore deleted:', bookid)
    print
    print 'processed:', stats.total
    print 'passed:', stats.passed
    print 'fixed:', stats.fixed
    print 'errors:', stats.errors
    if options.not_found:
        fd = open(options.not_found, 'w')
        for bookid in not_deleted_list:
            print >> fd, bookid
Example #6
0
def verify_gpg_signature(detached_file, signature_file, key_file):
    signature = read_file(signature_file, binary=True)
    #not generic but ok if the signature is generated in linux
    #this is to avoid the signature to be misinterpreted when parsed in another OS
    signature = signature.replace('\n', os.linesep)
    key = read_file(key_file, binary=True)
    message = read_file(detached_file, binary=True)
    result = verify_str(signature, key, detached=message)
    return result == message
Example #7
0
def main():
    FILE_NAME = 'rates_to_watch.pkl'
    EMAIL = ''
    PASSWORD = ''

    arg_count = len(sys.argv)

    # Load trackers and record new rates to them.
    if arg_count == 1:

        # Check if tracking file exists.
        if os.path.isfile(FILE_NAME):
            rates_to_watch = utils.read_file(FILE_NAME)

            for rate in rates_to_watch:
                rate.add_rate(grab_rate(rate.get_currencies()))
                utils.write_file(FILE_NAME, rates_to_watch)

            report = generate_report(rates_to_watch)
            utils.send_email('Exchange Rate Report', report,
                             EMAIL, EMAIL, PASSWORD)

        # Tracking file doesn't exist, tell user to add trackers.
        else:
            print("Error: No currencies are being tracked.")
            print("Please run the following command:")
            print("python currency_report.py CURRENCY1 CURRENCY2")
            print("eg. python currency_report.py GBP JPY")

    # Create new currency tracker.
    elif arg_count == 3:
        __, currency_1, currency_2 = sys.argv

        with open('currencies.txt') as file:
            valid_currencies = file.read()

        # Check if currencies are valid.
        if currency_1 in valid_currencies and currency_2 in valid_currencies:
            currencies = (currency_1, currency_2)
            new_tracker = trackers.CurrencyTracker(currencies,
                                                   grab_rate(currencies))

            # Edit existing tracker file.
            if os.path.isfile(FILE_NAME):
                rates_to_watch = utils.read_file(FILE_NAME)
                rates_to_watch.append(new_tracker)
                utils.write_file(FILE_NAME, rates_to_watch)

            # Create new tracker file.
            else:
                rates_to_watch = [new_tracker]
                utils.write_file(FILE_NAME, rates_to_watch)
        else:
            print("Error: Invalid currency codes.")
    else:
        print("Error: Invalid number of arguments. {count}"
              "argument(s).".format(count=arg_count))
Example #8
0
 def test_rewrite_exists(self):
     """Test rewrite() with an existing file"""
     with utils.TempDir() as tmpdir:
         fname = os.path.join(tmpdir, 'fname')
         utils.write_file(fname, 'foo')
         python_tools.rewrite(fname, 'foo')
         self.assertEqual(utils.read_file(fname), 'foo')
         python_tools.rewrite(fname, 'bar')
         self.assertEqual(utils.read_file(fname), 'bar')
         python_tools.rewrite(fname, 'foo', verbose=True)
         self.assertEqual(utils.read_file(fname), 'foo')
Example #9
0
def like_recs_AI():
    try:
        folder = "ladiesAI/"
        extension = ".jpg"
        counter = 0
        #try:
        while True:
            results = get_recs()
            liked = utils.read_file("liked")
            instagrams = utils.read_file("/Instagram/instagrams")
            for i in results:
                time.sleep(1)
                like = 'https://api.gotinder.com/like/{0}'.format(i["_id"])
                dislike = 'https://api.gotinder.com/pass/{0}'.format(i["_id"])
                liking_header = {'X-Auth-Token': tinder_token,
                                 'Authorization': 'Token token="{0}"'.format(tinder_token).encode('ascii', 'ignore'),
                                 'firstPhotoID': '' + str(i['photos'][0]['id'])
                                 }
                #img - tinder img url, path = path on physical device, urllib saves the picture
                img = str(i['photos'][0]['url'])
                path = folder+img[27:51]+extension
                #print "image is " + str(i['photos'][0]['url'])
                urllib.urlretrieve(str(i['photos'][0]['url']), path)
                result = Predictor.predict(path)
                print 'AIresult is: ' + result

                if result == "G":
                    req = requests.get(like, headers = liking_header)
                    print 'status: ' + str(req.status_code) + ' text: ' + str(req.text)
                elif result == "B":
                    req = requests.get(dislike, headers = liking_header)
                    liked += str(i['name']) + ' - ' + str(i['_id']) + ' - ' + str(i['photos'][0]['url']) + '\n'
                    print 'status: ' + str(req.status_code) + ' text: ' + str(req.text)

                print i['name'] + ' - ' + i['_id']
                try:
                    if 'instagram' in i:
                        instagrams += str(i['instagram']['username'] + " ")
                    else:
                        print "no instagram mate soz"
                except KeyError as ex:
                    print 'nah mate'
                    # print "photoid " + str(i['photos'][0]['id'])
            utils.write_file("liked", liked)
            utils.write_file("/Instagram/instagrams", instagrams)
            counter += 1

    except Exception as ex:
        print "hit an exception i guess"
        print ex
Example #10
0
    def initialize_for_query(self, genre_file, movie_file, actors_file):
        print 'reading genre list...'
        self.genre_dicts = utils.read_file(genre_file)
        for genre in self.genre_dicts:
            self.genres += genre.keys()[0]
            self.genres += '|'

        print 'recording movie data...'
        movies = utils.read_file(movie_file)
        record_movies(movies)

        print 'reading actors file...'
        actors = utils.read_file(actors_file)
        record_data(actors)
Example #11
0
    def _zram_get_stats_new(self):
        with _track_module_load(self, "zram", "_loaded_zram_module"):
            self.assertTrue(BlockDev.kbd_zram_create_devices(1, [10 * 1024**2], [2]))
            time.sleep(1)

        # XXX: this needs to get more complex/serious
        stats = BlockDev.kbd_zram_get_stats("zram0")
        self.assertTrue(stats)

        # /dev/zram0 should work too
        stats = BlockDev.kbd_zram_get_stats("/dev/zram0")
        self.assertTrue(stats)

        self.assertEqual(stats.disksize, 10 * 1024**2)
        # XXX: 'max_comp_streams' is currently broken on rawhide
        # https://bugzilla.redhat.com/show_bug.cgi?id=1352567
        # self.assertEqual(stats.max_comp_streams, 2)
        self.assertTrue(stats.comp_algorithm)

        # read 'num_reads' and 'num_writes' from '/sys/block/zram0/stat'
        sys_stats = read_file("/sys/block/zram0/stat").strip().split()
        self.assertGreaterEqual(len(sys_stats), 11)  # 15 stats since 4.19
        num_reads = int(sys_stats[0])
        num_writes = int(sys_stats[4])
        self.assertEqual(stats.num_reads, num_reads)
        self.assertEqual(stats.num_writes, num_writes)

        # read 'orig_data_size', 'compr_data_size', 'mem_used_total' and
        # 'zero_pages' from '/sys/block/zram0/mm_stat'
        sys_stats = read_file("/sys/block/zram0/mm_stat").strip().split()
        self.assertGreaterEqual(len(sys_stats), 7)  # since 4.18 we have 8 stats
        orig_data_size = int(sys_stats[0])
        compr_data_size = int(sys_stats[1])
        mem_used_total = int(sys_stats[2])
        zero_pages = int(sys_stats[5])
        self.assertEqual(stats.orig_data_size, orig_data_size)
        self.assertEqual(stats.compr_data_size, compr_data_size)
        self.assertEqual(stats.mem_used_total, mem_used_total)
        self.assertEqual(stats.zero_pages, zero_pages)

        # read 'invalid_io' and 'num_writes' from '/sys/block/zram0/io_stat'
        sys_stats = read_file("/sys/block/zram0/io_stat").strip().split()
        self.assertEqual(len(sys_stats), 4)
        invalid_io = int(sys_stats[2])
        self.assertEqual(stats.invalid_io, invalid_io)

        with _track_module_load(self, "zram", "_loaded_zram_module"):
            self.assertTrue(BlockDev.kbd_zram_destroy_devices())
Example #12
0
    def test_luks2_integrity(self):
        """Verify that we can get create a LUKS 2 device with integrity"""

        if not BlockDev.utils_have_kernel_module("dm-integrity"):
            self.skipTest('dm-integrity kernel module not available, skipping.')

        extra = BlockDev.CryptoLUKSExtra()
        extra.integrity = "hmac(sha256)"

        succ = BlockDev.crypto_luks_format(self.loop_dev, "aes-cbc-essiv:sha256", 512, PASSWD, None, 0,
                                           BlockDev.CryptoLUKSVersion.LUKS2, extra)
        self.assertTrue(succ)

        succ = BlockDev.crypto_luks_open(self.loop_dev, "libblockdevTestLUKS", PASSWD, None, False)
        self.assertTrue(succ)

        info = BlockDev.crypto_integrity_info("libblockdevTestLUKS")
        self.assertIsNotNone(info)

        self.assertEqual(info.algorithm, "hmac(sha256)")

        # get integrity device dm name
        _ret, int_name, _err = run_command('ls /sys/block/%s/holders/' % self.loop_dev.split("/")[-1])
        self.assertTrue(int_name)  # true == not empty

        tag_size = read_file("/sys/block/%s/integrity/tag_size" % int_name)
        self.assertEqual(info.tag_size, int(tag_size))

        succ = BlockDev.crypto_luks_close("libblockdevTestLUKS")
        self.assertTrue(succ)
Example #13
0
    def stage(self, files):
        try:
            for file in files:
                content = read_file(file)
                blob = Blob(self.workspace, content)
                if not os.path.exists(blob.path):
                    write_object_to_file(blob.path, blob.content)
                stat = os.stat(os.path.join(self.workspace, file))
                self.index.add_entry(
                    file,
                    ctime=stat.st_ctime,
                    mtime=stat.st_mtime,
                    dev=stat.st_dev,
                    ino=stat.st_ino,
                    mode=cal_mode(stat.st_mode),
                    uid=stat.st_uid,
                    gid=stat.st_gid,
                    size=stat.st_size,
                    sha1=blob.sha1,
                    flags=0,
                )
            self.index.write_to_file()

        except Exception, e:
            print "stage file %s error: %s" % (file, e)
Example #14
0
def authenticate(domain=None,token_folder=None):
    '''authenticate will authenticate the user with Singularity Hub. This means
    first trying to obtain the token from the environment, then falling back to
    reading it from the token file, and finally telling the user how to obtain one.
    :param domain: the domain to direct the user to for the token, default is api_base
    :param token_folder: the location of the token file, default is $HOME (~/)
    '''
    # Attempt 1: Get token from environmental variable
    token = os.environ.get("SINGULARITY_TOKEN",None)

    if token == None:
        # Is the user specifying a custom home folder?
        if token_folder == None:
            token_folder = os.environ["HOME"]

        token_file = "%s/.shub" %(token_folder)
        if os.path.exists(token_file):
            token = read_file(token_file)[0].strip('\n')
        else:
            if domain == None:
                domain = api_base
            print('''Please obtain token from %s/token
                     and save to .shub in your $HOME folder''' %(domain))
            sys.exit(1)
    return token
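A hedged usage sketch: the token comes from the SINGULARITY_TOKEN environment variable when set, otherwise from the .shub file in the home folder (the value below is only illustrative):

import os
os.environ['SINGULARITY_TOKEN'] = 'example-token'  # illustrative value; short-circuits the file lookup
token = authenticate()
print(token)  # -> example-token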
Example #15
0
    def _image2XMLhelper(self, image_xml, output_xmls, qemu=False):
        image2guestdir = self.basedir + "image2guest/"
        image = virtinst.ImageParser.parse_file(self.basedir + image_xml)
        if type(output_xmls) is not list:
            output_xmls = [output_xmls]

        conn = qemu and self.qemuconn or self.conn
        caps = qemu and self.qemucaps or self.caps
        gtype = qemu and "qemu" or "xen"

        for idx in range(len(output_xmls)):
            fname = output_xmls[idx]
            inst = virtinst.ImageInstaller(image, caps, boot_index=idx,
                                           conn=conn)

            utils.set_conn(conn)

            if inst.is_hvm():
                g = utils.get_basic_fullyvirt_guest(typ=gtype)
            else:
                g = utils.get_basic_paravirt_guest()

            g.installer = inst
            g._prepare_install(None)

            actual_out = g.get_config_xml(install=False)
            expect_file = os.path.join(image2guestdir + fname)
            expect_out = utils.read_file(expect_file)
            expect_out = expect_out.replace("REPLACEME", os.getcwd())

            utils.diff_compare(actual_out,
                               expect_file,
                               expect_out=expect_out)

            utils.reset_conn()
Example #16
0
    def test_header(self):
        """Test make_all_header.py script"""
        with utils.TempDir() as tmpdir:
            topdir = os.path.join(tmpdir, 'include', 'test')
            os.makedirs(topdir)
            os.mkdir(os.path.join(topdir, 'subdir'))
            explicit_h = os.path.join(topdir, 'explicit.h')
            sub_h = os.path.join(topdir, 'subdir', 'sub.h')
            sub_deprec_h = os.path.join(topdir, 'subdir', 'deprecated.h')
            utils.write_file(explicit_h, '')
            utils.write_file(sub_h, "sub_h")
            utils.write_file(sub_deprec_h, "DEPRECATED_HEADER")
            p = subprocess.Popen([header_py, 'include/test.h', 'bar',
                                  'include/test/explicit.h',
                                  'include/test/subdir'], cwd=tmpdir)
            stdout, stderr = p.communicate()
            self.assertEqual(p.returncode, 0)
            self.assertEqual(utils.read_file(os.path.join(tmpdir,
                                                          'include/test.h')),
                             """/**
 *  \\file test.h
 *  \\brief Include all non-deprecated headers in test.
 *
 *  Copyright 2007-2019 IMP Inventors. All rights reserved.
 */

#ifndef TEST_H
#define TEST_H
#include <bar/explicit.h>
#include <bar/sub.h>
#ifdef IMP_SWIG_WRAPPER
#include <bar/deprecated.h>
#endif
#endif /* TEST_H */
""")
Example #17
0
    def load_text(self, p, variant):
        filename = self.cache_dir + self.lang + '/' + str(p.latestRevision())

        if not os.path.exists(filename):
            html = self.get_html(p)
            new_html = common_html.get_head(u'TITLE') + u"\n<body>"  + html + u'\n</body>\n</html>'

            new_html = new_html.replace(u'&nbsp;', u' ')

            root = etree.fromstring(new_html.encode('utf-8'))
            exclude = set()
            html_id = self.config[variant]['modernize_div_id']

            for it in root.findall(".//{http://www.w3.org/1999/xhtml}div[@id='%s']" % html_id):
                exclude.add(it)

            text = self.get_etree_text(root, exclude)
            for d in self.config[variant]['transform']:
                text = re.sub(d[0], d[1], text)

            utils.write_file(filename, text)
        else:
            text = utils.read_file(filename)

        return text
Example #18
0
def load_configuration(configuration_file):
    default_tokens ={'http_port':9090,
                     'http_host':'localhost',
                     'log_level': 'DEBUG',
                     'log_level_db': 'ERROR',
                     'log_level_vimconn': 'DEBUG',
                    }
    try:
        #Check config file exists
        if not os.path.isfile(configuration_file):
            raise LoadConfigurationException("Error: Configuration file '"+configuration_file+"' does not exist.")
            
        #Read file
        (return_status, code) = utils.read_file(configuration_file)
        if not return_status:
            raise LoadConfigurationException("Error loading configuration file '"+configuration_file+"': "+code)
        #Parse configuration file
        try:
            config = yaml.load(code)
        except yaml.YAMLError, exc:
            error_pos = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            raise LoadConfigurationException("Error loading configuration file '"+configuration_file+"'"+error_pos+": content format error: Failed to parse yaml format")

        #Validate configuration file with the config_schema
        try:
            js_v(config, config_schema)
        except js_e.ValidationError, exc:
            error_pos = ""
            if len(exc.path)>0: error_pos=" at '" + ":".join(map(str, exc.path))+"'"
            raise LoadConfigurationException("Error loading configuration file '"+configuration_file+"'"+error_pos+": "+exc.message) 
Example #19
0
    def test_write_read_files(self):
        '''test_write_read_files will test the functions write_file and read_file
        '''
        print("Testing utils.write_file...")
        from utils import write_file
        import json
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_file(tmpfile,"hello!")
        self.assertTrue(os.path.exists(tmpfile))        

        print("Testing utils.read_file...")
        from utils import read_file
        content = read_file(tmpfile)[0]
        self.assertEqual("hello!",content)

        from utils import write_json
        print("Testing utils.write_json...")
        print("Case 1: Providing bad json")
        bad_json = {"Wakkawakkawakka'}":[{True},"2",3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)        
        with self.assertRaises(TypeError) as cm:
            write_json(bad_json,tmpfile)

        print("Case 2: Providing good json")        
        good_json = {"Wakkawakkawakka":[True,"2",3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_json(good_json,tmpfile)
        content = json.load(open(tmpfile,'r'))
        self.assertTrue(isinstance(content,dict))
        self.assertTrue("Wakkawakkawakka" in content)
Example #20
0
    def __init_from_file(self, filename):
        lines = read_file(filename)

        current_line = 0

        self.generation = int(lines[current_line])
        current_line += 1

        self.width, self.height = split_str_to_ints(lines[current_line])
        current_line += 1

        self.map = []
        self.history = []

        # current state

        for i in range(current_line, self.height + current_line):
            self.map.append(split_str_to_ints(lines[i]))

        current_line += self.height

        for i in range(current_line, len(lines), self.height):

            history_step = []

            for j in range(0, self.height):
                history_step.append(split_str_to_ints(lines[i + j]))

            self.history.append(history_step)
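The constructor above relies on a split_str_to_ints helper that is not shown; a minimal sketch, assuming each line holds whitespace-separated integers:

def split_str_to_ints(line):
    # '1 0 1' -> [1, 0, 1]
    return [int(token) for token in line.split()]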
Example #21
0
 def get_info(self, cd_or_iso_path):
     if (cd_or_iso_path, self.info_file) in Distro.cache:
         return Distro.cache[(cd_or_iso_path, self.info_file)]
     else:
         Distro.cache[(cd_or_iso_path, self.info_file)] = None
         if os.path.isfile(cd_or_iso_path):
             info_file = self.backend.extract_file_from_iso(
                 cd_or_iso_path,
                 self.info_file,
                 output_dir=self.backend.info.temp_dir,
                 overwrite=True)
         elif os.path.isdir(cd_or_iso_path):
             info_file = os.path.join(cd_or_iso_path, self.info_file)
         else:
             return
         if not info_file or not os.path.isfile(info_file):
             return
         try:
             info = read_file(info_file)
             info = self.parse_isoinfo(info)
         except Exception, err:
             log.error(err)
             return
         Distro.cache[(cd_or_iso_path, self.info_file)] = info
         return info
Example #22
0
 def get(self, request, app_name, app_path):
     if request.is_anonymous:
         return HttpResponse('', 'text/plain; charset=utf-8')
     git_runner = _make_git_runner(request, app_name)
     parts = [git_runner.run('diff', 'HEAD')]
     for status_line in git_runner.run(
         'status', '--porcelain', '--untracked-files=all').splitlines():
         if not status_line.startswith('?? '):
             continue
         escaped = status_line[3] == '"'
         path = status_line[4:-1] if escaped else status_line[3:]
         content = read_file(
             app_path.code + '/' +
             (path.decode('string_escape') if escaped else path))
         a_path = '"a/%s"' % path if escaped else 'a/' + path
         b_path = '"b/%s"' % path if escaped else 'b/' + path
         parts.append('diff --git %s %s\n' % (a_path, b_path))
         if content:
             parts.append('added new file\n--- /dev/null\n+++ %s\n' % b_path)
             if '\0' in content:
                 parts.append(
                     'Binary files /dev/null and %s differ\n' % b_path)
             else:
                 parts += ['+%s\n' % line for line in content.splitlines()]
                 if content[-1] != '\n':
                     parts.append('\\ No newline at end of file\n')
         else:
             parts.append('added new empty file\n')
     return HttpResponse(''.join(parts), 'text/plain; charset=utf-8')
Example #23
0
def convert_inc(fn, dst):
    text = utils.read_file(fn)
    lst = []
    for line in text.split('\n'):
        if line.startswith(';'):
            continue
        if line.find('  EQU ') > 0:
            line = line.replace('  EQU ', ';')
            t = line.split(';')
            t[0] = t[0].strip()
            t[1] = t[1].strip()
            lst.append([t[0], t[1]])
            
    f = open(dst, 'w+')
    #print >>f, 'defines = {'
    for t in lst:
        name = t[0]
        value = t[1]
        if value.find('H') >= 0: #"H'0F8C'"
            value = value.replace('H', '')
            value = value.replace('\'', '')
            value = '0x' + value
        else:
            value = hex(int(value))
        #name = '\'' + name + '\''
        print >>f, '%s:%s' % (name, value)
    #print >>f, '}'
    f.close()
Example #24
0
def test_speg(grammar_file):
    grammar, text, result = [
        read_file(grammar_file + postfix)
        for postfix in ('', '.txt', '.json')]
    speg = SPEG()
    ast = speg.parse(grammar, text)
    assert json.loads(ast.to_json()) == json.loads(result)
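The postfix list ('', '.txt', '.json') implies three files per test case: the grammar itself, the input text, and the expected AST as JSON. A hypothetical call with assumed file names:

# grammars/arith.peg       - the PEG grammar
# grammars/arith.peg.txt   - the input to parse
# grammars/arith.peg.json  - the expected AST as JSON
test_speg('grammars/arith.peg')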
Example #25
0
def genereHtml(d, i):
	s = utils.read_file("corpus/" + i)
	for j in d[i]:
		pattern = re.compile(j + ' ' , re.I)
		s = pattern.sub('<a href="keywords/' + j + '.html"/>' + j + "</a> ", s)
		# print j, "\n", s.encode('utf-8'), "\n\n"
	utils.write_file("wiki/" + i + ".html", s)
Example #26
0
 def check_rss(self):
     rss_cache_dir = config.cache_dir + os.sep + "rss"
     newest_item_written = False
     if config.rss_feeds:
         self.rootlog.debug("rss feeds found:" + str(config.rss_feeds))
         for name, feed in config.rss_feeds.items():
             last_cache_item = utils.read_file(rss_cache_dir + os.sep + name)
             f = feedparser.parse(feed)
             self.rootlog.debug(str(f["channel"]["title"] + " feed fetched"))
             if last_cache_item != None:
                 self.rootlog.debug("last_cache_item not None: " + last_cache_item)
                 for i in f["items"]:
                     if str(last_cache_item.strip()) == str(i["date"].strip()):
                         self.rootlog.debug("item found, aborting")
                         break
                     else:
                         if newest_item_written == False:
                             utils.write_file(rss_cache_dir + os.sep + name, i["date"].strip())
                             newest_item_written = True
                         # write date of this feed into file (not elegant)
                         text2chat = "".join(["[", name, "] ", i["title"], " ", i["link"]])
                         self.rootlog.debug(text2chat)
                         self.send(self.room_name, text2chat)
             else:
                 self.rootlog.debug("last_cache_item is None")
                 utils.write_file(rss_cache_dir + os.sep + name, f["items"][0]["date"])
Example #27
0
def dockerfile_to_singularity(dockerfile_path, output_dir=None):
    '''dockerfile_to_singularity will return a Singularity build file based on
    a provided Dockerfile. The build file string is always returned; if an
    output directory is specified, a file called Singularity is also written to
    output_dir
    :param dockerfile_path: the path to the Dockerfile
    :param output_dir: the output directory to write the Singularity file to
    '''
    if os.path.basename(dockerfile_path) == "Dockerfile":
        spec = read_file(dockerfile_path)
        # Use a common mapping
        mapping = get_mapping()
   
        # Put into dict of keys (section titles) and list of commands (values)
        sections = organize_sections(lines=spec,
                                     mapping=mapping)

        # We have to, by default, add the Docker bootstrap
        sections["bootstrap"] = ["docker"]

        # Put into one string based on "order" variable in mapping
        build_file = print_sections(sections=sections,
                                    mapping=mapping)
        if output_dir != None:
            write_file("%s/Singularity" %(output_dir),build_file)
            print("Singularity spec written to %s" %(output_dir))
        return build_file

    # If we make it here, something didn't work
    logger.error("Could not find %s, exiting.", dockerfile_path)
    return sys.exit(1)
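A hypothetical call with placeholder paths; the build file string is returned either way, and passing output_dir additionally writes it to a Singularity file in that directory:

build_file = dockerfile_to_singularity('/tmp/app/Dockerfile', output_dir='/tmp/app')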
Example #28
0
    def releases(self, qs):
        if 'offset' in qs:
            offset = int(qs['offset'][0])
        else:
            offset = 0

        project_name = qs['module'][0]

        if project_name == PUPPET_PROJECT_ERROR:
            # Simulate errors
            filename = PUPPET_RELEASES_ERROR
        elif project_name == PUPPET_PROJECT_NO_NAME:
            # Send release without name
            filename = PUPPET_RELEASES_NO_NAME
        elif project_name == PUPPET_PROJECT_VAGRANT:
            filename = PUPPET_RELEASES_VAGRANT
        elif offset < 20:
            filename = PUPPET_RELEASES_STDLIB_1
        else:
            filename = PUPPET_RELEASES_STDLIB_2

        filepath = os.path.join(TEST_FILES_DIRNAME, filename)
        json = read_file(filepath)

        self.send_response(200, 'Ok')
        self.end_headers()
        self.wfile.write(json)
Example #29
0
 def check_metalink(self, metalink, base_url, associated_task=None):
     if self.info.skip_md5_check:
         return True
     url = base_url +"/" + self.info.distro.metalink_md5sums
     metalink_md5sums = downloader.download(url, self.info.install_dir, web_proxy=self.info.web_proxy)
     url = base_url +"/" + self.info.distro.metalink_md5sums_signature
     metalink_md5sums_signature = downloader.download(url, self.info.install_dir, web_proxy=self.info.web_proxy)
     if not verify_gpg_signature(metalink_md5sums, metalink_md5sums_signature, self.info.trusted_keys):
         log.error("Could not verify signature for metalink md5sums")
         return False
     md5sums = read_file(metalink_md5sums)
     log.debug("metalink md5sums:\n%s" % md5sums)
     md5sums = dict([reversed(line.split()) for line in md5sums.replace('*','').split('\n') if line])
     hashsum = md5sums.get(os.path.basename(metalink))
     if not hashsum:
         log.error("Could not find %s in metalink md5sums)" % os.path.basename(metalink))
         return False
     hash_len = len(hashsum)*4
     if hash_len == 160:
         hash_name = 'sha1'
     elif hash_len in [224, 256, 384, 512]:
         hash_name = 'sha' + str(hash_len)
     else:
         hash_name = 'md5'
     hashsum2 = get_file_hash(metalink, hash_name)
     if hashsum != hashsum2:
         log.error("The %s of the metalink does not match (%s != %s)" % (hash_name, hashsum, hashsum2))
         return False
     return True
Example #30
0
def do_process_file(filename):
    errors = 0
    output = [('Validation of file '+filename, markup.h2)]
    for file_format, z_filename, data in read_file(filename):
        LogOptions.outfile = []
        out = []
        err = 0
        if z_filename is not None:
            out.append(('Zipped file found: '+z_filename, markup.h3))
        if file_format == 'error':
            out.append(('FATAL: read file error', markup.bad))
            err += 1
        else:
            c = check_file(data)
            out += LogOptions.outfile
            if c:
                out.append(('OK. This file is good', markup.good))
            else:
                errors += 1
                err += 1
                if not options.quiet:
                    out.append(('Some errors found', markup.bad))
        if err or not options.quiet:
            output += out
            output.append(markup.hr)
    return errors, output
Example #31
0
def find_root(file, d, name):
    root = None
    for line in read_file(file):
        if 'root=' in line or 'root =' in line:
            line = line.split()
            for i, val in enumerate(line):
                if 'root=' in val:
                    root = val.split('=')[-1][:-1]
    if root is None:
        root = 'initial_spectra'
    d[name][root] = {}
    d[name][root]['peaks'] = {}
    return d, root
Example #32
0
def start():
    utils.check_configure()
    if os.path.exists(child_path + "/script_file/routed"):
        os.chdir(child_path + "/script_file")
        routed_ports = utils.read_file("routed_ports").split(',')
        for i in routed_ports:
            utils.check_port_public(i, "jxwy_routed")
        os.chdir(parent_path)
        os.system("./jxwy_routed")
    if os.path.exists(child_path + "/script_file/all"):
        utils.check_single_process_port("jxwy_routed")
        os.chdir(parent_path)
        os.system("./jxwy_routed")
Example #33
0
 def test_make_files_no_json(self):
     """Test make_files() function, no JSON"""
     with utils.TempDir() as tmpdir:
         for f in [
                 os.path.join(tmpdir, x)
                 for x in ['foo.cpp', 'bar.cu', 'baz.py']
         ]:
             utils.write_file(f, "foo")
         setup_cmake_module.make_files(tmpdir)
         self.assertEqual(
             utils.read_file(os.path.join(tmpdir, 'Files.cmake')),
             'set(pyfiles "baz.py")\nset(cppfiles "foo.cpp")\n'
             'set(cudafiles "bar.cu")\n')
Example #34
0
def main():
    """
    Reads all event_data files,
    concatenates them to one dataframe
    and writes them into file.
    """
    print("Reading event_data files...")
    event_data_new = pd.concat([
        utils.read_file(file) for file in utils.get_files("../data/event_data")
    ])
    print("Writing event_data_new file...")
    event_data_new.to_csv("../data/event_data_new.csv", index=False)
    print("Done.")
Example #35
0
def test_do_incremental_cshd(options, verified_cshd_name, setup_tmpdir_param):
    tmpdir = setup_tmpdir_param
    root_dir = os.path.join(tmpdir, "tt")
    # use "" as last join to make sure tmpdir_failed_md5 ends in os.sep so it gets treated as
    # dir path and not as file path
    # When using copytree, you need to ensure that src exists and dst does not exist.
    # Even if the top level directory contains nothing, copytree won't work because it
    # expects nothing to be at dst and will create the top level directory itself.
    shutil.copytree(
        os.path.join(TESTS_DIR, "test_incremental_files", "with_cshd"),
        os.path.join(root_dir, ""))

    if options['incremental_skip_unchanged']:
        # modify files and set mtime to original -> they should not be included
        mod1 = os.path.join(root_dir, "new 2.txt")
        with open(mod1, "w") as f:
            f.write("MODIFIED1")
        os.utime(mod1, times=(1524334794.4067194, 1524334794.4067194))
        mod2 = os.path.join(root_dir, "sub1", "sub2", "new 3.txt")
        with open(mod2, "w") as f:
            f.write("MODIFIED2")
        os.utime(mod2, times=(1524334698.6291595, 1524334698.6291595))

    checksume_hlpr = ChecksumHelper(root_dir, hash_filename_filter=None)
    checksume_hlpr.options.update(options)
    assert os.path.isabs(checksume_hlpr.root_dir)

    incremental = checksume_hlpr.do_incremental_checksums("sha512")
    assert incremental is not None
    incremental.write()

    verified_cshd_contents = read_file(
        os.path.join(TESTS_DIR, "test_incremental_files", verified_cshd_name))

    generated_cshd_name = f"tt_{time.strftime('%Y-%m-%d')}.cshd"
    generated_cshd_contents = read_file(
        os.path.join(root_dir, generated_cshd_name))

    compare_lines_sorted(verified_cshd_contents, generated_cshd_contents)
Example #36
0
def upload():
    global new_data, src_data, new_data_filename, src_data_filename
    if 'new_file' in request.files:
        new_file_content = request.files['new_file']
        new_data_filename = new_file_content.filename
        new_data = utils.read_file(new_file_content.read(), new_data_filename.split('.')[-1])
        columns = utils.get_columns(new_data)
        return json.dumps(columns)

    elif 'new_file' in request.form:
        new_data = None
        return "new_data cleared"
    
    if 'src_file' in request.files:
        src_file_content = request.files['src_file']
        src_data_filename = src_file_content.filename
        src_data = utils.read_file(src_file_content.read(), src_data_filename.split('.')[-1])
        return "src_data uploaded: " + src_data_filename

    elif 'src_file' in request.form:
        src_data = None
        return "src_data cleared"
Example #37
0
def solve_from_file(input_file, output_directory, params=[]):
    print('Processing', input_file)

    input_data = utils.read_file(input_file)
    num_of_locations, num_houses, list_locations, list_houses, starting_car_location, adjacency_matrix = data_parser(input_data)
    car_path, drop_offs = solve(list_locations, list_houses, starting_car_location, adjacency_matrix, params=params)

    basename, filename = os.path.split(input_file)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    output_file = utils.input_to_output(input_file, output_directory)

    convertToFile(car_path, drop_offs, output_file, list_locations)
Example #38
0
def configure(args):

    # Get fullpath to each file, and concurrently check that exists
    defaultfile = get_fullpath(args.defaults) # ../src/lib/config_defaults.h
    infile = get_fullpath(args.infile)       # singularity.conf.in

    # Find define statements
    define_re = re.compile("#define ([A-Z_]+) (.*)")

    # Read in input and default files
    defaultfile = read_file(defaultfile)
    data = "".join(read_file(infile))

    # Lookup for values we want replaced
    lookup = {'0':'no',
              '1':'yes'}

    defaults = {}
    # Read in defaults to dictionary
    for line in defaultfile:
        match = define_re.match(line)
        if match:
            key, value = match.groups()

            # Maintain the original default set by user
            defaults[key] = value

            # Use parsed value for final config
            new_value = value.replace('"', '')
            if new_value in lookup:
                new_value = lookup[new_value]
            data = data.replace("@" + key + "@", new_value)

    # Write to output file
    outfile = "%s.tmp" %(args.outfile)
    write_file(outfile,data)
    os.rename(outfile, args.outfile)

    logger.info("*** FINISHED PYTHON CONFIGURATION HELPER ****\n")
Example #39
0
 def test(self, folder_path):
     correct_answers = 0
     total = 0
     for class_id in os.listdir(folder_path):
         for name_document in os.listdir(os.path.join(
                 folder_path, class_id)):
             text_document = utils.read_file(
                 os.path.join(folder_path, class_id, name_document))
             if self.__evaluate(text_document, class_id):
                 correct_answers += 1
             total += 1
     print("Result: %s of %s ==> %s %%" %
           (correct_answers, total, correct_answers * 100 / total))
Example #40
0
def test_do_incremental(setup_dir_to_checksum):
    checksume_hlpr, include_unchanged, root_dir = setup_dir_to_checksum
    assert os.path.isabs(checksume_hlpr.root_dir)

    incremental = checksume_hlpr.do_incremental_checksums("sha512",
                                                          single_hash=True)
    assert incremental is not None
    incremental.write()

    if include_unchanged:
        verified_sha_name = "tt_2018-04-22_inc_full.sha512"
    else:
        verified_sha_name = "tt_2018-04-22_inc.sha512"
    verified_sha_contents = read_file(
        os.path.join(TESTS_DIR, "test_incremental_files", verified_sha_name))

    # find written sha (current date is appended)
    generated_sha_name = f"tt_{time.strftime('%Y-%m-%d')}.sha512"
    generated_sha_contents = read_file(
        os.path.join(root_dir, generated_sha_name))

    compare_lines_sorted(verified_sha_contents, generated_sha_contents)
Example #41
0
def start():
    utils.check_configure()
    if os.path.exists(child_path + "/script_file/dbcached"):
        os.chdir(child_path + "/script_file")
        utils.check_port_public(utils.read_file("dbcached_port"),
                                "jxwy_dbcached")
        os.chdir(parent_path)
        os.system("./jxwy_dbcached")
    if os.path.exists(child_path + "/script_file/all") or os.path.exists(
            child_path + "/script_file/game_server"):
        utils.check_single_process_port("jxwy_dbcached")
        os.chdir(parent_path)
        os.system("./jxwy_dbcached")
Example #42
0
 def test_create_runscript(self):
     '''test_create_runscript should ensure that a runscript is generated
     with some command
     '''
     from docker.api import create_runscript
     cmd = "echo 'Hello World'"
     base_dir = tempfile.mkdtemp()
     runscript = create_runscript(cmd=cmd, base_dir=base_dir)
     self.assertTrue(os.path.exists(runscript))
     generated_cmd = read_file(runscript)[0]
     # Commands are always in format exec [] "$@"
     # 'exec echo \'Hello World\' "$@"'
     self.assertEqual('exec %s "$@"' % cmd, generated_cmd)
Example #43
0
def tests(input_file, params=[]):
    input_data = utils.read_file(input_file)
    number_of_kingdoms, list_of_kingdom_names, starting_kingdom, adjacency_matrix = data_parser(
        input_data)

    if (int(input_data[0][0]) not in RANGE_OF_INPUT_SIZES):
        print(
            f'Your input does not belong to the valid range of input sizes ({RANGE_OF_INPUT_SIZES})'
        )

    if not len(list_of_kingdom_names) == number_of_kingdoms:
        print(
            f'The number of kingdom names you listed ({len(list_of_kingdom_names)}) differs from the number you gave on the first line ({number_of_kingdoms})'
        )

    if not len(adjacency_matrix) == len(
            adjacency_matrix[0]) == number_of_kingdoms:
        print(
            f'The dimensions of your adjacency matrix ({len(adjacency_matrix)} x {len(adjacency_matrix[0])}) do not match the number of kingdoms you provided ({number_of_kingdoms})'
        )

    if not all(entry == 'x' or (type(entry) is float and entry > 0 and
                                entry <= 2e9 and decimal_digits_check(entry))
               for row in adjacency_matrix for entry in row):
        print(
            f'Your adjacency matrix may only contain the character "x", or strictly positive integers less than 2e+9, or strictly positive floats with less than 5 decimal digits'
        )

    if not len(set(list_of_kingdom_names)) == len(list_of_kingdom_names):
        print('Your kingdom names are not distinct')

    if not all(name.isalnum() and len(name) <= MAX_NAME_LENGTH
               for name in list_of_kingdom_names):
        print(
            f'One or more of your kingdom names are either not alphanumeric or are above the max length of {MAX_NAME_LENGTH}'
        )

    if not starting_kingdom in list_of_kingdom_names:
        print('Your starting kingdom is not in the list of kingdom names')

    G = adjacency_matrix_to_graph(adjacency_matrix)

    if not nx.is_connected(G):
        print('Your graph is not connected')

    if not is_metric(G):
        print('Your graph is not metric')

    print(
        "If you've received no other error messages, then your input is valid!"
    )
Example #44
0
def build_image_with_heat(openstack_client, image_name, flavor_name,
                          dns_nameservers):
    template = None
    template_filename = cfg.CONF.image_builder_template
    try:
        am = lambda f: config.IMAGE_BUILDER_TEMPLATES + '%s.yaml' % f
        template = utils.read_file(template_filename, alias_mapper=am)
    except IOError:
        LOG.error(
            'Error reading template file: %s. '
            'Please verify correctness of --image-builder-template '
            'parameter', template_filename)
        exit(1)
    external_net = (cfg.CONF.external_net
                    or neutron.choose_external_net(openstack_client.neutron))
    stack_name = 'shaker_%s' % uuid.uuid4()
    stack_parameters = {
        'external_net': external_net,
        'flavor': flavor_name,
        'dns_nameservers': dns_nameservers
    }
    stack_id = None
    try:
        stack_id = heat.create_stack(openstack_client.heat, stack_name,
                                     template, stack_parameters)

        outputs = heat.get_stack_outputs(openstack_client.heat, stack_id)
        LOG.debug('Stack outputs: %s', outputs)

        LOG.debug('Waiting for server to shutdown')
        server_id = outputs['server_info'].get('id')
        nova.wait_server_shutdown(openstack_client.nova, server_id)

        LOG.debug('Making snapshot')
        openstack_client.nova.servers.create_image(server_id, image_name)

        LOG.debug('Waiting for server to snapshot')
        nova.wait_server_snapshot(openstack_client.nova, server_id)

        LOG.info('Created image: %s', image_name)
    except BaseException as e:
        if isinstance(e, KeyboardInterrupt):
            LOG.info('Caught SIGINT. Terminating')
        else:
            error_msg = 'Error while building the image: %s' % e
            LOG.error(error_msg)
            LOG.exception(e)
    finally:
        if stack_id and cfg.CONF.cleanup_on_exit:
            LOG.debug('Cleaning up the stack: %s', stack_id)
            openstack_client.heat.stacks.delete(stack_id)
Example #45
0
def compare_wrapper():
    specific_path = os.path.join(consts.DATA_DIR, "specific.pickle")
    generic_path = os.path.join(consts.DATA_DIR, "generic.pickle")

    if not os.path.exists(specific_path):
        extract_all_info(consts.CLASSIFIED_PATH, specific_path)

    if not os.path.exists(generic_path):
        extract_all_info(consts.CLASSIFIED_PATH, generic_path)

    specresults = read_file(specific_path)
    genresults = read_file(generic_path)

    results = extraction_score(specresults, genresults)

    order = [
        'Media', 'Title', 'Rating', 'Genre', 'Director', 'Date', 'Box Office',
        'Runtime'
    ]

    rows = [["Atributo", "", ""], ["", "", ""]]

    for label in order:
        rows.append([label, "", ""])
        rows.append(["", "Possui Atributo", "Nao Possui Atributo"])
        rows.append(
            ["Retornou Atributo", results[label]['TP'], results[label]['FP']])
        rows.append([
            "Nao Retornou Atributo", results[label]['FN'], results[label]['TN']
        ])
        rows.append(["", "", ""])
        rows.append(["Precision", results[label]['precision'], ""])
        rows.append(["Recall", results[label]['recall'], ""])
        rows.append(["Accuracy", results[label]['accuracy'], ""])
        rows.append(["", "", ""])
        rows.append(["", "", ""])

    save_csv(os.path.join(consts.RESULTS_DIR, "wrapper_results.csv"), rows)
Example #46
0
 def create_preseed(self):
     template_file = join_path(self.info.data_dir, 'preseed.' + self.info.distro.name)
     if not os.path.exists(template_file):
         template_file = join_path(self.info.data_dir, 'preseed.lupin')
     template = read_file(template_file)
     if self.info.distro.packages:
         distro_packages_skip = ''
     else:
         distro_packages_skip = '#'
     partitioning = ""
     partitioning += "d-i partman-auto/disk string LIDISK\n"
     partitioning += "d-i partman-auto/method string loop\n"
     partitioning += "d-i partman-auto-loop/partition string LIPARTITION\n"
     partitioning += "d-i partman-auto-loop/recipe string \\\n"
     disks_dir = unix_path(self.info.disks_dir) + '/'
     if self.info.root_size_mb:
         partitioning += '  %s 3000 %s %s $default_filesystem method{ format } format{ } use_filesystem{ } $default_filesystem{ } mountpoint{ / } . \\\n' \
         %(disks_dir + 'root.disk', self.info.root_size_mb, self.info.root_size_mb)
     if self.info.swap_size_mb:
         partitioning += '  %s 100 %s %s linux-swap method{ swap } format{ } . \\\n' \
         %(disks_dir + 'swap.disk', self.info.swap_size_mb, self.info.swap_size_mb)
     if self.info.home_size_mb:
         partitioning += '  %s 100 %s %s $default_filesystem method{ format } format{ } use_filesystem{ } $default_filesystem{ } mountpoint{ /home } . \\\n' \
         %(disks_dir + 'home.disk', self.info.home_size_mb, self.info.home_size_mb)
     if self.info.usr_size_mb:
         partitioning += '  %s 100 %s %s $default_filesystem method{ format } format{ } use_filesystem{ } $default_filesystem{ } mountpoint{ /usr } . \\\n' \
         %(disks_dir + 'usr.disk', self.info.usr_size_mb, self.info.usr_size_mb)
     partitioning += "\n"
     safe_host_username = self.info.host_username.replace(" ", "+")
     user_directory = self.info.user_directory.replace("\\", "/")[2:]
     host_os_name = "Windows XP Professional" #TBD
     password = md5_password(self.info.password)
     dic = dict(
         timezone = self.info.timezone,
         password = password,
         user_full_name = self.info.user_full_name,
         distro_packages_skip  = distro_packages_skip,
         distro_packages = self.info.distro.packages,
         host_username = self.info.host_username,
         username = self.info.username,
         partitioning = partitioning,
         user_directory = user_directory,
         safe_host_username = safe_host_username,
         host_os_name = host_os_name,)
     content = template
     for k,v in dic.items():
         k = "$(%s)" % k
         content = content.replace(k, v)
     preseed_file = join_path(self.info.custominstall, "preseed.cfg")
     write_file(preseed_file, content)
Example #47
0
    def test_read_file_with_expected_results_test(self):
        dir = os.path.dirname(__file__)
        filename = os.path.join(dir, './resources/test_file.csv')
        print filename
        list_from_file = utils.read_file(filename)
        list_expected = dict()
        list_expected['2016-01-01 03:00:00'] = 'not_completed'
        list_expected['2016-01-01 02:45:00'] = 'not_completed'
        list_expected['2016-01-01 08:15:00'] = 'not_completed'
        list_expected['2016-01-01 04:00:00'] = 'completed'

        self.assertEquals(
            len(set(list_from_file.items()) & set(list_expected.items())),
            len(list_from_file))
Example #48
0
def dive_2():
    cmds = utils.read_file("day02").split("\n")
    hor = 0
    depth = 0
    aim = 0
    for cmd in cmds:
        if "forward" in cmd:
            hor += int(cmd[-1])
            depth += aim * int(cmd[-1])
        elif "up" in cmd:
            aim -= int(cmd[-1])
        elif "down" in cmd:
            aim += int(cmd[-1])
    return hor * depth
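Note that int(cmd[-1]) only reads the last character, so this assumes single-digit amounts. A slightly more defensive variant (the function name here is an invention) that splits each command instead:

def dive_2_safe(cmds):
    hor = depth = aim = 0
    for cmd in cmds:
        direction, amount = cmd.split()
        amount = int(amount)
        if direction == "forward":
            hor += amount
            depth += aim * amount
        elif direction == "up":
            aim -= amount
        elif direction == "down":
            aim += amount
    return hor * depth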
Example #49
0
def app(tmpdir, request):

    port = 8000
    # this is to make sure sqlite is initialized
    # for every unit test
    reload(default_settings)

    # fresh files before running
    for filename in ['auth.sq3', 'index.sq3', 'alias.sq3']:
        if os.path.exists(filename):
            os.remove(filename)
    indexd_app = get_indexd_app()

    indexd_init(*SIGNPOST['auth'])
    indexd = Process(target=indexd_app.run, args=['localhost', port])
    indexd.start()
    wait_for_indexd_alive(port)

    gencode_json = tmpdir.mkdir("slicing").join("test_gencode.json")
    gencode_json.write(
        json.dumps({
            'a_gene': ['chr1', None, 200],
            'b_gene': ['chr1', 150, 300],
            'c_gene': ['chr1', 200, None],
            'd_gene': ['chr1', None, None],
        }))

    def teardown():
        for filename in ['auth.sq3', 'index.sq3', 'alias.sq3']:
            if os.path.exists(filename):
                os.remove(filename)

        indexd.terminate()
        wait_for_indexd_not_alive(port)

    _app.config.from_object("sheepdog.test_settings")

    request.addfinalizer(teardown)

    app_init(_app)
    dictionary_setup(_app)

    _app.logger.setLevel(os.environ.get("GDC_LOG_LEVEL", "WARNING"))

    _app.jwt_public_keys = {
        _app.config['USER_API']: {
            'key-test': utils.read_file('resources/keys/test_public_key.pem')
        }
    }
    return _app
Example #50
0
def read_file(file):
    file_content = []
    f = utils.read_file(file)
    data = f.next()
    file_content.append([data, 0])

    while True:
        try:
            data = f.next()  # generate next chunk
            file_content.append([data, 1])  # invoked by data from above
        except StopIteration:
            print('file reached end')
            break
    return file_content
Example #51
0
    def test_it_returns_last_by_date_log_file_from_dir(self):
        old_log_path = self.test_environment.create_log_file_and_get_its_path(
            name=self._TEST_LOGS_NAMES['2017.06.29'])
        new_log_path = self.test_environment.create_log_file_and_get_its_path(
            name=self._TEST_LOGS_NAMES['2017.06.30'])
        expected_log_content = read_file(path=new_log_path)

        log_dir = self.test_environment.get_log_dir()
        report_dir = self.test_environment.get_report_dir()
        log_files_handler = LogFilesHandler(log_dir=log_dir,
                                            report_dir=report_dir)
        returned_log_content = log_files_handler.get_file_to_parse().content

        self.assertListEqual(expected_log_content, list(returned_log_content))
Example #52
0
    def get_cached_auth_token(self):
        """ Return auth_token cached in .pantri_auth_token """

        # Return cached auth token.
        auth_token_cache = os.path.join(self.git_path, '.pantri_auth_token')
        if os.path.exists(auth_token_cache):
            try:
                auth_token = json.loads(
                    utils.read_file(auth_token_cache))['auth_token']
                return auth_token
            except:
                pass

        return None
Example #53
0
def import_admin_keys(gpg, indir, admins):
    fingerprints = []
    for admin in admins:
        key = read_file(os.path.join(indir, admin.get('keyfile')))
        import_result = gpg.import_keys(key)
        if import_result.count < 1:
            print('could not import admin key for {}'.format(
                admin.get('name')))
            continue

        fingerprints.append(import_result.fingerprints[0])
        print('Imported admin key for {}, fingerprint: {}'.format(
            admin.get('name'), import_result.fingerprints[0]))
    return fingerprints
Example #54
0
def submitter(encoded_jwt):
    private_key = utils.read_file('resources/keys/test_private_key.pem')
    # set up a fake User object which has all the attributes needed
    # to generate a token
    user_properties = {
        'id': 1,
        'username': '******',
        'is_admin': False,
        'policies': [],
        'google_proxy_group_id': None,
    }
    user = type('User', (object, ), user_properties)
    token = encoded_jwt(private_key, user)
    return {'Authorization': 'bearer ' + token}
Example #55
0
def get_teams():
    if is_production():
        teams = read_csv('/home/guerredesclans/mysite/teams.csv')
        teams_points = read_file('/home/guerredesclans/mysite/teams_points')
    else:
        teams = read_csv('teams.csv')
        teams_points = read_file('teams_points')

    s = ""
    for l in teams_points:
        s += l

    c = Counter(s.split(";"))

    for t in teams:
        team_id = t[4]
        t.insert(4, c[team_id])
    try:
        teams.sort(key=lambda x: int(x[3]), reverse=True)
    except:
        print("Int error")

    return teams
Example #56
0
 def test_run_error(self):
     """Test cleanup_code handling of subprocess error"""
     with utils.TempDir() as tmpdir:
         env = self.make_dummy_format('autopep8', tmpdir, exitval=1)
         pyfile = os.path.join(tmpdir, 'test.py')
         utils.write_file(pyfile, 'bar')
         p = subprocess.Popen([cleanup_py, '-a', '--all'],
                              cwd=tmpdir,
                              env=env)
         stdout, stderr = p.communicate()
         # error should be caught and not fail entire job
         self.assertEqual(p.returncode, 0)
         # file should be unchanged
         self.assertEqual(utils.read_file(pyfile), 'bar')
Example #57
0
 def test_python_autopep8(self):
     """Test cleanup_code script on Python files with autopep8."""
     for args in ['--all', 'test.py']:
         with utils.TempDir() as tmpdir:
             env = self.make_dummy_format('autopep8', tmpdir)
             pyfile = os.path.join(tmpdir, 'test.py')
             utils.write_file(pyfile, 'def foo():\n  bar\n')
             p = subprocess.Popen([cleanup_py, '-a', '-v', args],
                                  cwd=tmpdir,
                                  env=env)
             stdout, stderr = p.communicate()
             self.assertEqual(p.returncode, 0)
             # dummy autopep8 should have written out 'foo'
             self.assertEqual(utils.read_file(pyfile), 'foo\n')
Example #58
0
def process_file(filename):
    # process one file
    LogOptions.filename = os.path.abspath(filename)
    for file_format, z_filename, data in read_file(filename):
        LogOptions.z_filename = z_filename
        Stat.total += 1
        if file_format == 'error':
            Stat.not_xml += 1
            print_log('FATAL: read file error', level=3)
            continue
        if not check_xml(data):
            Stat.not_xml += 1
            continue
        calc_statistics(data)
Example #59
0
def admin(encoded_jwt):
    private_key = utils.read_file('resources/keys/test_private_key.pem')
    project_ids = ['phs000218', 'phs000235', 'phs000178']
    user_properties = {
        'id': 2,
        'username': '******',
        'is_admin': True,
        'project_access': {project: ROLES.values() for project in project_ids},
        'policies': [],
        'google_proxy_group_id': None,
    }
    user = type('User', (object,), user_properties)
    token = encoded_jwt(private_key, user)
    return {'Authorization': 'bearer ' + token}
Example #60
0
    def test_get_payroll_report_42(self):
        filename = 'time-report-42.csv'
        with open(filename, 'rb') as csvFile:
            data = {'file': (filename, csvFile, 'text/csv')}
            _, _, data_ = utils.read_file(csvFile)
        csvFile.close()

        status, msg, df = utils.parse_employee_logs(data_, 42)
        status, msg = self.model.insert_csv(df, 42)

        rows = self.model.get_logs()
        status, payrollReport = utils.make_payroll_report(rows)
        # app.logger.info(json.dumps(payrollReport, indent = 4))
        self.assertEqual(status, 200)