def test_one_replacement(self):
     """Test a target with a single replacement editor
     Before the test execution we copy the target file from a sample each
     time. Target will be removed on successful test execution. On
     failure it will be kept for debugging and overwritten on next test
     execution (no checks)
     """
     sample = os.path.join(self.test_files_dir, "one_replacement.sample")
     targetfile = os.path.join(self.test_files_dir, "one_replacement.conf")
     expected = os.path.join(self.test_files_dir,
                             "one_replacement.expected")
     thisincr = 10
     backupfile = "{0}.backup.{1}".format(targetfile, thisincr)
     shutil.copy(sample, targetfile)
     tgt = target.Target()
     tgt.targetfile = targetfile
     tgt.edlist.append(editor.Editor("replace", 10,
                                     ("email", "*****@*****.**")))
     thisincr = mock.Mock()
     thisincr.is_current.return_value = True
     thisincr.__int__ = mock.Mock()
     thisincr.__int__.return_value = 10
     tgt.run_editors(thisincr)
     result = filecmp.cmp(targetfile, expected)
     if not result:
         print("Failed result in {}".format(targetfile), file=sys.stderr)
     self.assertTrue(result)
     os.unlink(targetfile)
     self.assertTrue(filecmp.cmp(backupfile, sample))
     os.unlink(backupfile)
  def test_SetUpInDirAndFsConfig_WithRootFsConfig(self):
    root_dir = common.MakeTempDir()
    with open(os.path.join(root_dir, 'init'), 'w') as init_fp:
      init_fp.write('init')

    origin_in = common.MakeTempDir()
    with open(os.path.join(origin_in, 'file'), 'w') as in_fp:
      in_fp.write('system-file')
    os.symlink('../etc', os.path.join(origin_in, 'symlink'))

    fs_config_system = self._gen_fs_config('system')
    fs_config_root = self._gen_fs_config('root')

    prop_dict = {
        'fs_config': fs_config_system,
        'mount_point': 'system',
        'root_dir': root_dir,
        'root_fs_config': fs_config_root,
    }
    in_dir, fs_config = SetUpInDirAndFsConfig(origin_in, prop_dict)

    self.assertTrue(filecmp.cmp(
        os.path.join(in_dir, 'init'), os.path.join(root_dir, 'init')))
    self.assertTrue(filecmp.cmp(
        os.path.join(in_dir, 'system', 'file'),
        os.path.join(origin_in, 'file')))
    self.assertTrue(os.path.islink(os.path.join(in_dir, 'system', 'symlink')))

    with open(fs_config) as fs_config_fp:
      fs_config_data = fs_config_fp.readlines()
    self.assertIn('fs-config-system\n', fs_config_data)
    self.assertIn('fs-config-root\n', fs_config_data)
    self.assertEqual('/', prop_dict['mount_point'])
 def test_compressor_nominify(self):
     comm.clear_compressor()
     compre = ""
     comm.compressor(compre, self)
     self.assertTrue(filecmp.cmp(comm.compDir + "script.js", comm.oriDir + "script.js"))
     self.assertTrue(filecmp.cmp(comm.compDir + "style.css", comm.oriDir + "style.css"))
     comm.clear_compressor()
 def test_two_replacements(self):
     """Test a target with a two replacement editors
     Basically copied from test_one_replacement, did not bother to factor
     out common code.
     """
     sample = os.path.join(self.test_files_dir, "two_replacements.sample")
     targetfile = os.path.join(self.test_files_dir, "two_replacements.conf")
     expected = os.path.join(self.test_files_dir,
                             "two_replacements.expected")
     thisincr = mock.Mock()
     thisincr.is_current.return_value = True
     thisincr.__int__ = mock.Mock()
     thisincr.__int__.return_value = 22
     backupfile = "{0}.backup.{1}".format(targetfile, int(thisincr))
     shutil.copy(sample, targetfile)
     tgt = target.Target()
     tgt.targetfile = targetfile
     tgt.edlist.append(editor.Editor("replace", 22,
                                     ("email", "*****@*****.**")))
     tgt.edlist.append(editor.Editor("replace", 22,
                                     ("myip", "1.2.3.4")))
     tgt.run_editors(thisincr)
     result = filecmp.cmp(targetfile, expected)
     if not result:
         print("Failed result in {}".format(targetfile), file=sys.stderr)
     self.assertTrue(result)
     os.unlink(targetfile)
     self.assertTrue(filecmp.cmp(backupfile, sample))
     os.unlink(backupfile)
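Both tests above rely on configuring int() support on a plain Mock so that run_editors and the backup-file suffix can call int(thisincr). A minimal standalone sketch of that technique, assuming Python 3's unittest.mock (the tests may import the standalone mock package instead); the file name below is illustrative only:

from unittest import mock

thisincr = mock.Mock()
thisincr.is_current.return_value = True
thisincr.__int__ = mock.Mock(return_value=22)  # assigning magic methods is supported by Mock

assert thisincr.is_current()
assert int(thisincr) == 22
assert "{0}.backup.{1}".format("two_replacements.conf", int(thisincr)) == \
    "two_replacements.conf.backup.22"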
Example #5
    def test_minimap_reads_to_all_ref_seqs(self):
        '''test test_minimap_reads_to_all_ref_seqs'''
        clusters_tsv = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.clstrs.tsv')
        ref_fasta = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.ref.fa')
        reads_1 = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.reads_1.fq')
        reads_2 = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.reads_2.fq')
        tmp_outprefix = 'tmp.clusters_test_minimap_reads_to_all_ref_seqs'
        clusters.Clusters._minimap_reads_to_all_ref_seqs(clusters_tsv, ref_fasta, reads_1, reads_2, tmp_outprefix)
        expected_cluster2rep = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.out.clstr2rep')
        expected_cluster_counts = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.out.clstr_count')
        expected_proper_pairs = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.out.pairs')
        expected_insert_hist = os.path.join(data_dir, 'clusters_minimap_reads_to_all_refs.out.hist')

        # not sure that the reads order is preserved, so just check read store file exists
        self.assertTrue(os.path.exists(tmp_outprefix + '.reads'))

        self.assertTrue(filecmp.cmp(expected_cluster2rep, tmp_outprefix + '.cluster2representative', shallow=False))
        self.assertTrue(filecmp.cmp(expected_cluster_counts, tmp_outprefix + '.clusterCounts', shallow=False))
        self.assertTrue(filecmp.cmp(expected_proper_pairs, tmp_outprefix + '.properPairs', shallow=False))
        self.assertTrue(filecmp.cmp(expected_insert_hist, tmp_outprefix + '.insertHistogram', shallow=False))
        os.unlink(tmp_outprefix + '.cluster2representative')
        os.unlink(tmp_outprefix + '.clusterCounts')
        os.unlink(tmp_outprefix + '.properPairs')
        os.unlink(tmp_outprefix + '.insertHistogram')
        os.unlink(tmp_outprefix + '.reads')
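The assertions above pass shallow=False to force a byte-by-byte comparison. By default filecmp.cmp() is shallow and may report two files as equal based only on their os.stat() signatures (file type, size, mtime). A small self-contained illustration of the difference, not part of the test above:

import filecmp
import os
import shutil
import tempfile

workdir = tempfile.mkdtemp()
file_a = os.path.join(workdir, 'a.txt')
file_b = os.path.join(workdir, 'b.txt')

with open(file_a, 'w') as fh:
    fh.write('hello')
with open(file_b, 'w') as fh:
    fh.write('HELLO')            # same size, different content
shutil.copystat(file_a, file_b)  # give b the same timestamps as a

print(filecmp.cmp(file_a, file_b))                 # usually True: stat signatures match
print(filecmp.cmp(file_a, file_b, shallow=False))  # False: contents differ
shutil.rmtree(workdir)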
def test_uploadFileEntity():
    # Create a FileEntity
    # Dictionaries default to FileEntity as a type
    fname = utils.make_bogus_data_file()
    schedule_for_cleanup(fname)
    entity = {'name'        : 'fooUploadFileEntity',
              'description' : 'A test file entity',
              'parentId'    : project['id']}
    entity = syn.uploadFile(entity, fname)

    # Download and verify
    entity = syn.downloadEntity(entity)
    assert entity['files'][0] == os.path.basename(fname)
    assert filecmp.cmp(fname, entity['path'])

    # Check if we upload the wrong type of file handle
    fh = syn.restGET('/entity/%s/filehandles' % entity.id)['list'][0]
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'

    # Create a different temporary file
    fname = utils.make_bogus_data_file()
    schedule_for_cleanup(fname)

    # Update existing FileEntity
    entity = syn.uploadFile(entity, fname)

    # Download and verify that it is the same file
    entity = syn.downloadEntity(entity)
    assert entity['files'][0] == os.path.basename(fname)
    assert filecmp.cmp(fname, entity['path'])
Example #7
def tst_link(mnt_dir):
    name1 = pjoin(mnt_dir, name_generator())
    name2 = pjoin(mnt_dir, name_generator())
    shutil.copyfile(TEST_FILE, name1)
    assert filecmp.cmp(name1, TEST_FILE, False)

    fstat1 = os.lstat(name1)
    assert fstat1.st_nlink == 1

    os.link(name1, name2)

    fstat1 = os.lstat(name1)
    fstat2 = os.lstat(name2)
    for attr in ('st_mode', 'st_dev', 'st_uid', 'st_gid',
                 'st_size', 'st_atime', 'st_mtime', 'st_ctime'):
        assert getattr(fstat1, attr) == getattr(fstat2, attr)
    assert os.path.basename(name2) in os.listdir(mnt_dir)
    assert filecmp.cmp(name1, name2, False)

    os.unlink(name2)

    assert os.path.basename(name2) not in os.listdir(mnt_dir)
    with pytest.raises(FileNotFoundError):
        os.lstat(name2)

    os.unlink(name1)
def test_MaterialPackageWrangler_make_package_09():
    r'''Makes handmade package. Copies canned material definition py.
    Makes output py. Corrupts output py. Makes sure score manager
    starts when output py is corrupt. Removes package.
    '''

    with systemtools.FilesystemState(remove=[path]):
        input_ = 'mm new testnotes y q'
        ide._run(input_=input_)
        assert os.path.exists(path)
        assert os.path.exists(definition_py_path)
        assert not os.path.exists(output_py_path)
        shutil.copyfile(
            boilerplate_definition_py_path,
            definition_py_path,
            )
        assert filecmp.cmp(
            definition_py_path,
            boilerplate_definition_py_path,
            )
        input_ = 'mm testnotes dp y q'
        ide._run(input_=input_)
        assert os.path.exists(output_py_path)
        assert not filecmp.cmp(output_py_path, exception_file_path)
        shutil.copyfile(exception_file_path, output_py_path)
        assert filecmp.cmp(output_py_path, exception_file_path)
        input_ = 'mm rm testnotes remove q'
        ide._run(input_=input_)
        assert not os.path.exists(path)
Example #9
  def make_makefile(directory):
    """
    Generate a makefile in the selected directory.

    Parameters
    ----------
    directory : str
      relative path to tested directory
    """
    print("Testing " + directory)
    make_ok = False
    old_pwd = os.getcwd()
    os.chdir(os.path.dirname(os.path.abspath(__file__)) + '/' + directory)
    run_fobis(fake_args=['build', '-f', 'fobos', '-m', 'makefile_check'])
    make_ok = filecmp.cmp('makefile_check', 'makefile_ok')
    if not make_ok:
      if os.path.exists('makefile_ok2'):
        make_ok = filecmp.cmp('makefile_check', 'makefile_ok2')
      if not make_ok:
        print('makefile generated')
        with open('makefile_check', 'r') as mk_check:
          print(mk_check.read())
    run_fobis(fake_args=['clean', '-f', 'fobos'])
    os.chdir(old_pwd)
    return make_ok
  def testOutsideChrootDifferentTokenFile(self):
    mocked_outsidechroot = self._MockOutsideChroot('foo')

    self.mox.StubOutWithMock(cgt, '_ChrootPathToExternalPath')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(shutil, 'copy2')
    self.mox.StubOutWithMock(filecmp, 'cmp')
    self.mox.StubOutWithMock(build_lib, 'RunCommand')
    cmd = ['check_gdata_token', 'foo']
    run_result = cros_test_lib.EasyAttr(returncode=0)

    # Create replay script.
    build_lib.RunCommand(cmd, enter_chroot=True,
                         print_cmd=False,
                         error_code_ok=True).AndReturn(run_result)
    cgt._ChrootPathToExternalPath(cgt.TOKEN_FILE).AndReturn('chr-tok')
    os.path.exists('chr-tok').AndReturn(True)
    os.path.exists(cgt.TOKEN_FILE).AndReturn(True)
    filecmp.cmp(cgt.TOKEN_FILE, 'chr-tok').AndReturn(False)
    shutil.copy2('chr-tok', cgt.TOKEN_FILE)
    self.mox.ReplayAll()

    # Run test verification.
    with self.OutputCapturer():
      cgt.OutsideChroot.Run(mocked_outsidechroot)
    self.mox.VerifyAll()
    def test_variations(self):
        """Adds image and checks filesystem as well as width and height."""
        instance = ResizeModel.objects.create(
            image=self.fixtures['600x400.jpg']
        )

        source_file = os.path.join(FIXTURE_DIR, '600x400.jpg')

        self.assertTrue(os.path.exists(os.path.join(IMG_DIR, 'image.jpg')))
        self.assertEqual(instance.image.width, 600)
        self.assertEqual(instance.image.height, 400)
        path = os.path.join(IMG_DIR, 'image.jpg')
        assert filecmp.cmp(source_file, path)

        path = os.path.join(IMG_DIR, 'image.medium.jpg')
        assert os.path.exists(path)
        self.assertEqual(instance.image.medium.width, 400)
        self.assertLessEqual(instance.image.medium.height, 400)
        self.assertFalse(filecmp.cmp(
            source_file,
            os.path.join(IMG_DIR, 'image.medium.jpg')))

        self.assertTrue(os.path.exists(
            os.path.join(IMG_DIR, 'image.thumbnail.jpg'))
        )
        self.assertEqual(instance.image.thumbnail.width, 100)
        self.assertLessEqual(instance.image.thumbnail.height, 75)
        self.assertFalse(filecmp.cmp(
            source_file,
            os.path.join(IMG_DIR, 'image.thumbnail.jpg'))
        )
Example #12
def compare_files(tshark_bin, tmpdir, tshark_cmp, num_procs, max_files, cap_files):
    pool = multiprocessing.Pool(num_procs)
    results_bin = [pool.apply_async(dissect_file_process, [tshark_bin, tmpdir, file]) for file in cap_files]
    results_cmp = [pool.apply_async(dissect_file_process, [tshark_cmp, tmpdir, file]) for file in cap_files]
    try:
        for (cur_item_idx,(result_async_bin, result_async_cmp)) in enumerate(zip(results_bin, results_cmp)):
            file_result_bin = result_async_bin.get()
            file_result_cmp = result_async_cmp.get()
            if file_result_cmp[1] is False or file_result_bin[1] is False:
                action = "FAILED (exitcode)"
            elif not filecmp.cmp(file_result_bin[2], file_result_cmp[2]):
                action = "FAILED (stdout)"
            elif not filecmp.cmp(file_result_bin[3], file_result_cmp[3]):
                action = "FAILED (stderr)"
            else:
                action = "PASSED"
                os.remove(file_result_bin[2])
                os.remove(file_result_cmp[2])
                os.remove(file_result_bin[3])
                os.remove(file_result_cmp[3])

            print "%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_bin[0], os.path.getsize(file_result_bin[0]))
            print "%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_cmp[0], os.path.getsize(file_result_cmp[0]))
    except KeyboardInterrupt:
        print "%s was interrupted by user" % (sys.argv[0])
        pool.terminate()
        exit(1)
Example #13
    def test_Branch(self):
        """
        Test Branch and Branch.run.
        Note that fuzzy junctions are not merged.
        """
        test_name = "test_branch"
        good_gff_fn = op.join(_OUT_DIR_, test_name + ".good.gff.unfuzzy")
        bad_gff_fn = op.join(_OUT_DIR_, test_name + ".bad.gff.unfuzzy")
        group_fn = op.join(_OUT_DIR_, test_name + ".group.txt.unfuzzy")

        rmpath(good_gff_fn)
        rmpath(bad_gff_fn)
        rmpath(group_fn)

        b = Branch(isoform_filename=READS_DS, sam_filename=SORTED_GMAP_SAM,
                   cov_threshold=2, min_aln_coverage=0.99, min_aln_identity=0.95)

        b.run(allow_extra_5exon=True, skip_5_exon_alt=False,
              ignored_ids_fn=None,
              good_gff_fn=good_gff_fn,
              bad_gff_fn=bad_gff_fn,
              group_fn=group_fn)

        self.assertTrue(op.exists(good_gff_fn))
        self.assertTrue(op.exists(bad_gff_fn))
        self.assertTrue(op.exists(group_fn))

        std_good_gff_fn = op.join(SIV_STD_DIR, "test_branch", test_name + ".good.gff.unfuzzy")
        std_bad_gff_fn = op.join(SIV_STD_DIR, "test_branch", test_name + ".bad.gff.unfuzzy")
        std_group_fn = op.join(SIV_STD_DIR, "test_branch", test_name + ".group.txt.unfuzzy")

        print "Comparing %s and %s"  %  (good_gff_fn, std_good_gff_fn)
        self.assertTrue(filecmp.cmp(good_gff_fn, std_good_gff_fn))
        self.assertTrue(filecmp.cmp(bad_gff_fn, std_bad_gff_fn))
        self.assertTrue(filecmp.cmp(group_fn, std_group_fn))
Example #14
def main():
    cdcVisLinks = getVisLinks()
    global modifiedPage
    for link in cdcVisLinks:
         visPageUrl = 'http://www.cdc.gov/vaccines/hcp/vis/vis-statements/' + link
         visPageReq = urllib.request.Request(visPageUrl)
         visPageResp = urllib.request.urlopen(visPageReq)
         visPageRespData = visPageResp.read()
         downloadPage = re.findall(r'href="/vaccines/hcp/vis/vis-statements/(.*?.pdf)"',str(visPageRespData))
         downloadLink = 'http://www.cdc.gov/vaccines/hcp/vis/vis-statements/' + downloadPage[0]
         urllib.request.urlretrieve(downloadLink,downloadPage[0]) # Download VIS PDF
         pdfFileName = downloadPage[0]
         if os.path.isfile(currentFilePath+'/lib/'+downloadPage[0]):
              #print downloadPage[0],'File exists in ./lib/ folder'
              if filecmp.cmp(downloadPage[0],currentFilePath+'/lib/'+downloadPage[0]):
                   subprocess.call(["rm",downloadPage[0]])
              else:
                   subprocess.call(["mv",downloadPage[0],currentFilePath+'/lib/'])
                   modifiedPage.append(pdfFileName)
                   writeLogEntry(logFileName, [pdfFileName,datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S")])
         else:
              #print downloadPage[0],'File does not exist in ./lib/ folder'
              subprocess.call(["mv",downloadPage[0],'./lib/'])
              modifiedPage.append(pdfFileName)
              writeLogEntry(logFileName, [pdfFileName,datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S")])
Example #15
    def _copy_file(self, filename, destination):
        filepath = os.path.join(THIS_FOLDER, filename)

        def do_it():
            shutil.copyfile(filepath, destination)

        if self.creating:
            do_it()
        else:
            # first check if the file is already present
            if not os.path.exists(destination):
                do_it()
            else:
                # The file exists. Check whether what the instance has is the
                # vanilla old version or not.
                old_version_filepath = os.path.join(
                    self.old_version_path, 'iepy', 'instantiation', filename)
                if os.path.exists(old_version_filepath) and filecmp.cmp(old_version_filepath, destination):
                    # vanilla old version. Simply upgrade it.
                    do_it()
                else:
                    # customized file.
                    # Check if it's exactly what the new version provides
                    if filecmp.cmp(filepath, destination):
                        # The local changes already match the new version exactly
                        # (perhaps the instance was created from a nightly build),
                        # so nothing needs to be done.
                        return
                    # We'll back it up, and later upgrade
                    self.preserve_old_file_version_as_copy(destination)
                    do_it()
Example #16
 def test_saveResponse(self):
     """
     Fetches and stores response information as SEED RESP file.
     """
     client = Client()
     start = UTCDateTime("2005-001T00:00:00")
     end = UTCDateTime("2008-001T00:00:00")
     # RESP, single channel
     origfile = os.path.join(self.path, "data", "RESP.ANMO.IU.00.BHZ")
     tempfile = NamedTemporaryFile().name
     client.saveResponse(tempfile, "IU", "ANMO", "00", "BHZ", start, end)
     self.assertTrue(filecmp.cmp(origfile, tempfile))
     os.remove(tempfile)
     # RESP, multiple channels
     origfile = os.path.join(self.path, "data", "RESP.ANMO.IU._.BH_")
     tempfile = NamedTemporaryFile().name
     client.saveResponse(tempfile, "IU", "ANMO", "*", "BH?", start, end)
     self.assertTrue(filecmp.cmp(origfile, tempfile))
     os.remove(tempfile)
     # StationXML, single channel
     tempfile = NamedTemporaryFile().name
     client.saveResponse(tempfile, "IU", "ANMO", "00", "BHZ", start, end, format="StationXML")
     data = open(tempfile).read()
     self.assertTrue('<Station net_code="IU" sta_code="ANMO">' in data)
     os.remove(tempfile)
     # SACPZ, single channel
     tempfile = NamedTemporaryFile().name
     client.saveResponse(tempfile, "IU", "ANMO", "00", "BHZ", start, end, format="SACPZ")
     data = open(tempfile).read()
     self.assertTrue("NETWORK   (KNETWK): IU" in data)
     self.assertTrue("STATION    (KSTNM): ANMO" in data)
     os.remove(tempfile)
Example #17
    def test_folder_decompression(self):
        zip_file = "tests/file_manager_tests/data/dir.zip"
        out_dir = "tests/file_manager_tests/data/dirout"

        if os.path.exists(zip_file):
            os.remove(zip_file)
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)

        self.assertFalse(os.path.exists(out_dir) and os.path.exists(zip_file), "File(s) exists prior to test")

        FileUtils.compress_file("tests/file_manager_tests/data/dir", zip_file)
        FileUtils.decompress_file(zip_file, out_dir)

        self.assertTrue(os.path.exists(out_dir) and os.path.isdir(out_dir), "Dir does not exist after test")
        self.assertTrue(os.path.exists(out_dir + "/tests/file_manager_tests/data/dir/b.txt"),
                        "File b does not exist after test")
        self.assertTrue(os.path.exists(out_dir + "/tests/file_manager_tests/data/dir/c.txt"),
                        "File c does not exist after test")

        self.assertTrue(filecmp.cmp("tests/file_manager_tests/data/dir/b.txt",
                                    out_dir + "/tests/file_manager_tests/data/dir/b.txt"),
                        "File b is not the same")
        self.assertTrue(filecmp.cmp("tests/file_manager_tests/data/dir/c.txt",
                                    out_dir + "/tests/file_manager_tests/data/dir/c.txt"),
                        "File c is not the same")

        os.remove(zip_file)
        shutil.rmtree(out_dir)

        return
Example #18
    def test_gbif_to_file(self, mock_urlretrieve=None, mock_urlopen=None):
        mock_urlretrieve.side_effect = self._urlretrieve
        mock_urlopen.side_effect = self._urlopen
        # mock urllib.urlretrieve ...
        #        returns a zip file with data.csv and citation.csv
        # mock urllib.urlopen ...
        #        returns gbif_metadata.json

        file_dest = {
            'url': 'file://{}'.format(self.tmpdir)
        }
        move(self.gbif_source, file_dest)

        # Check files are created
        self.assertTrue(os.path.exists(
            os.path.join(self.tmpdir, 'gbif_dataset.json')))
        self.assertTrue(os.path.exists(
            os.path.join(self.tmpdir, 'gbif_occurrence.zip')))
        self.assertTrue(os.path.exists(
            os.path.join(self.tmpdir, 'gbif_metadata.json')))

        # Check file contents
        zf = zipfile.ZipFile(os.path.join(self.tmpdir, 'gbif_occurrence.zip'))
        zf.extractall(self.tmpdir)
        self.assertTrue(filecmp.cmp(os.path.join(self.tmpdir, 'gbif_metadata.json'),
                                    pkg_resources.resource_filename(__name__, 'data/gbif_metadata.json')))
        self.assertTrue(filecmp.cmp(os.path.join(self.tmpdir, 'data', 'gbif_occurrence.csv'),
                                    pkg_resources.resource_filename(__name__, 'data/gbif_occurrence.csv')))
        self.assertTrue(filecmp.cmp(os.path.join(self.tmpdir, 'data', 'gbif_citation.txt'),
                                    pkg_resources.resource_filename(__name__, 'data/gbif_citation.txt')))
    def test_file_overwrite(self):
        """
        Make sure FileWrapper overwrites an existing file
        """
        # read the content to be written from the expected result
        expected_file = os.path.join("data", "filewrap.txt")
        with open(expected_file, "r") as fh_src:
            org_data = fh_src.read()

        # make the target file
        dummy_file = os.path.join("data", "fileappend.txt")
        temp_file = os.path.join("data", "deleteme.txt")
        shutil.copyfile(dummy_file, temp_file)

        # make sure the current content is different from the expected result
        self.assertFalse(filecmp.cmp(expected_file, temp_file))

        # FileWrapper overwrites the target file
        w = target.FileWrapper(temp_file)
        try:
            with w.open() as fh:
                fh.write(org_data)
            self.assertTrue(filecmp.cmp(expected_file, temp_file))
        finally:
            os.remove(temp_file)
    def test_processPrimers(self):
        """Test function _processPrimers()."""
        inPFN = op.join(self.dataDir, "test_primers_in.fasta")
        obj = Classifier()

        # Test on an artificial example.
        outPFN = op.join(self.outDir, "test_primers_out.fasta")
        stdoutPFN = op.join(self.stdDir, "test_primers_out.fasta")
        obj._processPrimers(primer_fn=inPFN, window_size=50,
                            primer_out_fn=outPFN,
                            revcmp_primers=False)

        self.assertTrue(filecmp.cmp(outPFN, stdoutPFN))

        # Test on real PacBio primers.fasta
        pbPFN = op.join(self.dataDir, "primers.fasta")

        # outPFN2 = primers.fasta for primer detection.
        outPFN2 = op.join(self.outDir, "test_primers_out_2.fasta")
        stdoutPFN2 = op.join(self.stdDir, "test_primers_out_2.fasta")
        obj._processPrimers(primer_fn=pbPFN, window_size=50,
                            primer_out_fn=outPFN2,
                            revcmp_primers=False)
        self.assertTrue(filecmp.cmp(outPFN2, stdoutPFN2))

        # outPFN3 = primers.fasta for chimera detection.
        outPFN3 = op.join(self.outDir, "test_primers_out_3.fasta")
        stdoutPFN3 = op.join(self.stdDir, "test_primers_out_3.fasta")
        obj._processPrimers(primer_fn=pbPFN, window_size=50,
                            primer_out_fn=outPFN3,
                            revcmp_primers=True)
        self.assertTrue(filecmp.cmp(outPFN3, stdoutPFN3))
 def test_getBestFrontBackRecord(self):
     """Test function _parseBestFrontBackRecord()."""
     obj = Classifier()
     domFN = op.join(self.dataDir, "test_parseHmmDom.dom")
     front, back = obj._getBestFrontBackRecord(domFN)
     # In the following, verify the front and back are equivalent
     # to stdout/test_parseHmmDom_dFront/Back.txt
     def prettystr(d):
         """Return Pretty print string for front & back."""
         return "\n".join(
             [key + ":\n" + "\n".join(
                 [k + ":" + str(v) for k, v in val.iteritems()])
              for key, val in d.iteritems()])
     frontFN = op.join(self.outDir, "test_parseHmmDom_dFront.txt")
     backFN = op.join(self.outDir, "test_parseHmmDom_dBack.txt")
     f = open(frontFN, 'w')
     f.write(prettystr(front))
     f.close()
     f = open(backFN, 'w')
     f.write(prettystr(back))
     f.close()
     stdoutFrontFN = op.join(self.stdDir,
                             "test_parseHmmDom_dFront.txt")
     stdoutBackFN = op.join(self.stdDir,
                            "test_parseHmmDom_dBack.txt")
     self.assertTrue(filecmp.cmp(frontFN, stdoutFrontFN))
     self.assertTrue(filecmp.cmp(backFN, stdoutBackFN))
Example #22
    def test01(self):
        """
        Test basic usage
        """

        datadir = path.join(self.datadir)

        this_test = sys._getframe().f_code.co_name
        sseqids = path.join(datadir, 'sseqid_basic')

        outdir = self.mkoutdir()

        fasta_out = path.join(outdir, 'out1.fasta')
        info_out = path.join(outdir, 'out1.csv')

        fasta_ref = path.join(datadir, 'test01', 'ref.fasta')
        info_ref = path.join(datadir, 'test01', 'ref.csv')

        args = ['--outfasta', fasta_out,
                '--seqinfo', info_out,
                '--email', '*****@*****.**',
                sseqids]

        log.info(self.log_info.format(' '.join(map(str, args))))

        self.main(args)

        self.assertTrue(filecmp.cmp(fasta_ref, fasta_out))
        self.assertTrue(filecmp.cmp(info_ref, info_out))
Example #23
 def test_zip(self):
     file_uri = 'file://'+resources_path+'heb.nt.zip'
     ae = ArchiveExtractor(file_uri)
     self.assertTrue(filecmp.cmp(ae.get_extracted_file_path_list()[0],
             original_head), "file 1 differs")
     self.assertTrue(filecmp.cmp(ae.get_extracted_file_path_list()[1],
             original_tail), "file 2 differs")
Example #24
    def test_split_by_base_count(self):
        '''Check that fasta/q files get split by base count correctly'''
        infile = os.path.join(data_dir, 'sequences_test_split_test.fa')
        outprefix = 'tmp.sequences_test_split_test.fa.test'
        length2files = {2: ['1','2','3','4'],
                        3: ['1','2','3'],
                        4: ['1', '2', '3'],
                        6: ['1', '2']}
        for l in length2files:
            tasks.split_by_base_count(infile, outprefix, l)
            for x in range(len(length2files[l])):
                file_index = str(length2files[l][x])
                fname = outprefix + '.' + file_index
                self.assertTrue(filecmp.cmp(fname, infile + '.' + str(l) + '.' + file_index))
                os.unlink(fname)

        # check that limiting the number of files works
        tasks.split_by_base_count(infile, outprefix, 6, 2)
        for i in range(1,4):
            test_file = outprefix + '.' + str(i)
            self.assertTrue(filecmp.cmp(test_file, os.path.join(data_dir, 'sequences_test_split_test.fa.6.limit2.') + str(i)))
            os.unlink(test_file)

        # check big sequence not broken
        tasks.split_by_base_count(os.path.join(data_dir, 'sequences_test_split_test.long.fa'), outprefix, 2)
        self.assertTrue(filecmp.cmp(outprefix + '.1', os.path.join(data_dir, 'sequences_test_split_test.long.fa.2.1')))
        self.assertTrue(filecmp.cmp(outprefix + '.2', os.path.join(data_dir, 'sequences_test_split_test.long.fa.2.2')))
        os.unlink(outprefix + '.1')
        os.unlink(outprefix + '.2')
Example #25
    def test_to_fasta(self):
        '''Test to_fasta'''
        tmpfile = 'tmp.to_fasta'
        infiles = [
            'sequences_test_good_file.fq',
            'sequences_test_gffv3.gff',
            'sequences_test_gffv3.no_FASTA_line.gff',
            'sequences_test.embl',
            'sequences_test.gbk',
            'sequences_test_phylip.interleaved',
            'sequences_test_phylip.interleaved2',
            'sequences_test_phylip.sequential'
        ]
        infiles = [os.path.join(data_dir, x) for x in infiles]
        expected_outfiles = [x + '.to_fasta' for x in infiles]

        for i in range(len(infiles)):
            tasks.to_fasta(infiles[i], tmpfile)
            self.assertTrue(filecmp.cmp(expected_outfiles[i], tmpfile))

        tasks.to_fasta(os.path.join(data_dir, 'sequences_test.fa'), tmpfile, line_length=3)
        self.assertTrue(filecmp.cmp(os.path.join(data_dir, 'sequences_test.line_length3.fa'), tmpfile))
        tasks.to_fasta(os.path.join(data_dir, 'sequences_test_strip_after_whitespace.fa'), tmpfile, strip_after_first_whitespace=True)
        self.assertTrue(filecmp.cmp(os.path.join(data_dir, 'sequences_test_strip_after_whitespace.fa.to_fasta'), tmpfile))
        os.unlink(tmpfile)
Example #26
    def test_bam_extract_01(self):
        TEST_DIR, T_TEST_DIR = self.__get_temp_dirs()

        input_file = TEST_DIR + "test_terg_02.bam"
        output_file = T_TEST_DIR + "test_terg_02.filtered.bam"
        output_file_s = T_TEST_DIR + "test_terg_02.filtered.sam"
        test_file = TEST_DIR + "test_terg_02.filtered.sam"

        # c = BAMExtract(input_file)
        # c.extract("chr21:39000000-40000000", "chr5:1-2", output_file)
        command = ["bin/dr-disco",
                   "bam-extract",
                   "chr21:39000000-40000000",
                   "chr5:1-2",
                   output_file,
                   input_file]

        self.assertEqual(subprocess.call(command), 0)

        # Bam2Sam
        fhq = open(output_file_s, "w")
        fhq.write(pysam.view(output_file))
        fhq.close()

        if not filecmp.cmp(output_file_s, test_file):
            print("diff '" + output_file_s + "' '" + test_file + "'")

        self.assertTrue(filecmp.cmp(output_file_s, test_file))
 def test_real_grammar_and_keyword_file(self):
     self._copy_file_without_generated_keywords(KEYWORD_FILE, TEST_PY_FILE)
     self.addCleanup(support.unlink, TEST_PY_FILE)
     self.assertFalse(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE))
     self.assertEqual((0, b''), self._generate_keywords(GRAMMAR_FILE,
                                                        TEST_PY_FILE))
     self.assertTrue(filecmp.cmp(KEYWORD_FILE, TEST_PY_FILE))
 def test_writeSeqs(self):
     fdb = FastaDB.FastaDB()
     fdb.read_seqs('three.fasta')
     with tempfile.NamedTemporaryFile() as temp:
         fdb.write_seqs(temp.name)
         # self.assertTrue(hashlib.sha256(open(temp.name, 'rb').read()).digest() == hashlib.sha256(open("three.fasta", 'rb').read()).digest())
         self.assertTrue(filecmp.cmp(temp.name, 'three.fasta'))
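The commented-out line above sketches a digest-based equality check. A small standalone helper in that spirit (hypothetical, not part of FastaDB) that compares files by SHA-256 without reading them into memory at once:

import hashlib

def same_contents(path_a, path_b, chunk_size=65536):
    """Return True if the two files are byte-for-byte identical."""
    digests = []
    for path in (path_a, path_b):
        digest = hashlib.sha256()
        with open(path, 'rb') as handle:
            for chunk in iter(lambda: handle.read(chunk_size), b''):
                digest.update(chunk)
        digests.append(digest.digest())
    return digests[0] == digests[1]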
Example #29
    def test_backup(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        tag1 = 'tag 1'
        tag2 = 'tag 2'
        tag3 = 'tag 3'
        tag4 = 'tag 4'
        tags1 = [tag1, tag2]
        tags2 = [tag3, tag4]
        item1 = Item(title1, {Elem.TAGS : tags1})
        item2 = Item(title2, {Elem.TAGS : tags2})
        item3 = Item(title3, {Elem.TAGS : tags1})
        item4 = Item(title4, {Elem.TAGS : tags2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)        

        bak_data_file = 'orgm_test.dat_bak'
        orgm.backup(bak_data_file)
        import filecmp
        self.assertTrue(filecmp.cmp(TEST_DATA_FILE, bak_data_file))
def compare_dl_files(fileExtension):

	# Need to handle gbk in a unique manner as the date will always modify the file slightly so a diff will return false
	if fileExtension == 'gbk':
		newestFile = min(glob.iglob('/Users/jmatsumura/Downloads/VAC1_test2.annotation.*.'+fileExtension), key=os.path.getctime)
		my_cmd = ['diff', '/Users/jmatsumura/mana_dumps/VAC1_test2.annotation.20160329.gbk'] + [newestFile]
		with open('/Users/jmatsumura/mana_dumps/gbk_diff.txt', "w") as outfile:
			subprocess.call(my_cmd, stdout=outfile)
		result = "OK" if os.stat("/Users/jmatsumura/mana_dumps/gbk_diff.txt").st_size < 300 else "FAILED"

	# Similar to the previous, handle by file size differences. 
	elif fileExtension == 'GO_annotation.txt':
		newestFile = min(glob.iglob('/Users/jmatsumura/Downloads/VAC1_test2_'+fileExtension), key=os.path.getctime)
		my_cmd = ['diff', '/Users/jmatsumura/mana_dumps/VAC1_test2_GO_annotation.txt'] + [newestFile]
		with open('/Users/jmatsumura/mana_dumps/GO_diff.txt', "w") as outfile:
			subprocess.call(my_cmd, stdout=outfile)
		f_size = os.stat("/Users/jmatsumura/mana_dumps/GO_diff.txt").st_size
		result = "OK" if ((f_size > 2200000) and (f_size < 2900000)) else "FAILED"

	elif fileExtension == 'tbl' or fileExtension == 'gff3':
		newestFile = min(glob.iglob('/Users/jmatsumura/Downloads/VAC1_test2.annotation.*.'+fileExtension), key=os.path.getctime)
		result = "OK" if filecmp.cmp('/Users/jmatsumura/mana_dumps/VAC1_test2.annotation.20160329.'+fileExtension, newestFile) else "FAILED"

	elif fileExtension == 'sigp':
		newestFile = min(glob.iglob('/Users/jmatsumura/Downloads/sigp4.1_VAC.transcript.9803630972.1_pred.txt'), key=os.path.getctime)
		result = "OK" if filecmp.cmp('/Users/jmatsumura/mana_dumps/sigpOut.txt', newestFile) else "FAILED"

	else:
		newestFile = min(glob.iglob('/Users/jmatsumura/Downloads/VAC1_test2_'+fileExtension), key=os.path.getctime)
		result = "OK" if filecmp.cmp('/Users/jmatsumura/mana_dumps/VAC1_test2_'+fileExtension, newestFile) else "FAILED"

	return result 
Example #31
def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points):
    # This is terribly complex, because we check the list of used DLLs
    # trying to avoid duplicates, and detecting errors with them not
    # being binary identical, so we can report them. And then of course
    # we also need to handle OS specifics.
    # pylint: disable=too-many-branches,too-many-locals

    used_dlls = detectUsedDLLs(source_dir, standalone_entry_points)

    # First make checks and remove some.
    for dll_filename1, sources1 in tuple(iterItems(used_dlls)):
        for dll_filename2, sources2 in tuple(iterItems(used_dlls)):
            if dll_filename1 == dll_filename2:
                continue

            # Colliding basenames are an issue to us.
            if os.path.basename(dll_filename1) != \
               os.path.basename(dll_filename2):
                continue

            # May already have been removed earlier
            if dll_filename1 not in used_dlls:
                continue

            if dll_filename2 not in used_dlls:
                continue

            dll_name = os.path.basename(dll_filename1)

            if Options.isShowInclusion():
                info("""Colliding DLL names for %s, checking identity of \
'%s' <-> '%s'.""" % (
                    dll_name,
                    dll_filename1,
                    dll_filename2,
                ))

            # If two DLLs share the same name, check whether they are in fact
            # identical; this happens at least for OSC and Fedora 20.
            import filecmp
            if filecmp.cmp(dll_filename1, dll_filename2):
                del used_dlls[dll_filename2]
                continue

            # So we have conflicting DLLs, in which case we do not proceed.
            sys.exit("""Error, conflicting DLLs for '%s'.
%s used by:
   %s
different from
%s used by
   %s""" % (dll_name, dll_filename1, "\n   ".join(sources1), dll_filename2,
            "\n   ".join(sources2)))

    dll_map = []

    for dll_filename, sources in iterItems(used_dlls):
        dll_name = os.path.basename(dll_filename)

        target_path = os.path.join(dist_dir, dll_name)

        shutil.copy(dll_filename, target_path)

        dll_map.append((dll_filename, dll_name))

        if Options.isShowInclusion():
            info("Included used shared library '%s' (used by %s)." %
                 (dll_filename, ", ".join(sources)))

    if Utils.getOS() == "Darwin":
        # For MacOS, the binary and the DLLs need to be changed to reflect
        # the relative DLL location in the ".dist" folder.
        for standalone_entry_point in standalone_entry_points:
            fixupBinaryDLLPaths(
                binary_filename=standalone_entry_point[1],
                is_exe=standalone_entry_point is standalone_entry_points[0],
                dll_map=dll_map)

        for _original_path, dll_filename in dll_map:
            fixupBinaryDLLPaths(binary_filename=os.path.join(
                dist_dir, dll_filename),
                                is_exe=False,
                                dll_map=dll_map)

    if Utils.getOS() == "Linux":
        # For Linux, the "rpath" of libraries may be an issue and must be
        # removed.
        for standalone_entry_point in standalone_entry_points[1:]:
            removeSharedLibraryRPATH(standalone_entry_point[1])

        for _original_path, dll_filename in dll_map:
            removeSharedLibraryRPATH(os.path.join(dist_dir, dll_filename))
Example #32
 def _test_checkout(self):
     self.assertTrue(os.path.isfile(self.FOO))
     self.assertTrue(filecmp.cmp(self.FOO, self.orig, shallow=False))
Example #33
def main():
    parser = MyParser(description='This application create components files from cdsl files or .ice from idsl\n'
                                  '\ta) to generate code from a CDSL file:     ' + sys.argv[0].split('/')[-1]
                                  + '   INPUT_FILE.CDSL   OUTPUT_PATH\n'
                                  +'\tb) to generate a new CDSL file:           ' + sys.argv[0].split('/')[-1]
                                  + '   NEW_COMPONENT_DESCRIPTOR.CDSL',
                      formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-I", "--include_dirs", nargs='*', help="Include directories",
                        action=FullPaths, default=[])
    parser.add_argument("-d", '--diff', dest='diff', choices=DIFF_TOOLS, action='store')
    parser.add_argument("input_file", help="The input dsl file")
    parser.add_argument("output_path", nargs='?', help="The path to put the files")
    args = parser.parse_args()

    if args.output_path is None:
        if args.input_file.endswith(".cdsl"):
            generateDummyCDSL(args.input_file)
            generateDummySMDSL("statemachine.smdsl")
            sys.exit(0)
        else:
            print(args.output_path, args.input_file)
            print(parser.error("No output path with non .cdsl file"))
            sys.exit(-1)

    inputFile = args.input_file
    outputPath = args.output_path

    sys.path.append('/opt/robocomp/python')

    new_existing_files = {}

    if inputFile.endswith(".cdsl"):

        component = DSLFactory().from_file(inputFile, includeDirectories=args.include_dirs)
        imports = ''.join( [ imp+'#' for imp in component['imports'] ] )

        # verification
        pool = IDSLPool(imports, args.include_dirs)
        interface_list = component['requires'] + component['implements'] + component['subscribesTo'] + component['publishes']

        for interface_required in interface_list:
            interface_required = interface_required if isinstance(interface_required, str) else interface_required[0]
            if not pool.moduleProviding(interface_required):
                raise rcExceptions.InterfaceNotFound(interface_required, pool.interfaces())

        if component['language'].lower() == 'cpp' or component['language'].lower() == 'cpp11':
            #
            # Check output directory
            #
            if not os.path.exists(outputPath):
                create_directory(outputPath)
            # Create directories within the output directory
            try:
                create_directory(outputPath + "/bin")
                create_directory(outputPath + "/etc")
                create_directory(outputPath + "/src")
            except:
                print('There was a problem creating a directory')
                sys.exit(1)
                pass
            #
            # Generate regular files
            #
            files = [ 'CMakeLists.txt', 'DoxyFile', 'README-STORM.txt', 'README.md', 'etc/config', 'src/main.cpp', 'src/CMakeLists.txt', 'src/CMakeListsSpecific.txt', 'src/commonbehaviorI.h', 'src/commonbehaviorI.cpp', 'src/genericmonitor.h', 'src/genericmonitor.cpp', 'src/config.h', 'src/specificmonitor.h', 'src/specificmonitor.cpp', 'src/genericworker.h', 'src/genericworker.cpp', 'src/specificworker.h', 'src/specificworker.cpp', 'src/mainUI.ui' ]
            specificFiles = [ 'src/specificworker.h', 'src/specificworker.cpp', 'src/CMakeListsSpecific.txt', 'src/mainUI.ui', 'src/specificmonitor.h', 'src/specificmonitor.cpp', 'README.md', 'etc/config' ]
            for f in files:
                ofile = outputPath + '/' + f
                if f in specificFiles and os.path.exists(ofile):
                    print('Not overwriting specific file "'+ ofile +'", saving it to '+ofile+'.new')
                    new_existing_files[os.path.abspath(ofile)] = os.path.abspath(ofile)+'.new'
                    ofile += '.new'
                ifile = "/opt/robocomp/share/robocompdsl/templateCPP/" + f
                if f != 'src/mainUI.ui' or component['gui'] is not None:
                    print('Generating', ofile)
                    run = "cog.py -z -d -D theCDSL="+inputFile + " -D theIDSLs="+imports + ' -D theIDSLPaths='+ '#'.join(args.include_dirs) + " -o " + ofile + " " + ifile
                    run = run.split(' ')
                    ret = Cog().main(run)
                    if ret != 0:
                        print('ERROR')
                        sys.exit(-1)
                    replaceTagsInFile(ofile)
            #
            # Generate interface-dependent files
            #
            for ima in component['implements']:
                im = ima
                if type(im) != type(''):
                    im = im[0]
                if communication_is_ice(ima):
                    for f in [ "SERVANT.H", "SERVANT.CPP"]:
                        ofile = outputPath + '/src/' + im.lower() + 'I.' + f.split('.')[-1].lower()
                        print('Generating ', ofile, ' (servant for', im + ')')
                        # Call cog
                        run = "cog.py -z -d -D theCDSL="+inputFile  + " -D theIDSLs="+imports + ' -D theIDSLPaths='+ '#'.join(args.include_dirs) + " -D theInterface="+im + " -o " + ofile + " " + "/opt/robocomp/share/robocompdsl/templateCPP/" + f
                        run = run.split(' ')
                        ret = Cog().main(run)
                        if ret != 0:
                            print('ERROR')
                            sys.exit(-1)
                        replaceTagsInFile(ofile)

            for imp in component['subscribesTo']:
                im = imp
                if type(im) != type(''):
                    im = im[0]
                if communication_is_ice(imp):
                    for f in [ "SERVANT.H", "SERVANT.CPP"]:
                        ofile = outputPath + '/src/' + im.lower() + 'I.' + f.split('.')[-1].lower()
                        print('Generating ', ofile, ' (servant for', im + ')')
                        # Call cog
                        theInterfaceStr = im
                        if type(theInterfaceStr) == type([]):
                            theInterfaceStr = str(';'.join(im))
                        run = "cog.py -z -d -D theCDSL="+inputFile  + " -D theIDSLs="+imports  + ' -D theIDSLPaths='+ '#'.join(args.include_dirs) + " -D theInterface="+theInterfaceStr + " -o " + ofile + " " + "/opt/robocomp/share/robocompdsl/templateCPP/" + f
                        #print(run
                        run = run.split(' ')
                        ret = Cog().main(run)
                        if ret != 0:
                            print('ERROR')
                            sys.exit(-1)
                        replaceTagsInFile(ofile)
        elif component['language'].lower() == 'python':
            #
            # Check output directory
            #
            if not os.path.exists(outputPath):
                create_directory(outputPath)
            # Create directories within the output directory
            try:
                create_directory(outputPath + "/etc")
                create_directory(outputPath + "/src")
            except:
                print('There was a problem creating a directory')
                sys.exit(1)
                pass

            needStorm = False
            for pub in component['publishes']:
                if communication_is_ice(pub):
                    needStorm = True
            for sub in component['subscribesTo']:
                if communication_is_ice(sub):
                    needStorm = True
            #
            # Generate regular files
            #
            files = [ 'CMakeLists.txt', 'DoxyFile', 'README-STORM.txt', 'README.md', 'etc/config', 'src/main.py', 'src/genericworker.py', 'src/specificworker.py', 'src/mainUI.ui' ]
            specificFiles = [ 'src/specificworker.py', 'src/mainUI.ui', 'README.md', 'etc/config' ]
            for f in files:
                if f == 'src/main.py':
                    ofile = outputPath + '/src/' + component['name'] + '.py'
                else:
                    ofile = outputPath + '/' + f
                if f in specificFiles and os.path.exists(ofile):
                    print('Not overwriting specific file "'+ ofile +'", saving it to '+ofile+'.new')
                    new_existing_files[os.path.abspath(ofile)] = os.path.abspath(ofile) + '.new'
                    ofile += '.new'
                ifile = "/opt/robocomp/share/robocompdsl/templatePython/" + f
                ignoreFile = False
                if f == 'src/mainUI.ui' and component['gui'] is None: ignoreFile = True
                if f == 'CMakeLists.txt' and component['gui'] is None: ignoreFile = True
                if f == 'README-STORM.txt' and needStorm == False: ignoreFile = True
                if not ignoreFile:
                    print('Generating', ofile)
                    run = "cog.py -z -d -D theCDSL="+inputFile + " -D theIDSLs="+imports + ' -D theIDSLPaths='+ '#'.join(args.include_dirs) + " -o " + ofile + " " + ifile
                    run = run.split(' ')
                    ret = Cog().main(run)
                    if ret != 0:
                        print('ERROR')
                        sys.exit(-1)
                    replaceTagsInFile(ofile)
                    if f == 'src/main.py': os.chmod(ofile, os.stat(ofile).st_mode | 0o111)
            #
            # Generate interface-dependent files
            #
            for imp in component['implements']+component['subscribesTo']:
                if type(imp) != type(''):
                    im = imp[0]
                else:
                    im = imp
                if communication_is_ice(imp):
                    for f in [ "SERVANT.PY"]:
                        ofile = outputPath + '/src/' + im.lower() + 'I.' + f.split('.')[-1].lower()
                        print('Generating', ofile, ' (servant for', im + ')')
                        # Call cog
                        run = "cog.py -z -d -D theCDSL="+inputFile  + " -D theIDSLs="+imports  + ' -D theIDSLPaths='+ '#'.join(args.include_dirs) + " -D theInterface="+im + " -o " + ofile + " " + "/opt/robocomp/share/robocompdsl/templatePython/" + f
                        run = run.split(' ')
                        ret = Cog().main(run)
                        if ret != 0:
                            print('ERROR')
                            sys.exit(-1)
                        replaceTagsInFile(ofile)
        else:
            print('Unsupported language', component['language'])


        if component['usingROS'] == True:
            for imp in component['imports']:
                generate_ROS_headers(imp, outputPath + "/src", component, args.include_dirs)

        # Code to launch diff tool on .new files to be compared with their old version
        if args.diff is not None:
            diff_tool,_ = get_diff_tool(prefered=args.diff)
            print("Executing diff tool for existing files. Close if no change is needed.")
            for o_file, n_file in new_existing_files.items():
                if not filecmp.cmp(o_file,n_file):
                    print([diff_tool, o_file, n_file])
                    try:
                        subprocess.call([diff_tool, o_file, n_file])
                    except KeyboardInterrupt as e:
                        print("Comparasion interrupted. All files have been generated. Check this .new files manually:")
                        for o_file2, n_file2 in new_existing_files.items():
                            if not filecmp.cmp(o_file2, n_file2):
                                print("%s %s"%(o_file2,n_file2))
                        break
                    except Exception as e:
                        print("Exception trying to execute %s"%(diff_tool))
                        print(str(e))

                else:
                    print("Binary equal files %s and %s"%(o_file, n_file))

    elif inputFile.endswith(".idsl"):
        # idsl = IDSLParsing.fromFileIDSL(inputFile)
        print('Generating ICE file ', outputPath)
        # Call cog
        run = "cog.py -z -d" + " -D theIDSL="+inputFile + ' -D theIDSLPaths='+ '#'.join(args.include_dirs) +" -o " + outputPath + " /opt/robocomp/share/robocompdsl/TEMPLATE.ICE"
        run = run.split(' ')
        ret = Cog().main(run)
        if ret != 0:
            print('ERROR')
            sys.exit(-1)
        replaceTagsInFile(outputPath)
    def test_common_convert_input_folder(self, copy_test_data_to_data_folder):
        # Execute the target tool on the input file with the arguments.
        command = [
            str(self._executable)
        ]

        # Insert options.
        for key, value in self._data.options.items():
            command.append(key)
            
            # Handle file arguments by discarding the empty value.
            if value != "":
                command.append(value)

        # Handle files to auto include.
        for f in self._data.input:
            if f.auto_include:
                target_path = self._data_folder / f.relative_path / f.name
                command.append(str(target_path))

        # Ensure the data path exists.
        if not self._data_folder.exists():
            self._data_folder.mkdir(parents=True)

        if self._data.name is not None:
            print(f"Starting test with comment: {self._data.name}")

        print(f"Executing command: {command}")

        # Perform test.
        call_result = subprocess.run(command, cwd=self._data_folder)
        
        # Ensure the return code matches the expected code.
        return_code = call_result.returncode

        assert return_code == self._data.return_code, f"Unexpected returncode. Expected {self._data.return_code} but got {return_code}."
        
        # Ensure the expected files are present.
        expected_output_files = []
        for output_file in self._data.output:
            target_path = self._data_folder / output_file.relative_path / output_file.name
            expected_output_files.append(target_path)
        
        missing_files = []
        for expected_output_file in expected_output_files:
            if not expected_output_file.exists():
                missing_files.append(expected_output_file)

        assert len(missing_files) == 0, f"Missing expected files: {missing_files}"
        
        # Ensure the file contents match.
        comparison_errors = []
        for output_file in self._data.output:
            # Find the data file to compare against.
            if output_file.comparision_data is None:
                output_file.comparision_data = output_file.name
            comparison_file = get_tool_data(tool=self._data.tool, name=output_file.comparision_data)
            data_file = self._data_folder / output_file.relative_path / output_file.name

            # Determine comparison to perform based on file extension.
            extension = data_file.suffix.lower()
            if extension in [".log", ".trc", ".asc", ".txt", ".csv"]:
                comparison_result = compare_ignore_line_ending(comparison_file, data_file)
            else:
                comparison_result = filecmp.cmp(comparison_file, data_file)
                
            if not comparison_result:
                comparison_errors.append(output_file.name)

        assert len(comparison_errors) == 0, f"Mismatch in output data for the following file(s): {comparison_errors}"
        
        return
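compare_ignore_line_ending is defined elsewhere in that test suite; a plausible minimal sketch of such a helper (an assumption, not the project's actual implementation) normalizes CRLF and CR to LF before comparing:

def compare_ignore_line_ending(path_a, path_b):
    """Return True if the files match after normalizing CRLF/CR line endings to LF."""
    def normalized(path):
        with open(path, 'rb') as handle:
            return handle.read().replace(b'\r\n', b'\n').replace(b'\r', b'\n')
    return normalized(path_a) == normalized(path_b)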
import os, urllib, json, filecmp, shutil

## set params
airtable_url = "<insert the airtable api url for your table>"
airtable_key = "<insert your airtable api key>"
url = "{}?api_key={}".format(airtable_url, airtable_key)
dest_file = os.path.join("example", "table-data.json")
temp_file = dest_file.replace(".json", "-tmp.json")

## retrieve data from airtable and write to file
response = urllib.urlopen(url)
data = json.loads(response.read())
with open(temp_file, "wb") as outfile:
    json.dump(data, outfile, indent=1)

## compare new file to existing file and replace if necessary
if not filecmp.cmp(temp_file, dest_file):
    shutil.copy(temp_file, dest_file)
os.remove(temp_file)
Example #36
f.close()  # you can omit in most cases as the destructor will call it
f = open('redirect_target.html', 'w')
f.close()

with open("./.htaccess", "a") as myfile:
    myfile.write("RewriteRule ^redirect_target.html$ $2fetch_target.html [L]")

chmod('./.htaccess', 0644)
chmod('fetch_target.html', 0644)
chmod('redirect_target.html', 0644)

system(
    "wget -O- https://www.cs.ubc.ca/~mglgms/mta/redirect_target.html >out.txt 2> /dev/null"
)
import filecmp
if filecmp.cmp('out.txt', 'fetch_target.html'):
    print 'htaccess works. Hurray!'
else:
    remove('.htaccess')
remove('out.txt')

user = raw_input("Administrator User: "******"admin"
print "Administrator '" + user + "' created"
while True:
    password = getpass.getpass("Administrator Password: "******"Re-type Administrator Password: ")
    if password == password_match:
        break
    print 'Password was re-typed incorrectly'

entries = []
Example #37
def compare(file1, file2):
    if not filecmp.cmp(file1, file2):
        print("Failed test in {}".format(os.path.dirname(file1)))
    else:
        os.remove(file2)
Example #38
def run_ota(source, target, payload_path, tempdir, output_dir):
    """Run an OTA on host side"""
    payload = update_payload.Payload(payload_path)
    payload.Init()
    if source and zipfile.is_zipfile(source):
        source = zipfile.ZipFile(source)
    if target and zipfile.is_zipfile(target):
        target = zipfile.ZipFile(target)
    source_exist = source and (isinstance(source, zipfile.ZipFile)
                               or os.path.exists(source))
    target_exist = target and (isinstance(target, zipfile.ZipFile)
                               or os.path.exists(target))

    old_partitions = []
    new_partitions = []
    expected_new_partitions = []
    for part in payload.manifest.partitions:
        name = part.partition_name
        old_image = os.path.join(tempdir, "source_" + name + ".img")
        new_image = os.path.join(tempdir, "target_" + name + ".img")
        if part.HasField("old_partition_info"):
            assert source_exist, \
                "source target file must point to a valid zipfile or directory " + \
                source
            print("Extracting source image for", name)
            extract_img(source, name, old_image)
        if target_exist:
            print("Extracting target image for", name)
            extract_img(target, name, new_image)

        old_partitions.append(old_image)
        scratch_image_name = new_image + ".actual"
        new_partitions.append(scratch_image_name)
        with open(scratch_image_name, "wb") as fp:
            fp.truncate(part.new_partition_info.size)
        expected_new_partitions.append(new_image)

    delta_generator_args = ["delta_generator", "--in_file=" + payload_path]
    partition_names = [
        part.partition_name for part in payload.manifest.partitions
    ]
    if payload.manifest.partial_update:
        delta_generator_args.append("--is_partial_update")
    if payload.is_incremental:
        delta_generator_args.append("--old_partitions=" +
                                    ":".join(old_partitions))
    delta_generator_args.append("--partition_names=" +
                                ":".join(partition_names))
    delta_generator_args.append("--new_partitions=" + ":".join(new_partitions))

    print("Running ", " ".join(delta_generator_args))
    subprocess.check_output(delta_generator_args)

    valid = True
    if not target_exist:
        for part in new_partitions:
            print("Output written to", part)
            shutil.copy(part, output_dir)
        return
    for (expected_part, actual_part, part_name) in \
            zip(expected_new_partitions, new_partitions, partition_names):
        if filecmp.cmp(expected_part, actual_part):
            print("Partition `{}` is valid".format(part_name))
        else:
            valid = False
            print(
                "Partition `{}` is INVALID expected image: {} actual image: {}"
                .format(part_name, expected_part, actual_part))

    if not valid and sys.stdout.isatty():
        input(
            "Paused to investigate invalid partitions, press any key to exit.")
Example #39
def recreate_cm(folder_path: str):
    # read inputs
    gather_designs = {}
    with open(os.path.join(folder_path, "FINAL_summary"), "r") as input_sum:
        input_sum.readline()
        for line in input_sum:
            if line.strip() == '':
                continue
            parts = line.strip().split('\t')
            gather_designs[parts[0]] = parts[5]
    gather_results = {}
    with open(os.path.join(folder_path, "FINAL_all"), 'r') as input_all:
        input_all.readline()
        for line in input_all:
            if line.strip() == '':
                continue
            parts = line.strip().split('\t')
            res_map = gather_results.get(parts[0], {})
            res_map[parts[1]] = parts[4]
            gather_results[parts[0]] = res_map
    # start calculations
    folder = vienna.LiveRNAfold()
    folder.start()
    for design_code, sequence in gather_designs.items():
        structure = folder.fold(sequence)['MFE']
        cm_path = os.path.join(folder_path, "{}.cm".format(design_code))
        sto_path = os.path.join(folder_path, "{}.sto".format(design_code))
        if os.path.exists(cm_path):
            continue
        temp_cm_path = "{}_tmp".format(cm_path)
        temp_sto_path = "{}_tmp".format(sto_path)
        if not infernal.generate_single_seq_cm(sequence, cm_path, structure):
            print("Could not generate single cm for {}".format(design_code))
            exit(-1)
        if not infernal.align_sequences({'{}'.format(design_code): sequence},
                                        cm_path, sto_path):
            print("Could not generate single sto for {}".format(design_code))
            exit(-1)
        design_results = gather_results.get(design_code)
        no_found = 0
        temp_fasta = infernal.generate_fasta(design_results)
        while no_found < len(design_results):
            results = infernal.search_cm(cm_path, temp_fasta.name, inc_e=10.0)
            sto_parts = {}
            sto_target = get_sto_targets(sto_path)
            for item in results:
                if item['target name'] not in sto_target:
                    sto_parts[item['target name']] = item['sequence']
            if len(sto_parts) == 0:
                print(
                    "ERROR: no new sequences found for {} maxed at {} sequences out of {} original\nListing: {}"
                    .format(design_code, len(sto_target), len(design_results),
                            [
                                res for res in design_results.keys()
                                if res not in get_sto_targets(sto_path)
                            ]))
                break
            if not infernal.align_sequences(
                    sto_parts, cm_path, temp_sto_path, in_align_path=sto_path):
                print("Could not generate sto for {}".format(design_code))
                exit(-1)
            if filecmp.cmp(sto_path, temp_sto_path, shallow=False):
                print("ERROR: {} missing codes: {}".format(
                    design_code, [
                        res for res in design_results.keys()
                        if res not in get_sto_targets(sto_path)
                    ]))
                shutil.move(temp_sto_path, sto_path)
                break
            shutil.move(temp_sto_path, sto_path)
            if not infernal.generate_cm(sto_path, temp_cm_path):
                print("Could not generate cm for {}".format(design_code))
                exit(-1)
            shutil.move(temp_cm_path, cm_path)
            no_found = len(results)
        os.remove(temp_fasta.name)
Example #40
def cmp(f1, f2):
    return filecmp.cmp(f1, f2, shallow=True)
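This wrapper keeps filecmp's default shallow mode: when the os.stat() signatures (type, size, mtime) of both files are identical the contents are not read at all, otherwise filecmp still falls back to a byte comparison. A small sketch contrasting the two modes, using hypothetical file names:

import filecmp, shutil

shutil.copy("a.bin", "b.bin")  # hypothetical files; contents now identical
# shallow=True (default): equal stat signatures short-circuit to True,
# otherwise the contents are compared anyway.
print(filecmp.cmp("a.bin", "b.bin"))
# shallow=False: always compares the actual file contents.
print(filecmp.cmp("a.bin", "b.bin", shallow=False))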
Example #41
def test_imagefolder(remove_json_files=True):
    """
    Test simulating resnet50 dataset pipeline.
    """
    data_dir = "../data/dataset/testPK/data"
    ds.config.set_seed(1)

    # define data augmentation parameters
    rescale = 1.0 / 255.0
    shift = 0.0
    resize_height, resize_width = 224, 224
    weights = [
        1.0, 0.1, 0.02, 0.3, 0.4, 0.05, 1.2, 0.13, 0.14, 0.015, 0.16, 1.1
    ]

    # Constructing DE pipeline
    sampler = ds.WeightedRandomSampler(weights, 11)
    data1 = ds.ImageFolderDatasetV2(data_dir, sampler=sampler)
    data1 = data1.repeat(1)
    data1 = data1.map(input_columns=["image"],
                      operations=[vision.Decode(True)])
    rescale_op = vision.Rescale(rescale, shift)

    resize_op = vision.Resize((resize_height, resize_width), Inter.LINEAR)
    data1 = data1.map(input_columns=["image"],
                      operations=[rescale_op, resize_op])
    data1 = data1.batch(2)

    # Serialize the dataset pre-processing pipeline.
    # data1 should still work after saving.
    ds.serialize(data1, "imagenet_dataset_pipeline.json")
    ds1_dict = ds.serialize(data1)
    assert validate_jsonfile("imagenet_dataset_pipeline.json") is True

    # Print the serialized pipeline to stdout
    ds.show(data1)

    # Deserialize the serialized json file
    data2 = ds.deserialize(json_filepath="imagenet_dataset_pipeline.json")

    # Serialize the pipeline we just deserialized.
    # The content of the json file should be the same as the previous serialization.
    ds.serialize(data2, "imagenet_dataset_pipeline_1.json")
    assert validate_jsonfile("imagenet_dataset_pipeline_1.json") is True
    assert filecmp.cmp('imagenet_dataset_pipeline.json',
                       'imagenet_dataset_pipeline_1.json')

    # Deserialize the latest json file again
    data3 = ds.deserialize(json_filepath="imagenet_dataset_pipeline_1.json")
    data4 = ds.deserialize(input_dict=ds1_dict)
    num_samples = 0
    # Iterate and compare the data in the original pipeline (data1) against the deserialized pipelines (data2, data3, data4)
    for item1, item2, item3, item4 in zip(data1.create_dict_iterator(),
                                          data2.create_dict_iterator(),
                                          data3.create_dict_iterator(),
                                          data4.create_dict_iterator()):
        assert np.array_equal(item1['image'], item2['image'])
        assert np.array_equal(item1['image'], item3['image'])
        assert np.array_equal(item1['label'], item2['label'])
        assert np.array_equal(item1['label'], item3['label'])
        assert np.array_equal(item3['image'], item4['image'])
        assert np.array_equal(item3['label'], item4['label'])
        num_samples += 1

    logger.info("Number of data in data1: {}".format(num_samples))
    assert num_samples == 6

    # Remove the generated json file
    if remove_json_files:
        delete_json_files()
Example #42
    def test_cmap2act(self):
        """Export colormap to act file."""
        colors.cmap2act('viridis', filename=self.f)
        ref = os.path.join(self.ref_dir, 'viridis.act')

        assert filecmp.cmp(self.f, ref)
Example #43
from_file = FileSource()
image_sources.append(from_file)
image_sources.append(CalCamSaveSource())

example_file = os.path.join(paths.calcampath, 'usercode_examples',
                            'image_source.py_')
user_files = [
    fname for fname in os.listdir(paths.image_sources) if fname.endswith('.py')
]

# See if the user already has an image source definition example file, and if it's up to date. If not, create it.
# If the definitions might have changed, warn the user.
if 'Example.py' in user_files:
    is_current_version = filecmp.cmp(os.path.join(paths.image_sources,
                                                  'Example.py'),
                                     example_file,
                                     shallow=False)
    if not is_current_version:
        shutil.copy2(example_file,
                     os.path.join(paths.image_sources, 'Example.py'))
        print(
            '[Calcam Import] The latest image source definition example is different from your user copy. Your existing copy has been updated. If you get image source related errors, you may need to check and edit the CAD definition files in '
            + paths.image_sources)
    user_files.remove('Example.py')
else:
    shutil.copy2(example_file, os.path.join(paths.image_sources, 'Example.py'))
    print('[Calcam Import] Created image source definition example in ' +
          os.path.join(paths.image_sources, 'Example.py'))

user_im_sources = []
# Go through all the python files which aren't examples, and import the image source definitions
Example #44
 def compare_files(self, file1: PATH_TYPES, file2: PATH_TYPES) -> bool:
     """Compare two files, returning true if they are the same and False if not."""
     return filecmp.cmp(str(file1), str(file2), shallow=False)
def generate_exercise(env, spec_path, exercise, check=False):
    """
    Render the test suite for an exercise. If check is:
    True: verify that the current tests file matches the rendered output
    False: save the rendered output to the tests file
    """
    slug = os.path.basename(exercise)
    meta_dir = os.path.join(exercise, ".meta")
    plugins_module = None
    plugins_name = "plugins"
    plugins_source = os.path.join(meta_dir, f"{plugins_name}.py")
    try:
        if os.path.isfile(plugins_source):
            plugins_spec = importlib.util.spec_from_file_location(
                plugins_name, plugins_source
            )
            plugins_module = importlib.util.module_from_spec(plugins_spec)
            sys.modules[plugins_name] = plugins_module
            plugins_spec.loader.exec_module(plugins_module)
        spec = load_canonical(slug, spec_path)
        additional_tests = load_additional_tests(slug)
        spec["additional_cases"] = additional_tests
        template_path = posixpath.join(slug, ".meta", "template.j2")
        template = env.get_template(template_path)
        tests_path = os.path.join(exercise, f"{to_snake(slug)}_test.py")
        spec["has_error_case"] = has_error_case(spec["cases"])
        if plugins_module is not None:
            spec[plugins_name] = plugins_module
        logger.debug(f"{slug}: attempting render")
        rendered = template.render(**spec)
        with NamedTemporaryFile("w", delete=False) as tmp:
            logger.debug(f"{slug}: writing render to tmp file {tmp.name}")
            tmp.write(rendered)
        try:
            logger.debug(f"{slug}: formatting tmp file {tmp.name}")
            format_file(tmp.name)
        except FileNotFoundError as e:
            logger.error(f"{slug}: the black utility must be installed")
            return False

        if check:
            try:
                check_ok = True
                if not os.path.isfile(tmp.name):
                    logger.debug(f"{slug}: tmp file {tmp.name} not found")
                    check_ok = False
                if not os.path.isfile(tests_path):
                    logger.debug(f"{slug}: tests file {tests_path} not found")
                    check_ok = False
                if check_ok and not filecmp.cmp(tmp.name, tests_path):
                    with open(tests_path) as f:
                        current_lines = f.readlines()
                    with open(tmp.name) as f:
                        rendered_lines = f.readlines()
                    diff = difflib.unified_diff(
                        current_lines,
                        rendered_lines,
                        fromfile=f"[current] {os.path.basename(tests_path)}",
                        tofile=f"[generated] {tmp.name}",
                    )
                    logger.debug(f"{slug}: ##### DIFF START #####")
                    for line in diff:
                        logger.debug(line.strip())
                    logger.debug(f"{slug}: ##### DIFF END #####")
                    check_ok = False
                if not check_ok:
                    logger.error(
                        f"{slug}: check failed; tests must be regenerated with bin/generate_tests.py"
                    )
                    return False
                logger.debug(f"{slug}: check passed")
            finally:
                logger.debug(f"{slug}: removing tmp file {tmp.name}")
                os.remove(tmp.name)
        else:
            logger.debug(f"{slug}: moving tmp file {tmp.name}->{tests_path}")
            shutil.move(tmp.name, tests_path)
            print(f"{slug} generated at {tests_path}")
    except (TypeError, UndefinedError, SyntaxError) as e:
        logger.debug(str(e))
        logger.error(f"{slug}: generation failed")
        return False
    except TemplateNotFound as e:
        logger.debug(str(e))
        logger.info(f"{slug}: no template found; skipping")
    except FileNotFoundError as e:
        logger.debug(str(e))
        logger.info(f"{slug}: no canonical data found; skipping")
    return True
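A minimal sketch of how this generator might be driven in check mode; the Jinja2 environment setup, the spec path, and the exercise directory below are assumptions for illustration, not the project's actual configuration:

import sys
import jinja2

env = jinja2.Environment(
    loader=jinja2.FileSystemLoader("exercises"),  # assumed template root
    keep_trailing_newline=True,
)
# check=True only verifies that the committed tests match the rendered output.
if not generate_exercise(env, "problem-specifications", "exercises/two-fer", check=True):
    sys.exit(1)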
Example #46
    try:
        conf = json.loads(cs)
    except:
        logging.error('json parse err')
        return False

    version_url = conf['AutoUpdate']['version']
    zip_url = conf['AutoUpdate']['package']

    logging.debug('to download %s' % version_url)
    if not __dlfile(version_url, 'tmp_version'):
        logging.error('can\'t download version file; check the url')
        return False

    logging.debug('to compare version')
    if os.path.isfile('local_version') and filecmp.cmp('local_version',
                                                       'tmp_version'):
        logging.debug('version NOT changed!')
        return False

    logging.debug('to download update zip package, from %s' % zip_url)
    if not __dlfile(zip_url, 'tmp_package.zip'):
        logging.error('can\'t download update package; check the url!!!')
        return False

    try:
        extract_path = tempfile.mkdtemp('.zonekey', 'au.')
        logging.debug('using temp path of %s' % extract_path)
    except:
        logging.error(
            'can\'t create tmp path for extraction; check disk free space!')
        return False
Example #47
def hsdis(args, copyToDir=None):
    """download the hsdis library

    This is needed to support HotSpot's assembly dumping features.
    By default it downloads the Intel syntax version; use the 'att' argument to install the AT&T syntax version."""
    flavor = None
    if mx.get_arch() == "amd64":
        flavor = mx.get_env('HSDIS_SYNTAX')
        if flavor is None:
            flavor = 'intel'
        if 'att' in args:
            flavor = 'att'

    libpattern = mx.add_lib_suffix('hsdis-' + mx.get_arch() + '-' +
                                   mx.get_os() + '-%s')

    sha1s = {
        'att/hsdis-amd64-windows-%s.dll':
        'bcbd535a9568b5075ab41e96205e26a2bac64f72',
        'att/hsdis-amd64-linux-%s.so':
        '36a0b8e30fc370727920cc089f104bfb9cd508a0',
        'att/hsdis-amd64-darwin-%s.dylib':
        'c1865e9a58ca773fdc1c5eea0a4dfda213420ffb',
        'intel/hsdis-amd64-windows-%s.dll':
        '6a388372cdd5fe905c1a26ced614334e405d1f30',
        'intel/hsdis-amd64-linux-%s.so':
        '0d031013db9a80d6c88330c42c983fbfa7053193',
        'intel/hsdis-amd64-darwin-%s.dylib':
        '67f6d23cbebd8998450a88b5bef362171f66f11a',
        'hsdis-sparcv9-solaris-%s.so':
        '970640a9af0bd63641f9063c11275b371a59ee60',
        'hsdis-sparcv9-linux-%s.so':
        '0c375986d727651dee1819308fbbc0de4927d5d9',
    }

    if flavor:
        flavoredLib = flavor + "/" + libpattern
    else:
        flavoredLib = libpattern
    if flavoredLib not in sha1s:
        mx.warn(
            "hsdis with flavor '{}' not supported on this platform or architecture"
            .format(flavor))
        return

    sha1 = sha1s[flavoredLib]
    lib = flavoredLib % (sha1)
    path = join(_suite.get_output_root(), lib)
    if not exists(path):
        sha1path = path + '.sha1'
        mx.download_file_with_sha1(
            'hsdis',
            path, ['https://lafo.ssw.uni-linz.ac.at/pub/hsdis/' + lib],
            sha1,
            sha1path,
            True,
            True,
            sources=False)

    overwrite = True
    if copyToDir is None:
        # Try to install hsdis into JAVA_HOME
        overwrite = False
        base = mx.get_jdk().home
        if exists(join(base, 'jre')):
            copyToDir = join(base, 'jre', 'lib')
        else:
            copyToDir = join(base, 'lib')

    if exists(copyToDir):
        dest = join(copyToDir, mx.add_lib_suffix('hsdis-' + mx.get_arch()))
        if exists(dest) and not overwrite:
            import filecmp
            # Only issue warning if existing lib is different
            if not filecmp.cmp(path, dest):
                mx.warn('Not overwriting existing {} with {}'.format(
                    dest, path))
        else:
            try:
                shutil.copy(path, dest)
                mx.log('Copied {} to {}'.format(path, dest))
            except IOError as e:
                mx.warn('Could not copy {} to {}: {}'.format(
                    path, dest, str(e)))
Example #48
    def find_mozconfig(self, env=os.environ):
        """Find the active mozconfig file for the current environment.

        This emulates the logic in mozconfig-find.

        1) If ENV[MOZCONFIG] is set, use that
        2) If $TOPSRCDIR/mozconfig or $TOPSRCDIR/.mozconfig exists, use it.
        3) If both exist or if there are legacy locations detected, error out.

        The absolute path to the found mozconfig will be returned on success.
        None will be returned if no mozconfig could be found. A
        MozconfigFindException will be raised if there is a bad state,
        including conditions from #3 above.
        """
        # Check for legacy methods first.

        if 'MOZ_MYCONFIG' in env:
            raise MozconfigFindException(MOZ_MYCONFIG_ERROR)

        env_path = env.get('MOZCONFIG', None) or None
        if env_path is not None:
            if not os.path.isabs(env_path):
                potential_roots = [self.topsrcdir, os.getcwd()]
                # Attempt to eliminate duplicates for e.g.
                # self.topsrcdir == os.curdir.
                potential_roots = set(
                    os.path.abspath(p) for p in potential_roots)
                existing = [
                    root for root in potential_roots
                    if os.path.exists(os.path.join(root, env_path))
                ]
                if len(existing) > 1:
                    # There are multiple files, but we might have a setup like:
                    #
                    # somedirectory/
                    #   srcdir/
                    #   objdir/
                    #
                    # MOZCONFIG=../srcdir/some/path/to/mozconfig
                    #
                    # and be configuring from the objdir.  So even though we
                    # have multiple existing files, they are actually the same
                    # file.
                    mozconfigs = [
                        os.path.join(root, env_path) for root in existing
                    ]
                    if not all(
                            map(
                                lambda p1, p2: filecmp.cmp(
                                    p1, p2, shallow=False), mozconfigs[:-1],
                                mozconfigs[1:])):
                        raise MozconfigFindException(
                            'MOZCONFIG environment variable refers to a path that '
                            + 'exists in more than one of ' +
                            ', '.join(potential_roots) +
                            '. Remove all but one.')
                elif not existing:
                    raise MozconfigFindException(
                        'MOZCONFIG environment variable refers to a path that '
                        + 'does not exist in any of ' +
                        ', '.join(potential_roots))

                env_path = os.path.join(existing[0], env_path)
            elif not os.path.exists(env_path):  # non-relative path
                raise MozconfigFindException(
                    'MOZCONFIG environment variable refers to a path that '
                    'does not exist: ' + env_path)

            if not os.path.isfile(env_path):
                raise MozconfigFindException(
                    'MOZCONFIG environment variable refers to a '
                    'non-file: ' + env_path)

        srcdir_paths = [
            os.path.join(self.topsrcdir, p)
            for p in self.DEFAULT_TOPSRCDIR_PATHS
        ]
        existing = [p for p in srcdir_paths if os.path.isfile(p)]

        if env_path is None and len(existing) > 1:
            raise MozconfigFindException('Multiple default mozconfig files '
                                         'present. Remove all but one. ' +
                                         ', '.join(existing))

        path = None

        if env_path is not None:
            path = env_path
        elif len(existing):
            assert len(existing) == 1
            path = existing[0]

        if path is not None:
            return os.path.abspath(path)

        deprecated_paths = [
            os.path.join(self.topsrcdir, s)
            for s in self.DEPRECATED_TOPSRCDIR_PATHS
        ]

        home = env.get('HOME', None)
        if home is not None:
            deprecated_paths.extend(
                [os.path.join(home, s) for s in self.DEPRECATED_HOME_PATHS])

        for path in deprecated_paths:
            if os.path.exists(path):
                raise MozconfigFindException(MOZCONFIG_LEGACY_PATH %
                                             (path, self.topsrcdir))

        return None
Example #49
def generate_report():
    # Reference to variables containing details for each rack
    global rack_1_name, rack_2_name
    global rack_1_fullpath, rack_2_fullpath
    global rack_1_outputdir, rack_2_outputdir
    global rack_1_shortpath, rack_2_shortpath
    global rack_1_node_id_map, rack_2_node_id_map

    html_report_path = os.getcwd() + "/rack_comparison_" + strftime(
        "%y%m%d_%H%M%S", localtime()) + ".html"

    # Initiate a html report
    fd = open(html_report_path, 'a')
    try:
        # Print the pre-formatted basic html tags to the output file before processing the main content
        fd.write(print_header())

        # Print the header row of the main table which will contain the details on each parameters
        fd.write("""
<table class="main">
<tr border=5px>
  <th class="param_name" rowspan=2>Parameter Name</th>
  <th class="param_name" rowspan=2>Component Type</th>
  <th class="rack_col" colspan=2>Rack """ + rack_1_shortpath + """</th>
  <th class="rack_col" colspan=2>Rack """ + rack_2_shortpath + """</th>
  <th class="diff_col" rowspan=2>Difference</th>
  <th class="status" rowspan=2>Status</th>
</tr>
<tr border=5px> 
  <th style="width: 7%;">Node Name</th>
  <th style="width: 20%;">Details</th>
  <th style="width: 7%;">Node Name</th>
  <th style="width: 20%;">Details</th>
</tr> 
""")

        global temp_identical_path
        iden_fd = open(temp_identical_path, 'a+')

        global temp_different_path
        diff_fd = open(temp_different_path, 'a+')

        # Loop through each check as gathered in the check_list set
        for checkname in sorted(check_list.keys()):
            rowspan = str(len(check_list[checkname]))
            count = 0
            global total_count, identical_count
            total_count += 1
            identical_bool = "true"
            buffer = ""
            # Loop through data from each node regarding 'checkname' variable
            for node_id in sorted(check_list[checkname]):
                buffer = buffer + "<tr border=5px>\n"
                checkname_string = checkname.replace('_', ' ')
                if count == 0:
                    # Prepare the cell containing the name of the parameter to be checked
                    buffer = buffer + "<td rowspan=" + rowspan + ">" + checkname_string + "</td>\n"
                #print checkname
                #print sorted(check_list[checkname])
                buffer = buffer + "<td>" + to_string(node_id) + "</td>"

                rack_1_check_filename = rack_1_outputdir + checkname + "__" + node_id
                rack_2_check_filename = rack_2_outputdir + checkname + "__" + node_id

                # Prepare the columns containing data for each parameter to be checked, gathered from each rack respectively
                buffer = buffer + prepare_rack_columns(
                    fd, rack_1_node_id_map, node_id, rack_1_shortpath,
                    checkname, rack_1_check_filename)
                buffer = buffer + prepare_rack_columns(
                    fd, rack_2_node_id_map, node_id, rack_2_shortpath,
                    checkname, rack_2_check_filename)

                # Prepare the column containing differences for each parameter between two racks
                buffer = buffer + "<td>"
                if (os.path.exists(rack_1_check_filename)
                        and os.path.exists(rack_2_check_filename)):
                    diff_str = subprocess.Popen(
                        ["diff", rack_1_check_filename, rack_2_check_filename],
                        stdout=subprocess.PIPE).communicate()[0]
                    if not diff_str or diff_str.isspace():
                        diff_str = "No difference"
                    diff_stream = cStringIO.StringIO(diff_str)
                    div_id = "diff_" + node_id + "_" + checkname
                    buffer = buffer + print_content(diff_stream, div_id)
                else:
                    if (not os.path.exists(rack_1_check_filename)):
                        buffer = buffer + "Data from Rack 1 is unavailable for this parameter<br/>"
                    if (not os.path.exists(rack_2_check_filename)):
                        buffer = buffer + "Data from Rack 2 is unavailable for this parameter<br/>"
                buffer = buffer + "</td>"

                # Prepare the column containing the status of each parameter, i.e. whether it's identical on both racks or not
                if (os.path.exists(rack_1_check_filename)
                        and os.path.exists(rack_2_check_filename)
                        and filecmp.cmp(rack_1_check_filename,
                                        rack_2_check_filename)):
                    status = "<span class=\"status_PASS\">IDENTICAL</span>"
                else:
                    status = "<span class=\"status_FAIL\">DIFFERENT</span>"
                    identical_bool = "false"
                buffer = buffer + "<td>" + status + "</td>"
                buffer = buffer + "</tr>"
                count += 1
            if identical_bool == "true":
                identical_count += 1
                iden_fd.write(buffer)
            else:
                diff_fd.write(buffer)
                # Reset the boolean tracker for the next check
                identical_bool = "true"

        # After all the rows have been processed, flush data rows to the html report
        # organized by parameters that are different to be shown earlier, and identical latter towards the bottom
        diff_fd.seek(0)
        fd.write(diff_fd.read())
        diff_fd.close()

        iden_fd.seek(0)
        fd.write(iden_fd.read())
        iden_fd.close()

        # End of the loop, print the rest of the footer html tags to complete the html report
        fd.write("""
</table>
</body>
<br><a href=\"#\" onclick=\"javascript:processForm();\"><div id=\"results\">Switch to old format</div></a>
</html>
""")
    finally:
        fd.close()

    # Append a summary table at the top of the html report
    insert_summary_table(html_report_path)
    return html_report_path
Example #50
def copyfile(src, dst, rnx_ver=2):
    """
    Copies a file from path src to path dst.
    If a file already exists at dst, it will not be overwritten, but:
     * If it is the same as the source file, do nothing
     * If it is different to the source file, pick a new name for the copy that
       is different and unused, then copy the file there (if rnx_ver=2)
     * If rnx_ver == 3, RINEX 3 file names are already comprehensive (they
       include start time and duration), so the existing dst is kept unless
       the source file is larger, in which case it is replaced
    Returns the path to the copy.
    """
    if not os.path.exists(src):
        raise ValueError('Source file does not exist: {}'.format(src))

    # make the folders if they don't exist
    # careful! racing condition between different workers
    try:
        dst_dir = os.path.dirname(dst)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
    except OSError:
        # some other process created the folder an instant before
        pass

    # Keep trying to copy the file until it works
    if rnx_ver < 3:
        # only use this method for RINEX 2
        # RINEX 3 files should have distinct names as a default if the files are different
        dst_gen = _increment_filename(dst)

    while True:
        if rnx_ver < 3:
            dst = next(dst_gen)

        # Check if there is a file at the destination location
        if os.path.exists(dst):

            # If the namesake is the same as the source file, then we don't
            # need to do anything else.
            if filecmp.cmp(src, dst):
                return dst
            else:
                # DDG: if the rinex version is == 3 and the files have the same name:
                # 1) if dst size is < than src, replace file
                # 2) if dst size is > than src, do nothing
                # for RINEX 2 files, loop over and find a different filename
                if rnx_ver >= 3:
                    if os.path.getsize(src) > os.path.getsize(dst):
                        os.remove(dst)
                        if do_copy_op(src, dst):
                            return dst
                        else:
                            raise OSError('File exists during copy of RINEX 3 file: ' + dst)
                    else:
                        return dst
        else:
            if do_copy_op(src, dst):
                # If we get to this point, then the write has succeeded
                return dst
            else:
                if rnx_ver >= 3:
                    raise OSError('Problem while copying RINEX 3 file: ' + dst)
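The RINEX 2 branch above depends on an _increment_filename generator that is not included in this excerpt. A hedged sketch of what such a generator might look like, yielding the original destination first and then numbered variants (naming scheme assumed):

import itertools
import os

def _increment_filename(path):
    """Yield path, then path with .1, .2, ... inserted before the extension (sketch)."""
    yield path
    root, ext = os.path.splitext(path)
    for n in itertools.count(1):
        yield "{}.{}{}".format(root, n, ext)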
Example #51
def gen_equal_output_map(config):
    """Formats 'in_files' with configs inside the 'out_dir' with Uncrustify and
       groups formatted files with equal content together.
       Expects config filename format generated by write_config_files

    :param config: configuration object, expects that it was processed by
                   check_config
    :return: dict of files with equal content
                     key   -- group index
                     value -- filepath list
    """

    # maps that will hold configurations that produce the same formatted files
    equal_output_map = {}
    # map len counter
    map_val_idx = 0

    # iterate through all generated config file names

    for cfg_path in sorted(iglob('%s/*.cfg' % config["out_dir"])):
        for in_file_idx in range(len(config["in_files"])):
            # extract substring from config file name (removes __unc.cfg)
            splits_file = cfg_path.split("__unc")
            if len(splits_file) < 2:
                raise Exception('split with "__unc" | Wrong split len: %d' %
                                len(splits_file))

            out_path = ("%s__%d" % (splits_file[0], in_file_idx))

            # gen formatted files with uncrustify binary
            proc = Popen([
                config["unc_bin"],
                "-c",
                cfg_path,
                "-f",
                config["in_files"][in_file_idx],
                "-o",
                out_path,
            ])
            proc.wait()
            if proc.returncode != 0:
                continue

            # populate 'equal_output_map' map
            if len(equal_output_map) == 0:
                equal_output_map[0] = [out_path]
                map_val_idx += 1
            else:
                found_flag = False
                for i in range(map_val_idx):
                    # compare first file of group i with the generated file
                    if cmp(equal_output_map[i][0], out_path):
                        equal_output_map[i].append(out_path)
                        found_flag = True
                        break
                # create new group if files do not match
                if not found_flag:
                    equal_output_map[map_val_idx] = [out_path]
                    map_val_idx += 1

    return equal_output_map
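A sketch of the configuration object this function expects, using the key names accessed above (unc_bin, out_dir, in_files); the concrete values are illustrative only:

config = {
    "unc_bin": "/usr/bin/uncrustify",        # path to the Uncrustify binary
    "out_dir": "build/format-out",           # directory containing the *__unc.cfg files
    "in_files": ["src/a.cpp", "src/b.cpp"],  # source files to format with each config
}
for group_idx, paths in gen_equal_output_map(config).items():
    print(group_idx, paths)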
Example #52
    def test(self):
        if not self.should_test():
            return

        cache = self.scheme + self.scheme_sep + self.bucket + self.sep + str(
            uuid.uuid4())

        ret = main(['config', 'cache.' + self.cache_scheme, 'myrepo'])
        self.assertEqual(ret, 0)
        ret = main(['remote', 'add', 'myrepo', cache])
        self.assertEqual(ret, 0)

        remote_name = 'myremote'
        remote_key = str(uuid.uuid4())
        remote = self.scheme + self.scheme_sep + self.bucket + self.sep + remote_key

        ret = main(['remote', 'add', remote_name, remote])
        self.assertEqual(ret, 0)

        self.dvc = Project('.')

        foo_key = remote_key + self.sep + self.FOO
        bar_key = remote_key + self.sep + self.BAR

        foo_path = self.scheme + self.scheme_sep + self.bucket + self.sep + foo_key
        bar_path = self.scheme + self.scheme_sep + self.bucket + self.sep + bar_key

        # Using both plain and remote notation
        out_foo_path = 'remote://' + remote_name + '/' + self.FOO
        out_bar_path = bar_path

        self.write(self.bucket, foo_key, self.FOO_CONTENTS)

        sleep()

        import_stage = self.dvc.imp(out_foo_path, 'import')
        self.assertTrue(os.path.exists('import'))
        self.assertTrue(filecmp.cmp('import', self.FOO, shallow=False))

        import_remote_stage = self.dvc.imp(out_foo_path,
                                           out_foo_path + '_imported')

        cmd_stage = self.dvc.run(outs=[out_bar_path],
                                 deps=[out_foo_path],
                                 cmd=self.cmd(foo_path, bar_path))

        self.write(self.bucket, foo_key, self.BAR_CONTENTS)

        sleep()

        self.dvc.status()

        stages = self.dvc.reproduce(import_stage.path)
        self.assertEqual(len(stages), 1)
        self.assertTrue(os.path.exists('import'))
        self.assertTrue(filecmp.cmp('import', self.BAR, shallow=False))

        stages = self.dvc.reproduce(cmd_stage.path)
        self.assertEqual(len(stages), 1)

        self.dvc.gc()

        self.dvc.remove(cmd_stage.path, outs_only=True)
        self.dvc.checkout(cmd_stage.path)
def test_bulk_put_default_options():
    result = invoke([
        'os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE,
        '--bucket-name', bulk_put_bucket_name, '--src-dir',
        root_bulk_put_folder
    ])

    # No failures or skips and we uploaded everything
    parsed_result = parse_json_response_from_mixed_output(result.output)
    assert parsed_result['skipped-objects'] == []
    assert parsed_result['upload-failures'] == {}
    assert len(
        parsed_result['uploaded-objects']
    ) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)

    # Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
    download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
    invoke([
        'os', 'object', 'bulk-download', '--namespace', util.NAMESPACE,
        '--bucket-name', bulk_put_bucket_name, '--download-dir',
        download_folder
    ])
    object_name_set = set()
    for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
        for file in file_list:
            source_file_path = os.path.join(dir_name, file)
            downloaded_file_path = source_file_path.replace(
                root_bulk_put_folder, download_folder)

            assert os.path.exists(downloaded_file_path)
            assert filecmp.cmp(source_file_path,
                               downloaded_file_path,
                               shallow=False)

            # Sanity check that we're reporting back that we uploaded the right files
            assert get_object_name_from_path(
                root_bulk_put_folder,
                source_file_path) in parsed_result['uploaded-objects']
            object_name_set.add(
                get_object_name_from_path(root_bulk_put_folder,
                                          source_file_path))

    # If we try and put it in the same bucket without --overwrite then everything should be skipped. There should be prompts
    result = invoke([
        'os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE,
        '--bucket-name', bulk_put_bucket_name, '--src-dir',
        root_bulk_put_folder
    ])
    parsed_result = parse_json_response_from_mixed_output(result.output)
    assert 'Are you sure you want to overwrite it?' in result.output
    assert set(parsed_result['skipped-objects']) == object_name_set
    assert parsed_result['upload-failures'] == {}
    assert parsed_result['uploaded-objects'] == {}

    # If we say to --no-overwrite then everything should be skipped. There should be no prompts
    result = invoke([
        'os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE,
        '--bucket-name', bulk_put_bucket_name, '--src-dir',
        root_bulk_put_folder, '--no-overwrite'
    ])
    parsed_result = parse_json_response_from_mixed_output(result.output)
    assert 'Are you sure you want to overwrite it?' not in result.output
    assert set(parsed_result['skipped-objects']) == object_name_set
    assert parsed_result['upload-failures'] == {}
    assert parsed_result['uploaded-objects'] == {}

    # Now we force it
    result = invoke([
        'os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE,
        '--bucket-name', bulk_put_bucket_name, '--src-dir',
        root_bulk_put_folder, '--overwrite'
    ])
    parsed_result = parse_json_response_from_mixed_output(result.output)
    assert parsed_result['skipped-objects'] == []
    assert parsed_result['upload-failures'] == {}
    assert len(parsed_result['uploaded-objects']) == len(object_name_set)
    for object_name in object_name_set:
        assert object_name in parsed_result['uploaded-objects']

    shutil.rmtree(download_folder)
import filecmp
import os
test_res = os.system(
    'python3 frequentwords.py --mainfile b-test1.txt --file2 b-test2.txt > b-test-r.txt'
)

if (filecmp.cmp('b-test-r-correct.txt', 'b-test-r.txt')):
    print("Test: PASSED")
else:
    print("Test: FAILED")
def _perform_mng_test(mng):
    # test dry run for new asset
    data_path = os.path.join(test_path, "testdata", "some_data.json")
    mng.storage_provider.new(data_path,
                             "category-test/some-data",
                             "0.0",
                             dry_run=True)
    with pytest.raises(Exception):
        mng.fetch_asset("category-test/some-data")

    # test updating an inexistant asset
    data_path = os.path.join(test_path, "testdata", "some_data.json")
    with pytest.raises(errors.AssetDoesNotExistError):
        mng.storage_provider.update(data_path,
                                    "category-test/some-data",
                                    version="0.0")

    # create the asset
    mng.storage_provider.new(data_path, "category-test/some-data", "0.0")
    # check metadata
    mng.storage_provider.get_asset_meta("category-test/some-data", "0.0")

    # test dry run for update asset
    mng.storage_provider.update(data_path,
                                "category-test/some-data",
                                version="0.1",
                                dry_run=True)
    with pytest.raises(Exception):
        mng.fetch_asset("category-test/some-data:0.1")

    # update the asset
    mng.storage_provider.update(data_path,
                                "category-test/some-data",
                                version="0.1")

    # check that it is present
    mng.storage_provider.get_asset_meta("category-test/some-data", "0.1")

    # rewrite the asset fails
    with pytest.raises(errors.AssetAlreadyExistsError):
        mng.storage_provider.update(data_path,
                                    "category-test/some-data",
                                    version="0.1")

    # pushing via new fails
    with pytest.raises(errors.AssetAlreadyExistsError):
        mng.storage_provider.new(data_path,
                                 "category-test/some-data",
                                 version="0.0")

    mng.storage_provider.update(data_path,
                                "category-test/some-data",
                                version="1.0")

    # # update a major version that does not exist
    # with pytest.raises(errors.AssetMajorVersionDoesNotExistError):
    #     mng.storage_provider.update(data_path, "category-test/some-data", major="10")

    # check that it is present
    mng.storage_provider.get_asset_meta("category-test/some-data", "1.0")

    # fetch the pinned asset
    fetched_path = mng.fetch_asset("category-test/some-data:1.0")
    assert filecmp.cmp(fetched_path, data_path)
    # fetch the major version asset
    fetched_path = mng.fetch_asset("category-test/some-data:1")
    assert filecmp.cmp(fetched_path, data_path)
    # fetch the latest asset
    fetched_path = mng.fetch_asset("category-test/some-data")
    assert filecmp.cmp(fetched_path, data_path)

    # fetch the latest asset from cache with full info
    fetched_asset_dict = mng.fetch_asset("category-test/some-data",
                                         return_info=True)
    assert fetched_asset_dict["path"], fetched_path
    assert fetched_asset_dict["from_cache"] is True
    assert fetched_asset_dict["version"] == "1.0"

    assert list(mng.storage_provider.iterate_assets()) == [
        ("category-test/some-data", ["1.0", "0.1", "0.0"])
    ]

    # pushing via new works
    mng.storage_provider.update(data_path,
                                "category-test/some-data",
                                version="1.1")

    # check that it is present
    mng.storage_provider.get_asset_meta("category-test/some-data", "1.1")

    fetched_asset_dict = mng.fetch_asset("category-test/some-data",
                                         return_info=True)
    assert fetched_asset_dict["path"], fetched_path
    assert fetched_asset_dict["from_cache"] is False
    assert fetched_asset_dict["version"] == "1.1"

    assert list(mng.storage_provider.iterate_assets()) == [
        ("category-test/some-data", ["1.1", "1.0", "0.1", "0.0"]),
    ]
def test_bulk_put_get_delete_with_exclusions(object_storage_client):
    exclusion_test_folder = os.path.join('tests', 'temp',
                                         'os_bulk_upload_exclusion_test')
    if not os.path.exists(exclusion_test_folder):
        os.makedirs(exclusion_test_folder)

    # Make some files for include/exclude
    folders_to_files = {
        '': ['test_file1.txt', 'test_file2.png'],
        'subfolder': ['blah.pdf', 'hello.txt', 'testfile3.png'],
        'subfolder/subfolder2':
        ['xyz.jpg', 'blag.txt', 'byz.jpg', 'testfile4.png']
    }
    for folder, files in six.iteritems(folders_to_files):
        folder_path = os.path.join(exclusion_test_folder, folder)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        for file in files:
            file_path = os.path.join(folder_path, file)
            with open(file_path, 'w') as f:
                # For non-text extension types this won't create a valid file, but for testing it's probably OK
                f.write(generate_random_string(CONTENT_STRING_LENGTH))

    result = invoke([
        'os',
        'object',
        'bulk-upload',
        '--namespace',
        util.NAMESPACE,
        '--bucket-name',
        bulk_put_bucket_name,
        '--src-dir',
        exclusion_test_folder,
        '--object-prefix',
        'exclusion_test/',
        '--exclude',
        '*.txt',
        '--exclude',
        '*.ps1',  # Shouldn't match anything
        '--exclude',
        'subfolder/subfolder2/xyz.jpg',
        '--exclude',
        'subfolder/[spqr]lah.pdf'  # blah.pdf should still be included because it's not slah.pdf, plah.pdf, qlah.pdf or rlah.pdf
    ])
    parsed_result = parse_json_response_from_mixed_output(result.output)
    assert parsed_result['skipped-objects'] == []
    assert parsed_result['upload-failures'] == {}

    expected_uploaded_files = [
        '{}{}'.format('exclusion_test/',
                      'test_file2.png'), '{}{}'.format('exclusion_test/',
                                                       'subfolder/blah.pdf'),
        '{}{}'.format('exclusion_test/', 'subfolder/testfile3.png'),
        '{}{}'.format('exclusion_test/', 'subfolder/subfolder2/byz.jpg'),
        '{}{}'.format('exclusion_test/', 'subfolder/subfolder2/testfile4.png')
    ]

    # Check that we uploaded what we said we did
    assert len(
        parsed_result['uploaded-objects']) == len(expected_uploaded_files)
    for f in expected_uploaded_files:
        assert f in parsed_result['uploaded-objects']

    download_folder_base = os.path.join(
        'tests', 'temp', 'verify_os_bulk_upload_exclusion_test')
    verify_downloaded_folders_for_inclusion_exclusion_tests(
        expected_uploaded_files=expected_uploaded_files,
        source_folder=exclusion_test_folder,
        download_folder=download_folder_base,
        download_prefix_no_slash='exclusion_test')

    # Download objects with exclusions to make sure that works
    target_download_folder = os.path.join(download_folder_base,
                                          'get_with_exclude')
    invoke([
        'os',
        'object',
        'bulk-download',
        '--namespace',
        util.NAMESPACE,
        '--bucket-name',
        bulk_put_bucket_name,
        '--download-dir',
        target_download_folder,
        '--prefix',
        'exclusion_test/',
        '--exclude',
        '*.jpg',
        '--exclude',
        'subfolder/subfolder2/*.png',
        '--exclude',
        'subfolder/blah.pdf',
    ])

    assert not os.path.exists(
        os.path.join(target_download_folder, 'exclusion_test', 'subfolder',
                     'blah.pdf'))
    assert not os.path.exists(
        os.path.join(target_download_folder, 'exclusion_test', 'subfolder',
                     'subfolder2', 'byz.jpg'))
    assert not os.path.exists(
        os.path.join(target_download_folder, 'exclusion_test', 'subfolder',
                     'subfolder2', 'testfile4.png'))

    assert get_count_of_files_in_folder_and_subfolders(
        target_download_folder) == 2
    assert os.path.exists(
        os.path.join(target_download_folder, 'exclusion_test',
                     'test_file2.png'))
    assert os.path.exists(
        os.path.join(target_download_folder, 'exclusion_test', 'subfolder',
                     'testfile3.png'))

    assert filecmp.cmp(
        os.path.join(exclusion_test_folder, 'test_file2.png'),
        os.path.join(target_download_folder, 'exclusion_test',
                     'test_file2.png'))
    assert filecmp.cmp(
        os.path.join(exclusion_test_folder, 'subfolder', 'testfile3.png'),
        os.path.join(target_download_folder, 'exclusion_test', 'subfolder',
                     'testfile3.png'))

    # Delete objects with exclusions
    result = invoke([
        'os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE,
        '--bucket-name', bulk_put_bucket_name, '--prefix', 'exclusion_test/',
        '--exclude', '*.jpg', '--exclude', 'subfolder/blah.pdf', '--dry-run'
    ])
    parsed_dry_run_result = parse_json_response_from_mixed_output(
        result.output)
    assert len(parsed_dry_run_result['deleted-objects']) == 3

    result = invoke([
        'os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE,
        '--bucket-name', bulk_put_bucket_name, '--prefix', 'exclusion_test/',
        '--exclude', '*.jpg', '--exclude', 'subfolder/blah.pdf', '--force'
    ])
    parsed_result = parse_json_response_from_mixed_output(result.output)
    assert parsed_result['delete-failures'] == {}
    assert set(parsed_result['deleted-objects']) == set(
        parsed_dry_run_result['deleted-objects'])

    list_objects_responses = oci_cli.objectstorage_cli_extended.retrying_list_objects(
        client=object_storage_client,
        request_id=None,
        namespace=util.NAMESPACE,
        bucket_name=bulk_put_bucket_name,
        prefix='exclusion_test/',
        start=None,
        end=None,
        limit=1000,
        delimiter=None,
        fields='name',
        retrieve_all=True)
    remaining_objects = []
    for response in list_objects_responses:
        remaining_objects.extend(
            map(lambda obj: obj.name, response.data.objects))
    assert len(remaining_objects) == 2
    assert '{}{}'.format('exclusion_test/',
                         'subfolder/blah.pdf') in remaining_objects
    assert '{}{}'.format('exclusion_test/',
                         'subfolder/subfolder2/byz.jpg') in remaining_objects

    shutil.rmtree(target_download_folder)
    shutil.rmtree(exclusion_test_folder)
Example #57
    def run(self, server):
        """Execute the test assuming it's a python program.
        If the test aborts, print its output to stdout, and raise
        an exception. Else, compare result and reject files.
        If there is a difference, print it to stdout and raise an
        exception. The exception is raised only if is_force flag is
        not set."""
        diagnostics = "unknown"
        save_stdout = sys.stdout
        try:
            self.skip = 0
            if os.path.exists(self.skip_cond):
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                execfile(self.skip_cond, dict(locals(), **server.__dict__))
                sys.stdout.close()
                sys.stdout = save_stdout
            if not self.skip:
                sys.stdout = FilteredStream(self.tmp_result)
                stdout_fileno = sys.stdout.stream.fileno()
                self.execute(server)
            self.is_executed_ok = True
        except Exception as e:
            traceback.print_exc(e)
            diagnostics = str(e)
        finally:
            if sys.stdout and sys.stdout != save_stdout:
                sys.stdout.close()
            sys.stdout = save_stdout
        self.is_executed = True

        if not self.skip:
            if self.is_executed_ok and os.path.isfile(self.result):
                self.is_equal_result = filecmp.cmp(self.result, self.tmp_result)
        else:
            self.is_equal_result = 1

        if self.args.valgrind:
            self.is_valgrind_clean = \
                not check_valgrind_log(server.valgrind_log)

        if self.skip:
            print "[ skip ]"
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif self.is_executed_ok and self.is_equal_result and self.is_valgrind_clean:
            print "[ pass ]"
            if os.path.exists(self.tmp_result):
                os.remove(self.tmp_result)
        elif (self.is_executed_ok and not self.is_equal_result and not
              os.path.isfile(self.result)):
            os.rename(self.tmp_result, self.result)
            print "[ NEW ]"
        else:
            os.rename(self.tmp_result, self.reject)
            print "[ fail ]"

            where = ""
            if not self.is_executed_ok:
                self.print_diagnostics(self.reject, "Test failed! Last 10 lines of the result file:")
                where = ": test execution aborted, reason '{0}'".format(diagnostics)
            elif not self.is_equal_result:
                self.print_unidiff()
                where = ": wrong test output"
            elif not self.is_valgrind_clean:
                os.remove(self.reject)
                self.print_diagnostics(server.valgrind_log, "Test failed! Last 10 lines of valgrind.log:")
                where = ": there were warnings in valgrind.log"

            if not self.args.is_force:
                raise RuntimeError("Failed to run test " + self.name + where)
Example #58
def _ValidateShardMaps(args):
    """Validate that the shard maps, csv files, etc. are consistent."""
    del args
    errors = []

    tempdir = tempfile.mkdtemp()
    try:
        builders = _GetBuilderPlatforms(builders=None, waterfall='all')
        for builder in builders:
            output_file = os.path.join(
                tempdir, os.path.basename(builder.timing_file_path))
            _FilterTimingData(builder, output_file)
            if not filecmp.cmp(builder.timing_file_path, output_file):
                errors.append(
                    '{timing_data} is not up to date. Please run '
                    '`./generate_perf_sharding.py update-timing --filter-only` '
                    'to regenerate it.'.format(
                        timing_data=builder.timing_file_path))
    finally:
        shutil.rmtree(tempdir)

    # Check that bot_platforms.py matches the actual shard maps
    for platform in bot_platforms.ALL_PLATFORMS:
        platform_benchmark_names = set(
            b.name for b in platform.benchmark_configs) | set(
                e.name for e in platform.executables)
        shard_map_benchmark_names = _ParseBenchmarks(
            platform.shards_map_file_path)
        for benchmark in platform_benchmark_names - shard_map_benchmark_names:
            errors.append(
                'Benchmark {benchmark} is supposed to be scheduled on platform '
                '{platform} according to '
                'bot_platforms.py, but it is not yet scheduled. If this is a new '
                'benchmark, please rename it to UNSCHEDULED_{benchmark}, and then '
                'contact '
                'Telemetry and Chrome Client Infra team to schedule the benchmark. '
                'You can email chrome-benchmarking-request@ to get started.'.
                format(benchmark=benchmark, platform=platform.name))
        for benchmark in shard_map_benchmark_names - platform_benchmark_names:
            errors.append(
                'Benchmark {benchmark} is scheduled on shard map {path}, but '
                'bot_platforms.py '
                'says that it should not be on that shard map. This could be because '
                'the benchmark was deleted. If that is the case, you can use '
                '`generate_perf_sharding deschedule` to deschedule the benchmark '
                'from the shard map.'.format(
                    benchmark=benchmark, path=platform.shards_map_file_path))

    # Check that every official benchmark is scheduled on some shard map.
    # TODO(crbug.com/963614): Note that this check can be deleted if we
    # find some way other than naming the benchmark with prefix "UNSCHEDULED_"
    # to make it clear that a benchmark is not running.
    scheduled_benchmarks = set()
    for platform in bot_platforms.ALL_PLATFORMS:
        scheduled_benchmarks = scheduled_benchmarks | _ParseBenchmarks(
            platform.shards_map_file_path)
    for benchmark in (bot_platforms.OFFICIAL_BENCHMARK_NAMES -
                      scheduled_benchmarks):
        errors.append(
            'Benchmark {benchmark} is an official benchmark, but it is not '
            'scheduled to run anywhere. Please rename it to '
            'UNSCHEDULED_{benchmark}'.format(benchmark=benchmark))

    for error in errors:
        print('*', textwrap.fill(error, 70), '\n', file=sys.stderr)
    if errors:
        return 1
    return 0
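
A self-contained sketch of the same up-to-date check, reduced to a single file:
regenerate the artifact into a temporary directory and compare it against the
checked-in copy with filecmp.cmp. generate_file and the paths are hypothetical
stand-ins, not part of the original tooling.

import filecmp
import os
import shutil
import tempfile

def is_up_to_date(checked_in_path, generate_file):
    tempdir = tempfile.mkdtemp()
    try:
        regenerated = os.path.join(tempdir, os.path.basename(checked_in_path))
        generate_file(regenerated)  # write a fresh copy of the artifact
        return filecmp.cmp(checked_in_path, regenerated)
    finally:
        shutil.rmtree(tempdir)
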
Example #59
def _main():
    out_dir = args.out_dir
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    log_file = args.log_file if args.log_file is not None else os.path.join(
        out_dir, "image_organization.log")
    logger = create_logger(log_file, "image_organizer")

    logger.info("started new session")

    print("Listing subtree...")
    all_files = list_subtree(args.source_dir, recursive=args.recursive)

    media_files = []
    for f in tqdm(all_files, desc="Filtering non-media files"):
        try:
            if is_media(f):
                media_files.append(f)
        except OSError:
            logger.warning(f"OS error while checking if '{f}' is a media file")

    for src_file in tqdm(media_files, desc="Organizing Media"):
        try:
            time_taken = get_media_time(src_file, args.valid_mod_time)
        except ValueError:
            logger.warning(f"failed to get time from '{src_file}'")
            continue

        dst_dir = os.path.join(out_dir,
                               f"{time_taken.year:04}_{time_taken.month:02}")
        if not os.path.exists(dst_dir):
            os.mkdir(dst_dir)
        dst_filename = os.path.basename(src_file)
        dst_file = os.path.join(dst_dir, dst_filename)

        if os.path.exists(dst_file):
            if filecmp.cmp(src_file, dst_file):
                if not args.copy:
                    if args.dry_run:
                        logger.info(
                            f"Would remove '{src_file}' - duplicate of '{dst_file}'"
                        )
                    else:
                        os.remove(src_file)
                        logger.info(
                            f"Remove '{src_file}' - duplicate of '{dst_file}'")
                else:
                    logger.info(
                        f"Ignoring '{src_file}' - duplicate of '{dst_file}'")
            else:
                logger.warning(
                    f"failed to handle '{src_file}' - destination file '{dst_file}' already exists"
                )
            continue

        if args.dry_run:
            if args.copy:
                logger.info(f"Would copy '{src_file}' to '{dst_file}'")
            else:
                logger.info(f"Would move '{src_file}' to '{dst_file}'")
        else:
            try:
                if args.copy:
                    logger.info(f"Copy '{src_file}' to '{dst_file}'")
                    shutil.copy2(src_file, dst_file)
                else:
                    logger.info(f"Move '{src_file}' to '{dst_file}'")
                    shutil.move(src_file, dst_file)
            except Exception:
                logger.error(f"Error copying/moving {src_file} -> {dst_file}")

    if not args.copy and not args.dry_run:
        delete_empty_dirs(args.source_dir, args.recursive)
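
One note on the duplicate check above: filecmp.cmp() defaults to shallow=True,
which can report two files as equal from their os.stat() signatures (file type,
size, and mtime) without reading their contents. A stricter variant of the
check, with hypothetical paths, would force a byte-by-byte comparison:

import filecmp

def is_duplicate(src_file, dst_file):
    # shallow=False makes filecmp.cmp compare file contents, not just stat data.
    return filecmp.cmp(src_file, dst_file, shallow=False)
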
Example #60
                                                   not isSingleFolderUpdate)

    #Get res directory
    targetResDirectory = helper.GetResDirectory(options.path)

    #Copy the files
    if not matchingFiles:
        print "No matching files found"

    for k, v in matchingFiles.iteritems():
        targetStringsXml = helper.GetTargetStringsXml(targetResDirectory, k)

        if not os.path.exists(os.path.dirname(targetStringsXml)):
            os.makedirs(os.path.dirname(targetStringsXml))

        try:
            if not os.path.exists(targetStringsXml) or not filecmp.cmp(
                    matchingFiles[k], targetStringsXml):
                print "Replacing", targetStringsXml
                shutil.copy(matchingFiles[k], targetStringsXml)
            else:
                print "Skipping", targetStringsXml, ", it is already up to date"
        except Exception:
            print "Error. No Crowdin file for ", targetStringsXml

    print "Deleting", extractDir
    shutil.rmtree(extractDir)

    print "Deleting", zipPath[0]
    os.remove(zipPath[0])
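
A minimal sketch of the copy-only-if-changed pattern used above, with
hypothetical source and destination paths (the original fragment targets
Python 2, but the pattern itself is version-independent):

import filecmp
import os
import shutil

def copy_if_changed(src, dst):
    dst_dir = os.path.dirname(dst)
    if dst_dir and not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    # Copy only when the destination is missing or differs from the source.
    if not os.path.exists(dst) or not filecmp.cmp(src, dst):
        shutil.copy(src, dst)
        return True   # replaced or newly created
    return False      # already up to date
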