Example #1
def main():
  parser = argparse.ArgumentParser(
      description="Update the pinned version of the network service " +
                  "and the corresponding checked out mojoms.")
  parser.add_argument(
      "--custom-build", action="store_true",
      help="Indicates that this is a build with change that is not committed. "
           "The change must be uploaded to Rietveld.")
  parser.add_argument(
      "version",
      help="Version to roll to. If --custom-build is not specified, this "
           "should be a Chromium origin/master commit; otherwise, this should "
           "be in the format of custom_build_base_<base_commit>_"
           "issue_<rietveld_issue>_patchset_<rietveld_patchset>.")
  args = parser.parse_args()

  roll(args.version, args.custom_build)

  try:
    patch.patch("network_service_patches")
  except subprocess.CalledProcessError:
    print "ERROR: Roll failed due to a patch not applying"
    print "Fix the patch to apply, commit the result, and re-run this script"
    return 1

  return 0
Example #2
def main():
    parser = argparse.ArgumentParser(
        description="Update the mojo repo's " +
        "snapshot of things imported from chromium.")
    parser.add_argument("chromium_dir", help="chromium source dir")
    args = parser.parse_args()
    rev(args.chromium_dir)
    patch.patch()
    return 0
Example #3
def segment(imagename, csvfile, destinaton, imagenumber=1, oversample=1):
    totalDataset = []
    msg('starting blobDetection and the image is ' + imagename)
    sleep(3)
    bloblist = blobDetection.segment(imagename)
    #print("bloblist",bloblist)
    #print("creating patches of the images")
    #t = time()
    #imagelist = patch.patch(imagename,bloblist,True)
    #t = time() - t
    #print("Time taken for patching is " +str(t) + "secs")
    #imagelist = []
    #print("fetching the coordinates of mitotic nuclues from the .csv files in dataset")
    msg('fetching mitotic nuclei')
    t = time()
    mitoticcentroidlist = showMitoticNucleus.getCentroidList(csvfile)
    t = time() - t
    #print("Time taken for fetching mitotic nuclei is " +str(t) + "secs")
    #print("labelling the images")
    msg('labelling and the size of mitotic nuclei is ' + str(len(bloblist)))
    sleep(2)
    t = time()
    mitotic_list, nonmitotic_list, misCount = labeller.labeller(
        bloblist, mitoticcentroidlist)
    t = time() - t
    #print("Time taken for Labelling is " +str(t) + "secs")
    msg('creating image patches')
    t = time()
    mitotic_imagelist = patch.patch(imagename, mitotic_list, True, oversample)
    nonmitotic_imagelist = patch.patch(imagename, nonmitotic_list, False)
    t = time() - t
    #print("Time taken for patching images is " +str(t)+"secs")
    msg('saving images and the size is ')
    for i in range(len(mitotic_imagelist)):
        address = destinaton + 'mitotic/img_'
        address += str(imagenumber) + "_"
        address += str(i) + ".jpg"
        cv.imwrite(address, mitotic_imagelist[i])

    for j in range(len(nonmitotic_imagelist)):
        d = str(nonmitotic_list[j])
        address = destinaton + 'non_mitotic/img_'
        address += str(imagenumber) + "_"
        address += str(j) + ".jpg"
        #command = "mv "+address+" ./segmented_data/non_mitotic/"
        cv.imwrite(address, nonmitotic_imagelist[j])
        #system(command)
        #msg(address+str(nonmitotic_imagelist[j].shape))
        percentage = getPercentage(j, len(nonmitotic_imagelist))
        msg("Saving segmented non-mitotic images" + str(percentage) + "%")
        sleep(0.001)
    #print('misCount', misCount)
    #msg('images saved to ' + destinaton)
    return misCount
Example #4
 def test_connect_is_called(self):
     with patch('parsing.connector.insert_links_ips',
                MagicMock()) as url_links, patch(
         'parsing.connector.insert_link', MagicMock()) as link, \
             patch('parsing.connector.insert_ip', MagicMock()) as ip, \
             patch('parsing.connector.insert_url', MagicMock()) as url:
         insert('a', 'b', 'c', 'd', FakeConnector())
         self.assertEqual(url_links.call_count, 1)
         self.assertEqual(link.call_count, 1)
         self.assertEqual(ip.call_count, 1)
         self.assertEqual(url.call_count, 1)
Example #5
def extractPackage(package, tarballsDir, sourcesDir, patchesDir):
    if not isdir(sourcesDir):
        makedirs(sourcesDir)
    sourceDirName = package.getSourceDirName()
    packageSrcDir = joinpath(sourcesDir, sourceDirName)
    if isdir(packageSrcDir):
        rmtree(packageSrcDir)
    extract(joinpath(tarballsDir, package.getTarballName()), sourcesDir, TopLevelDirRenamer(sourceDirName))
    diffPath = joinpath(patchesDir, sourceDirName + ".diff")
    if isfile(diffPath):
        for diff in Diff.load(diffPath):
            patch(diff, sourcesDir)
            print "Patched:", diff.getPath()
Example #6
def extractPackage(package, tarballsDir, sourcesDir, patchesDir):
    if not isdir(sourcesDir):
        makedirs(sourcesDir)
    sourceDirName = package.getSourceDirName()
    packageSrcDir = joinpath(sourcesDir, sourceDirName)
    if isdir(packageSrcDir):
        rmtree(packageSrcDir)
    extract(joinpath(tarballsDir, package.getTarballName()), sourcesDir,
            TopLevelDirRenamer(sourceDirName))
    diffPath = joinpath(patchesDir, sourceDirName + '.diff')
    if isfile(diffPath):
        for diff in Diff.load(diffPath):
            patch(diff, sourcesDir)
            print 'Patched:', diff.getPath()
Example #7
def segment(imagename, csvfile, mama):
    totalDataset = []
    #imagename = "actualImage.jpg"
    #csvfile = "A00_01.csv"
    #print("segmenting the image into images of nucleui")
    #print('in blobDetection')
    #print(imagename)

    bloblist = blobDetection.segment(imagename)
    #print("bloblist",bloblist)
    #print("creating patches of the images")
    #t = time()
    #imagelist = patch.patch(imagename,bloblist,True)
    #t = time() - t
    #print("Time taken for patching is " +str(t) + "secs")
    #imagelist = []
    #print("fetching the coordinates of mitotic nuclues from the .csv files in dataset")
    t = time()
    mitoticcentroidlist = showMitoticNucleus.getCentroidList(csvfile)
    t = time() - t
    print("Time taken for fetching mitotic nuclei is " + str(t) + "secs")
    #print("labelling the images")
    t = time()
    mitotic_list, nonmitotic_list, misCount = labeller.labeller(
        bloblist, mitoticcentroidlist)
    t = time() - t
    print("Time taken for Labelling is " + str(t) + "secs")
    #print(labeledlist)
    count = 0
    #print(len(mitotic_list))
    #print('size of the non_mitotic list is '+str(len(nonmitotic_list)))
    t = time()
    mitotic_imagelist = patch.patch(imagename, mitotic_list, True)
    nonmitotic_imagelist = patch.patch(imagename, nonmitotic_list, False)
    t = time() - t
    print("Time taken for patching images is " + str(t) + "secs")

    #print(len(mitotic_list),len(mitotic_imagelist))

    for i in range(len(mitotic_imagelist)):
        cv.imwrite(
            "segmented_data/mitotic/" + "img" + str(mitotic_list[i]) + str(i) +
            ".jpg", mitotic_imagelist[i])

    for j in range(len(nonmitotic_imagelist)):
        cv.imwrite(
            "segmented_data/non_mitotic/" + "img" + str(nonmitotic_list[j]) +
            str(j) + ".jpg", nonmitotic_imagelist[j])
    print('misCount', misCount)
Example #8
def build():
    process_configurations()
    make_common_file()
    make_proof_files()
    try:
        create_cbmc_yaml_files()
    except CalledProcessError as e:
        logging.error(textwrap.dedent("""\
            An error occurred during cbmc-batch generation.
            The error message is: {}
            """.format(str(e))))
        exit(1)

    # Patch headers directly instead of creating patch files.
    patch.patch()
    patch_headers(HEADERS)
Example #9
def migrateFile(
                filePath, compiledPatches, compiledInfos,
                hasPatchModule=False, options=None, encoding="UTF-8"):

    logging.info("  - File: %s" % filePath)

    # Read in original content
    fileContent = filetool.read(filePath, encoding)

    fileId = extractFileContentId(fileContent)

    # Apply patches
    patchedContent = fileContent

    if hasPatchModule and fileId is not None:

        import patch
        tree = treegenerator.createFileTree(tokenizer.parseStream(fileContent))

        # If there were any changes, compile the result
        if patch.patch(fileId, tree):
            options.prettyPrint = True  # make sure it's set
            result = [u'']
            result = pretty.prettyNode(tree, options, result)
            patchedContent = u''.join(result)

    # apply RE patches
    patchedContent = regtool(patchedContent, compiledPatches, True, filePath)
    patchedContent = regtool(patchedContent, compiledInfos, False, filePath)

    # Write file
    if patchedContent != fileContent:
        logging.info("    - %s has been modified. Storing modifications ..." % filePath)
        filetool.save(filePath, patchedContent, encoding)
Example #10
File: migrator.py, Project: Wkasel/qooxdoo
def migrateFile(
                filePath, compiledPatches, compiledInfos,
                hasPatchModule=False, options=None, encoding="UTF-8"):

    logging.info("  - File: %s" % filePath)

    # Read in original content
    fileContent = filetool.read(filePath, encoding)

    fileId = extractFileContentId(fileContent)

    # Apply patches
    patchedContent = fileContent

    if hasPatchModule and fileId is not None:

        import patch
        tree = treegenerator.createSyntaxTree(tokenizer.parseStream(fileContent))

        # If there were any changes, compile the result
        if patch.patch(fileId, tree):
            options.prettyPrint = True  # make sure it's set
            result = [u'']
            result = pretty.prettyNode(tree, options, result)
            patchedContent = u''.join(result)

    # apply RE patches
    patchedContent = regtool(patchedContent, compiledPatches, True, filePath)
    patchedContent = regtool(patchedContent, compiledInfos, False, filePath)

    # Write file
    if patchedContent != fileContent:
        logging.info("    - %s has been modified. Storing modifications ..." % filePath)
        filetool.save(filePath, patchedContent, encoding)
Example #11
    def test_config_shorthand(self):
        with patch(sys, "platform", "darwin"), CaptureStdout() as stdout:
            self.autobuild_call(self.find_data("darwin"), "RelWithDebInfo")
        stdout = stdout.getvalue()
        assert_found_assignment("LL_BUILD_DARWIN_RELEASE", "darwin release",
                                stdout)
        assert_found_assignment("LL_BUILD_RELEASE", "darwin release", stdout)
        assert_found_assignment("SOMETHING_ELSE", "something else", stdout)
        assert_not_found_in(r'^ *LL_BUILD=', stdout)

        with patch(sys, "platform", "darwin"), CaptureStdout() as stdout:
            self.autobuild_call(self.find_data("darwin"), "Release")
        stdout = stdout.getvalue()
        assert_found_assignment("LL_BUILD_DARWIN_RELEASE", "darwin release",
                                stdout)
        assert_found_assignment("LL_BUILD_RELEASE", "darwin release", stdout)
        assert_found_assignment("LL_BUILD", "darwin release", stdout)
        assert_found_assignment("SOMETHING_ELSE", "something else", stdout)
Example #12
def create_patch(benchmark, seeds):
    try:
        # Determine all paths
        base_data = os.path.join(support.create_path_for_seeds(config.data_dir), benchmark)
        div_symfile_path = os.path.join(support.create_path_for_seeds(config.data_dir, *seeds), benchmark, 'symfile')
        patch_dir = os.path.join(support.create_path_for_seeds(config.patches_dir, *seeds), benchmark)
        print('************ Creating patch for benchmark ' + patch_dir + ' **********')

        # Make the subdirectories and create the patch
        os.makedirs(patch_dir)
        patch.patch(base_data, seeds, div_symfile_path=div_symfile_path, output_dir=patch_dir)

        # Verify the patch is correct
        if not patch.patch(base_data, seeds, div_symfile_path=div_symfile_path, patch_path=os.path.join(patch_dir, 'patch')):
            os.remove(os.path.join(patch_dir, 'patch'))
            logging.getLogger().error('Patch verification failed for ' + patch_dir)
    except Exception:
        logging.getLogger().exception('Patch creation failed for ' + patch_dir)
Example #13
def main():
    parser = argparse.ArgumentParser(
        description="Update the mojo repo's " +
        "snapshot of things imported from chromium.")
    parser.add_argument("chromium_dir", help="chromium source dir")
    args = parser.parse_args()
    pre_roll_commit = system(["git", "rev-parse", "HEAD"],
                             cwd=mojo_root_dir).strip()

    rev(args.chromium_dir)
    patch.patch()

    print "Restoring files whose contents don't track Chromium"
    for f in files_not_to_roll:
        system(["git", "checkout", pre_roll_commit, "--", f],
               cwd=mojo_root_dir)
    if files_not_to_roll:
        commit("Restored pre-roll versions of files that don't get rolled")
    return 0
Example #14
def test_register():
    # We're going to mess with the registry of updaters; restore it when we're
    # done to avoid breaking other tests that rely on the real values.
    with patch(update, "_updaters", {}):
        update._register("1.1", "1.2", lambda config: config + ["to 1.2"])
        update._register("1.1", "1.3", lambda config: config + ["to 1.3"])
        update._register("1.2", "1.4", lambda config: config + ["to 1.4"])

        # directly examining _updaters is fragile; update.py maintenance may
        # require changing this test; but _register() has no other observable
        # side effects
        assert_equals(len(update._updaters["1.1"]), 2)
Example #15
File: mail.py, Project: taget/auto_fetch
	def get_mailobjs(self, subject):
		'''
		get mail obj
		'''
		mailobjs = []
		number = 0
		
		p = imaplib.IMAP4_SSL(self._mailserver, self._port)
		p.login(self._user, self._password)
		
		p.select(self._maildir)	
		
		# Read Unseen mail
		# Marked as seen
		# Find $subject and mark Flagged
		mailresp, mailitmes = p.search(None, "UNSEEN")
		for num in mailitmes[0].split():
			typ, data = p.fetch(num, '(BODY.PEEK[])')
			
			mailText = data[0][1]
			mail_message = \
			              email.message_from_string(mailText)
			              
			number = number + 1
			# find subject
			mail_subject = mail_message['subject']
			if re.search(subject, mail_subject):
				try:
					obj = patch(mail_message)
					mailobjs.append(obj)
				except:
					self.logger.info("%s seems not a\
					            valid patch" % mail_subject)
				else:
					# Mark Flagged
					self.logger.debug("Flagged [%s]" \
					                 % mail_subject)
					p.store(num, '+FLAGS','\\Flagged')
			#p.store(num, '+FLAGS','\\Seen')
			
		p.close()
		p.logout()
		
		self.logger.debug("received [%d] maile(s)" % \
		                                               number)
		self.logger.debug("created [%d] patch(es)" % \
		                                               len(mailobjs))
		return mailobjs

		def send_mail(self):
			pass
Example #16
 def test_bad_platform_warning(self):
     # For autobuild_call(), capturing warning output isn't as
     # straightforward as CaptureStdout or CaptureStderr because the logger
     # output isn't necessarily flushed to stderr yet. So to capture
     # warnings emitted by an autobuild_call() call, temporarily attach an
     # extra handler to the logger used by autobuild source_environment.
     stream = StringIO()
     handler = logging.StreamHandler(stream)
     atse.logger.addHandler(handler)
     try:
         with patch(sys, "platform", "strange"):
             self.autobuild_call(self.find_data("darwin"))
     finally:
         atse.logger.removeHandler(handler)
     stderr = stream.getvalue()
     assert_in("platform", stderr)
     assert_in("strange", stderr)
Example #17
def main():
    """
    Main function of the program
    """
    urls = sys.argv[1:]

    class Response(object):
        def __init__(self, str):
            self.content = str

    def wrapper(*args, **kwargs):
        return Response("""<a href="vk.co"></a>
        <a href="bb.com"></a>
        """)

    with patch('requests.get', wrapper):
        for url, data in data_from_urls(urls):
            print url, data
Example #18
 def test_platform_shorthand(self):
     # Seems like we shouldn't be able to overwrite sys.platform! But since
     # we can, that seems preferable to deriving the variable name for the
     # current platform in parallel with the autobuild implementation.
     # Initially I patched sys.platform to "win32", figuring that Windows
     # is our most important platform. The trouble with that is that on any
     # other platform, autobuild source_environment naturally complains
     # that Visual Studio isn't installed! So go for "darwin" instead.
     with patch(sys, "platform", "darwin"), CaptureStdout() as stdout:
         # Patching sys.platform of course only affects THIS process.
         # Therefore we must use autobuild_call() rather than any of the
         # methods that run autobuild source_environment as a child
         # process.
         self.autobuild_call(self.find_data("darwin"))
     stdout = stdout.getvalue()
     assert_found_assignment("LL_BUILD_DARWIN_RELEASE", "darwin release",
                             stdout)
     assert_found_assignment("LL_BUILD_RELEASE", "darwin release", stdout)
     assert_found_assignment("SOMETHING_ELSE", "something else", stdout)
     assert_not_found_in(r'^ *LL_BUILD=', stdout)
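
Note: the three-argument patch(obj, name, value) used in Examples #11, #14, #16 and #18 is not unittest.mock.patch; it behaves like a small setattr-and-restore context manager. A minimal sketch of what such a helper might look like, given purely as an assumption for illustration and not the project's actual implementation:

from contextlib import contextmanager

@contextmanager
def patch(obj, name, value):
    # Hypothetical helper: temporarily bind obj.<name> to value, restore on exit.
    missing = object()
    saved = getattr(obj, name, missing)
    setattr(obj, name, value)
    try:
        yield
    finally:
        if saved is missing:
            delattr(obj, name)
        else:
            setattr(obj, name, saved)

With such a definition, patch(sys, "platform", "darwin") rebinds sys.platform for the duration of the with-block and restores the original value even if the body raises.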
Example #19
 def time_function2():
     patch.patch(base_data,
                 seeds,
                 patch_path=os.path.join(config.tmp_dir,
                                         'patch.' + name),
                 output_dir=config.tmp_dir)
Example #20
    (Age, False),
    (CheckingAccountStatus, True),
    (CreditHistory, True),
    (Savings, True),
    (Property, True),
    (Age, False),
    (CreditAmount, False),
    (Duration, False),
    (Age, False),
]

########################################################
# Repair wrt German dataset
########################################################


def parse_args():
    parser = eutil.create_base_parser(description='Patch German dataset.',
                                      sensitive_attrs_default="['Sex']",
                                      dataset_default='german.data')
    args = parser.parse_args()
    evalu = eutil.EvalUtil(args)
    random.seed(args.random_seed)
    return evalu


if __name__ == '__main__':
    evalu = parse_args()
    patch.patch(evalu, cols, attr_map, refine_heuristics)
    evalu.save_vals()
Example #21
 def patch(self, request, pk):
     return patch(request, pk)
Example #22
def handle(fileList, fileDb, options):
  confPath = os.path.join(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "migration"), options.migrationTarget)

  infoPath = os.path.join(confPath, "info")
  patchPath = os.path.join(confPath, "patches")

  importedModule = False
  infoList = []
  patchList = []
  htmlList = getHtmlList(options)

  print "  * Number of script input files: %s" % len(fileList)
  print "  * Number of HTML input files: %s" % len(htmlList)
  print "  * Update to version: %s" % options.migrationTarget



  print "  * Searching for patch module..."

  for root, dirs, files in os.walk(confPath):

    # Filter ignored directories
    for ignoredDir in config.DIRIGNORE:
      if ignoredDir in dirs:
        dirs.remove(ignoredDir)

    # Searching for files
    for fileName in files:
      filePath = os.path.join(root, fileName)

      if os.path.splitext(fileName)[1] != config.PYEXT:
        continue

      if fileName == "patch.py":
        print "    - Importing..."

        if not root in sys.path:
          sys.path.insert(0, root)

        import patch
        importedModule = True

  emptyLine = re.compile("^\s*$")



  print "  * Searching for info expression data..."

  for root, dirs, files in os.walk(infoPath):

    # Filter ignored directories
    for ignoredDir in config.DIRIGNORE:
      if ignoredDir in dirs:
        dirs.remove(ignoredDir)

    # Searching for files
    for fileName in files:
      filePath = os.path.join(root, fileName)

      fileContent = textutil.any2Unix(filetool.read(filePath, "utf-8"))
      infoList.append({"path":filePath, "content":fileContent.split("\n")})

      if options.verbose:
        print "    - %s" % filePath

  print "    - Number of info files: %s" % len(infoList)

  print "    - Compiling expressions..."

  compiledInfos = []

  for infoFile in infoList:
    print "      - %s" % os.path.basename(infoFile["path"])
    for line in infoFile["content"]:
      if emptyLine.match(line) or line.startswith("#") or line.startswith("//"):
        continue

      compiled = entryCompiler(line)
      if compiled != None:
        compiledInfos.append(compiled)

  print "    - Number of infos: %s" % len(compiledInfos)

  print "  * Searching for patch expression data..."

  for root, dirs, files in os.walk(patchPath):

    # Filter ignored directories
    for ignoredDir in config.DIRIGNORE:
      if ignoredDir in dirs:
        dirs.remove(ignoredDir)

    # Searching for files
    for fileName in files:
      filePath = os.path.join(root, fileName)

      fileContent = textutil.any2Unix(filetool.read(filePath, "utf-8"))
      patchList.append({"path":filePath, "content":fileContent.split("\n")})

      if options.verbose:
        print "    - %s" % filePath

  print "    - Number of patch files: %s" % len(patchList)

  print "    - Compiling expressions..."

  compiledPatches = []

  for patchFile in patchList:
    print "      - %s" % os.path.basename(patchFile["path"])
    for line in patchFile["content"]:
      if emptyLine.match(line) or line.startswith("#") or line.startswith("//"):
        continue

      compiled = entryCompiler(line)
      if compiled != None:
        compiledPatches.append(compiled)

  print "    - Number of patches: %s" % len(compiledPatches)

  print
  print "  FILE PROCESSING:"
  print "----------------------------------------------------------------------------"

  if len(fileList) > 0:
    print "  * Processing script files:"

    for fileId in fileList:
      fileEntry = fileDb[fileId]

      filePath = fileEntry["path"]
      fileEncoding = fileEntry["encoding"]

      print "    - %s" % fileId

      # Read in original content
      fileContent = filetool.read(filePath, fileEncoding)
      patchedContent = fileContent

      # Apply patches
      if importedModule:
        tree = treegenerator.createSyntaxTree(tokenizer.parseStream(patchedContent))

        # If there were any changes, compile the result
        if patch.patch(fileId, tree):
          patchedContent = compiler.compile(tree, True)

      patchedContent = regtool(patchedContent, compiledPatches, True, options)
      patchedContent = regtool(patchedContent, compiledInfos, False, options)

      # Write file
      if patchedContent != fileContent:
        print "      - Store modifications..."
        filetool.save(filePath, patchedContent, fileEncoding)

    print "  * Done"



  if len(htmlList) > 0:
    print "  * Processing HTML files:"

    for filePath in htmlList:
      print "    - %s" % filePath

      # Read in original content
      fileContent = filetool.read(filePath)

      patchedContent = fileContent
      patchedContent = regtool(patchedContent, compiledPatches, True, options)
      patchedContent = regtool(patchedContent, compiledInfos, False, options)

      # Write file
      if patchedContent != fileContent:
        print "      - Store modifications..."
        filetool.save(filePath, patchedContent)

    print "  * Done"
Example #23
# -*- coding:utf-8 -*-
from patch import patch

with patch("__builtin__.open") as m:
    m.return_value.read.return_value = "data"
    result = open("test.txt").read()
    assert result == "data"

def now():
    "属性が設定できないのはC拡張のtypeでした"
    from datetime import datetime
    return datetime.now()

try:
    with patch("__main__.now") as m:
        m.return_value = "now!"
        result = now()
        assert result == "now!"
        raise Exception
except:
    pass
now() # => 2013-09-08 11:30:15.572884
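
The docstring of now() points at why the wrapper exists: attributes of C-extension types cannot be reassigned, so datetime.now itself cannot be patched, only the module-level name now. A minimal sketch of the failure this works around, assuming CPython:

from datetime import datetime

try:
    # Rebinding an attribute on a C-extension type is rejected outright.
    datetime.now = lambda: "now!"
except TypeError as exc:
    print("cannot patch datetime.now directly:", exc)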
Example #24
                print(f'{(time.time() - t) * 1000:.3f} ms {msg}')

                self.queue.task_done()
        except asyncio.CancelledError:
            print('listen cancelled')

    async def talk(self, msg):
        for count in range(10):
            delay = random.uniform(0, 2)
            await asyncio.sleep(delay)

            await self.post(f'{msg} {count}')

        print(msg, 'talk done')


def main():
    try:
        loop = asyncio.get_event_loop()
    except:
        loop = asyncio.new_event_loop()

    c = Crazy(loop)
    c.run()


if __name__ == '__main__':
    patch.patch()
    main()
Example #25
def gen_binary(input, output, lpc_patch=False):
    run_cmd([FROMELF, '--bin', '--output=%s' % output, input])
    if lpc_patch == True:
        patch(output)
Example #26
File: utils.py, Project: Cheong2K/CMSIS-DAP
def gen_binary(input, output, lpc_patch=False):
    run_cmd([FROMELF, '--bin', '--output=%s' % output, input])
    if lpc_patch == True:
        patch(output)
Example #27
# -*- coding:utf-8 -*-
from patch import patch
from target import whattime_is_it, notify


def greeting(name):
    hour = whattime_is_it()
    if 0 <= hour <= 4:
        fmt = "{name}, Goto Bed!!"
        notify(fmt.format(name=name))


# hmm.. this patch() cannot be used as a decorator
with patch("__main__.whattime_is_it") as m0:
    m0.return_value = 1
    with patch("__main__.notify") as m1:
        greeting("Foo")
        m1.assert_called_with("Foo, Goto Bed!!")
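
As the comment notes, this patch() is only a context manager, which is why the two targets are stacked in nested with-blocks. For comparison, a sketch of the same test written against the standard library's unittest.mock.patch, which also works as a decorator (decorators apply bottom-up, so the innermost one supplies the first mock argument):

from unittest.mock import patch as mock_patch

@mock_patch("__main__.notify")
@mock_patch("__main__.whattime_is_it")
def test_greeting(mock_whattime, mock_notify):
    mock_whattime.return_value = 1
    greeting("Foo")
    mock_notify.assert_called_with("Foo, Goto Bed!!")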
Example #28
 def time_function1():
     patch.patch(base_data,
                 seeds,
                 div_symfile_path=div_symfile_path,
                 output_dir=config.tmp_dir)
Example #29
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import patch
import rev
import sys

if len(sys.argv) != 2:
  print "usage: updatemojo.py <chromium source dir>"
  sys.exit(1)

chromium_dir = sys.argv[1]

print "Updating from %s" % chromium_dir

rev.rev(chromium_dir)
patch.patch()
Example #30
    import os, sys
    sys.path.insert(0,
        os.path.join(os.path.dirname(__file__), os.pardir, 'holon')) # pythonpath

    import reaktor, patch

    reaktor.REAKTOR_HOST = u"staging.txtr.com"
    reaktor.REAKTOR_PORT = 80
    reaktor.REAKTOR_SSL  = False
    reaktor.REAKTOR_PATH = u"/json/rpc?v=2"

    reaktor = reaktor.Reaktor()
    token, docId = "txtr.de", "bfzs289"
    keepIds = False

    doc = reaktor.WSDocMgmt.getDocument(token, docId)
    # access author via keys of dicts
    print doc["attributes"]["20514d7d-7591-49a4-a62d-f5c02a8f5edd"]
    # the same with attributes
    # sadly Python doesn't accept "-" in attribute names
    print doc.attributes["20514d7d-7591-49a4-a62d-f5c02a8f5edd"]

    patch.patch(doc, keepIds)
    # access author via keys of dicts
    print doc["attributes"]["author"]
    # the same with attributes
    print doc.attributes.author

    # You should now have seen the author 'Hart, Maarten 't' four times
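
A small illustration, using a hypothetical plain object rather than the reaktor API, of the remark about "-" in attribute names: dotted access requires a valid Python identifier, so before patch.patch() maps the UUID key to a plain name such as author, getattr() is the only way to reach the value.

class Doc(object):
    pass

doc = Doc()
setattr(doc, "20514d7d-7591-49a4-a62d-f5c02a8f5edd", "Hart, Maarten 't")

# doc.20514d7d-... would be a SyntaxError; the value is only reachable via getattr():
print(getattr(doc, "20514d7d-7591-49a4-a62d-f5c02a8f5edd"))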