Example #1
def create_po():
    tm = datetime.datetime.now()
    postfix = '%s%02d%02d.%02d%02d%02d' % tuple(tm.timetuple())[:6]

    home = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    os.chdir(os.path.join(home, 'scripts'))

    cmd = "python %s %s %s" % ('pygettext.py', os.path.join(home, 'ui', '*.py'), os.path.join(home, 'updater.py'))
    print(cmd)
    os.system(cmd)

    cmd = 'move messages.pot youmoney.pot'
    print(cmd)
    shutil.move('messages.pot', 'youmoney.pot')
    
    #dstnames = ['youmoney_zh_CN', 'youmoney_ja_JP']
    global dstnames 
    for name in dstnames:
        dstfile = name + '.po'
        print(dstfile)
        # backup old file
        if os.path.isfile(dstfile):
            shutil.move(dstfile, '%s.%s.%d' % (dstfile, postfix, random.randint(0, 10000)))

        merge.merge(name + '.sample', "youmoney.pot", dstfile)
Example #2
    def testMergeMainAndCommand(self):
        # Ensure the main attribute doesn't get promoted over the command

        # Case 1: the group already has a suitable main and command.
        # We simply add the new implementation to the group, without its own main.
        master_xml = merge.merge(
            header + """
  <group main='main'>
    <command name='run' path='run.sh'/>
    <implementation id="sha1=001" version="0.1"/>
  </group>
  """ + footer, local_file_main_and_command)
        feed = parse(master_xml)

        assert feed.implementations['sha1=001'].main == "run.sh"
        assert feed.implementations['sha1=002'].main == "run.sh"

        # Case 2: the group doesn't specify a main.
        # We need to create a sub-group for it.
        master_xml = merge.merge(
            header + """
  <group>
    <command name='run' path='run.sh'/>
    <implementation id="sha1=001" version="0.1"/>
  </group>
  """ + footer, local_file_main_and_command)
        feed = parse(master_xml)

        assert feed.implementations['sha1=001'].main == "run.sh"
        assert feed.implementations['sha1=002'].main == "run.sh"
Example #3
	def testNotSubset(self):
		master = parse(merge.merge(header + "<group a='a'>\n    <implementation id='sha1=123' version='1'/>\n  </group>" + footer, local_file))
		assert master.uri == 'http://test/hello.xml', master
		assert len(master.implementations) == 2
		assert master.implementations['sha1=123'].metadata.get('a', None) == 'a'
		assert master.implementations['sha1=002'].metadata.get('a', None) == None

		master = parse(merge.merge(header + """\n
  <group>
    <requires interface='http://foo' meta='foo'/>
    <implementation id='sha1=004' version='1'/>
  </group>
  <group>
    <requires interface='http://foo'>
      <version before='1'/>
    </requires>
    <implementation id='sha1=001' version='1'/>
  </group>""" + footer, local_file_req))
		assert len(master.implementations['sha1=001'].requires[0].restrictions) == 1
		assert len(master.implementations['sha1=003'].requires[0].restrictions) == 0

		assert master.implementations['sha1=004'].requires[0].metadata.get('meta', None) == 'foo'
		assert master.implementations['sha1=003'].requires[0].metadata.get('meta', None) == None

		assert master.implementations['sha1=003'].main == 'hello'
Example #4
    def testMergeIf0installVersion(self):
        master_xml = merge.merge(
            header + """
  <group>
    <command name='run' path='run.sh'/>
    <implementation id="sha1=004" version="0.4"/>
  </group>
  """ + footer, local_file_if)
        doc = minidom.parseString(master_xml)

        n_commands = len(doc.getElementsByTagName("command"))
        assert n_commands == 3

        # We can share the run-old.sh <command>
        master_xml = merge.merge(
            header + """
  <group>
    <command name='run' path='run-old.sh' if-0install-version='..!2'/>
    <command name='run' path='run-mid.sh' if-0install-version='2..'/>
    <implementation id="sha1=004" version="0.4"/>
  </group>
  """ + footer, local_file_if)
        doc = minidom.parseString(master_xml)

        n_commands = len(doc.getElementsByTagName("command"))
        assert n_commands == 3
Example #5
def main():
    creds = config.get_creds()
    sftp.download(creds.get("sftp_url"), creds.get("sftp_username"),
                  creds.get("sftp_password"), creds.get("localpath"))
    cleaner.clean(creds.get("localpath"))
    merge.merge(creds.get("localpath"))
    scraper.scrape(creds)
Example #6
def main():
    args = sys.argv[1:]

    if len(args) < 1:
        logging.warning('Usage: python <filename> <function> <parameters..>')

    elif args[0] == 'init':
        init()

    elif args[0] == 'add':

        data_to_copy_path = args[1]
        add(data_to_copy_path)

    elif args[0] == 'commit':
        message = args[1]
        commit(message)

    elif args[0] == 'status':
        status()

    elif args[0] == 'checkout':
        commit_id = args[1]
        checkout(commit_id)

    elif args[0] == 'graph':
        graph()

    elif args[0] == 'branch':
        branch_name = args[1]
        branch(branch_name)

    elif args[0] == 'merge':
        branch_name = args[1]
        merge(branch_name)
Example #7
def main():
    args = sys.argv[1:]
    if not args:
        print("error: must specify one or more dirs")
        sys.exit(1)

    for dirname in args:
        # apply a band-pass filter
        dest_path = os.path.join(dirname, 'bp_filtered')
        bpfilter.filter(dirname, dest_path)

        # first pass through files to eliminate noise files
        source_path = dest_path
        dest_path = os.path.join(dirname, 'filtered')
        filter_empty_audio(source_path, dest_path, MEAN_VOLUME_CEILING,
                           PEAK_VOLUME_CEILING)

        # first pass to cut pure silence in audio files
        source_path = dest_path
        dest_path = os.path.join(dirname, 'silence_removed')
        remove_silence(source_path, dest_path, HIGH_THRESHOLD)

        # merge the files to ideally 5min lengths, but up to 10min
        source_path = dest_path
        dest_path = os.path.join(dirname, 'merged')
        merge.merge(source_path, dest_path)

        # second pass to cut pure silence in audio files (to get rid of concat silence)
        source_path = dest_path
        dest_path = os.path.join(dirname, 'processed')
        remove_silence(source_path, dest_path, HIGH_THRESHOLD)
Example #8
	def testMergeBest(self):
		master_xml = merge.merge(header + """\n
  <group>
    <implementation id='sha1=123' version='1'/>
  </group>
  <group>
    <requires interface='http://foo'/>
    <implementation id='sha1=002' version='2'/>
  </group>""" + footer, local_file_req)
		master = parse(master_xml)
		assert master.uri == 'http://test/hello.xml', master
		assert len(master.implementations) == 3
		deps = master.implementations['sha1=003'].requires
		assert len(deps) == 1
		assert deps[0].interface == 'http://foo', deps[0]

		assert len(minidom.parseString(master_xml).documentElement.getElementsByTagNameNS(XMLNS_IFACE, 'group')) == 2
	
		# Again, but with the groups the other way around
		master_xml = merge.merge(header + """\n
  <group>
    <requires interface='http://foo'/>
    <implementation id='sha1=002' version='2'/>
  </group>
  <group>
    <implementation id='sha1=123' version='1'/>
  </group>""" + footer, local_file_req)
		master = parse(master_xml)
		assert master.uri == 'http://test/hello.xml', master
		assert len(master.implementations) == 3
		deps = master.implementations['sha1=003'].requires
		assert len(deps) == 1
		assert deps[0].interface == 'http://foo', deps[0]

		assert len(minidom.parseString(master_xml).documentElement.getElementsByTagNameNS(XMLNS_IFACE, 'group')) == 2
Example #9
def merge(lib_img_path, macro, detract = 0):
  libimg = Image.open(dropBoxDir + lib_img_path)
  targimg = Image.open(dropBoxDir + 'target.jpg')
  backedby = Meme.objects.filter(classification = macro).count()
  m.merge(libimg, targimg, backedby - detract)
  if ".jpg" not in (dropBoxDir + lib_img_path):
    libimg.save(dropBoxDir + lib_img_path + ".jpg")
  libimg.save(dropBoxDir + lib_img_path)
Example #10
 def merge(self, other):
   if isinstance(other, Version):
     merge.merge(self, other)
   elif isinstance(other, Model):
     merge.merge(self, other.version)
   else:
     raise TypeError('Expected instance of %s or %s' % \
       (Version, self.__class__))
Example #11
def merge(lib_img_path, macro, detract=0):
    libimg = Image.open(dropBoxDir + lib_img_path)
    targimg = Image.open(dropBoxDir + 'target.jpg')
    backedby = Meme.objects.filter(classification=macro).count()
    m.merge(libimg, targimg, backedby - detract)
    if ".jpg" not in (dropBoxDir + lib_img_path):
        libimg.save(dropBoxDir + lib_img_path + ".jpg")
    libimg.save(dropBoxDir + lib_img_path)
Example #12
    def test_merged_file_contains_all_pages(self):
        front_pages = MockPdfReader()
        back_pages = MockPdfReader()

        merge.merge(front_pages, back_pages, 'fake_out', True, False)

        expected_len = len(front_pages.pages) + len(back_pages.pages)
        self.assertEqual(expected_len, len(self.outfile.pages))
Example #13
  def test_merged_file_contains_all_pages(self):
    front_pages = MockPdfReader()
    back_pages = MockPdfReader()

    merge.merge(front_pages, back_pages, 'fake_out', True, False)

    expected_len = len(front_pages.pages) + len(back_pages.pages)
    self.assertEqual(expected_len, len(self.outfile.pages))
Example #14
def agreement(P, config={}):
    '''
    P: number of pilots
    config: key-generation options, including:
        sampling_period: sampling period
        sampling_time: sampling time
        corr_ab: correlation coefficient between Alice's and Bob's channel measurements
        corr_ae: correlation coefficient between Alice's and Eve's channel measurements
        block_size: samples per sub-block for dual-threshold quantization
        coef: quantization coefficient for dual-threshold quantization
        qtype: bit encoding for uniform quantization (gray/natural)
        order: quantization order for uniform quantization
        mtype: merge type (RSSI/Phase/cross/and/or/xor)
        iteration: number of information-reconciliation iterations
        m: number of Hamming-code parity bits
    '''
    ''' Sampling parameters '''
    sampling_period = config.get('sampling_period', 1)
    sampling_time = config.get('sampling_time', 3)
    corr_ab = config.get('corr_ab', 0.9)
    corr_ae = config.get('corr_ae', 0.4)
    ''' Quantization parameters '''
    block_size = config.get('block_size', 25)
    coef = config.get('coef', 0.8)
    qtype = config.get('qtype', 'gray')
    order = config.get('order', 1)
    mtype = config.get('mtype', 'cross')
    ''' Information-reconciliation parameters '''
    iteration = config.get('iteration', 2)
    m = config.get('m', 3)
    ''' Sampling '''
    rssi_A, rssi_B, rssi_E = sampling_RSSI(sampling_period, sampling_time,
                                           corr_ab, corr_ae)
    phase_A, phase_B, phase_E = sampling_phase(sampling_period, sampling_time,
                                               corr_ab, corr_ae)
    ''' RSSI quantization '''
    bits_A_rssi, drop_list_A = quantize_ASBG_1bit(rssi_A, block_size, coef)
    bits_B_rssi, drop_list_B = quantize_ASBG_1bit(rssi_B, block_size, coef)
    bits_E_rssi, drop_list_E = quantize_ASBG_1bit(rssi_E, block_size, coef)
    bits_A_rssi = remain(bits_A_rssi, drop_list_B)
    bits_B_rssi = remain(bits_B_rssi, drop_list_A)
    bits_E_rssi = remain(bits_E_rssi, drop_list_A)
    ''' Phase quantization '''
    bits_A_phase = quantize_phase(phase_A, qtype, order)
    bits_B_phase = quantize_phase(phase_B, qtype, order)
    bits_E_phase = quantize_phase(phase_E, qtype, order)
    ''' Merge '''
    bits_A = merge(bits_A_rssi, bits_A_phase, mtype)
    bits_B = merge(bits_B_rssi, bits_B_phase, mtype)
    bits_E = merge(bits_E_rssi, bits_E_phase, mtype)
    ''' Information reconciliation '''
    bits_A, bits_B = winnow(bits_A, bits_B, iteration, m)
    ''' Pilot generation '''
    pos_A = encode(bits_A, P)
    pos_B = encode(bits_B, P)
    pos_E = encode(bits_E, P)

    return pos_A, pos_B, pos_E
Example #15
 def test_merge(self):
     self.assertEqual(merge(self.case_long_0), self.case_long_0_res)
     self.assertEqual(merge(self.case_long_1), self.case_long_1_res)
     self.assertEqual(merge(self.case_long_2), self.case_long_2_res)
     self.assertEqual(merge(self.case_long_3), self.case_long_3_res)
     self.assertRaisesRegex(InputError, "invalid input", merge,
                            self.case_invalid_0)
     self.assertRaisesRegex(InputError, "invalid input", merge,
                            self.case_invalid_1)
Example #16
 def testMergeTwice(self):
     try:
         once = merge.merge(
             header + "<implementation id='sha1=123' version='1'/>" +
             footer, local_file)
         twice = merge.merge(once, local_file)
         assert 0
     except Exception as ex:
         assert 'Duplicate ID' in str(ex)
Example #17
def agreement(P,mtype='cross',iteration=2,corr_ab=0.9,corr_ae=0.4):
    '''
    P: number of pilots
    mtype: merge type (RSSI/Phase/cross/and/or/xor)
    iteration: number of Winnow iterations
    corr_ab: correlation coefficient between Alice's and Bob's channel measurements
    corr_ae: correlation coefficient between Alice's and Eve's channel measurements
    '''
    
    ''' Sampling parameters '''
    sampling_period = 1
    sampling_time = 3
    
    ''' Quantization parameters '''
    block_size = 25
    coef = 0.8
    qtype = 'gray'
    order = 1
    
    ''' Sampling '''
    rssi_A,rssi_B,rssi_E = sampling('RSSI',sampling_period,sampling_time,corr_ab,corr_ae)  
    phase_A,phase_B,phase_E = mod(sampling('Phase',sampling_period,sampling_time,corr_ab,corr_ae),2*pi)
    #print 'corrcoef of rssi  between AB and AE:',corrcoef(rssi_A, rssi_B, rowvar=0)[0,1],corrcoef(rssi_A, rssi_E, rowvar=0)[0,1]
    #print 'corrcoef of phase between AB and AE:',corrcoef(phase_A,phase_B,rowvar=0)[0,1],corrcoef(phase_A,phase_E,rowvar=0)[0,1]
        
    ''' RSSI quantization '''
    bits_A_rssi,drop_list_A = quantization_thre(rssi_A,block_size,coef)
    bits_B_rssi,drop_list_B = quantization_thre(rssi_B,block_size,coef)
    bits_E_rssi,drop_list_E = quantization_thre(rssi_E,block_size,coef)
    bits_A_rssi = remain(bits_A_rssi,drop_list_A,drop_list_B)
    bits_B_rssi = remain(bits_B_rssi,drop_list_A,drop_list_B)
    bits_E_rssi = remain(bits_E_rssi,drop_list_A,drop_list_E)
    #print 'BMR of RSSI before winnow between AB',BMR(bits_A_rssi,bits_B_rssi)
    
    ''' Phase quantization '''
    bits_A_phase = quantization_even('Phase',phase_A,size(phase_A),qtype,order)
    bits_B_phase = quantization_even('Phase',phase_B,size(phase_B),qtype,order)
    bits_E_phase = quantization_even('Phase',phase_E,size(phase_E),qtype,order)
    #print 'BMR of phase before winnow between AB',BMR(bits_A_phase,bits_B_phase)
    
    ''' Merge '''
    bits_A = merge(bits_A_rssi,bits_A_phase,mtype)
    bits_B = merge(bits_B_rssi,bits_B_phase,mtype)
    bits_E = merge(bits_E_rssi,bits_E_phase,mtype)
    #print 'BMR of merge before winnow between AB',BMR(bits_A,bits_B)
    
    ''' Winnow information reconciliation '''
    bits_A, bits_B = winnow(bits_A,bits_B,iteration)
    #print 'BMR of merge after winnow between AB',BMR(bits_A,bits_B)
    
    ''' Pilot generation '''
    pos_A = encode(bits_A,P)
    pos_B = encode(bits_B,P)
    pos_E = encode(bits_E,P)

    return pos_A,pos_B,pos_E
Example #18
def _sort(destination, source, first, last):
	length = last - first
	if length <= 1: return
	middle = first + length//2

	_sort(source, destination, first, middle)
	_sort(source, destination, middle, last)

	if source[middle - 1] > source[middle]:
		merge.merge(destination, source, first, middle, last)
	else:
		# halves already in order: still copy them into destination
		destination[first:last] = source[first:last]
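Example #18's _sort ping-pongs between two buffers, so the caller has to seed both with the same data before the first call. A minimal sketch of the missing top-level wrapper (the name mergesort and its copy-in contract are assumptions, not part of the original):

def mergesort(items):
    # Hypothetical driver: the ping-pong recursion above requires
    # source and destination to start out as equal copies.
    aux = list(items)
    _sort(items, aux, 0, len(items))
    return items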
Example #19
def run():
    merge.merge()
    out.out()
    ac = ASSR.AudioCorrection(
        'path.wav', 'tfSessions/2018-10-13-01:40:12-0.8486092/session.ckpt')
    ac.process()
    ac.saveCorrectedAudio()
    time.sleep(1)
    wit.wit()
    return 1
Example #20
def mergesort_three(A):
    if len(A) <= 1:  # returns the list when is one element
        return A
    mid = len(A) // 3
    if mid == 0:  # case when list is 2 elements
        mid = 1
    primera = mergesort_three(A[:mid])
    segunda = mergesort_three(A[mid:mid * 2])
    tercera = mergesort_three(A[mid * 2:])
    return merge(merge(primera, segunda), tercera)
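mergesort_three relies on a two-way merge helper that is not shown. A minimal sketch under the usual contract (two sorted lists in, one sorted list out; the implementation is an assumption):

def merge(left, right):
    # Standard two-pointer merge of two sorted lists.
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # One list is exhausted; append the remainder of the other.
    result.extend(left[i:])
    result.extend(right[j:])
    return result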
Example #21
def main():
	global stop_set, docs_max, file_count,cont
	global dict_title, dict_infobox, dict_category, dict_ref, dict_links, dict_body
	global file_infobox, file_title, file_category, file_ref, file_body, file_links
#		global total_no_document

	stop_words_set()	
	# create an XMLReader
	parser = xml.sax.make_parser()
	# turn off namespaces
	parser.setFeature(xml.sax.handler.feature_namespaces, 0)
	# override the default ContextHandler
	Handler = My_Wiki_Handler()
	parser.setContentHandler( Handler )
	parser.parse("corpus2.xml")

	string_infobox = ""
	string_title = ""
	string_category = ""
	string_body = ""
	string_ref = ""
	string_links = ""

	for word in sorted( dict_title.keys() ):
		string_title = word + "=" + dict_title[word] + '\n'
		file_title.write(string_title)

	for word in sorted( dict_infobox.keys() ):
		string_infobox = word + "=" + dict_infobox[word] + '\n'
		file_infobox.write(string_infobox)

	for word in sorted( dict_category.keys() ):
		string_category = word + "=" + dict_category[word] + '\n'
		file_category.write(string_category)

	for word in sorted( dict_body.keys() ):
		string_body = word + "=" + dict_body[word] + '\n'
		file_body.write(string_body)

	for word in sorted( dict_ref.keys() ):
		string_ref = word + "=" + dict_ref[word] + '\n'
		file_ref.write(string_ref)

	for word in sorted( dict_links.keys() ):
		string_links = word + "=" + dict_links[word] + '\n'
		file_links.write(string_links)

	clear_dict()	
	close_file()
	print "Merging Begins"
	merge.merge(file_count)
	os.system("rm index/body* index/category* index/infobox* index/title* index/ref* index/links*")

	print cont
Example #22
    def test_merged_file_contains_pages_in_correct_order(self):
        front_pages = MockPdfReader([MockPdfReader() for i in range(3)])
        back_pages = MockPdfReader([MockPdfReader() for i in range(3)])

        merge.merge(front_pages, back_pages, 'fake_out', True, False)

        for i, page in enumerate(self.outfile.pages):
            if i % 2 == 0:
                expected_page = front_pages.pages[i // 2]
            else:
                expected_page = back_pages.pages[i // 2]

            self.assertEqual(expected_page, page)
Example #23
  def test_merged_file_contains_pages_in_correct_order(self):
    front_pages = MockPdfReader([MockPdfReader() for i in range(3)])
    back_pages = MockPdfReader([MockPdfReader() for i in range(3)])

    merge.merge(front_pages, back_pages, 'fake_out', True, False)

    for i, page in enumerate(self.outfile.pages):
      if i % 2 == 0:
        expected_page = front_pages.pages[i // 2]
      else:
        expected_page = back_pages.pages[i // 2]

      self.assertEqual(expected_page, page)
Example #24
 def test_list_key_type_mismatch(self):
     input = {
         '__all__': {
             'groups': ['sudo'],
             'shell': '/bin/bash'
         },
         'alice': {},
         'bob': {
             'groups': 'users',
         }
     }
     with self.assertRaises(errors.AnsibleFilterError):
         merge(input)
Example #25
    def test_merging_fed_backwards_correctly_orders_pages(self):
        front_pages = MockPdfReader([MockPdfReader() for i in range(3)])
        back_pages = MockPdfReader([MockPdfReader() for i in range(3)])

        merge.merge(front_pages, back_pages, 'fake_out', True, True)

        bp_last_index = len(back_pages.pages) - 1
        for i, page in enumerate(self.outfile.pages):
            if i % 2 == 0:
                expected_page = front_pages.pages[i // 2]
            else:
                expected_page = back_pages.pages[bp_last_index - i // 2]

            self.assertEqual(expected_page, page)
Example #26
def test_merge():
	A = [1,2,4,None,None]
	B = [3,6]
	
	merge(A,B)
	
	assert(A == [1,2,3,4,6])
	
	A = [12,14,15,None,None]
	B = [1,2]
	
	merge(A,B)
	
	assert(A == [1,2,12,14,15])
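The test above implies an in-place merge(A, B) where A's trailing None slots reserve room for B. A sketch that satisfies both assertions (the backwards two-pointer approach is an assumption about the implementation):

def merge(A, B):
    # Fill A from the back so no real element is overwritten.
    i = len(A) - len(B) - 1  # index of A's last real element
    j = len(B) - 1
    k = len(A) - 1
    while j >= 0:
        if i >= 0 and A[i] > B[j]:
            A[k] = A[i]
            i -= 1
        else:
            A[k] = B[j]
            j -= 1
        k -= 1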
Example #27
    def test_identity(self):
        identity = {}

        other = {}
        merged = merge(identity, other)
        self.assertDictEqual(merged, other)

        other = {'foo': 1}
        merged = merge(identity, other)
        self.assertDictEqual(merged, other)

        other = {'foo': {'bar': 1}}
        merged = merge(identity, other)
        self.assertDictEqual(merged, other)
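A recursive dictionary merge consistent with these identity tests might look like the following sketch (the deep-merge behavior for nested dicts is an assumption; the tests only pin down the empty-base case):

def merge(base, other):
    # Values from `other` win; nested dicts are merged recursively.
    merged = dict(base)
    for key, value in other.items():
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = merge(merged[key], value)
        else:
            merged[key] = value
    return merged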
Example #28
  def test_merging_fed_backwards_correctly_orders_pages(self):
    front_pages = MockPdfReader([MockPdfReader() for i in range(3)])
    back_pages = MockPdfReader([MockPdfReader() for i in range(3)])

    merge.merge(front_pages, back_pages, 'fake_out', True, True)

    bp_last_index = len(back_pages.pages) - 1
    for i, page in enumerate(self.outfile.pages):
      if i % 2 == 0:
        expected_page = front_pages.pages[i // 2]
      else:
        expected_page = back_pages.pages[bp_last_index - i // 2]

      self.assertEqual(expected_page, page)
Example #29
def test_error_passed_number():
    # each call must raise on its own, so give each one its own context;
    # in a single pytest.raises block only the first raising call runs
    with pytest.raises(TypeError):
        merge([1, 2], 0, func)  # int
    with pytest.raises(TypeError):
        merge(1, 0, func)
    with pytest.raises(TypeError):
        merge([1, 2], 12.22, func)  # float
    with pytest.raises(TypeError):
        merge(12.22, 12.22, func)
Example #30
def _find_graphs_helper(args):
    merge_rules, time, min_length, lite = args
    files, dest = merge_rules
    log = logging.getLogger("brorecords")

    # First check and see if there is already a pickled version of
    # extracted graphs from this given work set.  If so, we can exit early
    # here.  For simplicity's sake, we just append .pickle to the name of
    # the path for the combined bro records.
    tmp_path = "{0}.pickles.tmp".format(dest)
    final_path = "{0}.pickles".format(dest)
    if os.path.isfile(final_path):
        log.info("Found picked records already at {0}".format(final_path))
        return final_path

    log.info("Merging {0} files into {1}".format(len(files), dest))

    if not merge.merge(files, dest):
        return None

    log.info("{0}: Begining parsing".format(dest))
    graph_count = 0
    with open(dest, 'r') as source_h, open(tmp_path, 'w') as dest_h:
        try:
            for g in graphs(source_h, time=time, record_filter=record_filter):
                graph_count += 1
                if len(g) < min_length:
                    continue
                pickle.dump(g, dest_h)
        except Exception as e:
            err = "Ignoring {0}: formatting errors in the log".format(dest)
            log.error(err)
            raise
Example #31
def _find_graphs_helper(args):
    merge_rules, time, min_length, lite = args
    files, dest = merge_rules
    log = logging.getLogger("brorecords")

    # First check and see if there is already a pickled version of
    # extracted graphs from this given work set.  If so, we can exit early
    # here.  For simplicity's sake, we just append .pickle to the name of
    # the path for the combined bro records.
    tmp_path = "{0}.pickles.tmp".format(dest)
    final_path = "{0}.pickles".format(dest)
    if os.path.isfile(final_path):
        log.info("Found picked records already at {0}".format(final_path))
        return final_path

    log.info("Merging {0} files into {1}".format(len(files), dest))

    if not merge.merge(files, dest):
        return None

    log.info("{0}: Begining parsing".format(dest))
    graph_count = 0
    with open(dest, 'r') as source_h, open(tmp_path, 'w') as dest_h:
        try:
            for g in graphs(source_h, time=time, record_filter=record_filter):
                graph_count += 1
                if len(g) < min_length:
                    continue
                pickle.dump(g, dest_h)
        except Exception as e:
            err = "Ignoring {0}: formatting errors in the log".format(dest)
            log.error(err)
            raise
Example #32
 def testMergeSecond(self):
     master = parse(
         merge.merge(
             header + "<implementation id='sha1=123' version='1'/>" +
             footer, local_file))
     assert master.uri == 'http://test/hello.xml', master
     assert len(master.implementations) == 2
Example #33
def process_image(img_path, numk=200, compactness=20, percentage=0.95):
    img = cv2.imread(img_path)
    print('Processing %s (%dx%d).' % (img_path, img.shape[0], img.shape[1]))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    t = []
    t.append(time.time())
    labels = snic.snic(img, numk, compactness)
    utils.save_boundaries(img, labels,
                          img_path[:img_path.rfind('.')] + '_snic.png')
    t.append(time.time())
    labels = snic.snico(img, numk)
    utils.save_boundaries(img, labels,
                          img_path[:img_path.rfind('.')] + '_snico.png')
    t.append(time.time())
    labels = merge.merge(img, labels, percentage)
    utils.save_boundaries(img, labels,
                          img_path[:img_path.rfind('.')] + '_snico_merge.png')
    t.append(time.time())

    print('Time: ', end='')
    for i in range(1, len(t)):
        print('%.2f' % (t[i] - t[i - 1]), end=' ')
    print()


# data_folder = 'images'
# for fn in os.listdir(data_folder):
#     if fn.find('_') != -1:
#         continue
#     process_image(data_folder + '/' + fn)

#process_image('images/118035.jpg', percentage=0.95)
#process_image('images/124084.jpg', percentage=0.95)
#process_image('images/135069.jpg', percentage=0.98)
Example #34
def test_merge(x):
    for i in range(10):
        l1 = sorted(
            random.randint(1, 10) for _ in range(random.randint(1, 10)))
        l2 = sorted(
            random.randint(1, 10) for _ in range(random.randint(1, 10)))
        assert merge.merge(l1, l2) == sorted(l1 + l2)
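The property being checked, that merging two sorted lists equals sorting their concatenation, is the same contract the standard library's heapq.merge provides, so an equivalent check could read:

import heapq
import random

l1 = sorted(random.randint(1, 10) for _ in range(5))
l2 = sorted(random.randint(1, 10) for _ in range(5))
# heapq.merge returns an iterator, so materialize it for comparison
assert list(heapq.merge(l1, l2)) == sorted(l1 + l2)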
Example #35
def mask_bright_objs(image, master, lower_limit, bg): # This routine generates masks for n objects

    #for y in range (0,n_obj):                   #Dictates how many loops of the cycle are done
    while image.max()>lower_limit:

        max = image.max()                       #finds the max value in the image
        list = image.flatten()                  #flattens the image into a 1D list
        location = np.where(list == max)[0]     #finds the position of all the maxima

        length = location.size                  #calculates how many maxima are present

        for z in range (0, length):             #Loop which repeats as many times as there are maxima

            ycoord = int(location[z]/2570)      #calculates the x and y co-ordinates
            xcoord = location[z]-(2570*ycoord)  #using the fact we know the shape of the original image

            pos = [ycoord, xcoord]              #stores the xy co-ordinates in pos
            #print pos                           #print position so we know which pixel is the problem if program fails
            new_mask = bld.obj_mask(image, pos, bg) #creates a circular mask over the image

            master = merge.merge(master, new_mask)  #merges the most recent mask to the master

            image.mask = master                 #applies the mask to the image so that we don't count the same objects when we repeat the loop

    return master                               #returns the master to mosaic
Example #36
    def createAlgsList(self):
        # First we populate the list of algorithms with those created
        # extending GeoAlgorithm directly (those that execute GDAL
        # using the console)
        self.preloadedAlgs = [nearblack(), information(), warp(), translate(),
            rgb2pct(), pct2rgb(), merge(), polygonize(), gdaladdo(),
            ClipByExtent(), ClipByMask(), contour(), rasterize(), proximity(),
            sieve(), fillnodata(), ExtractProjection(), gdal2xyz(),
            hillshade(), slope(), aspect(), tri(), tpi(), roughness(),
            ColorRelief(), GridInvDist(), GridAverage(), GridNearest(),
            GridDataMetrics(),
            # ----- OGR tools -----
            OgrInfo(), Ogr2Ogr(), OgrSql(),
            ]

        # And then we add those that are created as python scripts
        folder = self.scriptsFolder()
        if os.path.exists(folder):
            for descriptionFile in os.listdir(folder):
                if descriptionFile.endswith('py'):
                    try:
                        fullpath = os.path.join(self.scriptsFolder(),
                                descriptionFile)
                        alg = GdalScriptAlgorithm(fullpath)
                        self.preloadedAlgs.append(alg)
                    except WrongScriptException as e:
                        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, e.msg)
Example #37
def mergesort(x):
    global mergesortCalls
    mergesortCalls += 1

    if len(x) < 2:  # x is short --> already sorted --> return it
        y = x
        return y

    elif (len(x) == 2):  # A larger base case of length 2
        y = x
        if (y[0] > y[1]):  #If the order needs to be swapped...
            y[0], y[1] = y[1], y[0]  #Tuple swap the array indexes

        #If the first index was already less than or = to the second, just return y
        return y

    elif (len(x) == 3):  # A larger base case of length 3
        y = x
        if (y[0] > y[2]):
            y[0], y[2] = y[2], y[0]  #Swap them
        if (y[0] > y[1]):
            y[0], y[1] = y[1], y[0]
        if (y[1] > y[2]):
            y[1], y[2] = y[2], y[1]
        return y

    else:  # x needs to be sorted
        N = len(x)
        y1 = mergesort(x[0:int(N / 2)])  #sort first half
        y2 = mergesort(x[int(N / 2):N])  # second half
        y = merge.merge(y1, y2)
        return y
Example #38
    def createAlgsList(self):
        # First we populate the list of algorithms with those created
        # extending GeoAlgorithm directly (those that execute GDAL
        # using the console)
        self.preloadedAlgs = [nearblack(), information(), warp(), translate(),
                              rgb2pct(), pct2rgb(), merge(), buildvrt(), polygonize(), gdaladdo(),
                              ClipByExtent(), ClipByMask(), contour(), rasterize(), proximity(),
                              sieve(), fillnodata(), ExtractProjection(), gdal2xyz(),
                              hillshade(), slope(), aspect(), tri(), tpi(), roughness(),
                              ColorRelief(), GridInvDist(), GridAverage(), GridNearest(),
                              GridDataMetrics(), gdaltindex(), gdalcalc(), rasterize_over(),
                              # ----- OGR tools -----
                              OgrInfo(), Ogr2Ogr(), Ogr2OgrClip(), Ogr2OgrClipExtent(),
                              Ogr2OgrToPostGis(), Ogr2OgrToPostGisList(), Ogr2OgrPointsOnLines(),
                              Ogr2OgrBuffer(), Ogr2OgrDissolve(), Ogr2OgrOneSideBuffer(),
                              Ogr2OgrTableToPostGisList(), OgrSql(),
                              ]

        # And then we add those that are created as python scripts
        folder = self.scriptsFolder()
        if os.path.exists(folder):
            for descriptionFile in os.listdir(folder):
                if descriptionFile.endswith('py'):
                    try:
                        fullpath = os.path.join(self.scriptsFolder(),
                                                descriptionFile)
                        alg = GdalScriptAlgorithm(fullpath)
                        self.preloadedAlgs.append(alg)
                    except WrongScriptException as e:
                        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, e.msg)
Example #39
def clean_conformers(atoms, energies, coordinates, states=None):

    # Total count
    N = len(energies)

    if states is not None:

        # Keep only converged states
        success = np.argwhere(states == 0)
        success = success.flatten()

        # Only looked at converged states, discard rest
        energies = energies[success]
        coordinates = coordinates[success]

    # TODO what about failed states?
    # TODO Check high energies

    # TODO change to asymmetric merge (cleaner code)

    # Keep index for only unique
    idxs = merge.merge(atoms, energies, coordinates)

    # Here all cost is the same, so just take the first conformer
    idxs = [idx[0] for idx in idxs]

    return idxs
Example #40
def merge(PGL, c="%#$", o="%#$", delim=',', d=0):
    """Takes a PyGL object and returns a PyGL object resulting from the merge operation"""
    if _cs(PGL) != 0:
        print "The PyGL supplied is not sorted.  Please use a pyglSort."
        return
    args = {'c': c, 'o': o, 'delim': delim, 'd': d, 'noH': True}
    return _mer.merge(PGL, args, "")
Example #41
def run():
    from repeats import repeats
    from score import score
    from clean import clean
    from match import match
    from merge import merge
    from id_gen import id_gen
    import pandas as pd
    '''
    This is the new (Summer 2019) implementation of scoring, matching, and merging
    '''
    year = "19"
    season = "Sp"
    mergeName = 'QuaRCSLt2_' + season + year + '_merged.csv'
    PREdata = 'QuaRCSLt2_S19_PRE.csv'
    PSTdata = PREdata[:-7] + "POST.csv"
    stu_DB_name = "Student_ID_Database.csv"
    instr_DB_name = "Instr_ID_Database.csv"

    print("Scoring...")
    # Score PRE and PST
    PREdata = score(PREdata, 'PRE', year, season, 'answ.csv', PREdata[:-4])
    PSTdata = score(PSTdata, 'PST', year, season, 'answ.csv', PSTdata[:-4])

    # Clean PRE and PST
    #PREdata = PREdata[:-4] + "_scored.csv"
    #PSTdata = PSTdata[:-4] + "_scored.csv"
    print("Cleaning...")
    PREdata = clean(PREdata, 'PRE')
    PSTdata = clean(PSTdata, 'PST')

    # Generate IDs for PRE and PST
    # PREdata = PREdata[:-4] + "_cleaned.csv"
    # PSTdata = PSTdata[:-4] + "_cleaned.csv"

    print("Generating student and instructor IDs...")

    PREdata = id_gen(PREdata, 'PRE', year, season, stu_DB_name, instr_DB_name)
    PSTdata = id_gen(PSTdata, 'PST', year, season, stu_DB_name, instr_DB_name)

    # Split Repeats
    print("Splitting...")
    PREdata = repeats(PREdata, 'PRE')
    PSTdata = repeats(PSTdata, 'PST')

    # Match
    # PREdata = PREdata[:-4] + "_id.csv"
    # PSTdata = PSTdata[:-4] + "_id.csv"
    #PREdata = pd.read_csv(PREdata)
    #PSTdata = pd.read_csv(PSTdata)
    print("Matching...")
    PRE_not_matched, PST_not_matched, pairs, instructor_change = match(
        PREdata, PSTdata)

    # Merge
    print("Merging...")
    mergedData = merge(PRE_not_matched, PST_not_matched, PREdata, PSTdata,
                       pairs)
    mergedData.to_csv(mergeName, encoding='utf-8', index=False)
    print("Merged dataset saved to {0}".format(mergeName))
Example #42
    def _build(self, env, output_path, force, no_filters, parent_filters=[]):
        """Internal recursive build method.
        """

        # TODO: We could support a nested bundle downgrading its debug
        # setting from "filters" to "merge only", i.e. enabling
        # ``no_filters``. We cannot support downgrading to
        # "full debug/no merge" (debug=True), of course.
        #
        # Right now we simply use the debug setting of the root bundle
        # we build, and it overrides all the nested bundles. If we
        # allow nested bundles to overwrite the debug value of parent
        # bundles, as described above, then we should also deal with
        # a child bundle enabling debug=True during a merge, i.e.
        # raising an error rather than ignoring it as we do now.
        resolved_contents = self.resolve_contents(env)
        if not resolved_contents:
            raise BuildError('empty bundle cannot be built')

        # Ensure that the filters are ready
        for filter in self.filters:
            filter.set_environment(env)

        # Apply input filters to all the contents. Note that we use
        # both this bundle's filters as well as those given to us by
        # the parent. We ONLY do this for the input filters,
        # because we need them to be applied before we apply our own
        # output filters.
        # TODO: Note that merge_filters() removes duplicates. Is this
        # really the right thing to do, or does it just confuse things
        # due to there now being different kinds of behavior...
        combined_filters = merge_filters(self.filters, parent_filters)
        cache = get_cache(env)
        hunks = []
        for c in resolved_contents:
            if isinstance(c, Bundle):
                hunk = c._build(env, output_path, force, no_filters,
                                combined_filters)
                hunks.append(hunk)
            else:
                if is_url(c):
                    hunk = UrlHunk(c)
                else:
                    hunk = FileHunk(env.abspath(c))
                if no_filters:
                    hunks.append(hunk)
                else:
                    hunks.append(
                        apply_filters(hunk,
                                      combined_filters,
                                      'input',
                                      cache,
                                      output_path=output_path))

        # Return all source hunks as one, with output filters applied
        final = merge(hunks)
        if no_filters:
            return final
        else:
            return apply_filters(final, self.filters, 'output', cache)
Example #43
def mergeSort(array):
    if len(array) == 1:
        return array
    half = int(len(array) / 2)
    array1 = mergeSort(array[:half])
    array2 = mergeSort(array[half:])
    return merge(array1, array2)
Example #44
 def test_only_defaults_key(self):
     input = {
         '__all__': {
             'groups': ['sudo'],
         },
     }
     expected_output = {}
     self.assertEqual(merge(input), expected_output)
Example #45
def sort(a):
  if len(a) < 7:
    return insert.sort(a)

  mid = len(a) // 2
  left = merge.sort(a[:mid])
  right = merge.sort(a[mid:])
  return merge.merge(left,right)
Example #46
def generate_log(by_uri, to_uri, data_uri, policy_uri,
                 by_label = None, to_label = None, data_label = None):

    logging.debug("data_uri: %s", data_uri)
    
    g = rdflib.Graph()
    g.bind("","http://dig.csail.mit.edu/2010/DHS-fusion/common/fusion_ONT#")
    g.bind("rdfify","{0}#".format(_rdfify_prefix))
    
    trans = rdflib.URIRef(_transaction_uri)
    
    g.add((trans, rdflib.namespace.RDF.type, rdflib.URIRef("{0}#Request".format(policy_uri))))
    g.add((trans, rdflib.namespace.RDF.type, rdflib.URIRef("{0}#Disseminate".format(policy_uri))))
    
    g.add((trans, rdflib.URIRef("{0}#by".format(policy_uri)), rdflib.URIRef(by_uri)))
    g.add((trans, rdflib.URIRef("{0}#to".format(policy_uri)), rdflib.URIRef(to_uri)))
    g.add((trans, rdflib.URIRef("{0}#data".format(policy_uri)), rdflib.URIRef(data_uri)))

    g.add((rdflib.URIRef(by_uri), rdflib.URIRef(_label_predicate), rdflib.Literal(by_label)))
    g.add((rdflib.URIRef(to_uri), rdflib.URIRef(_label_predicate), rdflib.Literal(to_label)))
    g.add((rdflib.URIRef(data_uri), rdflib.URIRef(_label_predicate), rdflib.Literal(data_label)))
    
    g2, s = merge.merge([by_uri, to_uri, data_uri], ["http://link.csail.mit.edu/projects/devel/2015/air-niem-compatibility-revised/xsd/niem/niem-core/3.0/niem-core.xsd"])
    
    stmp = tempfile.NamedTemporaryFile()
    gtmp = tempfile.NamedTemporaryFile()

    stmp.write(s.serialize(format='n3'))
    gtmp.write(g2.serialize(format='n3'))

    stmp.flush()
    gtmp.flush()
    
    g.parse(stmp.name, format='n3')
    g.parse(gtmp.name, format='n3')
    
    g2tmp = tempfile.NamedTemporaryFile(delete=False)
    
    g.add((rdflib.URIRef("{0}#this_graph".format(_rdfify_prefix)), rdflib.URIRef("{0}#uri".format(_rdfify_prefix)), rdflib.URIRef("file://{0}".format(g2tmp.name))))
    
    ## bzy HACK HACK HACK HACK HACK
    ## cwm gives us no way to up-cast URIs from strings
    ## so we have to do it the slow way ...
    to_remove = set()
    for s, p, o in g.triples((None, rdflib.URIRef("{0}PRIb7Policy".format(_extern_prefix)), None)):
        if isinstance(o, rdflib.Literal):
            to_remove.add((s, p, o))
            g.add((s, p, rdflib.URIRef(o.value)))
    for t in to_remove:
        g.remove(t)
    
    gstr = g.serialize(format='n3')
    
    g2tmp.write(gstr)
    g2tmp.flush()
    
    return gstr
Example #47
    def _build(self, env, output_path, force, no_filters, parent_filters=[]):
        """Internal recursive build method.
        """

        # TODO: We could support a nested bundle downgrading its debug
        # setting from "filters" to "merge only", i.e. enabling
        # ``no_filters``. We cannot support downgrading to
        # "full debug/no merge" (debug=True), of course.
        #
        # Right now we simply use the debug setting of the root bundle
        # we build, and it overrides all the nested bundles. If we
        # allow nested bundles to overwrite the debug value of parent
        # bundles, as described above, then we should also deal with
        # a child bundle enabling debug=True during a merge, i.e.
        # raising an error rather than ignoring it as we do now.
        resolved_contents = self.resolve_contents(env)
        if not resolved_contents:
            raise BuildError('empty bundle cannot be built')

        # Ensure that the filters are ready
        for filter in self.filters:
            filter.set_environment(env)

        # Apply input filters to all the contents. Note that we use
        # both this bundle's filters as well as those given to us by
        # the parent. We ONLY do this for the input filters,
        # because we need them to be applied before we apply our own
        # output filters.
        # TODO: Note that merge_filters() removes duplicates. Is this
        # really the right thing to do, or does it just confuse things
        # due to there now being different kinds of behavior...
        combined_filters = merge_filters(self.filters, parent_filters)
        cache = get_cache(env)
        hunks = []
        for c in resolved_contents:
            if isinstance(c, Bundle):
                hunk = c._build(env, output_path, force, no_filters,
                                combined_filters)
                hunks.append(hunk)
            else:
                if is_url(c):
                    hunk = UrlHunk(c)
                else:
                    hunk = FileHunk(env.abspath(c))
                if no_filters:
                    hunks.append(hunk)
                else:
                    hunks.append(apply_filters(
                        hunk, combined_filters, 'input', cache,
                        output_path=output_path))

        # Return all source hunks as one, with output filters applied
        final = merge(hunks)
        if no_filters:
            return final
        else:
            return apply_filters(final, self.filters, 'output', cache)
Example #48
0
	def testMergeLocalReq(self):
		master = parse(tap(merge.merge(header + "<group x='x'>\n    <implementation id='sha1=123' version='1'/>\n  </group>" + footer, local_file_req)))
		assert master.uri == 'http://test/hello.xml', master
		assert len(master.implementations) == 2
		deps = master.implementations['sha1=003'].requires
		assert len(deps) == 1
		assert deps[0].interface == 'http://foo', deps[0]

		assert master.implementations['sha1=003'].metadata['http://mynamespace/foo bob'] == 'bob'
Example #49
 def compile_ast(self):
     """
     Forces compilation of the internal ast tree.
     This must be done after any changes to the set of
     predicates.
     """
     if self.finalized:
         raise Exception("Cannot compile a finalized set!")
     merged = merge(list(self.predicates))
     self.ast = refactor(self, merged, self.settings)
Example #50
def regularlyCommit():
	import merge
	while True:
		message = None
		try:
			message = mergequeue.get()
			if message == 'done':
				print('done')
				break
			dest, source, inferior = message
			print(hex(dest), 'inferior to', hex(source))
			merge.merge(source, dest, inferior)
			print('left',mergequeue.qsize())
		except Exception as e:
			import traceback
			traceback.print_exc()
		finally:
			if message:
				mergequeue.task_done()
Example #51
 def merge_and_compare(self, list1):
     """
     Check if there was a move
     """
     _temp = list1
     list1 = merge.merge(list1)
     if _temp == list1:
         return False
     else:
         return True
Example #52
 def __init__(self, remoteShell, domainAdmin="admin", domain=None):
     self.remoteShell = remoteShell
     self.vastoolPath = "/opt/quest/bin/vastool"     
     self.domainAdmin = domainAdmin
     self.defaultDomain = domain
     
     self.info = info.info(self.run)
     self.flush = flush.flush(self.run)
     self.create = create.create(self.run, self.defaultDomain)
     self.delete = delete.delete(self.run)
     self.timesync = timesync.timesync(self.run)
     self.nss = nss.nss(self.run)
     self.group = group.group(self.run)
     self.isvas = isvas.isvas(self.run)
     self.list = list.list(self.run)
     self.auth = auth.auth(self.run, self.defaultDomain)
     self.cache = cache.cache(self.run)
     self.configure = configure.configure(self.run)
     self.configureVas = configureVas.configureVas(self.run)
     self.schema = schema.schema(self.run)
     self.merge = merge.merge(self.run)
     self.unmerge = unmerge.unmerge(self.run)
     self.user = User.user(self.run)
     self.ktutil = ktutil.ktutil(self.run)
     self.load = load.load(self.run)
     self._license = License.License(self.run)
     self.License = self._license.License
     self.parseLicense = self._license.parseLicense
     self.compareLicenses = self._license.compareLicenses
     #self.vasUtilities = vasUtilities.vasUtilities(self.remoteShell)
     self.unconfigure = unconfigure.unconfigure(self.run)
     self.nssdiag = nssdiag(self.run)
     
     isinstance(self.info, info.info)
     isinstance(self.flush, flush.flush)
     isinstance(self.create, create.create)
     isinstance(self.delete, delete.delete)
     isinstance(self.timesync, timesync.timesync)
     isinstance(self.nss, nss.nss)
     isinstance(self.group, group.group)
     isinstance(self.isvas, isvas.isvas)
     isinstance(self.list, list.list)
     isinstance(self.auth, auth.auth)
     isinstance(self.cache, cache.cache)
     isinstance(self.configure, configure.configure)
     isinstance(self.configureVas, configureVas.configureVas)
     isinstance(self.schema, schema.schema)
     isinstance(self.merge, merge.merge)
     isinstance(self.unmerge, unmerge.unmerge)
     isinstance(self.user, User.user)
     isinstance(self.ktutil, ktutil.ktutil)
     isinstance(self.load, load.load)
     #isinstance(self.vasUtilities, vasUtilities.vasUtilities)
     isinstance(self.unconfigure, unconfigure.unconfigure)
     isinstance(self.nssdiag, nssdiag)
Example #53
    def move(self, direction):
        """
        Move all tiles in the given direction and add
        a new tile if any tiles moved.
        """
        if self.FINISHED == 1:
            return
        arr_offset = OFFSETS[direction]
        arr_start_index = self.zero_index[direction]
        arr_merge = []

        # get the UP grid merging
        for row, col in arr_start_index:
            i_count = 0
            if direction == UP or direction == DOWN:
                while i_count < self.get_grid_height():
                    arr_merge.append(self.get_tile(row + (arr_offset[0] * i_count),
                                                   col + (arr_offset[1] * i_count)))
                    i_count += 1
            else:
                while i_count < self.get_grid_width():
                    arr_merge.append(self.get_tile(row + (arr_offset[0] * i_count),
                                                   col + (arr_offset[1] * i_count)))
                    i_count += 1
            """
            for row_count in range(self.get_grid_height()):
                for col_count in range(self.get_grid_width()):
                    arr_merge.append(self.get_tile(row + (arr_offset[0] * row_count),
                                                   col + (arr_offset[1] * col_count)))
            """
            # print "row:%d col:%d" % (row, col)
            # print arr_merge
            arr_merged = merge(arr_merge)
            i_count = 0
            if direction == UP or direction == DOWN:
                while i_count < self.get_grid_height():
                    print "pre-merge:%d, post-merge:%d" % (arr_merge[i_count], arr_merged[i_count])
                    if arr_merge[i_count] != arr_merged[i_count]:
                        self.moved = 1
                    self.set_tile(row + (arr_offset[0] * i_count),
                                  col + (arr_offset[1] * i_count), arr_merged[i_count])
                    i_count += 1
            else:
                while i_count < self.get_grid_width():
                    print "pre-merge:%d, post-merge:%d" % (arr_merge[i_count], arr_merged[i_count])
                    if arr_merge[i_count] != arr_merged[i_count]:
                        self.moved = 1
                    self.set_tile(row + (arr_offset[0] * i_count),
                                  col + (arr_offset[1] * i_count), arr_merged[i_count])
                    i_count += 1
            del arr_merge[:]
            del arr_merged[:]
        if self.moved:
            self.moved = 0
            self.new_tile()
Example #54
 def move(self, direction):
     """
     Move all tiles in the given direction and add
     a new tile if any tiles moved.
     """
     tiles_moved = []
     if direction == 1:
         for column in range(self.get_grid_width()):
             temporary = []
             for row in range(self.get_grid_height()):
                 temporary.append(self._board[row][column])
             tiles_moved.append(self.merge_and_compare(temporary))
             temporary = merge.merge(temporary)
         
             for row in range(self.get_grid_height()):
                 self._board[row][column] = temporary[row]
     elif direction == 2:
         for column in range(self.get_grid_width()):
             temporary = []
             for row in range(self.get_grid_height() - 1, -1, -1):
                 temporary.append(self._board[row][column])
             tiles_moved.append(self.merge_and_compare(temporary))
             temporary = merge.merge(temporary)
             
             for row in range(self.get_grid_height() - 1, -1, -1):
                 self._board[row][column] = temporary[self.get_grid_height() - 1 - row]
     elif direction == 3:
         for row in range(self.get_grid_height()):
             temporary = self._board[row]
             tiles_moved.append(self.merge_and_compare(temporary))
             temporary = merge.merge(temporary)
             self._board[row] = temporary
     else:
         for row in range(self.get_grid_height()):
             temporary = self._board[row]
             temporary.reverse()
             tiles_moved.append(self.merge_and_compare(temporary))
             temporary = merge.merge(temporary)
             temporary.reverse()
             self._board[row] = temporary    
     if True in tiles_moved:
         self.new_tile()
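Examples #51, #53 and #54 all call a one-dimensional merge(line) from the 2048 game. A minimal sketch of that helper's usual contract (slide nonzero tiles forward, combine each pair of equal neighbors once, pad with zeros; this is an assumed implementation, not the original):

def merge(line):
    # Slide nonzero tiles to the front, preserving order.
    tiles = [value for value in line if value != 0]
    merged = []
    i = 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            # Combine a pair of equal neighbors exactly once.
            merged.append(tiles[i] * 2)
            i += 2
        else:
            merged.append(tiles[i])
            i += 1
    # Pad back out to the original length.
    return merged + [0] * (len(line) - len(merged))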
Example #55
 def createAlgsList(self):
     # First we populate the list of algorithms with those created
     # extending GeoAlgorithm directly (those that execute GDAL
     # using the console)
     self.preloadedAlgs = [
         nearblack(),
         information(),
         warp(),
         translate(),
         rgb2pct(),
         pct2rgb(),
         merge(),
         buildvrt(),
         polygonize(),
         gdaladdo(),
         ClipByExtent(),
         ClipByMask(),
         contour(),
         rasterize(),
         proximity(),
         sieve(),
         fillnodata(),
         ExtractProjection(),
         gdal2xyz(),
         hillshade(),
         slope(),
         aspect(),
         tri(),
         tpi(),
         roughness(),
         ColorRelief(),
         GridInvDist(),
         GridAverage(),
         GridNearest(),
         GridDataMetrics(),
         gdaltindex(),
         gdalcalc(),
         rasterize_over(),
         retile(),
         gdal2tiles(),
         # ----- OGR tools -----
         OgrInfo(),
         Ogr2Ogr(),
         Ogr2OgrClip(),
         Ogr2OgrClipExtent(),
         Ogr2OgrToPostGis(),
         Ogr2OgrToPostGisList(),
         Ogr2OgrPointsOnLines(),
         Ogr2OgrBuffer(),
         Ogr2OgrDissolve(),
         Ogr2OgrOneSideBuffer(),
         Ogr2OgrTableToPostGisList(),
         OgrSql(),
     ]
Example #56
def getdata(video, domerge=True, mergemethod=None, mergethreshold=0.5,
        workers=None, groundplane=False):

    response = []
    if domerge:
        for boxes, paths in merge.merge(video.segments,
                                        method=mergemethod,
                                        threshold=mergethreshold,
                                        groundplane=groundplane):
            trackworkers = list(set(x.job.workerid for x in paths))
            tracklet = Tracklet(
                paths[0].label.text,
                paths[0].labelid,
                paths[0].userid,
                paths,
                boxes,
                trackworkers,
                {}
            )
            response.append(tracklet)
    else:
        for segment in video.segments:
            for job in segment.jobs:
                if not job.useful:
                    continue
                worker = job.workerid
                for path in job.paths:
                    tracklet = Tracklet(
                        path.label.text,
                        path.labelid,
                        path.userid, 
                        [path],
                        path.getboxes(),
                        [worker],
                        {}
                    )
                    response.append(tracklet)

    if workers:
        response = [x for x in response if set(x.workers) & workers]

    interpolated = []
    for track in response:
        path = vision.track.interpolation.LinearFill(track.boxes)
        velocities = velocity.velocityforboxes(path)
        tracklet = Tracklet(track.label, track.labelid, track.userid,
                                        track.paths, path, track.workers, velocities)
        interpolated.append(tracklet)
    response = interpolated

    for tracklet in response:
        tracklet.bind()

    return response
Example #57
def reformat(view, edit):
    vsize = view.size()
    region = sublime.Region(0, vsize)
    if region.empty():
        sublime.status_message("Empty document!")
        return

    # assign the external program
    program = getExecutable()
    if not program:
        return

    # specify the language override (because input is from stdin)
    lang = getLanguage(view)
    if not lang:
        return

    # specify the config file:
    config = getConfigByFilter(view.file_name())
    if not config:
        return
    if config == "none":
        config = getConfigByLang(lang)
        if not config:
            return
    if config == "none":
        config = getConfig()
        if not config:
            return

    command = [program, "-l", lang, "-c", config]

    # dump command to console
    msg = " ".join(command)
    print("> " + msg)
    sublime.status_message(msg)

    try:
        proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        content = view.substr(region).encode("utf-8")
        out, err = proc.communicate(input=content)

        return_code = proc.poll()
        if return_code != 0:
            sublime.error_message("Uncrustify error #%d:\n%s" % (return_code, err.decode("utf-8")))
            return

        dirty, err = merge(view, vsize, out.decode("utf-8"), edit)
        if err:
            sublime.error_message("Uncrustify merge error:\n%s" % (err))

    except (OSError, ValueError, subprocess.CalledProcessError, Exception) as e:
        sublime.error_message("Cannot execute '%s'\n\n%s" % (command[0], e))