Example No. 1
	def aggregate(self, video, aggregated):
		cls = BatchIVA

		batch = Batch()

		ts_set = []
		coder_dat_buf = dict()
		coder_dat_len = dict()
		videoL = 999999999999L

		for c in cls.coder_set:
			name = "+".join([c, video])
			filename = os.path.join(cls.feedback_folder, "feedback/"+name+".txt")
			if not os.path.exists(filename):
				print filename, "not exists"
				continue
			feedback = []
			videoLen = batch._processFeedbackFile( filename, feedback )
			coder_dat_buf[name] = feedback
			coder_dat_len[name] = videoLen
			
			if videoLen < videoL: videoL = videoLen
		
		try:
			for k in coder_dat_buf.iterkeys():
				v = coder_dat_buf[k]
				if len(v) == 0:
					pass
				elif coder_dat_len[k] > videoL + 1500:
					print coder_dat_len[k], ";", videoL, " [aggregate] ", video, k, " is outlier"
				else:
					ts_set.append(v)
		except Exception, exception:
			raise exception
Example No. 2
	def _matchWithFeature(self, c, v, coder_feature, window=1000):
		cls   = BatchIVA
		batch = Batch()

		name = "+".join( [c, v] )
		filename = os.path.join( cls.feedback_folder, "feedback/"+name+".txt" )
		if not os.path.exists(filename):
			print filename, "not exists"
			return

		feedback = []
		videoLen = batch._processFeedbackFile(filename, feedback)

		if c not in cls.coder_backchannel_num:
			cls.coder_backchannel_num[c] = len(feedback)
		else:
			cls.coder_backchannel_num[c] = cls.coder_backchannel_num[c] + len(feedback)

		try:
			for feature_name in cls.video_info_buffer[v].iterkeys():
				feature_values = cls.video_info_buffer[v][feature_name]
				for feature_value in feature_values:
					for f in feedback:
						if feature_value[0] <= f and f <= feature_value[1]+window:
							if feature_name in coder_feature:
								coder_feature[feature_name] = coder_feature[feature_name]+1
							else:
								coder_feature[feature_name] = 1
		except Exception, exception:
			print exception
Example No. 3
	def load_keyboard_consensus(self, speaker_id):
		result = []
		data_root = os.path.join(os.path.dirname(__file__), "../analysis/data/")
		batch = Batch()
		batch.load(data_root)
		batch._getDataOfVideo(speaker_id, result)
		return result
Example No. 4
def driver_listener(transaction_queue):
    start = time()
    driver = Driver()
    i = 0
    while True:
        batch_file = transaction_queue.get()
        batch = Batch()
        batch.load(batch_file)
        for transaction in batch.items:
            try:
                added = driver.run(transaction)
                duration = time() - start
                total = len(driver.hset) + len(driver.lset)
                print('Driver rate: {} of {} ({}|{})\r'.format(round(total / duration, 3), total, len(driver.hset), len(driver.lset)), flush=True, end='')
                if added:
                    i += 1
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except neobolt.exceptions.CypherSyntaxError:
                pass
            except Exception as e:
                print(e, flush=True)
                print(transaction.in_label, flush=True)
                print(transaction.out_label, flush=True)
                print(transaction.uuid, flush=True)
                print(transaction.from_uuid, flush=True)
                print(transaction.data, flush=True)
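A minimal producer sketch for the listener above, assuming batch files are written to disk and their paths are fed through a multiprocessing queue (the feed_batches helper and file layout are illustrative, not part of the original project):

from multiprocessing import Process, Queue

def feed_batches(batch_files):
    # Start driver_listener in a worker process and push batch file paths to it.
    transaction_queue = Queue()
    listener = Process(target=driver_listener, args=(transaction_queue,))
    listener.start()
    for path in batch_files:
        transaction_queue.put(path)
    return listener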
Example No. 5
    def __init__(self, dbname, rounds):

        isp = dbname[3:7]
        self.isp = isp
        self.ISP_edges_file = os.getcwd () + '/ISP_topo/'+str (isp) +'_edges.txt'
        self.ISP_nodes_file = os.getcwd () + '/ISP_topo/'+str (isp)+'_nodes.txt'
        self.rib_prefixes_file = os.getcwd() + '/rib_feeds/' + "rib20011204_prefixes.txt"
        self.rib_peerIPs_file = os.getcwd() + '/rib_feeds/' + "rib20011204_nodes.txt"

        print "for database " + dbname + "--------------------"
        create_db (dbname, Batch.username) # add comments

        if database_exists == 0:
            add_pgrouting_plpy_plsh_extension (dbname, Batch.username)
            load_schema (dbname, Batch.username, "/home/mininet/ravel/sql_scripts/primitive.sql")
            Batch_isp.init_ISP_topo (self, dbname)

        Batch.__init__(self,dbname, rounds)
        remove_profile_schema (self.cur)

        rib_feeds_all = os.getcwd() + '/rib_feeds/rib20011204_edges.txt'
        feeds = dbname[8:]
        self.rib_edges_file = os.getcwd() + '/rib_feeds/rib20011204_edges_' + str (feeds) + '.txt'
        os.system ("head -n " + str(feeds) + " " + rib_feeds_all + " > " + self.rib_edges_file)

        Batch_isp.init_rib (self)
Example No. 6
    def fetch(self):
        Batch.fetch(self)

        self.cur.execute("SELECT * FROM tm;")
        cs = self.cur.fetchall()
        self.tm = [[h['fid'], h['src'], h['dst'], h['vol'], h['fw'], h['lb']]
                   for h in cs]
Example No. 7
	def _matchWithFeature(self, coderId, videoId, coder_feature, video_feature, window=1000):
		try:
			cls = DiveFeature

			feedback = []
			
			name = "+".join( [coderId, videoId] )
			filename = os.path.join( cls.data_root, "feedback/" + name + ".txt" )

			batch = Batch()
			videoLen = batch._processFeedbackFile(filename, feedback)

			try:
				for feature_name in cls.video_info_buffer[videoId].iterkeys():
					feature_values = cls.video_info_buffer[videoId][feature_name]
					
					for feature_value in feature_values:
						for f in feedback:
							if feature_value[0] <= f and f <= feature_value[1]+window:
								if feature_name in video_feature:
									video_feature[feature_name] = video_feature[feature_name] + 1
								else:
									video_feature[feature_name] = 1
								if feature_name in coder_feature:
									coder_feature[feature_name] = coder_feature[feature_name] + 1
								else:
									coder_feature[feature_name] = 1	
			except Exception, exception:
				print exception

		except Exception, exception:
			print "_matchWithFeature => ", exception
Example No. 8
    def __init__(self, dbname, rounds):

        isp = dbname[3:7]
        self.isp = isp
        self.ISP_edges_file = os.getcwd() + '/ISP_topo/' + str(
            isp) + '_edges.txt'
        self.ISP_nodes_file = os.getcwd() + '/ISP_topo/' + str(
            isp) + '_nodes.txt'
        self.rib_prefixes_file = os.getcwd(
        ) + '/rib_feeds/' + "rib20011204_prefixes.txt"
        self.rib_peerIPs_file = os.getcwd(
        ) + '/rib_feeds/' + "rib20011204_nodes.txt"

        print "for database " + dbname + "--------------------"
        create_db(dbname, Batch.username)  # add comments

        if database_exists == 0:
            add_pgrouting_plpy_plsh_extension(dbname, Batch.username)
            load_schema(dbname, Batch.username,
                        "/home/mininet/ravel/sql_scripts/primitive.sql")
            Batch_isp.init_ISP_topo(self, dbname)

        Batch.__init__(self, dbname, rounds)
        remove_profile_schema(self.cur)

        rib_feeds_all = os.getcwd() + '/rib_feeds/rib20011204_edges.txt'
        feeds = dbname[8:]
        self.rib_edges_file = os.getcwd(
        ) + '/rib_feeds/rib20011204_edges_' + str(feeds) + '.txt'
        os.system("head -n " + str(feeds) + " " + rib_feeds_all + " > " +
                  self.rib_edges_file)

        Batch_isp.init_rib(self)
Example No. 9
    def post(self):
        """
        Execute GraphQL queries and mutations
        Use this endpoint to send http request to the GraphQL API.
        """
        payload = request.json

        # Execute request on GraphQL API
        status, data = utils.execute_graphql_request(payload['query'])

        # Execute batch of indicators
        if status == 200 and 'executeBatch' in payload['query']:
            if 'id' in data['data']['executeBatch']['batch']:
                batch_id = str(data['data']['executeBatch']['batch']['id'])
                batch = Batch()
                batch.execute(batch_id)
            else:
                message = "Batch Id attribute is mandatory in the payload to be able to trigger the batch execution. Example: {'query': 'mutation{executeBatch(input:{indicatorGroupId:1}){batch{id}}}'"
                abort(400, message)

        # Test connectivity to a data source
        if status == 200 and 'testDataSource' in payload['query']:
            if 'id' in data['data']['testDataSource']['dataSource']:
                data_source_id = str(
                    data['data']['testDataSource']['dataSource']['id'])
                data_source = DataSource()
                data = data_source.test(data_source_id)
            else:
                message = "Data Source Id attribute is mandatory in the payload to be able to test the connectivity. Example: {'query': 'mutation{testDataSource(input:{dataSourceId:1}){dataSource{id}}}'"
                abort(400, message)

        if status == 200:
            return jsonify(data)
        else:
            abort(500, data)
Example No. 10
def clusterize(path, algo):
    attacks = list(read_csv(path))

    cluster_builder = ClusterBuilder(nitems=len(attacks))

    batch = Batch()
    addr_to_index = {}
    for index, attack in enumerate(attacks):
        batch.feed(attack)
        addr = attack.source_addr
        try:
            prev_index = addr_to_index[addr]
        except KeyError:
            pass
        else:
            cluster_builder.merge(prev_index, index)
        addr_to_index[addr] = index

    db = algo.fit([batch.features(attack) for attack in attacks])

    specimen = [-1 for _ in range(len(set(db.labels_)))]

    for index, attack in enumerate(attacks):
        cluster = db.labels_[index]
        if cluster == -1:
            cluster_builder.mark_as_noise(index)
            continue
        if specimen[cluster] != -1:
            cluster_builder.merge(specimen[cluster], index)
        specimen[cluster] = index

    return Clusters(attacks=attacks, classes=cluster_builder.finalize())
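A hedged usage sketch: clusterize() only needs an estimator exposing fit() and labels_, so a scikit-learn DBSCAN instance fits the interface (the CSV path and parameters below are illustrative):

from sklearn.cluster import DBSCAN

clusters = clusterize("attacks.csv", DBSCAN(eps=0.5, min_samples=5))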
Example No. 11
    def tenant_fullmesh(self, hosts):
        f = self.f
        cur = self.cur

        Batch.update_max_fid(self)
        fid = self.max_fid + 1

        cur.execute("select max (counts) from clock;")
        ct = cur.fetchall()[0]['max'] + 1

        for i in range(len(hosts)):
            for j in range(i + 1, len(hosts)):
                print "tenant_fullmesh: [" + str(hosts[i]) + "," + str(
                    hosts[j]) + "]"
                t1 = time.time()
                cur.execute("INSERT INTO tenant_policy values (%s,%s,%s);",
                            ([str(fid), int(hosts[i]),
                              int(hosts[j])]))
                cur.execute("INSERT INTO p_spv values (%s,'on');", ([ct]))
                t2 = time.time()
                f.write('----rt*tenant: route ins----' +
                        str((t2 - t1) * 1000) + '\n')
                f.flush()
                ct += 1
                fid += 1
Example No. 12
    def run(self):
        with open(self.train_data, 'rb') as f:
            data = pickle.load(f)
        with open(self.train_labels, 'rb') as f:
            labels = pickle.load(f)
        labels = one_hot(sorted(list(set(labels))), labels)

        with open(self.test_data, 'rb') as f:
            data_t = pickle.load(f)
        with open(self.test_labels, 'rb') as f:
            labels_t = pickle.load(f)
        labels_t = one_hot(sorted(list(set(labels_t))), labels_t)

        b = Batch(data, labels, Params.batch_size)

        var = tf.trainable_variables()
        conv = [v for v in var if v.name.startswith("conv")]
        fool = [v for v in var if v.name.startswith("fooling")]
        fc = [v for v in var if v.name.startswith("fc")]
        smax = [v for v in var if v.name.startswith("soft_max")]


        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.output))
        optimiser = tf.train.AdamOptimizer(Params.learning_rate).minimize(cross_entropy, var_list=conv + fc + smax)
        # collect prediction in the batch
        correct_prediction = tf.equal(tf.argmax(self.pred, 1), tf.argmax(self.output, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        total_batch = int(len(data) / Params.batch_size)

        learning = []
        if self.trainable:
            saver = tf.train.Saver()
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                for epoch in range(Params.epoch):
                    b.shuffle()
                    avg_cost = 0
                    print ("{} epoch".format(epoch))
                    for i in range(total_batch):
                        batch_x, batch_y = b.next_batch()
                        _, cost = sess.run([optimiser, cross_entropy], feed_dict={model.input: batch_x, model.output: batch_y})
                        avg_cost += cost/total_batch
                    acc = sess.run(accuracy, feed_dict={model.input: data_t, model.output: labels_t})
                    learning.append(acc)
                    # saving the model
                    if epoch % 10 == 0:
                        pass
                        # checkpoint_path = os.path.join(Params.checkpoint_path, 'model.ckpt')
                        # save_path = saver.save(sess, checkpoint_path)
                        # print("model saved to {}".format(checkpoint_path))
                        # print (filters)

                    # print(avg_cost, acc)
        plt.plot(learning)
        plt.title('Epoch vs Test accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Test accuracy')
        plt.show()
Example No. 13
    def close (self):
        os.system ("cp "+ Batch.logfile + ' ' + self.logdest)

        if self.profile == True:
            os.system ("sudo mv "+ self.logdest + ' ' + ' /media/sf_share/ravel_plot/profile/log/')
        else:
            os.system ("sudo mv "+ self.logdest + ' ' + ' /media/sf_share/ravel_plot/fattree/log/')
        Batch.close (self)
Example No. 14
class DistributionTestCase(unittest.TestCase):
    
    def setUp(self):
        self.job = Job()
        self.batch = Batch()
        self.resource = Resource()
        
        rootDir = os.environ['BOLT_DIR']
        self.batch.readConfig(rootDir + configDir + "/" + batchConfig)
        self.resource.readConfig(rootDir + configDir + "/" + resourceConfig)

    def testParallelTaskDitributionPureMPI(self):
        """Pure MPI task distribution (fully populated)."""
        
        # Set the parallel distribution
        self.job.setTasks(1024)
        self.job.setTasksPerNode(self.resource.numCoresPerNode())
        self.job.setThreads(1)
        self.job.setParallelDistribution(self.resource, self.batch)
        
        correct = "aprun -n 1024 -N 32 -S 8 -d 1"
        self.assertEqual(self.job.runLine, correct, "Value= '{0}', Expected= '{1}'".format(self.job.runLine, correct))
        
    def testParallelTaskDitributionHalfPopulate(self):
        """Pure MPI task distribution (half populated)."""
        
        # Set the parallel distribution
        self.job.setTasks(1024)
        self.job.setTasksPerNode(16)
        self.job.setThreads(1)
        self.job.setParallelDistribution(self.resource, self.batch)
        
        correct = "aprun -n 1024 -N 16 -S 4 -d 2"
        self.assertEqual(self.job.runLine, correct, "Value= '{0}', Expected= '{1}'".format(self.job.runLine, correct))

    def testParallelTaskDitributionTwoThreads(self):
        """Hybrid MPI/OpenMP task distribution (2 OpenMP threads)."""
        
        # Set the parallel distribution
        self.job.setTasks(1024)
        self.job.setTasksPerNode(16)
        self.job.setThreads(2)
        self.job.setParallelDistribution(self.resource, self.batch)
        
        correct = "export OMP_NUM_THREADS=2\naprun -n 1024 -N 16 -S 4 -d 2"
        self.assertEqual(self.job.runLine, correct, "Value= '{0}', Expected= '{1}'".format(self.job.runLine, correct))

    def testParallelTaskDitributionThreeThreads(self):
        """Hybrid MPI/OpenMP task distribution (3 OpenMP threads)."""
        
        # Set the parallel distribution
        self.job.setTasks(1024)
        self.job.setTasksPerNode(10)
        self.job.setThreads(3)
        self.job.setParallelDistribution(self.resource, self.batch)
        
        correct = "export OMP_NUM_THREADS=3\naprun -n 1024 -N 10 -d 3"
        self.assertEqual(self.job.runLine, correct, "Value= '{0}', Expected= '{1}'".format(self.job.runLine, correct))
Example No. 15
 def execute_batch(self, max_nodes, ami, instance_type):
     batch_id = 'batch-%s' % uuid.uuid4()
     batch = Batch('received')
     batch.ami = ami
     batch.instance_type = instance_type
     batch.max_nodes = max_nodes
     self.client.set(batch_id, pickle.dumps(batch))
     self.client.publish('batches', batch_id)
     return batch_id
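A possible consumer for the 'batches' channel, assuming self.client above is a redis.Redis instance; this is a sketch, not the project's actual worker:

import pickle
import redis

client = redis.Redis()
pubsub = client.pubsub()
pubsub.subscribe('batches')
for message in pubsub.listen():
    if message['type'] == 'message':
        batch_id = message['data']
        # assumes Batch objects expose a 'state' attribute, as the pickled objects above suggest
        batch = pickle.loads(client.get(batch_id))
        print(batch_id, batch.state)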
Example No. 16
    def init_rib (self):
        cursor = self.cur
        ISP_edges_file = self.ISP_edges_file
        ISP_nodes_file = self.ISP_nodes_file
        rib_prefixes_file = self.rib_prefixes_file
        rib_peerIPs_file = self.rib_peerIPs_file 
        rib_edges_file = self.rib_edges_file

        def peerIP_ISP_map (peerIP_nodes_file, ISP_nodes_file):
            pf = open (peerIP_nodes_file, "r").readlines ()
            ispf = open (ISP_nodes_file, "r").readlines ()

            node_map = {}
            for pn in pf:
                ISP_node = random.choice (ispf)
                ispf.remove (ISP_node)
                node_map[pn[:-1]] = int (ISP_node[:-1]) 
            return node_map

        # map (randomly picked) ISP nodes (switch nodes in tp table)
        # to peer IPs in rib feeds
        nm = peerIP_ISP_map (rib_peerIPs_file, ISP_nodes_file)
        ISP_borders = nm.values ()

        cursor.execute ("""
DROP TABLE IF EXISTS borders CASCADE;
CREATE UNLOGGED TABLE borders (
       sid     integer,
       peerip  text
);
""")
        # set up borders table, randomly pick 21 switches, and assign
        # each switch a unique peer IP
        for key in nm.keys():
            cursor.execute ("""INSERT INTO borders (sid, peerip) VALUES (%s,  %s)""", (nm[key], key))

        cursor.execute (""" 
SELECT *
FROM uhosts, borders WHERE
hid = 100000 + sid;
""")
        cs = self.cur.fetchall ()
        sid2u_hid = {h['sid']: int (h['u_hid']) for h in cs}
        # print len (sid2u_hid)

        ribs = open (rib_edges_file, "r").readlines ()

        Batch.update_max_fid (self)
        fid = self.max_fid + 1
        
        for r in ribs:
            switch_id = int (nm [r.split ()[0]]) 
            random_border = int(random.choice (ISP_borders))

            if random_border != switch_id:
                cursor.execute ("INSERT INTO rtm VALUES (%s,%s,%s);", (fid, sid2u_hid[switch_id], sid2u_hid[random_border]))
                fid += 1
Example No. 17
 def execute_batch(self, max_nodes, ami, instance_type):
     batch_id = 'batch-%s' % uuid.uuid4()
     batch = Batch('received')
     batch.ami = ami
     batch.instance_type = instance_type
     batch.max_nodes = max_nodes
     self.client.set(batch_id, pickle.dumps(batch))
     self.client.publish('batches', batch_id)
     return batch_id
Example No. 18
 def __init__(self, args):
     self._bidsDir = args.bids_dir
     self._dicomDir = args.dicom_dir
     self._session = Session(args.session, args.participant, self._bidsDir)
     self._parser = getattr(studyparser, args.algorithm)(self._dicomDir,
                                                         self._session)
     self._yes = args.yes
     self._codeDir = os.path.join(self._bidsDir, 'code')
     utils.make_directory_tree(self._codeDir)
     self._batch = Batch(self._codeDir, self._session)
Example No. 19
 def execute_batch(self, max_nodes, ami, instance_type, email=''):
     batch_id = 'batch-%s' % str(uuid.uuid4())[31:36]
     batch = Batch('received')
     batch.ami = ami
     batch.instance_type = instance_type
     batch.max_nodes = max_nodes
     batch.email = email
     self.client.set(batch_id, pickle.dumps(batch))
     self.client.publish('batches', batch_id)
     return batch_id
Example No. 20
def regress(simu, policy, policy_type, nb_trajs, render=False) -> None:
    batch = Batch()
    simu.env.set_reward_flag(False)
    simu.env.set_duration_flag(False)
    if policy_type == "bernoulli" or policy_type == "discrete":
        batch = perform_expert_episodes_bangbang(simu, batch, nb_trajs, render)
    else:
        batch = perform_expert_episodes_continuous(simu, batch, nb_trajs, render)
    # print("size: ", batch.size())
    batch.train_policy_through_regress(policy)
Example No. 21
    def close(self):
        os.system("cp " + Batch.logfile + ' ' + self.logdest)

        if self.profile == True:
            os.system("sudo mv " + self.logdest + ' ' +
                      ' /media/sf_share/ravel_plot/profile/log/')
        else:
            os.system("sudo mv " + self.logdest + ' ' +
                      ' /media/sf_share/ravel_plot/fattree/log/')
        Batch.close(self)
Example No. 22
def get_decode_data(hps, vocab, data_path, randomize=False):
    tf.logging.info('Fetching data..')
    filelist = glob.glob(data_path)
    inputs = []
    total_examples = 0
    total_batches = 0
    for f in filelist:
        reader = open(f, 'rb')
        while True:
            len_bytes = reader.read(8)
            if not len_bytes: break
            str_len = struct.unpack('q', len_bytes)[0]
            example_str = struct.unpack('%ds' % str_len,
                                        reader.read(str_len))[0]
            e = example_pb2.Example.FromString(example_str)
            try:
                article_text = e.features.feature['article'].bytes_list.value[
                    0].decode()
                if len(article_text) == 0:
                    #tf.logging.warning('Found an example with empty article text. Skipping it.')
                    pass
                else:
                    abstract_text = e.features.feature[
                        'abstract'].bytes_list.value[0].decode()
                    abstract_sentences = [
                        sent.strip()
                        for sent in data.abstract2sents(abstract_text)
                    ]
                    example = Example(article_text, abstract_sentences, vocab,
                                      hps)
                    inputs.append(example)
                    total_examples = total_examples + 1
            except ValueError:
                #tf.logging.error('Failed to get article or abstract from example')
                continue
    batches = []
    tf.logging.info('Creating batches..')
    if randomize:
        random.shuffle(inputs)
        example = inputs[0]
        b = [example for _ in range(hps.beam_size)]
        batches.append(Batch(b, hps, vocab))
        total_batches = 1
        total_examples = 1
    else:
        for i in range(0, len(inputs)):
            b = [inputs[i] for _ in range(hps.beam_size)]
            batches.append(Batch(b, hps, vocab))
            total_batches = total_batches + 1

    tf.logging.info('[TOTAL Batches]  : %i', total_batches)
    tf.logging.info('[TOTAL Examples] : %i', total_examples)
    tf.logging.info('Creating batches..COMPLETE')
    return batches
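A hedged call sketch, assuming hps carries a beam_size field and vocab is the project's vocabulary object; the data path is illustrative:

batches = get_decode_data(hps, vocab, 'finished_files/chunked/test_*', randomize=False)
print(len(batches), 'batches ready for decoding')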
Example No. 23
def main(svc_input, configs):
    logger = Logger("查询日志", verbose=True)
    log_file_name = "log%s_%s.txt" % (svc_input.replace("?", "#"), DateTimeUtil.get_current_datetime(is_date=True))
    log_file_path = WindowsUtil.convert_win_path(os.path.join(temp_dir, log_file_name))
    logger.info("[开始查询] %s" % svc_input)
    try:
        # Find the locally cached warranty history records
        history_zip = ZipFileSVC(zip_file_path=history_zipfile, mode='a')
        start_time = DateTimeUtil.get_current_datetime()
        # Generate all possible query codes
        svc_generator = SVCGenerator(svc_input, logger)
        logger.info("创建出所有可能查询码:%s" % len(svc_generator.target_svc_set))
        # Based on the local history of invalid query codes, filter out the target query codes and the invalid ones
        existed_svc = history_zip.find_file_regex(svc_generator.regex)
        svc_generator.generate_target_svc_batch(existed_svc, invalid_history_file_path)
        # Call the Dell query API and convert the API data into entity objects
        output_dell_asset_list = list([])
        if svc_generator.target_svc_set:
            batch = Batch(logger, configs)
            api_dell_asset_list = batch.begin(svc_generator.target_svc_set)
            output_dell_asset_list = api_dell_asset_list
            logger.info("从API中总共得到%s个结果" % (len(api_dell_asset_list)))
            logger.info("将实体类序列化到本地临时TXT文件")
            temp_text_files_path = DellAsset.serialize_txt_batch(api_dell_asset_list, temp_dir)
            logger.info("将序列化临时文件存到本地zip历史记录,总数:%s" % len(temp_text_files_path))
            history_zip.add_new_file_batch(temp_text_files_path)
            logger.info("删除临时 %s 个TXT文件" % len(temp_text_files_path))
            for file_path in temp_text_files_path:
                FileUtil.delete_file(file_path)
            logger.info("将API得到的实体类和历史记录实体类合并")
        else:
            logger.warn("目标查询码为空,仅从从历史记录中导出结果")
        for svc in svc_generator.existed_svc_set:
            dell_asset_content = history_zip.get_member_content(file_name="%s.txt" % svc)
            output_dell_asset_list.append(DellAsset.deserialize_txt(dell_asset_content))
        logger.info("添加历史记录,总共得到%s个结果" % (len(output_dell_asset_list)))
        excel_output_path = WindowsUtil.convert_win_path(os.path.join(excel_dir, "%s.xlsx" % svc_generator.get_file_name()))
        DellAsset.save_as_excel_batch(output_dell_asset_list, excel_output_path)
        if FileUtil.is_path_existed(excel_output_path):
            logger.info("存为Excel文档成功")
            end_time = DateTimeUtil.get_current_datetime()
            logger.info("总用时 %s " % DateTimeUtil.datetime_diff(start_time, end_time))
            logger.info("[查询结束] 总共%s个结果 保存在:%s" % (len(output_dell_asset_list), excel_output_path))
        else:
            logger.error("[保存结果失败] %s" % excel_output_path)
    except Exception as e:
        # If the program fails with an error, send an email report
        logger.error("[查询失败] 已发送报告 请等待解决")
        logger.error("%s\n%s" % (e, traceback.format_exc()))
        logger.save(log_file_path)
        email_api_key = configs["email_api_key"]
        email = Email(email_api_key, subject="[查询失败] %s %s" % (DateTimeUtil.get_current_datetime(is_date=True), svc_input))
        email.add_attachment(log_file_path)
        email.send(cc_mode=logger.has_error)
Example No. 24
    def __init__(self, batch_size, output_file, schema, validator_map):
        write_function = csv.writer(output_file,
                                    delimiter=',',
                                    lineterminator='\n').writerows
        self.batch = Batch(batch_size, write_function)
        self.header = None
        self.header_map = {}
        self.header_written = False

        self.schema = schema
        self.validator_map = validator_map
Example No. 25
	def export_to_batch(self, path, return_batch=False):
		batchable_txs = []
		for account_index in self.known_accounts:
			batchable_tx = self.cache.load(Cache.TX, account_index)
			if batchable_tx:
				batchable_txs.append(batchable_tx)
		batch = Batch(self.origin_branch.master_key_names, self.destination_branch.master_key_names, batchable_txs=batchable_txs)
		batch.validate()
		batch.to_file(path)
		if return_batch:
			return batch
Example No. 26
    def close (self):
        os.system ("cp "+ Batch.logfile + ' ' + self.logdest)

        if self.isp == '4755' or self.isp == '3356' or self.isp == '7018':
            t = 'isp_3sizes'
        elif self.isp == '2914':
            t = 'isp' + self.isp + '_3ribs'

        if self.profile == True:
            os.system ("sudo mv "+ self.logdest + ' ' + ' /media/sf_share/ravel_plot/profile/log/')
        else:
            os.system ("sudo mv "+ self.logdest + ' ' + ' /media/sf_share/ravel_plot/' + t + '/log/')

        Batch.close (self)
Example No. 27
def module_runner(module_name, serialize_queue, batch_file):
    module = fetch(module_name)
    
    if batch_file is None:
        gen = module.process()
    else:
        batch = Batch()
        batch.load(batch_file)
        # print(batch.items)
        gen = [transaction for item in batch.items for transaction in module.process(item)]
    i = 0
    for transaction in gen:
        serialize_queue.put(transaction)
        i += 1
Example No. 28
    def run(self):
        #update local policy vars on an interval for stability
        if self.learner_policy.get_step() % self.update_interval == 0:
            self.pull_vars()
        #print('learner at %s, actor at %s' % (
        #        self.learner_policy.get_step(),
        #        self.local_policy.get_step()))

        #FIXME: last obs might be from different game
        #since games are 4.5k steps, not a big deal
        n_actions = self.env.action_space.n

        batch = Batch()
        state = self.last_obs  #first action in each new env is ~random
        lstm_state = self.local_policy.lstm_init_state
        done = step = 0
        while not done and step < self.steps:
            action, value, logit, lstm_state = self.local_policy.act(
                state, lstm_state)
            next_state, reward, done, _ = self.env.step(action)

            #skip the specified number of frame, aggregate rewards?
            #FIXME: dont just skip, stack the frames
            #might mess things up if predicting above
            #using non-diff and diff frames
            #aggregate and non aggregate
            #must be constant
            #note the env employs frame skipping already
            #more skipping seems to lead to a better policy though
            #for _ in range(3):
            #    if done:
            #        break
            #    next_state, reward_s, done, _ = self.env.step(action)
            #reward += reward_s

            #process observation data
            next_state = process_state(next_state)
            if type(action) == np.int64:
                action = to_onehot(action, n_actions)

            #add experience to batch
            batch.add((state, action, reward, value, done, next_state, logit,
                       lstm_state))

            #update
            step += 1
            state = next_state
            self.last_obs = state

        return batch.get()
Example No. 29
File: run.py Project: AmesianX/chef
def batch_execute(args: dict):
    # get list of commands from batch file:
    bare_cmd_lines = []
    from batch import Batch
    batch = Batch(args['batch_file'])
    batch_commands = batch.get_commands() # the not-yet-expanded commands
    for command in batch_commands:
        bare_cmd_lines.extend(command.get_cmd_lines())

    # assemble command lines and experiment data output directories:
    cmd_lines = []
    batch_offset = 1
    for bare_cmd_line in bare_cmd_lines:
        # experiment data path:
        expname = '%s-%04d-%s' % (args['expname'],
                                  batch_offset,
                                  os.path.basename(bare_cmd_line[0]))

        # command:
        cmd_line = [sys.argv[0]] # recursively call ourselves:

        if args['dry_run']:
            cmd_line.extend(['--dry-run'])
        cmd_line.extend(['--monitor-port', '%d'
                        % (args['monitor_port'] + batch_offset)])
        cmd_line.extend(['--vnc-display', '%d'
                        % (args['vnc_display'] + batch_offset)])
        cmd_line.extend(['--build', utils.BUILD])
        cmd_line.extend(['--network', args['network']])
        cmd_line.extend(['--memory', args['memory']])
        cmd_line.extend([args['VM[:snapshot]']])
        cmd_line.extend(['sym'])
        cmd_line.extend(['--command-port', '%d'
                        % (args['command_port'] + batch_offset)])
        cmd_line.extend(['--expname', expname])
        cmd_line.extend(['--config-file', command.config])
        if args['timeout']:
            cmd_line.extend(['--timeout', '%d' % args['timeout']])
        if args['env_var']:
            cmd_line.extend(['--env-var', args['env_var']])
        cmd_line.extend([args['snapshot']])
        cmd_line.extend(bare_cmd_line)
        cmd_lines.append(' '.join(cmd_line))

        # counter:
        batch_offset += 1

    utils.execute(assemble_parallel_cmd_line(args), stdin='\n'.join(cmd_lines))
Example No. 30
class App(object):
    """
    """
    def __init__(self, args):
        self._bidsDir = args.bids_dir
        self._dicomDir = args.dicom_dir
        self._session = Session(args.session, args.participant, self._bidsDir)
        self._parser = getattr(studyparser, args.algorithm)(self._dicomDir,
                                                            self._session)
        self._yes = args.yes
        self._codeDir = os.path.join(self._bidsDir, 'code')
        utils.make_directory_tree(self._codeDir)
        self._batch = Batch(self._codeDir, self._session)

    def run(self):
        utils.new_line()
        utils.info('Parse and group DICOM directory')
        self._parser.parse_acquisitions()

        utils.new_line()
        utils.info('Sort and set up acquisitions')
        self._parser.sort_acquisitions()

        #utils.new_line()
        #utils.ok('Acquisitions of interest:')
        #for _ in self._parser.caught: utils.info(_)

        utils.new_line()
        utils.warning('Acquisitions excluded:')
        for _ in self._parser._excluded:
            utils.info(_)

        utils.new_line()
        utils.info('Create YAML file for dcm2niibatch')
        for acq in self._parser.acquisitions:
            self._batch.add(acq)
        self._batch.write()

        utils.new_line()
        utils.ok('Batch file:')
        self._batch.show()

        if self._yes:
            launchBatch = True
        else:
            msg = "Do you want to launch dcm2niibatch ?"
            launchBatch = utils.query_yes_no(msg)

        if launchBatch:
            self._batch.launch()
            for acq in self._parser.acquisitions:
                acq.update_json()
        else:
            utils.new_line()
            utils.ok("To launch dcm2niibatch later:")
            utils.info("cd {}".format(self._codeDir))
            utils.info(self._batch.command)
        return 0
Example No. 31
    def close(self):
        os.system("cp " + Batch.logfile + ' ' + self.logdest)

        if self.isp == '4755' or self.isp == '3356' or self.isp == '7018':
            t = 'isp_3sizes'
        elif self.isp == '2914':
            t = 'isp' + self.isp + '_3ribs'

        if self.profile == True:
            os.system("sudo mv " + self.logdest + ' ' +
                      ' /media/sf_share/ravel_plot/profile/log/')
        else:
            os.system("sudo mv " + self.logdest + ' ' +
                      ' /media/sf_share/ravel_plot/' + t + '/log/')

        Batch.close(self)
Example No. 32
    def test_batch_job_spawn(self):
        self.os_mock.listdir = mock.MagicMock()
        self.os_mock.listdir.return_value = ['job-dir1', 'job-dir2']

        from batch_midwife import BatchMidwife
        from batch import Batch

        midwife = BatchMidwife()
        midwife.apprentice = mock.MagicMock()
        midwife.client = mock.MagicMock()
        midwife.batch_pub_sub = mock.MagicMock()
        midwife.batch_pub_sub.listen.return_value = [{'data': 'batch-lovelyhashcode'}]
        midwife.client.exists.return_value = True
        batch = Batch('uploaded')
        midwife.client.get.return_value = pickle.dumps(batch)
        midwife.client.set = mock.MagicMock()
        midwife.client.publish = mock.MagicMock()

        midwife.run()

        assert midwife.client.exists.call_count == 1
        assert midwife.client.get.call_count == 1
        assert midwife.client.set.call_count == 4
        assert midwife.client.publish.call_count == 2
        assert midwife.client.set.call_args_list[1][0][0] == 'job-dir1_1'
        assert midwife.client.set.call_args_list[2][0][0] == 'job-dir2_1'
        assert pickle.loads(midwife.client.set.call_args_list[3][0][1]).state == 'running'
        assert self.os_mock.listdir.call_count == 1
Example No. 33
    def getNext(self):
        "iterator"
        batchRange = range(self.currIdx, self.currIdx + config.BATCH_SIZE)
        gtTexts = [self.samples[i].gtText for i in batchRange]

        imgs = []
        for i in batchRange:
            try:
                self.binaryImageFile.seek(self.samples[i].imageStartPosition)
                img = np.frombuffer(
                    self.binaryImageFile.read(self.samples[i].imageSize),
                    np.dtype('B'))
                img = img.reshape(self.samples[i].imageHeight,
                                  self.samples[i].imageWidth)
                img = preprocess(img, config.IMAGE_WIDTH, config.IMAGE_HEIGHT,
                                 config.RESIZE_IMAGE,
                                 config.CONVERT_IMAGE_TO_MONOCHROME,
                                 config.AUGMENT_IMAGE)
                imgs.append(img)
            except IOError as e:
                print("I/O error({0}): {1}".format(e.errno, e.strerror))
                pass
            except ValueError as e:
                print("Value error: {0}".format(e))
                pass
            except:
                print("Unexpected error:", sys.exc_info()[0])
                pass

        self.currIdx += config.BATCH_SIZE
        return Batch(gtTexts, imgs)
Example No. 34
    def __init__(self, dbname, rounds):
        sql_script = "/home/mininet/ravel/sql_scripts/primitive.sql"
        #topology = 'toy'
        topology = 'fat'

        self.dbname = dbname
        self.create_db(sql_script)
        Batch.connect(self)

        self.load_schema(sql_script)
        self.load_sig_example_schema()
        self.load_topo()

        Batch.__init__(self, dbname, rounds, sql_script, topology)

        self.profile = False
Example No. 35
    def __init__(self, dbname, rounds):
        sql_script = "/home/mininet/ravel/sql_scripts/primitive.sql"
        #topology = 'toy'
        topology = 'fat'

        self.dbname = dbname
        self.create_db (sql_script)
        Batch.connect (self)

        self.load_schema (sql_script)
        self.load_sig_example_schema ()
        self.load_topo ()

        Batch.__init__(self, dbname, rounds, sql_script, topology)

        self.profile = False
Example No. 36
def processUbertoolBatchRunsIntoBatchModelRuns(ubertools):
    logger.info("Start Ubertool Batching")
    batch_id = ubertools['id']
    user = users.get_current_user()
    batchs = Batch.all()
    batch = None
    for poss_batch in batchs:
        logger.info(batch_id)
        logger.info(str(poss_batch.key()))
        if str(poss_batch.key()) == batch_id:
            batch = poss_batch
    logger.info(batch.to_xml())
    ubertools_results = {}
    ubertools_data = ubertools['ubertools']
    for ubertool in ubertools_data:
        combined_ubertool_props = combineUbertoolProperties(ubertool)
        ubertool_id = combined_ubertool_props["ubertool-config-name"]
        ubertool_result = {}
        #logger.info(combined_ubertool_props)
        ubertool_result = terrPlantRunner.runTerrPlantModel(combined_ubertool_props,ubertool_result)
        ubertool_result = sipRunner.runSIPModel(combined_ubertool_props,ubertool_result)
        #perform on all other eco models
        ubertools_results[ubertool_id]=ubertool_result
    batch.completed = db.DateTimeProperty.now()
    results_pickle = pickle.dumps(ubertools_results)
    batch.ubertool_results = results_pickle
    batch.put()
    logger.info(batch.to_xml())
Example No. 37
 def reset(self):
     #----------------------------------------------
     # failures arrive by using poisson distribution
     #----------------------------------------------
     if self.failure_type == 0:
         trace = Poisson(self.sys.num_disks, self.failure_percent,
                         self.mtbf)
     if self.failure_type == 1:
         trace = Exponential(self.sys.num_disks, self.failure_percent,
                             self.mtbf)
     if self.failure_type == 2:
         trace = Batch(self.sys.num_disks,
                       self.failure_percent,
                       self.mtbf,
                       cascade_factor=10.0)
     self.trace_entry = trace.generate_failures()
     #------------------------------------------
     # put the disk failures in the event queue
     #------------------------------------------
     self.events_queue = []
     for disk_fail_time, diskId in self.trace_entry:
         heappush(self.events_queue,
                  (disk_fail_time, Disk.EVENT_FAIL, diskId))
         print ">>>>> reset disk", diskId, Disk.EVENT_FAIL, "@", disk_fail_time
         self.mission_time = disk_fail_time
     print " - system mission time - ", self.mission_time
     #------------------------------
     # initialize the system state
     #------------------------------
     self.state = State(self.sys, self.rebuild, self.copyback,
                        self.events_queue)
Example No. 38
 def make_monte_carlo_batch(self, nb_episodes, render, policy):
     """
     Create a batch of episodes with a given policy
     Used in Monte Carlo approaches
     :param nb_episodes: the number of episodes in the batch
     :param render: whether the episode is displayed or not (True or False)
     :param policy: the policy controlling the agent
     :return: the resulting batch of episodes
     """
     batch = Batch()
     self.env.set_reward_flag(False)
     self.env.set_duration_flag(False)
     for e in range(nb_episodes):
         episode = self.train_on_one_episode(policy, False, render)
         batch.add_episode(episode)
     return batch
Example No. 39
 def setUp(self):
     self.job = Job()
     self.batch = Batch()
     self.resource = Resource()
     
     rootDir = os.environ['BOLT_DIR']
     self.batch.readConfig(rootDir + configDir + "/" + batchConfig)
     self.resource.readConfig(rootDir + configDir + "/" + resourceConfig)
Example No. 40
 def __init__(self,
              name,
              command,
              containerID=None,
              working_dir=None,
              image=None,
              endpoint=None):
     Batch.__init__(self, name, command, working_dir=working_dir)
     self.command = command
     self.containerID = containerID
     self.working_dir = working_dir
     self.image = image
     self.endpoint = endpoint
     self.docker_client = DockerClient()
     self.transfer = DataTransfer.inferDataTransportation(
         "127.0.0.1", self.endpoint)
     self.ssh_connection = None
Example No. 41
 def encode(self, G):
     if type(G) != list:
         G = [G]
     # encode graphs G into latent vectors
     b = Batch.from_data_list(G)
     Hg = self(b)
     mu, logvar = self.fc1(Hg), self.fc2(Hg)
     return mu, logvar
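Batch.from_data_list here is presumably torch_geometric.data.Batch; a hedged call sketch with two illustrative graphs (model stands for the trained instance that defines encode()):

import torch
from torch_geometric.data import Data

g1 = Data(x=torch.randn(3, 8), edge_index=torch.tensor([[0, 1, 2], [1, 2, 0]]))
g2 = Data(x=torch.randn(4, 8), edge_index=torch.tensor([[0, 1, 2], [1, 2, 3]]))
mu, logvar = model.encode([g1, g2])  # hypothetical trained model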
Example No. 42
 def __init__(self,
              name,
              ssh_username,
              keypath,
              command,
              ip=None,
              working_dir=None,
              local_working_dir=None,
              endpoint=None):
     Batch.__init__(self, name, command, working_dir=working_dir)
     self.ip = ip
     self.keypath = keypath
     self.command = command
     self.endpoint = endpoint
     self.working_dir = working_dir
     self.ssh_username = ssh_username
     self.local_working_dir = local_working_dir
Example No. 43
    def primitive(self):
        size = 10
        Batch.rtm_ins(self, size)
        Batch.init_acl(self)
        Batch.init_lb(self)

        Batch.op_primitive(self)

        dbname = self.logdest.split('.')[0]
        self.logdest = dbname + '_primitive.log'
Example No. 44
    def primitive (self):
        size = 10
        Batch.rtm_ins (self, size)
        Batch.init_acl (self)
        Batch.init_lb (self)

        Batch.op_primitive (self)

        dbname = self.logdest.split ('.')[0]
        self.logdest = dbname + '_primitive.log'
Example No. 45
    def get_experience(self, insert_dummy=True):
        batch = Batch(dummy=[])

        for i in range(self.env_num):
            #append to total batch
            batch.append(self.buffers[i])

            #set dummy key
            if insert_dummy:
                length = len(self.buffers[i])

                batch.dummy += [False] * length

                #append dummy item at the end if not done
                if length:
                    if not batch[-1].done:
                        dummy_item = batch[-1:]

                        dummy_item.dummy = [True]
                        dummy_item.done = [True]
                        dummy_item.state = dummy_item.next_state

                        batch.append(dummy_item)

        return batch.to_numpy()
Example No. 46
def data_gen(V, batch, nbatches):
    "Generate random data for a src-tgt copy task."
    # dtype = torch.FloatTensor
    for i in range(nbatches):
        data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10)))
        data[:, 0] = 1
        src = Variable(data, requires_grad=False)
        tgt = Variable(data, requires_grad=False)
        yield Batch(src, tgt, 0)
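A consumption sketch for the generator above, assuming the Batch wrapper exposes src and trg tensors as in the Annotated Transformer this snippet appears to follow:

V = 11  # vocabulary size for the synthetic copy task
for batch in data_gen(V, batch=30, nbatches=5):
    print(batch.src.shape, batch.trg.shape)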
Example No. 47
    def routing_ins_acl_lb_tenant (self,hosts):
        cur = self.cur
        f = self.f
        
        [h1, h2] = random.sample(hosts, 2)

        Batch.update_max_fid (self)
        fid = self.max_fid + 1

        try:
            t1 = time.time ()
            cur.execute ("INSERT INTO tenant_policy VALUES ("+str (fid) +"," +str (h1) + "," + str (h2)+");")
            cur.execute("select max (counts) from clock;")
            ct = cur.fetchall () [0]['max'] 
            cur.execute ("INSERT INTO t1 VALUES (" + str (ct+1) + ", 'on');")
            t2 = time.time ()
            f.write ('----(acl+lb+rt)*tenant: route ins----' + str ((t2-t1)*1000) + '\n')
            f.flush ()
        except:
            pass
Example No. 48
def inferSingleImage(paraModel, paraFnImg):
    "recognize text in image provided by file path"
    img = cv2.imread(paraFnImg, cv2.IMREAD_GRAYSCALE)
    img = preprocess(img, config.IMAGE_WIDTH, config.IMAGE_HEIGHT, True, False,
                     False)

    batch = Batch(None, [img])
    #(recognized, probability) = model.inferBatch(batch)
    (recognized, probability) = paraModel.inferBatch(batch, True)
    print('Recognized:', '"' + recognized[0] + '"')
    print('Probability:', probability[0])
Example No. 49
 def __init__(
         self,
         package, cli_composer, deploy_status,
         use_package_path=False):
     """
     Initialize an instance of Deployment
     """
     import uuid
     if (use_package_path):
         self.batch = Batch(package.cwd)
     else:
         self.batch = Batch(tempfile.mkdtemp())
     self.cli_composer = cli_composer
     self.cwd = self.batch.cwd
     self.cwd_use_package_path = use_package_path
     self.deployed = False
     self.deployment_id = '{0}'.format(uuid.uuid1())
     self.deploy_status = deploy_status
     self.package = package
     self.started = False
Example No. 50
    def tenant_fullmesh (self, hosts):
        f = self.f
        cur = self.cur

        Batch.update_max_fid (self)
        fid = self.max_fid + 1

        cur.execute ("select max (counts) from clock;")
        ct = cur.fetchall ()[0]['max'] + 1

        for i in range (len (hosts)):
            for j in range (i+1,len (hosts)):
                print "tenant_fullmesh: [" + str (hosts[i]) + "," + str (hosts[j]) + "]"
                t1 = time.time ()
                cur.execute ("INSERT INTO tenant_policy values (%s,%s,%s);",([str (fid) ,int (hosts[i]), int (hosts[j])]))
                cur.execute ("INSERT INTO p_spv values (%s,'on');",([ct]))
                t2 = time.time ()
                f.write ('----rt*tenant: route ins----' + str ((t2-t1)*1000) + '\n')
                f.flush ()
                ct += 1
                fid += 1
Example No. 51
 def close (self):
     Batch.close (self)
Example No. 52
 def load_sig_example_schema (self):
     sql_script = '/home/mininet/ravel/xym/new.sql'
     Batch.load_schema(self, sql_script)
Example No. 53
    def add_flow (self, src, dst):
        Batch.update_max_fid(self)

        self.cur.execute ("INSERT INTO tm(fid,src,dst,vol,FW,LB) VALUES (%s,%s,%s,%s,%s,%s);", 
                          ([self.max_fid +1, src, dst, 0, 0, 0]))
Example No. 54
 def primitive (self):
     Batch.init_acl (self)
     print "Batch_isp.init_acl"
     Batch.init_lb (self)
     print "Batch_isp.init_lb"
     Batch.op_primitive (self)
Example No. 55
def main():
    def find_group_name(filepath):
        unix_stat_of_file = stat(filepath)
        grp_id_of_file = unix_stat_of_file.st_gid
        group_name = getattr(getgrgid(grp_id_of_file), 'gr_name', None)
        return group_name

    def find_user_name(filepath):
        unix_stat_of_file = stat(filepath)
        uid_of_file = unix_stat_of_file.st_uid
        user_name = getpwuid(uid_of_file)
        return user_name
        
    parser = ArgumentParser(description="{description}". \
                            format(description=__description__),
                            epilog="Copyright University of Chicago; " + \
                            "written by {author} ". \
                            format(author = __author__) + \
                            " <{email}> University of Chicago". \
                            format(email = __email__))
    parser.add_argument("-v", help="See the version of this program",
                        action="version", version=__version__)
    parser.add_argument( \
                         '-b','-verbose',help="set verbose logging",
                         action='store_const',dest='log_level',
                         const=INFO \
    )
    parser.add_argument( \
                         '-d','--debugging',help="set debugging logging",
                         action='store_const',dest='log_level',
                         const=DEBUG \
    )
    parser.add_argument( \
                         '-l','--log_loc',help="save logging to a file",
                         action="store_const",dest="log_loc",
                         const='./{progname}.log'. \
                         format(progname=argv[0]) \
    )
    selection = parser.add_mutually_exclusive_group()

    selection.add_argument("--directory_path", 
                           help="Enter a directory that you need to work on ",
                           action='store')
    selection.add_argument("--from_db",help="Select to create a batch " + \
                           "from database",
                           action="store")
    parser.add_argument("--tables",help="Only use this is selecting from_db",
                        nargs="*",action=dbBeforeTables)    
    parser.add_argument("root",help="Enter the root of the directory path",
                        action="store")
    parser.add_argument("numfiles",help="Enter the number of files you " + \
                        "want to check in this iteration.",action="store",
                        type=int)
    args = parser.parse_args()
    log_format = Formatter( \
                            "[%(levelname)s] %(asctime)s  " + \
                            "= %(message)s",
                            datefmt="%Y-%m-%dT%H:%M:%S" \
    )
    global logger
    logger = getLogger( \
                        "lib.uchicago.repository.logger" \
    )
    ch = StreamHandler()
    ch.setFormatter(log_format)
    try:
        logger.setLevel(args.log_level)
    except TypeError:
        logger.setLevel(INFO)
    if args.log_loc:
        fh = FileHandler(args.log_loc)
        fh.setFormatter(log_format)
        logger.addHandler(fh)
    logger.addHandler(ch)


    current_date = datetime.now()
    isof_current_date = current_date.strftime("%Y-%m-%dT%H:%M:%S")
    sixty_days_ago_date = current_date - timedelta(days=60)
    isof_sixty_days_ago_date = sixty_days_ago_date.strftime( \
                            "%Y-%m-%dT%H:%M:%S")
    if args.from_db:
        db = Database(args.from_db, ['record','file'])
        class Record(db.base):
            __table__ = Table('record', db.metadata, autoload=True)
            
        class File(db.base):
            __table__ = Table('file', db.metadata, autoload=True)        
        accessions_to_check  = db.session.query(Record). \
                                   filter(or_(Record.lastFixityCheck == None,
                                          Record.lastFixityCheck \
                                              <= isof_sixty_days_ago_date,
                                          Record.fixityCheckCompleteness \
                                              == 'incompleted',
                                          Record.fixityCheckCompleteness \
                                              == None)).subquery()
        files_to_check = db.session.query(File.accession,
                                          File.checksum,
                                          File.size,
                                          File.filepath). \
                            filter(File.accession== \
                                   accessions_to_check.c.receipt,
                               or_(File.lastFixityCheck == None,
                                   File.lastFixityCheck \
                                       <= isof_sixty_days_ago_date)). \
                                       order_by(func.random()).limit(args.numfiles)
        b = Batch(args.root, query = files_to_check)
        generated_output = b.find_items(from_db = True)
    else:
        b = Batch(args.root, directory = args.directory_path)
        generated_output = b.find_items(from_directory=True)
    b.set_items(generated_output)
    try:
        for n in b.items:
            if exists(n.filepath):
                sha256_fixity = n.find_hash_of_file(sha256)
                mime = n.find_file_mime_type()
                n.set_hash(sha256_fixity)
                n.set_file_mime_type(mime)
                new_hash = n.get_hash()
                old_hash = n.get_old_hash()
                if new_hash != old_hash:
                    logger.error("{path} is corrupted".format(path=n.filepath))
            else:
                logger.error("{path} does not exist on the filesystem".format(path=n.filepath))
        return 0
    except KeyboardInterrupt:
         logger.error("Program aborted manually")
         return 131
Example No. 56
	def get_all_batches(self):

		collection = self.batch_coll
		result = collection.find()

		batches = []
		for item in result:
			
			single_batch = Batch()
			
			single_batch.set_id(item['_id'])
			single_batch.set_desc(item['desc'])
			single_batch.set_current_sem(item['current_sem'])
			single_batch.set_subject_array(self.construct_subject_master(item))
			single_batch.set_status(item['status'])

			batches.append(single_batch)

		# print json.dumps(batches, default=Batch.__str__)
		return batches
Example No. 57
	def get_batch_by_id(self, batchid):

		collection = self.batch_coll
		item = collection.find_one({'_id':batchid})

		if(item != None):
			single_batch = Batch()
			
			single_batch.set_id(item['_id'])
			single_batch.set_desc(item['desc'])
			single_batch.set_current_sem(item['current_sem'])
			single_batch.set_subject_array(self.construct_subject_master(item))
			single_batch.set_status(item['status'])

			return single_batch
		
		return None
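Both accessors above assume batch_coll is a MongoDB collection; a minimal setup sketch with pymongo (database and collection names are guesses):

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')
batch_coll = client['college_db']['batch_master']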
Example No. 58
 def load_sig_example_schema (self):
     sql_script = '/home/mininet/ravel/sql_scripts/sigcomm_example.sql'
     Batch.load_schema(self, sql_script)
Example No. 59
	def extractFromAggregated(self, aggregated):
		batch = Batch()

		total = sum(aggregated)
		entropy = batch._entropy(aggregated)
		return [total, entropy]
Example No. 60
    def fetch (self):
        Batch.fetch (self)

        self.cur.execute ("SELECT * FROM tm;")
        cs = self.cur.fetchall ()
        self.tm = [[h['fid'], h['src'], h['dst'], h['vol'], h['fw'], h['lb']] for h in cs]