Example #1
    def test(self, feature_data=None):
        #test on current scan:
        print ut.getTime(), 'test on:', self.processor.scan_dataset.id

        if feature_data is None:
            filename = self.processor.get_features_filename()
            dict = ut.load_pickle(filename)
        else:
            dict = feature_data

        baseline_labels = self.classify_baseline_code()

        return baseline_labels, self.test_results(dict, baseline_labels)
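
Throughout these examples, ut.getTime() is used only as a timestamp prefix for log output. Its implementation is not shown on this page; a minimal sketch of what such a helper might look like (the format string is an assumption):

import time

def getTime():
    # hypothetical sketch: return a human-readable timestamp for log prefixes
    return time.strftime('%Y-%m-%d %H:%M:%S')
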
Example #2
def run(func, args):
    messages = {
        trucks: "import trucks ",
        compute: "compute truck dates and centers ",
        stops: "compute stops and properties"
    }
    message = messages[func]

    try:
        getTime(func, message, *args)
        # func(*args)
        #notify(message)
    except:
        print traceback.format_exc()
        notify(message + "failed")
Example #3
  def __init__(self, config_file=None, runmode="local"):
    self.start_time = time.time()
    self.config = updateConfig(DEFAULT_CONFIG, config_file)
    self.runmode = runmode

    self.client_name = "client_%s_%s" % (runmode,
      randString(self.config['client_uid_length']))
    self.client_dir = os.path.join(self.config['base_dir'], self.client_name)
    self.create_client_dir()

    self.logger = Logger(os.path.join(self.client_dir, "log.txt"), "a")
    sys.stdout = self.logger
    sys.stderr = self.logger

    print "[%s][%s] Starting client with following config:" % (getTime(),
      self.client_name)
    pprint.pprint(self.config, indent=2); print

    my_queue = ShortestTaskQueue if self.config['runmode'] == "hybrid" else FIFOQueue
    self.submit_queue = my_queue(queue_dir=os.path.join(self.config['base_dir'],
      self.config['submit_queue_name']))
    self.result_queue = my_queue(queue_dir=os.path.join(self.config['base_dir'],
      self.config['result_queue_name']))

    self.stop_running = False
    self.run()
Example #4
    def train(self):
        #cv_boost_params = cv.CvBoostParams()

        #priors = cv.cvCreateMat(1,2,cv.CV_32FC1)
        #priors[0] = 10
        #priors[1] = 1
        
        #cv_boost_params.max_categories = 2
        #cv_boost_params.priors = priors #TODO: activate them
        self.cv_classifier = cv.CvDTree() #cv.CvBoost() #TODO: CHANGE CLASSIFIER HERE
        train_datastructures = self.create_train_datastructures()
            
        (train_data, train_labels, type_mask) = train_datastructures
        print 'WARNING! use CvDTree (single decision trees) for now as load/save works!'#'boost'
        print ut.getTime(), self.cv_classifier.train(train_data, cv.CV_ROW_SAMPLE, train_labels, None, None, type_mask ) #TODO: CHANGE CLASSIFIER HERE
       
        print ut.getTime(), 'training finished'
Example #5
    def test(self, feature_data=None):
        #test on current scan:
        print ut.getTime(), 'test on:', self.processor.scan_dataset.id

        if feature_data is None:
            filename = self.processor.get_features_filename()
            print 'loading', filename
            dict = ut.load_pickle(filename)
        else:
            dict = feature_data
        
        #print ut.getTime(), dict
        current_set_size = dict['set_size']
        feature_vector_length = len(self.processor.features.get_indexvector(self.features))
        print ut.getTime(), feature_vector_length
        labels = np.array(np.zeros(len(self.processor.map_polys)))
        print 'test: length of labels vector:', len(labels)
        test = cv.cvCreateMat(1,feature_vector_length,cv.CV_32FC1)
        
        if current_set_size == 0:
            print ut.getTime(), 'ERROR: test dataset is empty!'
            return labels, 1, 1, 1

        count = 0
        for index in dict['point_indices']:
            fv = (dict['features'][count])[self.processor.features.get_indexvector(self.features)]
            #print ut.getTime(), fv, dict['features'][count]

            for fv_index, fv_value in enumerate(fv):
                test[fv_index] = fv_value
             
            #print 'class',self.cv_classifier
            label = self.cv_classifier.predict(test)
            #print label.value
            labels[index] = label.value
            #print 'tdone'
            if count % 4096 == 0:
                print ut.getTime(), 'testing:', count, 'of', current_set_size, '(',(float(count)/float(current_set_size)*100.0),'%)'
                
            count += 1


        #save for later use for postprocessing:
        self.test_feature_dict = dict
        self.test_labels = labels
        #cv.cvReleaseMat(test)
        return labels, self.test_results(dict, labels)  
Example #6
    def test_results(self, dict, labels):
        current_set_size = dict['set_size']
        count_correct = 0
        count_clutter_correct = 0
        count_surface_correct = 0
        count_clutter = 0
        count_surface = 0
        count = 0
        for index in dict['point_indices']:
            label = labels[index]
            
            if label == dict['labels'][count]:
                count_correct += 1
                
            if dict['labels'][count] == processor.LABEL_CLUTTER:
                count_clutter += 1
                if label == dict['labels'][count]:
                    count_clutter_correct += 1
            if dict['labels'][count] == processor.LABEL_SURFACE:
                count_surface += 1
                if label == dict['labels'][count]:
                    count_surface_correct += 1                    

            count += 1        
        
        print ut.getTime(), '##########################################'
        print ut.getTime(), '####tested on ', self.features, '###########################'
        print ut.getTime(), '==================================='
        print ut.getTime(), 'percent in total: surface:',(float(count_surface)/float(current_set_size)*100), '%, clutter:',(float(count_clutter)/float(current_set_size)*100),'%'
        print ut.getTime(), '#points surface:',count_surface,'clutter:',count_clutter
        print ut.getTime(), '#points correct: surface:',count_surface_correct,'clutter:',count_clutter_correct
        if count_surface > 0:
            percent_surface_correct = float(count_surface_correct)/float(count_surface) * 100
        else:
            percent_surface_correct = 100
        if count_clutter > 0:
            percent_clutter_correct = float(count_clutter_correct)/float(count_clutter) * 100
        else:
            percent_clutter_correct = 100
        print ut.getTime(), '#percent correct: surface:',percent_surface_correct,'clutter:',percent_clutter_correct
        print ut.getTime(), '==================================='
        print ut.getTime(), '##########################################'
        testresults = (count_surface, count_clutter,count_surface_correct, count_clutter_correct, percent_surface_correct, percent_clutter_correct)
    
        return testresults  
Example #7
    header = header + "`" + BEGAN_TIME + "--" + END_TIME + "`"
    header = header + "\n\n\n"
    curBook.header = header

# append the annotation content to the files
stce_succ_cnt = 0  # number of notes successfully added to the html file
stce_fail_cnt = 0  # number of notes that failed to be added to the html file
# print("html name:",os.listdir())
file_list = os.listdir(".")  # filenames in the current directory, stored in file_list
for j in range(len(sentence)):
    temp = both[j]
    filename = "{}{}{}".format(util.changechar(temp[0][0:80]), nowTime,
                               FILE_SUFFIX)
    curBook = findBook(filename, allBooks)
    s1 = util.getAddr(temp[1])  # get the annotation position
    s2 = util.getTime(temp[1])  # get the annotation time
    s3 = util.getMark(temp[1])  # get the annotation content
    if s3 != "\n":  # if the text content is not empty
        stce_succ_cnt += 1
        cnt_temp = stceOfBookCnt[filename]
        stceOfBookCnt[filename] = cnt_temp + 1
        # senContent = "\n### " + str(cnt_temp + 1) + ". " + s3
        senContent = s3
        senContent = senContent + "\n    " + s2 + " &&" + s1 + "\n"
        senContent = senContent + "\n"
        curSen = book.Sentence(senContent, util.getBeginPos(s1))
        if curBook.hasChapter:
            curBook.appendChapterSen(curSen)
        else:
            curBook.appendSen(curSen)
        if curBook.sentenceLen() == 1:
Example #8
		tmp = []

		for site in sites:
			status_code = 0
			text = None
			try:
				req = requests.get(sites[site], timeout=10)
				status_code = req.status_code
				tmp.append(status_code)
				if status_code != 200:
					text = str(req.text)
			except Exception as e:
				status_code = -1
				tmp.append(status_code)

			# save bad file
			if text is not None:
				with open('errors/' + util.getTime(format="%Y-%m-%d_%H-%M-%S") + '_' + site + '_' + str(status_code) + '.html', "w") as text_file:
					for bc in bad_chars:
						text = text.replace(bc, '')
					text_file.write(text)

		# print to console
		out = [str(datetime.now())[:19]] + tmp
		util.p('TEST', str(out))

		# write to file
		with open('data.csv', 'a') as f:
			csv_writer = csv.writer(f, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL, lineterminator = '\n')
			csv_writer.writerow(out)
		time.sleep(0.999)
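
This example calls util.getTime(format="%Y-%m-%d_%H-%M-%S") to build a filesystem-safe timestamp for the error file name. The helper is assumed to be a thin strftime wrapper with a default format, roughly:

import time

def getTime(format="%Y-%m-%d_%H-%M-%S"):
    # hypothetical sketch; 'format' mirrors the keyword used in the call above
    return time.strftime(format)
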
Example #9
    files = os.listdir(INPUT_DIR) if os.path.exists(INPUT_DIR) else []
    if len(files) == 0:
        print('the directory ' + INPUT_DIR + ' does not exist or is empty')
        time.sleep(1)  # wait for one second
        continue

    for file in files:
        print('processing file: ' + file)
        f = open(INPUT_DIR + "/" + file, 'r')
        lines = f.readlines()

        if len(lines) == 0:
            print('file is empty: ' + file)
            continue  # next file

        result = open(SYNTACTIC_RESULT_DIR + "/" + util.getTime() + '_' + file,
                      'w+')

        for line in lines:
            doc = nlp(line)
            for token in doc:
                word = token.text.strip()
                word = 'new_line' if word == '' else token.text

                result.write(convertToResult(word, token))
                print(convertToResult(word, token))

            # end token for
        # end line for

        result.close()
Example #10
loss = nn.CrossEntropyLoss()  # choice of loss function: cross-entropy
optimizer = optim.SGD(model.parameters(), lr=0.1)
num_epochs = 10

# the following four lists are for visualization (not implemented yet)
losses = []
acces = []
eval_losses = []
eval_acces = []

for epoch in range(num_epochs):
    train_loss = 0  # training loss
    train_acc = 0  # training accuracy
    model.train()  # switch the network to training mode
    print("startTime = ", util.getTime())
    for i, (X, label) in enumerate(train_loader):  # iterate over train_loader with enumerate
        # X = X.view(-1,784)       # X: [64,1,28,28] -> [64,784], flatten X
        if device == "cpu":
            X = Variable(X)  # wrap the tensor for autograd
            label = Variable(label)
        else:
            X = Variable(X).cuda()  # wrap the tensor for autograd
            label = Variable(label).cuda()
        out = model(X)  # forward pass
        lossvalue = loss(out, label)  # compute the loss
        optimizer.zero_grad()  # zero the optimizer's gradients
        lossvalue.backward()  # backward pass, refreshing the gradients
        optimizer.step()  # one optimizer step; the optimizer holds the model's parameters

        # accumulate the loss
Example #11
    def get_featurevector(self, index, count, pts=None):
        if pts is None:
            pts = self.processor.pts3d_bound

        #print 'i',index,'c', count
        fv = [] 
        
        indices = np.asarray(self.kdtree_queried_indices[count])
        invalid_value = np.shape(pts)[1]
        #print indices
        #print 'iv',invalid_value
        indices = indices[indices != invalid_value]
        
        #print ut.getTime(), indices
        #print ut.getTime(), 'number of pts', len(indices)
        a = pts[:,indices]
        view = processor.rotate_to_plane(self.processor.scan_dataset.ground_plane_normal, np.matrix([-1,0,0.]).T)
        normal, eigenvalues = gaussian_curvature.gaussian_curvature(a,view)
        #eigenvalues = eigenvalues / np.square(r)
        #fv += [normal[0,0],0,normal[2,0]]
        #fv += normal.T.A[0].tolist()
        #fv += eigenvalues.tolist()
        #print np.asarray(pts[:,index].T[0])[0]
       # print 'pt',np.asarray(pts[:,index].T[0])
        point = pts[:,index]
        
        ev1, ev2 = self.get_voi_histogram_spread(point)
        #z_max_height_diff = pts[2,index] - self.get_voi_maxcount_height()
        #fv += [self.get_voi_histogram_value(point),z_max_height_diff,normal[0,0],normal[1,0],normal[2,0], ev1, ev2]
        fv += [self.get_voi_histogram_value(point),normal[0,0],normal[1,0],normal[2,0], ev1, ev2]
        
        h = self.imNP_h[self.processor.map2d[1,index],self.processor.map2d[0,index]]
        s = self.imNP_s[self.processor.map2d[1,index],self.processor.map2d[0,index]]
        i = self.processor.intensities_bound[index]
        hsi = self.get_voi_hsi_histogram_values(point,h,s,i)
        fv += [hsi[0],hsi[1],hsi[2]]
        #print np.shape(self.imNP_tex1)
        #print np.shape(self.map2d)
        tex1 = self.imNP_tex1[self.processor.map2d[1,index],self.processor.map2d[0,index]]
        tex2 = self.imNP_tex2[self.processor.map2d[1,index],self.processor.map2d[0,index]]
        fv += [tex1, tex2]
        #print tex1, tex2
        

        #color histograms:
        colors_h = []
        colors_s = []
        colors_v = []
        for idx in indices:
            colors_h.append(float(self.imNP_h[self.processor.map2d[1,idx],self.processor.map2d[0,idx]]))
            colors_s.append(float(self.imNP_s[self.processor.map2d[1,idx],self.processor.map2d[0,idx]]))
            colors_v.append(float(self.imNP_v[self.processor.map2d[1,idx],self.processor.map2d[0,idx]]))
        
        color_hist = stats.histogram2(np.array(colors_h), [0,51,102,153,204])
        color_hist = color_hist / float(np.sum(color_hist))
        color_hist = list(color_hist)
        fv += color_hist
        color_hist = stats.histogram2(np.array(colors_s), [0,51,102,153,204])
        color_hist = color_hist / float(np.sum(color_hist))
        color_hist = list(color_hist)
        fv += color_hist
        color_hist = stats.histogram2(np.array(colors_v), [0,51,102,153,204])
        color_hist = color_hist / float(np.sum(color_hist))
        color_hist = list(color_hist)
        fv += color_hist
        
        #intensities
        intensities = self.processor.intensities_bound[indices]
        intensities = np.asarray(intensities)
        #map to 0-255-range:   TODO: perhaps do some nonlinear transformation here? 
        intensities = intensities / 10000 * 255
        intensity_hist = stats.histogram2(intensities, [0,51,102,153,204])
        intensity_hist = intensity_hist / float(np.sum(intensity_hist))
        intensity_hist = list(intensity_hist)
        fv += intensity_hist    
    
        #current colors:
        fv += [float(self.imNP_h[self.processor.map2d[1,index],self.processor.map2d[0,index]]) / 255.0]
        fv += [float(self.imNP_s[self.processor.map2d[1,index],self.processor.map2d[0,index]]) / 255.0]
        fv += [float(self.imNP_v[self.processor.map2d[1,index],self.processor.map2d[0,index]]) / 255.0]  
        
        #current intensity value (scaled)
        intensity = self.processor.intensities_bound[index]
        #scale:
        intensity = intensity / 15000.0
        intensity = [intensity]
        fv += intensity  

        
        if self.debug_before_first_featurevector:
            self.debug_before_first_featurevector = False
            print ut.getTime(), 'get_featurevector: Choosing not to print Feature Vector Sample'
            #print ut.getTime(), 'feature vector sample(gaussian histograms):', fv
        return fv
Example #12
top_id=['s_122', 'f_0062', 'c', 'k']
entainment_id=['f_0055', 's_112', 'k_5']
military_id=['f_0052', 's_122101', 'c_3']
society_id=['s_122102', 'f_0062-0003', 'c_4', 'k_3']
economy_id=['s_122104', 'f_0062-0005', 'k_6']
technogy_id=['s_122106', 'f_0062-0008']
inner_id=['s_122204', 'f_0062-0000', 'c_1', 'k_1']
internation_id=['s_122205', 'f_0062-0001', 'c_2', 'k_2']
sports_id=['s_16', 'f_0062-0006', 'k_4']
taiwan_id=['f_0062-0002', 'f_0052-0001']
property_id=['f_0062-0009']
category = [('top', top_id, 1), ('inner', inner_id, 3), ('military', military_id, 4), ('entainment', entainment_id, 5), ('internation', internation_id, 6),\
            ('sports', sports_id, 7), ('economy', economy_id, 8), ('society', society_id, 10)]
# category = [('top', top_id, 1)]
##category = [('sports', sports_id, 7)]
now, bef24, bef72 = util.getTime()
# now = '2014-07-30 21:47:20'
# bef24 = '2014-07-29 21:47:20'
# bef72 = '2014-07-27 21:47:20'

def getRes(news_category_id, category_id, category_name):
    news_corpus = sql.getNews(news_category_id, now)
    sounds_corpus = sql.getSound(category_id, bef72, now)
    news, sounds = pre.pre_all(news_corpus, sounds_corpus, now)
    
    index = -1
    for i, item in enumerate(sounds):
        if item[2] < bef24:
            index = i
            break
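
Here util.getTime() returns three values at once; the commented-out sample values above suggest the current time plus the timestamps 24 and 72 hours earlier, all as 'YYYY-MM-DD HH:MM:SS' strings. A sketch under that assumption:

from datetime import datetime, timedelta

def getTime():
    # hypothetical sketch inferred from the sample values shown above
    fmt = '%Y-%m-%d %H:%M:%S'
    now = datetime.now()
    return (now.strftime(fmt),
            (now - timedelta(hours=24)).strftime(fmt),
            (now - timedelta(hours=72)).strftime(fmt))
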
Example #13
    def create_train_datastructures(self):
        #loop through all marked datasets
        self.processor.scan_dataset = self.processor.scans_database.get_dataset(0)
          
        training_set_size = 0
        
        data = []
        #get size of training set in total
        while self.processor.scan_dataset is not False:
            if self.processor.scan_dataset.is_training_set:
                
                filename = self.processor.get_features_filename(True)
                print 'loading', filename
                dict = ut.load_pickle(filename)

                # make an equal size of points for each class: use object labels more often:
                difference = np.sum(dict['labels'] == processor.LABEL_SURFACE) - np.sum(dict['labels'] == processor.LABEL_CLUTTER)
                #print ut.getTime(), filename
                #print ut.getTime(), 'surface',np.sum(dict['labels'] == LABEL_SURFACE)
                #print ut.getTime(), 'clutter',np.sum(dict['labels'] == LABEL_CLUTTER)
                #print ut.getTime(), difference, "difference = np.sum(dict['labels'] == LABEL_SURFACE) - np.sum(dict['labels'] == LABEL_CLUTTER)"
                #print ut.getTime(), ''
                if difference > 0:
                    clutter_features = (dict['features'])[np.nonzero(dict['labels'] == processor.LABEL_CLUTTER)]
                    if len(clutter_features) > 0: #if there are none, do nothin'
                        dict['set_size'] += difference
                        dict['features'] = np.vstack((dict['features'], clutter_features[np.random.randint(0,len(clutter_features),size=difference)]))
                        dict['labels'] = np.hstack((dict['labels'], np.ones(difference) * processor.LABEL_CLUTTER))
                elif difference < 0: 
                    surface_features = (dict['features'])[np.nonzero(dict['labels'] == processor.LABEL_SURFACE)]
                    if len(surface_features) > 0: #if there are none, do nothin'
                        difference = -difference
                        dict['set_size'] += difference
                        dict['features'] = np.vstack((dict['features'], surface_features[np.random.randint(0,len(surface_features),size=difference)]))
                        dict['labels'] = np.hstack((dict['labels'], np.ones(difference) * processor.LABEL_SURFACE))
                    
                training_set_size += dict['set_size']
                data.append(dict)
            #get next one
            self.processor.scan_dataset = self.processor.scans_database.get_next_dataset()
            #print ut.getTime(),  self.scan_dataset
        
        #create training set:
        self.processor.scan_dataset = self.processor.scans_database.get_dataset(0)
        current_training_set_index = 0
        
       
        feature_vector_length = len(self.processor.features.get_indexvector(self.features))
        print ut.getTime(), feature_vector_length
        #create dataset matrices:
        print ut.getTime(), '#training set size ', training_set_size 
        
        #deactivate for now:
        max_traning_size = 1800000  # 2040000
        #if training_set_size < max_traning_size:
        #if True:       
        train_data = cv.cvCreateMat(training_set_size,feature_vector_length,cv.CV_32FC1) #CvMat* cvCreateMat(int rows, int cols, int type)
        train_labels = cv.cvCreateMat(training_set_size,1,cv.CV_32FC1)
        
        for dict in data:        
            for index in range(dict['set_size']):
                #only train on surface and clutter
                if dict['labels'][index] == processor.LABEL_SURFACE or dict['labels'][index]== processor.LABEL_CLUTTER:
                
                    #print ut.getTime(), point3d
                    #print ut.getTime(), 'fvindexv',self.get_features_indexvector(features)
                    #print ut.getTime(), 'len', len(self.get_features_indexvector(features))
                    fv = (dict['features'][index])[self.processor.features.get_indexvector(self.features)]

                    #print ut.getTime(), 'fv',fv
                    #print ut.getTime(), np.shape(fv)
                    for fv_index, fv_value in enumerate(fv):
                        train_data[current_training_set_index][fv_index] = fv_value
                    train_labels[current_training_set_index] = dict['labels'][index]
#                    for fv_index, fv_value in enumerate(fv):
#                        print ut.getTime(), train_data[current_training_set_index][fv_index]
#                    print ut.getTime(), '##',train_labels[current_training_set_index],'##'                    
                    #print ut.getTime(), 'fv ', fv
                    #print ut.getTime(), 'tr ',train_data[index]
                    current_training_set_index = current_training_set_index + 1
        
                    #if current_training_set_index % 4096 == 0:
                    #    print ut.getTime(), 'label', dict['labels'][index], 'fv', fv        
                    if current_training_set_index %  16384 == 0:
                        print ut.getTime(), 'reading features:', current_training_set_index, 'of', training_set_size, '(',(float(current_training_set_index)/float(training_set_size)*100.0),'%)'
##subsample from the features, NOT USED/NOT WORKING?
#        else:
#            print ut.getTime(), 'more than',max_traning_size,'features, sample from them...'
#            #select 2040000 features:
#            all_data = []
#            all_labels = []
#            for dict in data:  
#                for index in range(dict['set_size']):
#                    if dict['labels'][index] == processor.LABEL_SURFACE or dict['labels'][index]== processor.LABEL_CLUTTER:
#                        fv = (dict['features'][index])[self.processor.features.get_indexvector(self.features)]
#                        all_data += [fv]
#                        all_labels += [dict['labels'][index]]
#                        
#                        current_training_set_index = current_training_set_index + 1    
#                        if current_training_set_index %  16384 == 0:
#                            print ut.getTime(), 'reading features:', current_training_set_index, 'of', training_set_size, '(',(float(current_training_set_index)/float(training_set_size)*100.0),'%)'
#            
#            del data
#            indices = np.array(random.sample(xrange(len(all_labels)),max_traning_size))
#            all_data = np.asarray(all_data)
#            all_labels = np.asarray(all_labels)
#            
#            all_data = all_data[indices]
#            all_labels = all_labels[indices]
#            
#            train_data = cv.cvCreateMat(max_traning_size,feature_vector_length,cv.CV_32FC1) #CvMat* cvCreateMat(int rows, int cols, int type)
#            train_labels = cv.cvCreateMat(max_traning_size,1,cv.CV_32FC1)
#                        
#            for index in range(max_traning_size):
#                for fv_index, fv_value in enumerate(all_data[index]):
#                    train_data[index][fv_index] = fv_value
#                    train_labels[index] = all_labels[index]
#                if index % 16384 == 0:
#                    print ut.getTime(), 'setting features:', (float(index)/float(max_traning_size))
#          
          
        print ut.getTime(), 'start training Classifier'

        type_mask = cv.cvCreateMat(1, feature_vector_length+1, cv.CV_8UC1)
        cv.cvSet( type_mask, cv.CV_VAR_NUMERICAL, 0)
        type_mask[feature_vector_length] = cv.CV_VAR_CATEGORICAL
        
        return (train_data, train_labels, type_mask)
Example #14
    def load(self):
        self.cv_classifier = cv.CvDTree() #cv.CvBoost() #TODO: CHANGE CLASSIFIER HERE
        print ut.getTime(), 'loading Classifier',self.features
        self.cv_classifier.load(self.get_filename())
Example #15
def setupAll():
    try:
        run(trucks, [])
        run(compute, [])
        run(stops,[])
        notify("complete setup succeeded!")
    except:
        print traceback.format_exc()
        notify("complete setup failed...")


##
# deletes the database and cleans up the collections
def dataPurge(db):
    client = MongoClient()
    client.drop_database(db)

if __name__ == '__main__':
    dataPurge(db)
    setupAll()

    if len(sys.argv) == 2:
        if sys.argv[1] == "all":
            getTime(setupAll, "Ran complete setup")
        if sys.argv[1] == "trucks":
            run(trucks, [])
        if sys.argv[1] == "stops":
            run(stops, [])
        if sys.argv[1] == "compute":
            run(compute, [])
Example #16
def run():
    """execute the TraCI control loop"""
    traci.init(PORT)
    step = 0
    timeCount = 0
    rushHour = 1
    # we start with phase 2 where EW has green
    traci.trafficlights.setPhase("0", 2)
    evaluator = learningAgents.LearningAgent()

    #opening an output file.
    fileName = "Output.txt"
    outFile = open(fileName, 'w')
    """
    Lane IDs:
    West to East: 1i_0
    East to West: 2i_0
    South to North: 3i_0
    North to South: 4i_0
    """
    laneWE = 0
    laneEW = 0
    laneSN = 0
    laneNS = 0

    #phaseTimer = 0  # counts how many time steps have occurred since the last phase change
    while traci.simulation.getMinExpectedNumber() > 0:
        traci.simulationStep()
        step += 1
        timeCount += 1
        # Functions from http://sumo.dlr.de/daily/pydoc/traci.html

        state = basics.TrafficState(traci)

        #newPhase = learningAgents.chooseActionReflex(state, 0)
        #traci.trafficlights.setPhase("0", newPhase)
        if step >= 32:
            step = 0
            print "Stepped"
            if evaluator.switchPhase(state, timeCount, 0):
                # we are not already switching
                #if traci.inductionloop.getLastStepVehicleNumber("0") > 0:
                if traci.trafficlights.getPhase("0") == 2:
                    # there is a vehicle from the north, switch
                    traci.trafficlights.setPhase("0", 3)
                else:
                    # otherwise try to keep green for EW
                    traci.trafficlights.setPhase("0", 2)

        time = util.getTime(timeCount)

        if time[1] == 7 and time[2] == 0 and time[3] == 0:
            rushHour = 1

        if time[1] >= 7 and time[1] < 9:
            laneWE = ((laneWE * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('1i_0')) / rushHour
            laneEW = ((laneEW * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('2i_0')) / rushHour
            laneSN = ((laneSN * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('3i_0')) / rushHour
            laneNS = ((laneNS * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('4i_0')) / rushHour

        if time[1] == 9 and time[2] == 0 and time[3] == 0:
            outFile.write(
                str(
                    util.stringWaitTimes(timeCount, laneWE, laneEW, laneSN,
                                         laneNS)))
            outFile.write('\n')
            laneWE = 0
            laneEW = 0
            laneSN = 0
            laneNS = 0

        if time[1] == 17 and time[2] == 0 and time[3] == 0:
            rushHour = 1

        if time[1] >= 17 and time[1] < 19:
            laneWE = ((laneWE * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('1i_0')) / rushHour
            laneEW = ((laneEW * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('2i_0')) / rushHour
            laneSN = ((laneSN * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('3i_0')) / rushHour
            laneNS = ((laneNS * (rushHour - 1)) +
                      traci.lane.getLastStepMeanSpeed('4i_0')) / rushHour

        if time[1] == 19 and time[2] == 0 and time[3] == 0:
            outFile.write(
                str(
                    util.stringWaitTimes(timeCount, laneWE, laneEW, laneSN,
                                         laneNS)))
            outFile.write('\n')
            laneWE = 0
            laneEW = 0
            laneSN = 0
            laneNS = 0
        """
        #getting a wait time string
        if timeCount%60 == 0:
            #print util.getTime(timeCount)
            #print laneWE
            #print laneEW
            #print laneSN
            #print laneNS
            
            #outFile.write(util.stringWaitTimes(timeCount, laneWE, laneEW, laneSN, laneNS))
            
        """
        """
        if traci.trafficlights.getPhase("0") == 2:
            # we are not already switching
            #if traci.inductionloop.getLastStepVehicleNumber("0") > 0:
            if learningAgents.switchPhaseReflex(state, 0):
                # there is a vehicle from the north, switch
                traci.trafficlights.setPhase("0", 3)
            else:
                # otherwise try to keep green for EW
                traci.trafficlights.setPhase("0", 2)
        """
        rushHour += 1
        step += 1
    outFile.close()
    traci.close()
    sys.stdout.flush()
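
In this and the next example, util.getTime(timeCount) converts a tick counter (one tick is one second) into clock fields indexed as time[1] for hours, time[2] for minutes and time[3] for seconds. A sketch consistent with that usage, with the (day, hour, minute, second) layout as an assumption:

def getTime(timeCount):
    # hypothetical sketch: split a second counter into (day, hour, minute, second)
    day, rest = divmod(timeCount, 86400)
    hour, rest = divmod(rest, 3600)
    minute, second = divmod(rest, 60)
    return (day, hour, minute, second)
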
Example #17
def generate_routefile():
    random.seed(42)  # make tests reproducible
    #N = 86400  #One Day
    #N = 172800 #Two Days
    N = 691200  #Eight Days
    # number of time steps, one tick is one second.

    #break points will be set at rush hour, end of day 1, start of day 8, rush hour day

    trafficBase = .08

    # demand per second from different directions
    pWE = trafficBase  #travel from a suburb into a city
    pEW = trafficBase  #travel from a city into a suburb
    pNS = trafficBase * .7  #Uniform NS traffic the entire time
    pSN = trafficBase * .7  #Uniform SN traffic the entire time

    time = 0

    with open("data/cross.rou.xml", "w") as routes:
        print >> routes, """<routes>
        <vType id="typeWE" accel="0.8" decel="4.5" sigma="0.5" length="5" minGap="2.5" maxSpeed="16.67" guiShape="passenger"/>
        <vType id="typeNS" accel="0.8" decel="4.5" sigma="0.5" length="5" minGap="2.5" maxSpeed="16.67" guiShape="passenger"/>

        <route id="right" edges="51o 1i 2o 52i" />
        <route id="left" edges="52o 2i 1o 51i" />
        <route id="down" edges="54o 4i 3o 53i" />
        <route id="up" edges="53o 3i 4o 54i" />"""
        lastVeh = 0
        vehNr = 0
        for i in range(N):
            if random.uniform(0, 1) < pWE:
                print >> routes, '    <vehicle id="right_%i" type="typeWE" route="right" depart="%i" />' % (
                    vehNr, i)
                vehNr += 1
                lastVeh = i
            if random.uniform(0, 1) < pEW:
                print >> routes, '    <vehicle id="left_%i" type="typeWE" route="left" depart="%i" />' % (
                    vehNr, i)
                vehNr += 1
                lastVeh = i
            if random.uniform(0, 1) < pNS:
                print >> routes, '    <vehicle id="down_%i" type="typeNS" route="down" depart="%i" />' % (
                    vehNr, i)
                vehNr += 1
                lastVeh = i
            if random.uniform(0, 1) < pSN:
                print >> routes, '    <vehicle id="up_%i" type="typeNS" route="up" depart="%i" />' % (
                    vehNr, i)
                vehNr += 1
                lastVeh = i

            time = util.getTime(i)

            #Morning rush hour defined as starting around 7am and continuing until 9am, peaking at 8am, gradual increase and decrease.
            #Set to alter the value every minute
            if 7 <= time[1] < 9 and time[3] == 0:
                if time[1] < 8:
                    pWE += .005
                else:
                    pWE -= .005

            #Afternoon rush hour defined as starting at 5pm and going until 7pm, sharp increase with slow taper.
            #Set to alter the value every minute
            if 17 <= time[1] < 19 and time[3] == 0:
                if time[1] == 17 and time[2] < 15:
                    pEW += .02
                else:
                    pEW -= .00125

            #Resetting the traffic values to prevent weird things happening with floats.
            if time[1] == 0 and time[2] == 0 and time[3] == 0:
                pWE = trafficBase
                pEW = trafficBase
            #leave north to south / south to north traffic constant for the whole experiment.

        print >> routes, "</routes>"
Example #18
    def prepare(self, features_k_nearest_neighbors, nonzero_indices=None, all_save_load=False, regenerate_neightborhood_indices=False):
        #print np.shape(self.processor.pts3d_bound), 'shape pts3d_bound'

        imgTmp = cv.cvCloneImage(self.processor.img)
        self.imNP = ut.cv2np(imgTmp,format='BGR')
        ###self.processor.map2d = np.asarray(self.processor.camPts_bound) #copied from laser to image mapping
        
        if features_k_nearest_neighbors is None or features_k_nearest_neighbors is False: #use range
            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)
            
            #print len(nonzero_indices)
            #print np.shape(np.asarray((self.processor.pts3d_bound.T)[nonzero_indices]))
            
            if nonzero_indices is not None:
                print ut.getTime(), 'query ball tree for ', len(nonzero_indices), 'points'
                kdtree_query = kdtree.KDTree((self.processor.pts3d_bound.T)[nonzero_indices])
            else:
                print ut.getTime(), 'query ball tree'
                kdtree_query = kdtree.KDTree(self.processor.pts3d_bound.T)
            
            filename = self.processor.config.path+'/data/'+self.processor.scan_dataset.id+'_sphere_neighborhood_indices_'+str(self.processor.feature_radius)+'.pkl'
            if all_save_load and os.path.exists(filename) and not regenerate_neightborhood_indices:
                #if it's already there, load it:
                print ut.getTime(), 'loading',filename
                self.kdtree_queried_indices = ut.load_pickle(filename)
            else:
                self.kdtree_queried_indices = kdtree_query.query_ball_tree(self.kdtree2d, self.processor.feature_radius, 2.0, 0.2) #approximate
                print ut.getTime(), 'queried kdtree: ',len(self.kdtree_queried_indices),'points, radius:',self.processor.feature_radius
                if all_save_load:
                    ut.save_pickle(self.kdtree_queried_indices, filename)
                    
            #make dict out of list for faster operations? (doesn't seem to change speed significantly):
            #self.kdtree_queried_indices = dict(zip(xrange(len(self.kdtree_queried_indices)), self.kdtree_queried_indices))
        
        else: #experimental: use_20_nearest_neighbors == True
            #TODO: exclude invalid values in get_featurevector (uncomment code there)
           
            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)
            self.kdtree_queried_indices = []
            print ut.getTime(), 'kdtree single queries for kNN start, k=', features_k_nearest_neighbors
            count = 0
            for point in ((self.processor.pts3d_bound.T)[nonzero_indices]):
                count = count + 1
                result = self.kdtree2d.query(point, features_k_nearest_neighbors,0.2,2,self.processor.feature_radius)
                #existing = result[0][0] != np.Inf
                #print existing
                #print result[1]
                self.kdtree_queried_indices += [result[1]] #[existing]
                if count % 4096 == 0:
                    print ut.getTime(),count
            print ut.getTime(), 'kdtree single queries end'
            
            #convert to numpy array -> faster access
            self.kdtree_queried_indices = np.asarray(self.kdtree_queried_indices)
        
        #print self.kdtree_queried_indices
        #takes long to compute:
        #avg_len = 0
        #minlen = 999999
        #maxlen = 0
        #for x in self.kdtree_queried_indices:
        #    avg_len += len(x)
        #    minlen = min(minlen, len(x))
        #    maxlen = max(maxlen, len(x))
        #avg_len = avg_len / len(self.kdtree_queried_indices)
        #print ut.getTime(), "range neighbors: avg_len", avg_len, 'minlen', minlen, 'maxlen', maxlen
        
        
        #create HSV numpy images:
        # compute the hsv version of the image 
        image_size = cv.cvGetSize(self.processor.img)
        img_h = cv.cvCreateImage (image_size, 8, 1)
        img_s = cv.cvCreateImage (image_size, 8, 1)
        img_v = cv.cvCreateImage (image_size, 8, 1)
        img_hsv = cv.cvCreateImage (image_size, 8, 3)
        
        cv.cvCvtColor (self.processor.img, img_hsv, cv.CV_BGR2HSV)
        
        cv.cvSplit (img_hsv, img_h, img_s, img_v, None)
        self.imNP_h = ut.cv2np(img_h)
        self.imNP_s = ut.cv2np(img_s)
        self.imNP_v = ut.cv2np(img_v)
        
        textures = texture_features.eigen_texture(self.processor.img)
        self.imNP_tex1 = textures[:,:,0]
        self.imNP_tex2 = textures[:,:,1]
        
        self.debug_before_first_featurevector = True
        
        self.generate_voi_histogram(self.processor.point_of_interest,self.processor.voi_width)
Example #19
    def exit(self):
        print "[%s][%s] Client shutting down!" % (getTime(), self.client_name)
        sys.exit()