Example no. 1
def spatial_reduction(image_name, neighbourhood):
    pix = mu.get_image_as_cv2(image_name)
    pix = pix.astype(np.uint16)
    row, col = neighbourhood

    h, w, c = pix.shape
    print(pix.shape)

    h2 = int(np.floor(h / row))
    w2 = int(np.floor(w / col))

    new_img = np.zeros((h2, w2, c))
    print(new_img.shape)
    # walk the image in non-overlapping row x col blocks and store each block's mean
    for i in range(0, w, col):
        for j in range(0, h, row):
            for k in range(0, c):
                c1 = j
                c2 = i
                c3 = min(j + row, h)
                c4 = min(i + col, w)

                temp = pix[c1:c3, c2:c4, k]

                j2 = min(int(np.floor(j / row)), new_img.shape[0] - 1)
                i2 = min(int(np.floor(i / col)), new_img.shape[1] - 1)

                new_img[j2, i2, k] = int(np.floor(np.mean(temp)))

    dest_name = mu.save_img(new_img, image_name,
                            "_space" + str(neighbourhood).replace(',', '-'))
    print(neighbourhood, pix.shape, new_img.shape, mu.get_file_size(dest_name),
          mu.get_file_size(image_name))
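The nested loops above take the mean of each non-overlapping row x col block. As a point of comparison, the same block averaging can be written without explicit Python loops; the following is a minimal NumPy sketch, independent of the mu helpers and not the project's own code:

import numpy as np

def block_mean(pix, row, col):
    # Average non-overlapping row x col blocks of an H x W x C image;
    # any ragged edge that does not fill a whole block is cropped.
    h, w, c = pix.shape
    h2, w2 = h // row, w // col
    blocks = pix[:h2 * row, :w2 * col].reshape(h2, row, w2, col, c)
    return blocks.mean(axis=(1, 3))

Called as block_mean(pix, 2, 2), it returns an image half the original size in each dimension.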
Example no. 2
def TestRead(order, episode_len=6):

    if order == "omniglot":
        if not os.path.exists("train.npz") or not os.path.exists("test.npz"):
            MyUtils.read_omniglot()
        input_path = "train.npz"
        valid_path = "test.npz"

    else:
        raise NotImplementedError

    if order == "omniglot":
        input_size = (episode_len, 28, 28, 1)
    else:
        raise NotImplementedError

    with open(input_path, "rb") as f:
        input_npz = np.load(f)
        inputs = {}
        print('train', np.shape(input_npz.files))
        for filename in input_npz.files:
            inputs[filename] = input_npz[filename]

    with open(valid_path, "rb") as f:
        valid_npz = np.load(f)
        valid_inputs = {}
        print('test', np.shape(valid_npz.files))
        for filename in valid_npz.files:
            valid_inputs[filename] = valid_npz[
                filename]  # filename is the class label; each class has 20 samples
Example no. 3
 def label(self, task_id, img_file_list, boundingbox):
     LOG.debug('label function called! labeling following files:')
     LOG.debug(img_file_list)
     cache_manager = cachemanagement.cacheManager()
     cur_dir = os.path.dirname(os.path.realpath(__file__))
     # task_id is needed for separating labeling multiple objects
     cache_dir = os.path.dirname(os.path.realpath(__file__)) + '/tmp/s3/'
     cache_dir, img_file_list = cache_manager.cacheImageFromFileList(img_file_list, cache_dir)
     img_file_list = map(lambda x: cache_dir+'/'+x, img_file_list)
     input_img_file = cache_dir+'/'+ str(task_id) +'labeler_input_img_list.txt'
     MyUtils.writeListToFile(img_file_list, input_img_file)
     output_bx_file = cache_dir +'/'+ str(task_id) + 'labeler_output_bx_list.txt'
     cmd = cur_dir+'/TrackingApp '
     cmd += str(input_img_file) + ' '
     cmd += str(output_bx_file) + ' '
     for bx_vec in boundingbox:
         cmd += str(bx_vec) + ' '
     cmd += 'BOOSTING'
     # call tracker
     os.system(cmd)
     # get input
     output_bxes = []
     with open(output_bx_file, 'r') as bx_file:
         bxes = bx_file.read().splitlines()
         for bx in bxes:
             LOG.debug(bx)
             output_bx = ast.literal_eval(bx)
             output_bxes.append(output_bx)
     return output_bxes
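label builds a single space-separated command string and runs it through os.system, so a path containing spaces would be split by the shell. A hedged alternative is to pass an argument list to subprocess; the helper name below is invented for illustration and simply mirrors the TrackingApp invocation above:

import subprocess

def run_tracking_app(tracking_app, input_img_file, output_bx_file, boundingbox):
    # Invoke the tracker without a shell so paths with spaces stay intact.
    cmd = [tracking_app, str(input_img_file), str(output_bx_file)]
    cmd += [str(bx_vec) for bx_vec in boundingbox]
    cmd.append('BOOSTING')
    subprocess.call(cmd)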
Example no. 4
def main():
    global klistener
    global mlistener
    MyUtils.create_file("EviacamTest/")

    while not done:
        continue
    sys.exit()
Example no. 5
def on_click(x, y, button, pressed):
    if pressed:
        if str(button) == "Button.left":
            MyUtils.time_convert(
                time.time() - (start_time if start_time > 0 else on_time),
                "Left Click")
        else:
            MyUtils.time_convert(
                time.time() - (start_time if start_time > 0 else on_time),
                "Right Click")
Example no. 6
def main():
    global klistener
    global done
    MyUtils.create_file("LeapMotionTest/")
    listener = LeapMotionListener()
    controller = Leap.Controller()

    controller.add_listener(listener)

    while not done:
        continue
    controller.remove_listener(listener)
Example no. 7
    def label_old(self, task_id, img_file_list, boundingbox):
        LOG.debug('camshift labeler label function called! labeling following files:')
        LOG.debug(img_file_list)
        cache_manager = cachemanagement.cacheManager()
        cache_dir = os.path.dirname(os.path.realpath(__file__)) + '/tmp/s3'
        cache_dir, img_file_list = cache_manager.cacheImageFromFileList(img_file_list, cache_dir)
        img_file_list = map(lambda x: cache_dir+'/'+x, img_file_list)
        MyUtils.writeListToFile(img_file_list, cache_dir+'/'+'labeler_input_img_list.txt')
        initialized = False
        result_bounding_boxes=[boundingbox]
        for each_img_file in img_file_list:
            LOG.debug('read in image file' + each_img_file)
            self.frame = cv2.imread(each_img_file)
            vis = self.frame.copy()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if not initialized:
                # set up initial labeled img
                x0, y0, width, height = boundingbox
                x1 = x0 + width
                y1 = y0 + height
                self.track_window = (x0, y0, width, height)
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX);
                self.hist = hist.reshape(-1)
                initialized = True
                continue
#                self.show_hist()

            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
            track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
#            pdb.set_trace()
            # get rectangle 
            track_rect = cv2.boundingRect(cv2.boxPoints(track_box))


            # try:
            #     pt1 = (track_rect[0],track_rect[1])
            #     pt2 = (track_rect[0] + track_rect[2], track_rect[1] + track_rect[3])
            #     cv2.rectangle(vis, pt1, pt2, (0, 255, 0), 2)
            #     cv2.ellipse(vis, track_box, (0, 0, 255), 2)
            # except:

            print track_rect
            result_bounding_boxes.append(list(track_rect))
            
        return result_bounding_boxes
Example no. 8
def on_press(key):
    global start_time
    global spaceFlag
    global done
    if str(key) == "Key.space" and not spaceFlag:
        if start_time == 0:
            start_time = time.time()
            MyUtils.time_convert(0, "----------Test start!----------")
        else:
            MyUtils.time_convert(time.time() - start_time, "---------- Test end! ----------")
            spaceFlag = True
            MyUtils.f.close()
            done = True
Example no. 9
 def read_message(self, sc, addr):
     message = sc.recv(1024)
     split_message = MyUtils.MessageHandler(message).message_loads()
     if split_message[0] == 'need_page':
         pass
     elif split_message[0] == 'change_page':
         pass
     else:
         sc.send(
             MyUtils.MessageBuilder(
                 ['No exist function: ' + split_message[0]],
                 'error').get_message())
     sc.close()
Example no. 10
def humomentDetect(VideoName):
    myFrames = []
    result_path = "humomentDetectResult"
    MyUtils.makeDir(result_path)
    cap = cv2.VideoCapture(VideoName)  # open the video for reading frames
    index = -1
    list_hu_dist = []
    while (cap.isOpened()):
        ret, frame = cap.read()
        if ret == False:
            break
        index += 1
        # if index < 600:
        #     continue
        if index >= 3563:
            break
        img = frame
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        moments = cv2.moments(img_gray)
        humoments = cv2.HuMoments(moments)
        humoments = np.log(np.abs(humoments))  # the seven Hu invariant moments

        temp_frame = Frame()
        temp_frame.hu = humoments
        temp_frame.index = index
        temp_frame.img = img.copy()

        myFrames.append(temp_frame)
        if len(myFrames) >= 2:
            last_Frame = myFrames[len(myFrames) - 2]
            tempdist = (last_Frame.hu[0] - temp_frame.hu[0])**2 + (
                last_Frame.hu[1] - temp_frame.hu[1])**2 + (last_Frame.hu[2] -
                                                           temp_frame.hu[2])**2
            last_Frame.hu_dist = tempdist
            print(tempdist)
            list_hu_dist.append(tempdist)

    max_value = max(list_hu_dist)
    print("max_value", max_value)
    threshold = 10
    num = 0
    for i in range(0, len(myFrames) - 1):
        if myFrames[i].hu_dist >= threshold:
            num += 1
            print(myFrames[i].index)
            cv2.imwrite(result_path + "/" + str(myFrames[i].index) + ".png",
                        myFrames[i].img)

    print(num)
Example no. 11
def check_guess(guess):
    with MySQL.UseDatabase(config) as cursor:
        cursor.execute("""SELECT * from dict where word_sn = %s;""", (guess, ))
        if len(cursor.fetchall()) == 0:
            return False
        else:
            return True
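check_guess assumes that MySQL.UseDatabase is a context manager which yields a cursor and commits on exit. That helper is not shown in these examples; a minimal sketch of the usual pattern, assuming the mysql-connector-python package and a config dict of connection arguments, could look like this:

import mysql.connector

class UseDatabase:
    # Context manager: open a connection, yield a cursor, commit and close on exit.
    def __init__(self, config):
        self.config = config

    def __enter__(self):
        self.conn = mysql.connector.connect(**self.config)
        self.cursor = self.conn.cursor()
        return self.cursor

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.conn.commit()
        self.cursor.close()
        self.conn.close()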
Example no. 12
def create_db_dict():
    with MySQL.UseDatabase(config) as cursor:
        cursor.execute("""drop table if exists dict;""")
        cursor.execute(
            """create table dict(dict_id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, word_sn VARCHAR(24));"""
        )
        print("Table dict created!")
Example no. 13
    def _GetSectionItems(self, sectionname, parser):
        """ Get the list of items in a section. 
        Parameters: sectionname (string): The name of the section.
        sectionitems: The list of section items.
        Returns: A dictionary with the parameters for the section,
            or None if an error occured.
        """
        # Default values:
        configuration = {'offactivated': '0', 'startwithoutinternet': '0', 'offtimeout': '360',
                         'startwithoutmouse': '0', 'startwithoutsound': '0', 'offteacher': '0',
                         'offwithoutlogin': '******', 'startwithoutmessages': '0',
                         'classroomname': MyUtils.classroomName(), 'allowburstwol': '1'}

        # Now, go through all the remaining items in the section, 
        # and check for options
        try:
            itemlist = self._ConfigParser.items(sectionname)
        except:
            return configuration
        
        for i in itemlist:
            # First, check to see if it is a parameter already dealt with.
            if i[0] in self._OptionalItems:
                optionname = i[0]
                if self._translatedict.has_key(optionname):
                    optionname=self._translatedict[optionname]
                configuration[optionname]=str(i[1]).replace('"','')
  
        return configuration
Example no. 14
    def __init__(self, screen_w, screen_h, screen_x, screen_y, proc_width,
                 proc_height, mask_img, model, keys, keys_to_onehot,
                 onehot_names):
        self.screen_w = screen_w
        self.screen_h = screen_h
        self.screen_x = screen_x
        self.screen_y = screen_y
        self.proc_width = proc_width
        self.proc_height = proc_height
        self.keys = keys
        self.keys_to_onehot = keys_to_onehot
        self.onehot_names = onehot_names
        self.paused = False
        self.mask = cv2.imread(mask_img, cv2.IMREAD_GRAYSCALE)
        self.model = model
        if self.mask is None:
            print "Image mask not found"
            exit(1)
        elif self.mask.shape[1] != proc_width or self.mask.shape[
                0] != proc_height:
            print "Image mask has invalid size"
            exit(1)
        self.scc = MyUtils.ScreenCapture({
            'width': screen_w,
            'top': screen_y,
            'height': screen_h,
            'left': screen_x
        })

        cv2.namedWindow("input", cv2.WINDOW_NORMAL)
Example no. 15
def create_db_hiscore():
    with MySQL.UseDatabase(config) as cursor:
        cursor.execute("""drop table if exists hiscore;""")
        cursor.execute(
            """create table hiscore(hiscore_id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, score_amt INT NOT NULL, player_sn varchar(16) NOT NULL);"""
        )
    print("Table hiscore created!")
Example no. 16
def hiscore_to_db():
    if check_score_list():
        with MySQL.UseDatabase(config) as cursor:
            for item in scorelist:
                cursor.execute(
                    """INSERT INTO hiscore(player_sn, score_amt) VALUES(%s, %s);""",
                    (item[0], item[1]))
    print("Scores inserted into database!")
Example no. 17
def spatial_reduction(image_name, neighbourhood):
    pix = mu.get_image_as_cv2(image_name)
    pix = pix.astype(np.uint16)
    row, col = neighbourhood
    row_half = int(np.floor(row / 2))
    col_half = int(np.floor(col / 2))

    h, w, c = pix.shape
    print(pix.shape)

    max0 = lambda t: max(t, 0)
    max0func = np.vectorize(max0)

    minw = lambda t: min(t, w)
    minwfunc = np.vectorize(minw)

    minh = lambda t: min(t, h)
    minhfunc = np.vectorize(minh)

    # horizontal window bounds use col_half, vertical bounds use row_half
    c1 = np.array(list(range(0, w))) - col_half
    c1 = max0func(c1)
    c2 = np.array(list(range(0, w))) + col_half
    c2 = minwfunc(c2)
    c3 = np.array(list(range(0, h))) - row_half
    c3 = max0func(c3)
    c4 = np.array(list(range(0, h))) + row_half
    c4 = minhfunc(c4)
    """sumbox = lambda t: np.sum(pix[t[0]:t[1]],pix[t[2]:t[3]])
    sumboxfunc = np.vectorize(sumbox)
    
    sumbox = lambda t: np.sum(pix[t[0]:t[1],t[2]:t[3],t[5]])
    sumboxfunc = np.vectorize(sumbox)"""

    # accumulate the neighbourhood sums in a wider dtype so they cannot overflow uint16
    new_img = pix.astype(np.float64)
    for i in range(0, w):
        for j in range(0, h):
            for k in range(0, c):
                temp = pix[c3[j]:c4[j] + 1, c1[i]:c2[i] + 1, k]
                new_img[j, i, k] = np.sum(temp)

    divisor = (neighbourhood[0] * neighbourhood[1]) - 1
    new_img = (new_img - pix) / divisor
    dest_name = mu.save_img(new_img, image_name,
                            str(neighbourhood).replace(',', '-'))
Example no. 18
    def train(self, task_id, training_set, testing_set=None, **kwargs):
        print 'vm trainer called!'
        
        # connect to database manager
        self.db_manager = MongoDBManager(HogSvmTrainer.dbname, ip_addr='128.2.213.107')
        self.storage_manager = StorageManager('test1')

        local_tmp_dir = os.path.abspath(HogSvmTrainer.local_tmp_dir)

        # download images to cache and get all bounding_boxes in memory:
        obj_image_bounding_box = \
            MyUtils.downloadImgWithBx(local_tmp_dir, self.db_manager, self.storage_manager, training_set)

        # create output dir structure
        output_dir_prefix = os.path.abspath('./classifier/hog_svm_')
        # create an output folder for each task
        if 'id' in kwargs:
            output_dir = output_dir_prefix + str(kwargs['id'])
        else:
            timestamp = time.strftime("%m_%d_%y_%H_%M_%S")
            output_dir = output_dir_prefix + timestamp
        MyUtils.create_dir(output_dir)

        # generate vm specific input files
        # for each object, create a folder for train.txt, test.txt, and final classifier svm
        obj_output_dirs = {}
        for each_obj, obj_info in obj_image_bounding_box.iteritems():
            train_obj_info, test_obj_info = self.split_train_file(obj_info, 10)
            cur_obj_dir = output_dir + '/' + each_obj
            obj_output_dirs[each_obj] = cur_obj_dir
            MyUtils.create_dir(cur_obj_dir)
            self.genInputFileForObj(each_obj, train_obj_info, cur_obj_dir+'/' + 'train_'+each_obj+'.xml')
            self.genInputFileForObj(each_obj, test_obj_info, cur_obj_dir+'/' + 'test_'+each_obj+'.xml')
            # split the training file randomly for generating testing file

        for each_obj, each_obj_dir in obj_output_dirs.iteritems():
            print 'calling dlib to train at folder {}'.format(each_obj_dir)
            detector_name = each_obj + '.svm'
            detector_path = each_obj_dir + '/' + detector_name
            accuracy = self.train_with_dlib(each_obj_dir, detector_path, 'train_'+each_obj +'.xml', 'test_'+each_obj+'.xml')

            # save the classifier into s3 and database
            accuracy = "Testing accuracy: {}".format(accuracy)

        MyUtils.zipdir(output_dir)
        zip_file = output_dir + '.zip'
        print 'uploading file to s3...'
        key = self.storage_manager.uploadFile(zip_file)
        print 'updating database...'

        # TODO: maybe send this information to controller?
        # add meta data info to database
        classifier_metadata={'name': key,
                             'training_set':training_set,
                             'testing_set':testing_set,
                             'accuracy': 'see log',
                             'filepath': key}
        self.db_manager.addClassifier(classifier_metadata)
        return 'successfully added ' + str(classifier_metadata)
Example no. 19
 def start_track(self, frame, droi):
     self.selection=MyUtils.drectangle_to_tuple(droi)
     hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
     mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
     x0, y0, x1, y1 = self.selection
     self.track_window = (x0, y0, x1-x0, y1-y0)
     hsv_roi = hsv[y0:y1, x0:x1]
     mask_roi = mask[y0:y1, x0:x1]
     hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
     cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
     self.hist = hist.reshape(-1)
Example no. 20
def test_extract_sameids():
    MyUtils.init_logging("ExploreQuestions.log")
    q_df = pd.read_csv(RQ.QA_TRAINSUBSET_DFPATH, sep="_")
    q_df_sorted = q_df.sort_values(by=["asin"],
                                   axis=0,
                                   ascending=True,
                                   inplace=False,
                                   kind='quicksort',
                                   na_position='last')

    q_df_asins = q_df_sorted["asin"].copy()
    q_df_asins.drop_duplicates(inplace=True)
    print(len(q_df_asins))

    for asin in q_df_asins:
        q_df_subset = q_df_sorted[q_df_sorted["asin"] == asin].copy()
        subset_duplicates = q_df_subset[q_df_subset.duplicated(
            'unixTime', keep=False) == True]
        if len(subset_duplicates) > 0:
            subset_duplicates.to_csv("ExploreQuestions.log", mode="a", sep="_")
Example no. 21
def print_sell_change(content):
    db_data = util.get_sell_history_data()
    content += '\n'
    for row in db_data:
        fund_code = row[2]
        # look up the fund info by its fund code
        fund_dict = get_fund_value_json(fund_code)
        percentage = (float(fund_dict['dwjz']) - float(row[3])) / float(row[3])
        # print(fund_dict['dwjz'])
        content += '<' + fund_dict['name'] + '>截止到' + fund_dict[
            'jzrq'] + '基金涨跌幅为:' + '%.2f%%' % (percentage * 100)
        # print(content)
    return content
Example no. 22
 def notify_status_to_server(self):
     if self.identifier == "":
         message = MyUtils.MessageBuilder([self.my_port_to_listen],
                                          'new_client').get_message()
     else:
         message = MyUtils.MessageBuilder(
             [self.my_port_to_listen, self.identifier],
             'new_client').get_message()
     print("Sending state to server: " + message.decode())
     self.socket_to_server.send(message)
     result = self.socket_to_server.recv(1024)
     split_message = MyUtils.MessageHandler(result).message_loads()
     print(result.decode())
     if split_message[0] != 'error':
         self.identifier = split_message[0]
         i = 1
         while i < len(split_message):
             self.add_page_number(split_message[i])
             i = i + 1
     else:
         print("error: " + split_message[1])
         exit()
Example no. 23
 def start(self, interval, normalized_keys):
     self.scc = MyUtils.ScreenCapture({
         'width': self.screen_w,
         'top': self.screen_y,
         'height': self.screen_h,
         'left': self.screen_x
     })
     self.interval = interval
     self.last_time = time.time()
     self.normalized_keys = normalized_keys
     self.trainData = []
     self.loadRawData()
     self.paused = False
Example no. 24
def dict_to_db():
    if check_dict_lists():
        with MySQL.UseDatabase(config) as cursor:
            #All short words of guesslist
            for guess in guesslist:
                if len(guess) < 7:
                    cursor.execute("""INSERT INTO dict(word_sn) VALUES(%s);""",
                                   (guess, ))
            #Any other words in sourcelist (filtered for unwinnable)
            for source in sourcelist:
                cursor.execute("""INSERT INTO dict(word_sn) VALUES(%s);""",
                               (source, ))
    print("Dictionaries inserted into database!")
Example no. 25
def getdevice(device, position=None, brand=None):
    url = apiUrl + 'getdevice'
    postdata = {
        'brand': brand,
        'device': device,
        'position': position,
        'token': apiKey
    }
    headers = {'content-type': 'application/json'}
    result = MyUtils.sendpostdata(url, postdata, headers)
    try:
        return result.json()
    except AttributeError:
        return result
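getdevice delegates the HTTP call to MyUtils.sendpostdata and falls back to returning the raw result when .json() is unavailable, which suggests the helper returns something other than a Response on failure. The helper itself is not shown; a plausible stand-in based on the requests library, an assumption rather than the project's actual implementation, would be:

import requests

def sendpostdata(url, postdata, headers, timeout=10):
    # POST a JSON payload; return the Response, or the error text if the request fails.
    try:
        return requests.post(url, json=postdata, headers=headers, timeout=timeout)
    except requests.RequestException as exc:
        return str(exc)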
Example no. 26
 def run(self):
     with open(self.path2, 'w') as f:
         f.write(
             "# ################################## Sub2 ############################################### #\n"
         )
         for k, v in self.json_conf.items():
             MyUtils.DebugInfo(GeneratePyFileByJson.getType(v),
                               "getFileType", 0)
             if GeneratePyFileByJson.getType(v) in ("integer", "list"):
                 f.write(str(k) + ' = ' + str(v) + '\n')
             elif GeneratePyFileByJson.getType(v) == "String":
                 f.write(str(k) + ' = \"' + str(v) + '\"\n')
             else:
                 f.write(str(k) + ' = \"' + str(v) + '\"\n')
         f.write("\n")
Example no. 27
 def __init__(self, dbname, debug=False, ip_addr=None):
     # constants
     self.IMG_COLL_NAME = 'image'
     self.CLASSIFIER_COLL_NAME = 'classifier'
     #vars
     self.client=None
     if ip_addr != None:
         self.client = MongoClient(ip_addr)
     else:
         self.client = MongoClient()
         
     self.db = self.client[dbname]
     self.img_coll = self.db[self.IMG_COLL_NAME]
     self.createIndexes()
     self.logger         = MyUtils.getLogger('mongdbmanager')        
     if debug:
         pdb.set_trace()
Example no. 28
def readOmdCurpus(fileName):
    path = './OMD/' + fileName + '.csv'
    inFile = []
    with open(path, 'r', encoding='utf-8') as csvfile:
        csvReader = csv.reader(csvfile)
        for row in csvReader:
            inFile.append(row[0:2])

    tokenziedInput = [
        inp[0:1] + mu.getTokenizedSentenceForDrKeshavarz(inp[1])
        for inp in inFile
    ]
    with open(fileName + '_lemmatized_' + '.csv', 'w',
              encoding='utf-8') as csvfile:
        spamwriter = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
        for row in tokenziedInput:
            spamwriter.writerow(row[0:1] + [' '.join(row[1:])])
Example no. 29
    def __init__(self):
        # vm registries
        self.label_vm   = []
        self.feature_vm = []
        self.classify_vm = []
        self.logger         = MyUtils.getLogger('controller')
        
        #file manager
        self.data_manager = StorageManager('test1')
        self.db_manager = DBManager()
        
        #engines

        # rpc server for now for handling client request
        rpc_paths = ('/RPC2',)
        # Create server
        self.server = SimpleXMLRPCServer(("0.0.0.0", 8888))
        self.server.register_introspection_functions()
        self.server.register_function(self.addImage, 'addImage')
        self.server.serve_forever()
Example no. 30
def check_plan():
    sql = 'select name,plan_url,detail_url from fund_plan'
    db_data = util.get_all_db_data(sql)
    content = ''
    count = 0
    for row in db_data:
        # name = row[0]
        plan_url = row[1]
        detail_url = row[2]
        _json = get_url_json(plan_url)
        last_trade_date_fmt = _json.get('data').get('last_trade_date_fmt')
        now_date_fmt = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        # the plan's departure date is today
        if (last_trade_date_fmt == now_date_fmt):
            detail_json = get_url_json(detail_url)
            content = '<' + _json.get('data').get('plan_name') + '> 已发车,发车时间:' + get_current_time() +\
                '.发车方案:'
            for item in detail_json.get('data').get('items'):
                content += '<' + item.get('fd_name') + '>'
            send_dingding(count, content)
        count += 1
Example no. 31
def testing():
    testing = r'FmtMessageBox(_LS(IDS_SDSw_Cannot_Find_Text_Editot_Program), sGENEdit);'
    FuncConv = MyUtils.FunctionConvertor()
    MyUtils.ConvertLiteralString2TCHARString(testing)
Example no. 32
def main():
    nSizeArg = len(sys.argv)

    if nSizeArg < 2:
        print('''Invalid arguments.
        "File Path" [extension] [/Temp]
        "File Path" : every file under this folder, including subfolders, will be modified.
        "extension" : semicolon-separated extensions, used to filter the files.
        [/Temp] : if /Temp is given, output files are created with _Temp appended; otherwise files are overwritten.
        [/NoneRecursive] : if /NoneRecursive is given, only files directly in the path are searched, excluding subfolders.
        ex) "C:\\MIDAS\\wbs\\src" ".cpp;.h"''')
        return False

    src_path = sys.argv[1]
    extension = []
    bMakeTemp = "/Temp" in sys.argv
    bNotFindUnder = "/NoneRecursive" in sys.argv
    bFileListTXT = False
    if os.path.isfile(src_path):
        if os.path.splitext(src_path)[-1] == '.txt':
            bFileListTXT = True

    if nSizeArg > 2:
        extension = sys.argv[2].split(';')

    FileList = []
    if bFileListTXT == False:
        FileList = MyUtils.GetAllFileWithExt(src_path, extension,
                                             bNotFindUnder)
    else:
        FileList = MyUtils.GetFileListFromTextFile(src_path, extension)

    encoding_list = ['cp949', 'gb2312', 'utf-8', 'utf-8-sig', 'utf-16']
    ErrorFiles = []
    for FilePath in tqdm(FileList):
        file_encoding = ''
        lines = []
        for encoding in encoding_list:
            file_encoding = encoding
            try:
                with open(FilePath, 'r', encoding=encoding) as file:
                    lines = file.readlines()
            except UnicodeDecodeError:
                continue
            break

        if len(lines) == 0:
            ErrorFiles.append(FilePath)
            continue

        new_lines = MyUtils.ConvertAll(lines)

        split_list = os.path.splitext(FilePath)
        if bMakeTemp == True:
            file_out = open("".join(split_list[0:-1]) + '_Temp' +
                            split_list[-1],
                            'w',
                            encoding=file_encoding)
        else:
            file_out = open(FilePath, 'w', encoding=file_encoding)

        file_out.writelines(new_lines)
        file_out.close()
    for ErrorFile in ErrorFiles:
        print("Could not decode this file! Path : {}".format(ErrorFile))

    return True
Example no. 33
ax.format_coord = Formatter(im)
plt.show()



in_size = 32*32
#im_w = 32
#im_h = 32
#in_filter_w = 3
#in_filter_h = 3

#model.add(Convolution2D(32, 3, in_filter_h, in_filter_w, border_mode='full')) 
#im_w = im_w + in_filter_w - 1
#im_h = im_h + in_filter_h -1 

result_image = MyUtils.ImageToTheanoTensor('3wolfmoon.jpg')
two_samples = np.vstack((result_image,result_image))

nhid=100
model = Sequential()
model.add(Convolution2D(1, 2, 3, 3, border_mode='full'))
#model.add(Convolution2D(1, 1, 3, 3, border_mode='valid'))
#model.add(Convolution2D(3, 1, 3, 3, border_mode='full'))
model.add(Activation('sigmoid', target=0.05))
#model.add(Dense(nhid, 256))


(X_train, y_train), (X_test, y_test) = keras.datasets.cifar10.load_data(test_split=0.1, seed=113)


sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
Example no. 34
import MyUtils

IDs = [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
    21
]

for pt in IDs:

    #    subband = 'full'
    #    print ("pt= " + str(pt+1))
    #    MyUtils.TrainTest(pt, subband)

    subband = 'bandAlpha'
    print("pt= " + str(pt + 1))
    MyUtils.TrainTest(pt, subband)
    FileName_BestModel = 'BestModel_' + subband + '_excl' + str(pt) + '.h5'
    MyUtils.NewTest(FileName_BestModel, subband + 'Filtered', pt)

    subband = 'bandBeta'
    print("pt= " + str(pt + 1))
    MyUtils.TrainTest(pt, subband)
    FileName_BestModel = 'BestModel_' + subband + '_excl' + str(pt) + '.h5'
    MyUtils.NewTest(FileName_BestModel, subband + 'Filtered', pt)

    subband = 'bandDelta'
    print("pt= " + str(pt + 1))
    MyUtils.TrainTest(pt, subband)
    FileName_BestModel = 'BestModel_' + subband + '_excl' + str(pt) + '.h5'
    MyUtils.NewTest(FileName_BestModel, subband + 'Filtered', pt)
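The three per-subband blocks above differ only in the subband name. Assuming MyUtils.TrainTest and MyUtils.NewTest keep the signatures used above, the per-participant work can be expressed once, as in this sketch:

import MyUtils

IDs = list(range(22))

for pt in IDs:
    for subband in ('bandAlpha', 'bandBeta', 'bandDelta'):
        print("pt= " + str(pt + 1))
        MyUtils.TrainTest(pt, subband)
        FileName_BestModel = 'BestModel_' + subband + '_excl' + str(pt) + '.h5'
        MyUtils.NewTest(FileName_BestModel, subband + 'Filtered', pt)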
Example no. 35
# Removed 8,9 i.e. 7, 8
IDs = [0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 21]

PATH = "C:/Elham/EEG_PatientIdentification/P1/P1_SHAM_TASK_AK_Saliency/from GPU_OLD/Models/"
subband = 'full'

ef1_S = np.zeros(22)
ef2_S = np.zeros(22)
ef3_S = np.zeros(22)

for pt in IDs:
    print("pt= " + str(pt + 1))
    FileName_BestModel = PATH + 'BestModel_full_excl' + str(pt) + '.h5'
    part = 1
    ef1_S[pt] = MyUtils.Test_Temporal3Parts_6bars_ShortLong(
        FileName_BestModel, subband, pt, part, 'S')

    part = 2
    ef2_S[pt] = MyUtils.Test_Temporal3Parts_6bars_ShortLong(
        FileName_BestModel, subband, pt, part, 'S')

    part = 3
    ef3_S[pt] = MyUtils.Test_Temporal3Parts_6bars_ShortLong(
        FileName_BestModel, subband, pt, part, 'S')

new_ef1_S = [n for n in ef1_S if n > 0]
new_ef2_S = [n for n in ef2_S if n > 0]
new_ef3_S = [n for n in ef3_S if n > 0]

data = [new_ef1_S, new_ef2_S, new_ef3_S]
fig, ax = plt.subplots()
Example no. 36
    def train(self, task_id, training_set, testing_set, **kwargs):
        print 'caffe train called'

        self.output_dir_prefix = os.path.abspath('./classifier/caffenet_')
        # create an output folder for each task
        if None != task_id:
            self.output_dir = self.output_dir_prefix + str(task_id)
        else:
            timestamp = time.strftime("%m_%d_%y_%H_%M_%S")
            self.output_dir = self.output_dir_prefix + timestamp

        # TODO: changed to user input parameter?
        max_iter = 500
        if 'iteration' in kwargs:
            max_iter = kwargs['iteration']

        MyUtils.create_dir(self.output_dir)


        # connect to database manager
        self.db_manager = MongoDBManager(CaffeNetTrainer.dbname, ip_addr='128.2.213.107')
        self.storage_manager = StorageManager('test1')

        # make intermediate folder
        self.ori_image_dir = CaffeNetTrainer.local_tmp_dir + '/' +'original'
        self.crop_image_dir = CaffeNetTrainer.local_tmp_dir + '/' +'crop'
        self.cache_manager = cacheManager(self.ori_image_dir)
        MyUtils.create_dir(self.ori_image_dir)
        MyUtils.create_dir(self.crop_image_dir)
        # get image file list and bounding boxes

        # get objects regardless of their videos, since the same object in different videos has the same identity
        # summarize to get all objects
        obj_set = {}
        for obj, vid in training_set.iteritems():
            if obj not in obj_set:
                obj_set[obj] = []
            obj_set[obj].extend(vid)
        print obj_set

        # for each object, get images
        training_set_obj_file_path ={}
        for obj, vids in obj_set.iteritems():
            # 1. download image
            # 2. crop image based on bounding boxes
            obj_image_path = []
            obj_bounding_boxes_with_image = {}
            for vid in vids:
                obj_bounding_boxes_each_vid = self.db_manager.getBoundingBoxWithImageByVidAndObj(vid, obj)
                # TODO: probably can be a bit more efficient. right now is downloading vid repeatedly if it appears
                # multiple times in train yaml
                self.cache_manager.cacheImageFiles(vid, dir=False)
                obj_bounding_boxes_with_image.update(obj_bounding_boxes_each_vid)

            for image, bx_list in obj_bounding_boxes_with_image.iteritems():
#                self.storage_manager.download(image, self.ori_image_dir)
                idx =0
                for bx in bx_list:
                    im = Image.open(self.ori_image_dir+'/'+image)
                    left_x = bx[0]
                    left_y = bx[1]
                    right_x = left_x + bx[2]
                    right_y = left_y + bx[3]
                    output_file_path = self.crop_image_dir + '/' +os.path.splitext(image)[0] + '_' + str(obj) + str(idx) \
                                       + os.path.splitext(image)[1]
                    im.crop((left_x,left_y, right_x, right_y)).save(output_file_path)
                    obj_image_path.append(output_file_path)
                    idx +=1
            training_set_obj_file_path[obj] = obj_image_path

        # generate training file
        training_set_obj_file_path, testing_set_obj_file_path =self.split_train_file(training_set_obj_file_path, 10)
        self.generate_caffe_train_file(training_set_obj_file_path, self.output_dir + '/train.txt')
        self.generate_caffe_train_file(testing_set_obj_file_path, self.output_dir +'/test.txt')
        # generate a label file mapping labels to object names
        self.generate_caffe_label_file(training_set_obj_file_path, self.output_dir + '/label.txt')

        # modify network prototxt
        num_output_category = len(obj_set)

        train_file_path = os.path.abspath(self.output_dir + '/train.txt')
        test_file_path = os.path.abspath(self.output_dir + '/test.txt')

        output_train_val_path = self.output_dir + '/' + 'train_val_custom.prototxt'
        output_solver_path = self.output_dir + '/' + 'solver_custom.prototxt'
        output_deploy_path = self.output_dir + '/' + 'deploy_custom.prototxt'
        # fine-tuning output
        self.custom_net_path = self.output_dir + '/train_val_custom.prototxt'
        self.custom_snapshot_prefix= self.output_dir + '/caffenet_custom'
        self.output_layer_name ="fc8_custom"
        self.output_net_name="CaffeNetCustom"
        # original model
        self.original_output_layer_name ='fc8'
        # reference design file locations
        input_file_prefix = os.path.abspath('./bvlc_reference_caffenet')
        train_file ='train_val.prototxt'
        solver_file ='solver.prototxt'
        deploy_file ='deploy.prototxt'

        self.mod_caffe_net(input_file_prefix+'/' + train_file, num_output_category, train_file_path,
                           test_file_path, output_train_val_path)
        self.mod_caffe_solver(input_file_prefix+ '/' +solver_file,
                              max_iter, output_solver_path)
        self.mod_caffe_deploy(input_file_prefix + '/' + deploy_file,
                              num_output_category, output_deploy_path)

        cmd ="/opt/caffe/build/tools/caffe train"
        cmd += " -solver " + output_solver_path
        cmd += " -weights ./bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"
        cmd += " | tee " + self.output_dir + '/' + 'log.txt'

        print cmd
        # issue train
        os.system(cmd)
        MyUtils.zipdir(self.output_dir)
        zip_file = self.output_dir + '.zip'
        print 'uploading file to s3...'
        key = self.storage_manager.uploadFile(zip_file)
        print 'updating database...'

        # TODO: maybe send this information to controller?
        # add meta data info to database
        classifier_metadata={'name': key,
                             'training_set':training_set,
                             'testing_set':testing_set,
                             'accuracy': 'see log',
                             'filepath': key}
        self.db_manager.addClassifier(classifier_metadata)
        return
Example no. 37
    def __init__(self, name, **kwargs):
        """Constructor
        Args:
            name: Nombre del objeto creado
            kwargs: Lista de parámetros aceptados:
                loopback_window: Time-steps a pasado para utilizar en la predicción (24)
                forward_window: Time-steps a futuro a predecir (4)
                num_lstm_layers: Número de capas LSTM (3)
                num_cells: Número de neuronas en cada capa (128)
                num_dense_layers: Número de capas Dense intermedias (0)
                batch_size: Número de muestras del batch (32)
                suffle_enable: Flag para hacer 'suffling' en el entrenamiento (True)
                tvt_csv_file: Archivo csv con datos históricos train-validate-test
				verbose: Flag de depuración
        """
        self.name = name
        self.lbw = None
        self.fww = None
        self.nll = None
        self.nlc = None
        self.ndl = None
        self.ndc = None
        self.bs = None
        self.sf = None
        _dbg = False
        self.df = None        
        self.dfapp = None
        self.num_outputs = 0
        self.num_inputs = 0
        self.num_in_steps = self.lbw
        self.num_out_steps = self.fww
        self.sts_df = None
        self.sts_src = None
        self.sts_scaled = None
        self.scaler = None
        self.x_train = None
        self.y_train = None
        self.x_validation = None
        self.y_validation = None
        self.x_test = None
        self.y_test = None
        self.model = None
        self.callbacks_list = None
        self.columns_to_include = None
        self.columns_to_exclude = None
        self.indicators_to_generate = None
        
        for key,val in kwargs.items():        
            if key=='loopback_window': 
                self.lbw = val
                self.num_in_steps = val
            if key=='forward_window': 
                self.fww = val
                self.num_out_steps = val
            if key=='num_lstm_layers': 
                self.nll = val
            if key=='num_cells': 
                self.nlc = val
                self.ndc = val
            if key=='num_dense_layers': 
                self.ndl = val
            if key=='batch_size': 
                self.bs = val
            if key=='suffle_enabled': 
                self.sf = val
            if key=='verbose':
                if val=='full':
                    _dbg = True
            if key=='tvt_csv_file': 
                if _dbg:
                    print('Loading historical data...')
                self.df = utils.load_hist(val, sep=';', remove_weekends=True)
                if _dbg:
                    print(self.df.head())
                    print('Adding indicators...')
                self.df = utils.add_indicators(self.df, applied=['HIGH','LOW'], base_cols=['OPEN','HIGH','LOW','CLOSE','OC2','HLC3','OHLC4'])
                
                self.num_outputs = 2
                self.df['OUT_HIGH'] = self.df.HIGH.shift(-1)
                self.df['OUT_LOW'] = self.df.LOW.shift(-1)
                self.num_inputs = self.df.shape[1] - self.num_outputs
                if _dbg:
                    print(self.df.head())
                    print('Converting to supervised form with ins={}, outs={}...'.format(self.num_inputs,self.num_outputs))
                self.sts_df = self.series_to_supervised(self.df, self.num_inputs, self.num_outputs, self.lbw, self.fww)                
                if _dbg:
                    print(self.sts_df.head())
                    print('Normalizing...')
                self.sts_scaled, self.scaler = self.normalize_data(self.sts_df, None, self.name+'_scaler.save')
                if _dbg:
                    print(self.sts_scaled.head())
                    print('Preparing XY pairs...')
                self.x_train, self.y_train, self.x_validation, self.y_validation, self.x_test, self.y_test = self.prepare_training_data(self.sts_scaled, self.bs, 4, 1, 0.8, _dbg)
                if _dbg:
                    print('Building the network...')
                self.model, self.callbacks_list = self.build_net(_dbg) 
                if _dbg:
                    print(self.model.summary())
Example no. 38
def train():
    hparams = define_flags()
    hparams.seq_len = episode_len = hparams.n * hparams.k + 1

    if hparams.dataset == "omniglot":
        if not os.path.exists(
                "/youedata/home/zhangyuhan/Data/OmniglotNPZ/train.npz"
        ) or not os.path.exists(
                "/youedata/home/zhangyuhan/Data/OmniglotNPZ/test.npz"):
            MyUtils.read_omniglot()
        input_path = "/youedata/home/zhangyuhan/Data/OmniglotNPZ/train.npz"
        valid_path = "/youedata/home/zhangyuhan/Data/OmniglotNPZ/test.npz"

    else:
        raise NotImplementedError

    if hparams.dataset == "omniglot":
        input_size = (episode_len, 28, 28, 1)
    else:
        raise NotImplementedError

    with open(input_path, "rb") as f:
        input_npz = np.load(f)
        inputs = {}
        for filename in input_npz.files:
            inputs[filename] = input_npz[filename]

    with open(valid_path, "rb") as f:
        valid_npz = np.load(f)
        valid_inputs = {}
        for filename in valid_npz.files:
            valid_inputs[filename] = valid_npz[
                filename]  # filename is the class label; each class has 20*4 samples

    with tf.Graph().as_default():
        q = FewShotInputQueue(inputs.keys(), inputs, hparams.n, hparams.k)
        valid_q = FewShotInputQueue(valid_inputs.keys(), valid_inputs,
                                    hparams.n, hparams.k)

        generated_input, generated_label = tf.py_func(q.make_one_data, [],
                                                      [tf.float32, tf.int32])
        batch_tensors = tf.train.batch([generated_input, generated_label],
                                       batch_size=hparams.batch_size,
                                       num_threads=4,
                                       shapes=[input_size, (episode_len, )],
                                       capacity=hparams.batch_size * 5)
        valid_input, valid_label = tf.py_func(valid_q.make_one_data, [],
                                              [tf.float32, tf.int32])
        valid_batch_tensors = tf.train.batch(
            [valid_input, valid_label],
            batch_size=hparams.batch_size,
            num_threads=4,
            shapes=[input_size, (episode_len, )],
            capacity=hparams.batch_size * 5)
        # each batch has shape (128,)
        with tf.variable_scope("networks"):
            embed_network = OmniglotEmbedNetwork(batch_tensors,
                                                 hparams.batch_size)
            gnn = GNN_FSL(hparams, embed_network.output,
                          embed_network.label_placeholder, True)

        with tf.variable_scope("networks", reuse=True):
            valid_embed_network = OmniglotEmbedNetwork(valid_batch_tensors,
                                                       hparams.batch_size)
            valid_gnn = GNN_FSL(hparams, valid_embed_network.output,
                                valid_embed_network.label_placeholder, False)
        params_to_str = f"M_Nets_{hparams.input_dim}_{hparams.lr}_{hparams.n}_{hparams.k}_{hparams.num_gcn_blocks}_{hparams.label_cut}_{hparams.nn_option}"
        log_dir = os.path.abspath(
            os.path.join(os.path.curdir, "runs", params_to_str))
        # Summaries
        tf.summary.scalar("train_loss", gnn.loss)
        tf.summary.scalar("train_acc", gnn.accuracy)

        tf.summary.scalar("valid_loss", valid_gnn.loss)
        tf.summary.scalar("valid_acc", valid_gnn.accuracy)

        tf.summary.image("inputs",
                         valid_embed_network.input_placeholder[0],
                         max_outputs=episode_len)

        # Supervisor
        supervisor = tf.train.Supervisor(
            logdir=log_dir,
            save_summaries_secs=240,
            save_model_secs=600,
            global_step=gnn.global_step,
        )

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        print("Training start")
        # with the Supervisor, restore from a checkpoint if one exists; otherwise variables are initialized automatically
        with supervisor.managed_session(config=config) as sess:
            min_dev_loss = 10000
            min_step = -1

            STEP_NUM = 10000000
            EARLY_STOP = 3000000
            print_every = 500

            HUGE_VALIDATION_CYCLE = print_every * 20

            # writer = tf.summary.FileWriter(logdir=log_dir,graph=sess.graph)
            # writer.close()
            last_dev = time.time()

            for step in range(STEP_NUM):
                if supervisor.should_stop():
                    break

                if step - min_step > EARLY_STOP:
                    print("Early stopping...")
                    break

                if step % print_every != 0:
                    _, loss, acc, global_step = sess.run([
                        gnn.train_step, gnn.loss, gnn.accuracy, gnn.global_step
                    ])
                    # print(gnn.predict_label.shape)
                else:
                    _, loss, acc, global_step = sess.run([
                        gnn.train_step, gnn.loss, gnn.accuracy, gnn.global_step
                    ])

                    loss, acc = sess.run([valid_gnn.loss, valid_gnn.accuracy])

                    current_time = time.time()
                    print(
                        f'Evaluate(Step {step}/{global_step} : valid loss({loss}), acc({acc}) in {current_time - last_dev} s'
                    )

                    _, train_data, loss, acc, target_label, predictlabel, propagation, diff, Lap, simi, cmpr = sess.run(
                        [valid_gnn.train_step, valid_gnn.concated_input, valid_gnn.loss, valid_gnn.accuracy, valid_gnn.target_label,
                         valid_gnn.predict_label, \
                         valid_gnn.propagation, valid_gnn.diff, valid_gnn.Lap, valid_gnn.simi, valid_gnn.cmpr])

                    print("predict is \n", predictlabel)
                    print('target is \n', target_label)
                    # print('diff is: \n', diff, '\n')
                    # print('compare is \n', cmpr)
                    # print('simi is', '\n', simi, '\n')
                    print('Lap is \n', Lap)
                    print('propagation is', '\n', propagation, '\n')
                    print("loss and acc \n", loss, acc, '\n')

                    # HUGE VALIDATION
                    if step != 0 and step % HUGE_VALIDATION_CYCLE == 0:
                        total_loss = total_acc = 0.
                        BATCH_NUM = 40
                        for _ in range(BATCH_NUM):
                            loss, acc = sess.run(
                                [valid_gnn.loss, valid_gnn.accuracy])
                            total_loss += loss * hparams.batch_size
                            total_acc += acc * hparams.batch_size

                        total_loss /= BATCH_NUM * hparams.batch_size
                        total_acc /= BATCH_NUM * hparams.batch_size

                        huge_data_acc_summary = tf.Summary()
                        huge_data_acc_summary.value.add(
                            tag="huge_data_accuracy", simple_value=total_acc)
                        supervisor.summary_computed(sess,
                                                    huge_data_acc_summary,
                                                    global_step=global_step)

                        huge_data_loss_summary = tf.Summary()
                        huge_data_loss_summary.value.add(
                            tag="huge_data_loss", simple_value=total_loss)
                        supervisor.summary_computed(sess,
                                                    huge_data_loss_summary,
                                                    global_step=global_step)

                    last_dev = current_time

                    if loss < min_dev_loss:
                        min_dev_loss = loss
                        min_step = step
Example no. 39
    if modified:  
        configfile=open(newconfig, 'wb')
        new_configparser.write(configfile)
        configfile.close()                         
        
def config_exist(newconfig):
    """Checks if a classroom is already configured
    in the current config file"""
    new_configparser = ConfigParser.ConfigParser()  
    new_configparser.read(newconfig)    
    aula = RootConfigs['classroomname'] 
    return new_configparser.has_section(aula)
                     
########################################################                                

APP_DIR=os.path.join(MyUtils.getHomeUser(),'.controlaula')
WWWPAGES='/usr/share/controlaula/frontend/www'
LANG='/usr/share/controlaula/lang'
TEACHER_UID=''

if not os.path.isdir(APP_DIR):
    os.mkdir(APP_DIR)

if MyUtils.getLoginName() != 'root':
    IMAGES_DIR = os.path.join(APP_DIR, 'loginimages')
    if not os.path.isdir(IMAGES_DIR):
        os.mkdir(IMAGES_DIR)
    if not os.path.isdir(IMAGES_DIR + '/screenshots'):
        os.mkdir(IMAGES_DIR + '/screenshots')

    FILES_DIR = os.path.join(APP_DIR, 'sendfile')
Example no. 40
 def __init__(self, server_ip=u"ws://localhost", server_port=9000):
     self.logger=MyUtils.getLogger(__name__)
     self.logger.setLevel(logging.DEBUG)
     server_ip_port = server_ip + ':' +str(server_port)
     self.ws=create_connection(server_ip_port)
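The constructor above opens its socket with create_connection from the websocket-client package. A minimal standalone sketch of that same API, with the server URL assumed, for sending and receiving a single message:

from websocket import create_connection

ws = create_connection("ws://localhost:9000")
ws.send("ping")
reply = ws.recv()
print(reply)
ws.close()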