示例#1
0
 def get(self):
     """Warm the quote cache for the tracked tickers, but only on workdays.

     Returns ``None`` without doing any work on weekends.
     """
     start = datetime.datetime.now()
     # datetime.weekday() is 0-6 with Monday == 0, so the weekend is
     # {5, 6} (Saturday, Sunday).  The original test `in [6, 7]` only
     # matched Sunday: 7 can never occur and Saturday (5) slipped through.
     if start.weekday() in [5, 6]:
         logging.info("Not workday, just return")
         return None
     logging.info("Workday, continue")
     myutils.cache(['aapl', 'goog', 'vxx'])
示例#2
0
文件: worker.py 项目: liqueur/monitor
def run():
    """Collect host metrics plus the bind-counter delta and POST them to HOST.

    NOTE(review): Python 2 code (`urllib`/`urllib2`, `except Exception, e`)
    with tab indentation; left byte-identical on purpose.
    """
    try:
	# Current bind counter from the monitored service.
	new_bind = utils.bind()
	ctime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
	timestamp = time()
	# One line per run is appended: "<ctime>\t<timestamp>\t<bind>".
	with open('/home/zhoujingzhong/monitor/bind.log', 'a+') as f:
	    # Last recorded counter value; assumes the log already has at
	    # least one line -- TODO confirm first-run behaviour (IndexError
	    # on an empty file would be swallowed by the except below).
	    old_bind = int(f.readlines()[-1].strip().split('\t')[-1])
	    f.write('{}\t{}\t{}\n'.format(ctime, timestamp, new_bind))
	    # Report the delta since the previous sample; if the counter went
	    # backwards (presumably a service restart), the raw value is sent.
	    if new_bind >= old_bind:
	        new_bind -= old_bind
	    data = {'ip': IP,
	    	    'cpu': utils.cpu(),
	    	    'memory': utils.memory(),
	    	    'disk': utils.disk(LOC),
	    	    'cache': utils.cache(),
	    	    'bind': new_bind,
	    	    'timestamp': timestamp,
	    	    'ctime': ctime}
	    # The payload is form-encoded as a single 'data' field holding JSON.
	    data = {'data': json.dumps(data)}
	    data = urllib.urlencode(data)
	    req = urllib2.Request(url=HOST, data=data)
	    ret = urllib2.urlopen(req)
	    ret = ret.read()
    except Exception, e:
	# Best-effort reporter: any failure is logged, never raised.
	traceback.print_exc()
	logger.error(str(e))
示例#3
0
 def rsrc(url):
     """Serve a static resource from BASE_PATH, or a plain-text 404."""
     path = BASE_PATH + url
     extension = url.split(".")[-1]
     # NOTE(review): unknown extensions raise KeyError here, and the path is
     # not sanitised against traversal -- confirm upstream routing handles it.
     server.set_header("Content-Type", MIME_TYPES[extension])
     if not os.path.exists(path):
         server.set_code(404)
         server.set_header("Content-Type", "text/plain")
         return b"Not Found"
     server.set_code(200)
     return utils.cache(path)
示例#4
0
 def rsrc(url):
     """Serve a static resource from BASE_PATH with long-lived caching headers,
     or a plain-text 404 when the file does not exist."""
     path = BASE_PATH + url
     extension = url.split(".")[-1]
     server.set_header("Content-Type", MIME_TYPES[extension])
     if not os.path.exists(path):
         server.set_code(404)
         server.set_header("Content-Type", "text/plain")
         return b"Not Found"
     server.set_code(200)
     # One-year immutable lifetime -- presumably the assets are versioned by
     # URL; NOTE(review): confirm, otherwise stale files will be served.
     server.set_header("Cache-Control",
                       "public,max-age=31536000,immutable")
     return utils.cache(path)
示例#5
0
def take_order(message):
    """Forward an order message to the operators channel, rate-limited per user."""
    # cache() returns the remaining cooldown (falsy when the user may post).
    expire = cache(message.from_user.id)
    if expire:
        # Still inside the cooldown window -- tell the user how long is left.
        bot.send_message(
            chat_id=message.chat.id,
            text=f'Подождите {expire} секунд прежде чем опять отправить заявку.'
        )
    else:
        bot.forward_message(chat_id=-1001299756866,
                            from_chat_id=message.chat.id,
                            message_id=message.message_id)
        bot.send_message(chat_id=message.chat.id, text='Заявка принята.')
示例#6
0
 def play_token(url):
     """Validate a login token and redirect to the player page on success."""
     server.set_header("Content-Type", "text/html")
     # Everything after the 6-character route prefix is the login payload.
     tk = api.is_valid_login(url[6:])
     if tk is None:
         # Invalid login: answer 404 with the canned error page.
         server.set_code(404)
         server.set_header("Content-Type", "text/html")
         return utils.cache("web/not_found.html")
     # Valid login: set the session cookie and temporary-redirect to /play.
     server.set_code(307)
     server.set_header(
         "Set-cookie",
         f"__ctoken={tk};Max-Age={api.TOKEN_EXP_DATE};SameSite=Secure;Secure;HttpOnly;Path=/"
     )
     server.set_header("Location",
                       "https://megimeru.herokuapp.com/play")
     return b""
示例#7
0
    X_val, Y_val, X_test, Y_test, mmn, external_dim, \
    timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = read_cache(
        fname, 'preprocessing_nyc.pkl')
    print("load %s successfully" % fname)
else:
    if (model_name == 'model3'):
        load_data = BikeNYC3d.load_data
    else:
        load_data = BikeNYC.load_data
    X_train_all, Y_train_all, X_train, Y_train, \
    X_val, Y_val, X_test, Y_test, mmn, external_dim, \
    timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = load_data(
        T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
        len_val=len_val, preprocess_name='preprocessing_nyc.pkl', meta_data=True, datapath=DATAPATH)
    if CACHEDATA:
        cache(fname, X_train_all, Y_train_all, X_train, Y_train, X_val, Y_val, X_test, Y_test,
              external_dim, timestamp_train_all, timestamp_train, timestamp_val, timestamp_test)

print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])

def train_model(encoder_blocks, lstm_units, lr, batch_size, save_results=False, i=''):
    # get discrete parameters
    encoder_blocks = int(encoder_blocks)
    lstm_units = 2 ** int(lstm_units)
    batch_size = 16 * int(batch_size)

    filters = [32,64,16] if encoder_blocks==2 else [32,64,64,16]

    # build model
    m = models_dict[model_name]
    model = m.build_model(
        len_closeness, len_period, len_trend, nb_flow, map_height, map_width,
示例#8
0
from pprint import pprint
import requests
from utils import cache

from key import API_KEY

# Every GitHub API call authenticates with the personal access token.
AUTH_HEADERS = {'Authorization': 'token %s' % API_KEY}

# Helper function for Github API URLs: api_url("users/x") -> full endpoint.
api_url = "https://api.github.com/{}".format

# Manually decorate requests.get with our cache function, so repeated
# lookups of the same URL are served from the cache instead of the network.
requests.get = cache(requests.get)


def followers_of(username):
    """Yield the followers of *username* from the GitHub API.

    Raises ValueError if the user does not exist.  Note this is a
    generator, so the request fires on first iteration.
    """
    endpoint = api_url("users/{}/followers".format(username))
    response = requests.get(endpoint, headers=AUTH_HEADERS)
    if response.status_code == 404:
        raise ValueError("404! User not found.")
    yield from response.json()


def follower_graph(username, depth=2):
    """Generate a graph of followers (of followers, of followers...).
    :param depth: The maximum depth to search for followers in the graph.
    :param username: The username we're starting with for this graph.
    """
    def update(self):
        '''
        gesture flag for distinguish different scenario
        '''
        global color_flag
        OK, origin = self.cap.read()

        x = None
        if OK:
            #print(self.mode)
            rect = camrectify(origin)
            # self.out.write(rect)
            # rect = cv2.flip(rect,0)
            # rect = cv2.flip(rect,1)
            warp = warp_img(rect)
            thresh = get_objectmask(warp)
            cv2.imshow('thresh', thresh)
            self.image = warp.copy()
            draw_img1 = warp.copy()
            self.get_bound(draw_img1, thresh, visualization=True)
            cx, cy = None, None
            lx, rx = None, None

            # self.handls = []
            # hsv = cv2.cvtColor(warp.copy(),cv2.COLOR_BGR2HSV)
            # hand_mask = cv2.inRange(hsv, Hand_low, Hand_high)
            # hand_mask = cv2.dilate(hand_mask, kernel = np.ones((7,7),np.uint8))
            # (_,hand_contours, hand_hierarchy)=cv2.findContours(hand_mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            # for i , contour in enumerate(hand_contours):
            #     area = cv2.contourArea(contour)
            #     if area>600 and area < 100000 and hand_hierarchy[0, i, 3] == -1:					
            #         x,y,w,h = cv2.boundingRect(contour)
            #         self.handls.append([x, y, w, h])
            
            result = hand_tracking(warp_img(rect), cache(10), cache(10)).get_result()
            num_hand_view = len(result)
            # if num_hand_view == 0:
            #     self.tip_deque.clear()
            #     self.tip_deque1.clear()
            #     self.tip_deque2.clear()
            if num_hand_view == 0:
                if len(self.hand_mask) > 0 and self.after_trigger:
                    if color_flag is not None:
                        object_mask = get_objectmask(deepcopy(self.image))
                        if color_flag == "yellow":
                            color_mask = get_yellow_objectmask(deepcopy(self.image))
                        elif color_flag == "blue":
                            color_mask = get_blue_objectmask(deepcopy(self.image))
                        # elif color_flag == "green":
                        #     color_mask = get_green_objectmask(deepcopy(self.image))
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i],mask)                     
                        mask = cv2.bitwise_and(mask,color_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    else:
                        object_mask = get_objectmask(deepcopy(self.image))
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i],mask)                     
                        mask = cv2.bitwise_and(mask,object_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    '''
                    multihand
                    '''
                    self.draw = draw_img1
                    print("getting bitwise and when there is one finger after palm")
                    #print([temp_result, tips[0], center,3])
                    #self.hand_mask = []
                    #self.after_trigger = False
                    #netsend(temp_result, flag=1, need_unpack=True)
                    self.last_select = temp_result
                    self.mode = 3
                   # self.center = center
                    #return [temp_result, tips[0], center,3]
                else:
                    netsend([777,888], need_unpack=False, flag=-19)
            '''
            one hand in the view
            '''
            if num_hand_view == 1:
                center = result[0][0]
                tips = result[0][1]
                radius = result[0][2]
                box = result[0][3]
                fake_tip, fake_center = result[0][4]
                app = result[0][5]
                cv2.drawContours(draw_img1, [app],-1,(255, 0, 0),1)
                for k in range(len(tips)):
                    cv2.circle(draw_img1,tips[k],10,(255, 0, 0),2)
                    cv2.line(draw_img1,tips[k],center,(255, 0, 0),2)
                num_tips = len(tips)
                label = self.test(box, draw_img1)
                self.onehand_center = center
                #print(box)
                #label = -1
                #label = classifier(draw_img1,self.image, box)
                #self.tip_deque.appendleft(tips)
            # '''
            # one hand and one finger, flag == 1
            # '''
                
                    #rospy.loginfo("mask, trigger:{},{}".format(len(self.hand_mask), self.after_trigger))
                #if num_tips == 1 and len(self.boxls) > 0 and label == 1:
                if len(self.hand_mask) > 0 and self.after_trigger:
                    if color_flag is not None:
                        object_mask = get_objectmask(deepcopy(self.image))
                        if color_flag == "yellow":
                            color_mask = get_yellow_objectmask(deepcopy(self.image))
                            netsend([777,888], need_unpack=False, flag=-200)
                        elif color_flag == "blue":
                            color_mask = get_blue_objectmask(deepcopy(self.image))
                            netsend([777,888], need_unpack=False, flag=-100)
                        # elif color_flag == "green":
                        #     color_mask = get_green_objectmask(deepcopy(self.image))
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i],mask)                     
                        mask = cv2.bitwise_and(mask,color_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    else:
                        object_mask = get_objectmask(deepcopy(self.image))
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i],mask)   
                        #print(mask.dtype, object_mask.dtype)                  
                        mask = cv2.bitwise_and(mask,object_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    '''
                    multihand
                    '''
                    self.draw = draw_img1
                    print("getting bitwise and when there is one finger after palm")
                    if len(tips) == 0:
                        rospy.logwarn("no finger tips")
                    else:
                        #print([temp_result, tips[0], center,3])
                        #self.hand_mask = []
                        #self.after_trigger = False
                        self.last_select = temp_result
                        self.mode = 3
                        #self.center = center
                        return [temp_result, tips[0], center,3]

                if len(self.boxls) > 0 and num_tips == 1 and label != 4:        
                    if len(self.hand_mask) == 0 or not self.after_trigger:
                        #rospy.loginfo("single pointing")
                        #point = max(tips, key=lambda x: np.sqrt((x[0]- center[0])**2 + (x[1] - center[1])**2))
                        point = tips[0]
                        self.tip_deque.appendleft(point)
                        #
                        length_ls = []
                        for x, y, w, h in self.boxls:
                            length_ls.append((get_k_dis((point[0], point[1]), (center[0], center[1]), (x+w/2, y+h/2)), (x+w/2, y+h/2)))
                        length_ls = filter(lambda x: (point[1] - x[1][1]) * (point[1] - center[1]) <= 0, length_ls)
                        length_ls = filter(lambda x: x[1][1] - point[1] < 0, length_ls)
                        length_ls = filter(lambda x: x[0] < 15, length_ls)
                        if len(length_ls) > 0:
                            x,y = min(length_ls, key=lambda x: distant((x[1][0], x[1][1]), (point[0], point[1])))[1]
                            ind = test_insdie((x, y), self.boxls)
                            x, y, w, h = self.boxls[ind]
                            cx, cy = self.surfacels[ind]
                            cv2.rectangle(draw_img1,(x,y),(x+w,y+h),(0,0,255),2)
                            #cv2.circle(draw_img1, (cx, cy), 5, (0, 0, 255), -1)
                            #cv2.putText(draw_img1,"pointed",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(0,0,255))
                            
                            '''
                            flag is 1
                            '''
                            if self.trigger:
                                self.pick_tip = tuple([point[0],point[1]])
                            self.draw = draw_img1
                            self.last_select = [(cx, cy)]
                            netsend([cx, cy], need_unpack=False)
                            self.mode = 1
                            self.pick_center = center
                            return [[point[0],point[1]],(cx, cy), center,1]
                        else:
                            self.draw = draw_img1
                            self.mode = 1
                            self.pick_center = center
                            return [[point[0],point[1]], center,1]
            #  '''
            # one hand and two finger, flag == 2
            # '''
                elif num_tips == 2 and len(self.boxls) > 0 and label != 4:
                    boxls = deepcopy(self.boxls)
                    length_lsr = []
                    length_lsl = []
                    rpoint, lpoint = tips
                    for x, y, w, h in self.boxls:
                        length_lsr.append((get_k_dis((rpoint[0], rpoint[1]), (center[0], center[1]), (x+w/2, y+h/2)), (x+w/2, y+h/2)))
                    length_lsr = filter(lambda x: (rpoint[1] - x[1][1]) * (rpoint[1] - center[1]) <= 0, length_lsr)
                    length_lsr = filter(lambda x: x[0] < 20, length_lsr)
                    if len(length_lsr) > 0:
                        rx,ry = min(length_lsr, key=lambda x: distant((x[1][0], x[1][1]), (rpoint[0], rpoint[1])))[1]
                        rind = test_insdie((rx, ry), self.boxls)
                        rx, ry = self.surfacels[rind]
                        x, y, w, h = self.boxls[rind]
                        #rx, ry = int(x+w/2), int(y+h/2)
                        del boxls[rind]
                        cv2.rectangle(draw_img1,(x,y),(x+w,y+h),(0,0,255),2)
                        #cv2.putText(draw_img1,"pointed_right",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(0,0,255))
                        if len(boxls) > 0:
                            for x, y, w, h in boxls:
                                length_lsl.append((get_k_dis((lpoint[0], lpoint[1]), (center[0], center[1]), (x+w/2, y+h/2)), (x+w/2, y+h/2)))
                            length_lsl = filter(lambda x: (lpoint[1] - x[1][1]) * (lpoint[1] - center[1]) <= 0, length_lsl)
                            length_lsl = filter(lambda x: x[0] < 20, length_lsl)
                            if len(length_lsl) > 0:
                                lx,ly = min(length_lsl, key=lambda x: distant((x[1][0], x[1][1]), (lpoint[0], lpoint[1])))[1]
                                lind = test_insdie((lx, ly), boxls)
                                lx, ly = self.surfacels[lind]
                                x, y, w, h = boxls[lind]
                                #lx, ly = int(x+w/2), int(y+h/2)
                                cv2.rectangle(draw_img1,(x,y),(x+w,y+h),(0,0,255),2)
                                #cv2.putText(draw_img1,"pointed_left",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(0,0,255))
                                '''
                                flag is 2
                                '''
                                self.draw = draw_img1
                                self.last_select = [[rx, ry], [lx, ly]]
                                netsend([[rx, ry], [lx, ly]])
                                self.mode = 2
                                #self.center = center
                                self.pick_center = center
                                return [[tips[0][0], tips[0][1]], [tips[1][0], tips[1][1]], [rx, ry], [lx, ly], center,2]

                # '''
                # one hand and multi finger, flag == 3
                # '''
                elif num_tips > 0 and label == 3:
                    temp_center = (center[0], center[1] - 30)
                    if not self.trigger:
                        netsend(list(temp_center), need_unpack=False, flag=-18)
                    elif self.trigger:
                        # surface = np.ones(self.image.shape)
                        # cv2.circle(surface, center, 120, (255, 255, 255), -1)
                        # grayscaled = cv2.cvtColor(surface,cv2.COLOR_BGR2GRAY)
                        # retval, threshold = cv2.threshold(grayscaled, 10, 255, cv2.THRESH_BINARY)
                        # self.hand_mask.append(threshold)
                        self.hand_mask = []
                        self.hand_mask.append(get_handmask(deepcopy(self.image), center))
                        rospy.loginfo("get brushed")
                        self.draw = draw_img1
                        self.trigger = False
                        self.mode = 3
                        rospy.loginfo("send center information :{}".format(list(temp_center)))
                        netsend(list(temp_center), need_unpack=False, flag=-8)
                        self.pick_center = center
                        #self.center = center
                        return [temp_center,3]

                elif label == 4 and len(self.boxls) > 0 and len(tips) > 0 and len(tips) < 4:
                    #point = max(tips, key=lambda x: np.sqrt((x[0]- center[0])**2 + (x[1] - center[1])**2))
                    point = fake_tip
                    center = fake_center
                    length_ls = []
                    for x, y, w, h in self.boxls:
                        length_ls.append((get_k_dis((point[0], point[1]), (center[0], center[1]), (x+w/2, y+h/2)), (x+w/2, y+h/2)))
                    #length_ls = filter(lambda x: (point[1] - x[1][1]) * (point[1] - center[1]) <= 0, length_ls)
                    #length_ls = filter(lambda x: (point[0] - x[1][0]) * (center[0] - x[1][0]) > 0, length_ls)
                    length_ls = filter(lambda x: x[1][1] - point[1] < 0, length_ls)
                    #print("haha", len(length_ls))
                    length_ls = filter(lambda x: x[0] < 50, length_ls)
                    #print("ddd", len(length_ls))
                    sub_result = []
                    if color_flag is not None:
                        object_mask = get_objectmask(deepcopy(self.image))
                        if color_flag == "yellow":
                            color_mask = get_yellow_objectmask(deepcopy(self.image))
                        elif color_flag == "blue":
                            color_mask = get_blue_objectmask(deepcopy(self.image))
                        if len(length_ls) > 0:
                            for i in range(len(length_ls)):
                                # x,y = min(length_ls, key=lambda x: distant((x[1][0], x[1][1]), (point[0], point[1])))[1]
                                # ind = test_insdie((x, y), self.boxls)
                                x,y = length_ls[i][1]
                                ind = test_insdie((x, y), self.boxls)
                                x, y, w, h = self.boxls[ind]
                                cx, cy = self.surfacels[ind]
                                if color_mask[cy, cx] == 255:
                                    sub_result.append((cx, cy))
                                cv2.rectangle(draw_img1,(x,y),(x+w,y+h),(0,0,255),2)
                                #cv2.circle(draw_img1, (cx, cy), 5, (0, 0, 255), -1)
                                #cv2.putText(draw_img1,"general",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(0,0,255))
                            
                            '''
                            flag is 1
                            '''
                            self.draw = draw_img1
                            self.last_select = sub_result
                            self.mode = 6
                            self.pick_center = center
                            #self.center = center
                            return [sub_result, center ,6]
                        else:
                            self.draw = draw_img1
                            return None
                    
                    else:
                        if len(length_ls) > 0:
                            for i in range(len(length_ls)):
                            # x,y = min(length_ls, key=lambda x: distant((x[1][0], x[1][1]), (point[0], point[1])))[1]
                            # ind = test_insdie((x, y), self.boxls)
                                x,y = length_ls[i][1]
                                ind = test_insdie((x, y), self.boxls)
                                x, y, w, h = self.boxls[ind]
                                cx, cy = self.surfacels[ind]
                                sub_result.append((cx, cy))
                                cv2.rectangle(draw_img1,(x,y),(x+w,y+h),(0,0,255),2)
                                #cv2.circle(draw_img1, (cx, cy), 5, (0, 0, 255), -1)
                                #cv2.putText(draw_img1,"general",(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(0,0,255))
                            netsend(sub_result, need_unpack=True)
                            self.draw = draw_img1
                            self.last_select = sub_result
                            self.mode = 6
                            self.pick_center = center
                            #self.center = center
                            return [sub_result, center ,6]
                        else:
                            self.draw = draw_img1
                            return None
                    







            '''
            two hand in the view
            '''
            if num_hand_view == 2:
                lcenter = result[0][0]
                ltips = result[0][1]
                lnum_tips = len(ltips)
                lradius = result[0][2]
                lbox = result[0][3]
                llabel = self.test(lbox, draw_img1) 
                app = result[0][5]
                cv2.drawContours(draw_img1, [app],-1,(255, 0, 0),1)
                for k in range(len(ltips)):
                    cv2.circle(draw_img1,ltips[k],10,(255, 0, 0),2)
                    cv2.line(draw_img1,ltips[k],lcenter,(255, 0, 0),2)

                rcenter = result[1][0]
                rtips = result[1][1]
                rnum_tips = len(rtips)
                rradius = result[1][2]
                rbox = result[1][3]
                rlabel = self.test(rbox, draw_img1)
                lapp = result[1][5]
                cv2.drawContours(draw_img1, [lapp],-1,(255, 0, 0),1)
                for k in range(len(rtips)):
                    cv2.circle(draw_img1,rtips[k],10,(255, 0, 0),2)
                    cv2.line(draw_img1,rtips[k],rcenter,(255, 0, 0),2)
                # '''
                # two hand is both one finger pointing, ONLY PLACE
                # '''
                if set([lnum_tips, rnum_tips]) == set([1,1]) and len(self.boxls) > 0 and set([llabel, rlabel]) == set([1,1]):
                    self.draw = draw_img1
                    
                    '''
                    flag is 4
                    '''
                    self.mode = 4
                    self.two_hand_mode =4
                    self.tip_deque1.appendleft((ltips[0][0], ltips[0][1]))
                    self.tip_deque2.appendleft((rtips[0][0], rtips[0][1]))
                    self.center = [list(lcenter), list(rcenter)]
                    return [[rtips[0][0], rtips[0][1]], [ltips[0][0], ltips[0][1]], [list(rcenter), list(lcenter)], 4]

                elif max(set([lnum_tips, rnum_tips])) >= 2 and min(set([lnum_tips, rnum_tips])) == 1 and max(set([llabel, rlabel])) < 4 and self.onehand_center:
                    #sub_result = filter(lambda x: len(x[1]) == 1 , [[rcenter, rtips], [lcenter, ltips]])
                    sub_result = max([[rcenter, rtips], [lcenter, ltips]], key=lambda x: distant(x[0], self.onehand_center))
                    center = sub_result[0]
                    tips = sub_result[1]
                    # center = sub_result[0][0]
                    # tips = sub_result[0][1]
                    self.tip_deque.appendleft((tips[0][0], tips[0][1]))
                    self.draw = draw_img1
                    
                    if max(set([lnum_tips, rnum_tips])) == 2 and set([lnum_tips, rnum_tips]) == set([1,2]):
                        self.mode = 1
                        self.two_hand_mode = 1
                        return [[tips[0][0], tips[0][1]], 1]
                    else:
                        self.mode = 5
                        self.two_hand_mode = 5
                        return [[tips[0][0], tips[0][1]], 5]
                
                elif min(set([lnum_tips, rnum_tips])) == 1 and max(set([llabel, rlabel])) == 4 and self.onehand_center:
                    #sub_result = filter(lambda x: len(x[1]) == 1 , [[rcenter, rtips], [lcenter, ltips]])
                    sub_result = max([[rcenter, rtips], [lcenter, ltips]], key=lambda x: distant(x[0], self.onehand_center))
                    center = sub_result[0]
                    tips = sub_result[1]
                    # center = sub_result[0][0]
                    # tips = sub_result[0][1]
                    self.tip_deque.appendleft((tips[0][0], tips[0][1]))
                    self.draw = draw_img1
                    self.mode = 1
                    self.two_hand_mode = 1
                    #rospy.loginfo("jdjdjdjjs")
                    return [[tips[0][0], tips[0][1]], 1]
        self.draw = draw_img1       
        for k in range(len(finger)):
            cv2.circle(frame_in, finger[k], 10, (0, 0, 255), 2)
            cv2.line(frame_in, finger[k], (cx, cy), (0, 0, 255), 2)
        return frame_in, finger


def warp(img):
    """Perspective-correct the camera frame into a fixed 640x480 view."""
    # Hand-calibrated corners of the region of interest in the raw frame
    # (an older calibration used [[115,124],[520,112],[2,476],[640,480]]).
    src_corners = np.float32([[206, 138], [577, 114], [208, 355], [596, 347]])
    dst_corners = np.float32([[0, 0], [640, 0], [0, 480], [640, 480]])
    transform = cv2.getPerspectiveTransform(src_corners, dst_corners)
    return cv2.warpPerspective(img, transform, (640, 480))


if __name__ == '__main__':
    # Simple live demo: track hands on the warped camera feed until quit.
    capture = cv2.VideoCapture(0)
    while True:
        grabbed, frame = capture.read()
        if grabbed:
            tracker = hand_tracking(warp(frame), cache(10), cache(10))
            #print(tracker.get_result())
        key = cv2.waitKey(1) & 0xFF  # large wait time to remove freezing
        if key == 113 or key == 27:  # 'q' or Esc quits
            break
    capture.release()
    cv2.destroyAllWindows()
示例#11
0
    def __init__(self, start):
        """Initialise camera capture, GUI surfaces, caches, and state flags.

        :param start: wall-clock start time kept for elapsed-time bookkeeping.
        """
        self.cap = cv2.VideoCapture(0)
        self.start_time = start

        # Lifecycle flags for the store -> train -> track pipeline.
        self.stored_flag = False
        self.trained_flag = False
        self.milstone_flag = False
        self.incremental_train_flag = False
        self.tracking_flag = False

        self.boxls = None
        self.count = 1
        self.new_count = 1
        # Dataset root; the label files are only opened when recording 'all'.
        self.path = "/home/intuitivecompting/Desktop/color/Smart-Projector/script/datasets/"
        if MODE == 'all':
            self.file = open(self.path + "read.txt", "w")
            self.milestone_file = open(self.path + "mileston_read.txt", "w")
        self.user_input = 0
        self.predict = None
        # Rolling buffers of capacity 10 -- presumably recent frames/hand
        # states; confirm against the cache() implementation.
        self.memory = cache(10)
        self.memory1 = cache(10)
        self.hand_memory = cache(10)

        self.node_sequence = []
        #-----------------------create GUI-----------------------#
        # A 130x640 strip with three coloured buttons: start / stop / quit.
        self.gui_img = np.zeros((130,640,3), np.uint8)
        cv2.circle(self.gui_img,(160,50),30,(255,0,0),-1)
        cv2.putText(self.gui_img,"start",(130,110),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(255,0,0))
        cv2.circle(self.gui_img,(320,50),30,(0,255,0),-1)
        cv2.putText(self.gui_img,"stop",(290,110),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(0,255,0))
        cv2.circle(self.gui_img,(480,50),30,(0,0,255),-1)
        cv2.putText(self.gui_img,"quit",(450,110),cv2.FONT_HERSHEY_SIMPLEX, 1.0,(0,0,255))
        cv2.namedWindow('gui_img')
        cv2.namedWindow('gui_img1')
        # Both GUI windows share the same mouse-click handler.
        cv2.setMouseCallback('gui_img',self.gui_callback)
        cv2.setMouseCallback('gui_img1',self.gui_callback)
        #-----------------------Training sign--------------#
        # White splash screen shown while the network trains.
        self.training_surface = np.ones((610,640,3), np.uint8) * 255
        cv2.putText(self.training_surface,'Training...',(120,300),cv2.FONT_HERSHEY_SIMPLEX, 3.0,(255,192,203), 5)
        #----------------------new coming item id------------------#
        self.new_come_id = None
        self.old_come_id = None
        self.new_come_side = None
        self.old_come_side = None
        self.new_coming_lock = True
        self.once_lock = True
        #---------------------set some flag-------------------#
        self.storing = None
        self.quit = None
        self.once = True
        #---------------------set gui image----------------------#
        self.temp_surface = None
        #----------------------for easier developing (test mode skips
        # data collection and loads a pre-trained model)-----------------#
        if MODE == 'test':
            if not GPU:
                self.net = Net()
            else:
                self.net = Net().cuda()
            self.net.load_state_dict(torch.load(f=self.path + 'model'))
            self.user_input = 5
            self.stored_flag = True
示例#12
0
def main():
    """Command-line entry point: evaluate (or predict with) a model on one city.

    Parses CLI options, instantiates the requested model (optionally
    restoring saved weights), predicts the submission frames for every
    sample of the chosen split while caching predictions on disk, and for
    the ``validation`` split prints a per-channel MSE summary table.
    """
    parser = argparse.ArgumentParser(description="Evaluate a given model")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        required=True,
                        choices=MODELS,
                        help="which model to use")
    parser.add_argument("-p",
                        "--model-path",
                        type=str,
                        help="path to the saved model")
    parser.add_argument("-s",
                        "--split",
                        default="validation",
                        choices={"validation", "test"},
                        help="data split (for 'test' it only predicts)")
    parser.add_argument("-c",
                        "--city",
                        required=True,
                        choices=CITIES,
                        help="which city to evaluate")
    parser.add_argument("--overwrite",
                        default=False,
                        action="store_true",
                        help="overwrite existing predictions if they exist")
    parser.add_argument("--channels",
                        nargs='+',
                        default=["Volume", "Speed", "Heading"],
                        help="List of channels to predict")
    parser.add_argument("--tablefmt",
                        default="github",
                        help="how to format the results")
    parser.add_argument("-v",
                        "--verbose",
                        action="count",
                        help="verbosity level")
    args = parser.parse_args()
    # Keep the selected channels in the dataset's canonical channel order.
    args.channels.sort(
        key=lambda x: src.dataset.Traffic4CastSample.channel_to_index[x])

    if args.verbose:
        print(args)

    Model = MODELS[args.model]
    model = Model()

    if args.model_path:
        model.load_state_dict(torch.load(args.model_path))
        model.eval()

    # Decide how to map model outputs onto the requested channels:
    # either one pass predicting all selected channels at once, or (for a
    # single-channel model) one pass per channel.
    if model.num_channels == len(args.channels):
        if (model.num_channels != 3):
            print(
                f"WARNING: Model predicts {model.num_channels} and "
                f"{args.channels} were selected. Unselected channels will be "
                "predicted as 0.")
        selected_channels_transforms = [
            src.dataset.Traffic4CastSample.Transforms.SelectChannels(
                args.channels)
        ]
    elif model.num_channels == 1:
        print(
            f"WARNING: Model predicts {model.num_channels} channel but "
            f"channels {args.channels} were selected. Iteration mode enabled."
            "Unselected channels will be predicted as 0.")
        selected_channels_transforms = [
            src.dataset.Traffic4CastSample.Transforms.SelectChannels([c])
            for c in args.channels
        ]
    else:
        print(f"ERROR: Model to channels missmatch. Model can predict "
              f"{model.num_channels} channels. {len(args.channels)} were "
              "selected.")
        sys.exit(1)

    # Normalize raw uint8 frames to [0, 1] floats in TCHW layout.
    transforms = [
        lambda x: x.float(),
        lambda x: x / 255,
        src.dataset.Traffic4CastSample.Transforms.Permute("TCHW"),
    ]
    dataset = src.dataset.Traffic4CastDataset(ROOT, args.split, [args.city],
                                              transforms)

    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=2,
        collate_fn=src.dataset.Traffic4CastDataset.collate_list)

    def predict(sample, channel_transforms):
        # Run the model once per channel transform and scatter each
        # predicted channel into the full evaluation-shaped array;
        # channels never predicted stay 0.
        predictions = np.zeros(EVALUATION_SHAPE)
        for transform in channel_transforms:
            s = copy.deepcopy(sample)
            transform(s)
            for f, p in model.predict(SUBMISSION_FRAMES[args.city], s).items():
                for c_i, c in enumerate(transform.channels):
                    predictions[SUBMISSION_FRAMES[args.city].index(f), :, :,
                                src.dataset.Traffic4CastSample.
                                channel_to_index[c]] = p[c_i]

        # Scale back to the 0-255 submission range.
        predictions = predictions * 255.0
        predictions = predictions.reshape(SUBMISSION_SHAPE)
        return predictions

    # Cache predictions to a specified path
    to_overwrite = args.overwrite
    cached_predict = lambda path, *args: cache(predict, path, to_overwrite, *
                                               args)

    if args.model_path:
        model_name, _ = os.path.splitext(os.path.basename(args.model_path))
    else:
        model_name = args.model

    dirname = get_prediction_folder(args.split, model_name, args.city)
    os.makedirs(dirname, exist_ok=True)

    to_str = lambda v: f"{v:.4f}"

    errors = []
    for sample in loader:
        sample = sample[0]  # batch_size == 1: unwrap the collated list
        predictions = cached_predict(
            sample.predicted_path(dirname),
            sample,
            selected_channels_transforms,
        )
        if args.split == "validation":
            # Prepare predictions
            predictions = predictions / 255.0
            predictions = predictions.reshape(*EVALUATION_SHAPE)
            # Prepare groundtruth
            sample.permute('THWC')
            i = torch.tensor(SUBMISSION_FRAMES[args.city], dtype=torch.long)
            gt = sample.data.index_select(0, i).numpy()
            # Compute error
            # Per-channel MSE: average over frames, height and width.
            mse = np.mean((gt - predictions)**2, axis=(0, 1, 2))
            errors.append(mse)
            if args.verbose:
                print(sample.date, "|", " | ".join(to_str(e) for e in mse))
        elif args.split == "test":
            if args.verbose:
                print(sample.date)

    if args.split == "validation":
        errors = np.vstack(errors)
        table = [[args.model] +
                 [to_str(v) for v in errors.mean(axis=0).tolist()] +
                 [to_str(errors.mean())]]
        headers = ["model"] + CHANNELS + ["mean"]
        print(tabulate(table, headers=headers, tablefmt=args.tablefmt))
    def update(self):
        '''
        Process one camera frame and classify the current hand gesture.

        Reads a frame from ``self.cap``, rectifies/warps it, segments
        objects and hands, then branches on how many hands are visible
        (0, 1 or 2).  Returns a list whose last element is a gesture mode
        flag (the inline "flag is N" notes below mark each case), or
        ``None`` / falls through when nothing is recognized.

        Side effects: updates ``self.draw``, ``self.mode``,
        ``self.last_select``, ``self.pick_center`` and related state, and
        reports selections to a remote peer via ``netsend``.
        '''
        global color_flag
        OK, origin = self.cap.read()

        x = None
        if OK:
            # Rectify the raw camera frame, warp it to the working view and
            # extract the object mask used for bounding-box detection.
            rect = camrectify(origin)
            warp = warp_img(rect)
            thresh = get_objectmask(warp)
            cv2.imshow('thresh', thresh)
            self.image = warp.copy()
            draw_img1 = warp.copy()
            self.get_bound(draw_img1, thresh, visualization=True)
            cx, cy = None, None
            lx, rx = None, None

            # Detect hands in the warped view; each result entry bundles
            # center, fingertips, radius, box, (fake_tip, fake_center), contour.
            result = hand_tracking(warp_img(rect), cache(10),
                                   cache(10)).get_result()
            num_hand_view = len(result)
            if num_hand_view == 0:
                # No hand visible: if a palm/brush mask was captured earlier,
                # intersect it with the (color-filtered) object mask to keep
                # the previously brushed-over objects selected.
                if len(self.hand_mask) > 0 and self.after_trigger:
                    if color_flag is not None:
                        object_mask = get_objectmask(deepcopy(self.image))
                        if color_flag == "yellow":
                            color_mask = get_yellow_objectmask(
                                deepcopy(self.image))
                        elif color_flag == "blue":
                            color_mask = get_blue_objectmask(
                                deepcopy(self.image))
                        # OR all stored hand masks together, then AND with the
                        # color mask to keep only brushed objects of that color.
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i], mask)
                        mask = cv2.bitwise_and(mask, color_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    else:
                        object_mask = get_objectmask(deepcopy(self.image))
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i], mask)
                        mask = cv2.bitwise_and(mask, object_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    '''
                    multihand
                    '''
                    self.draw = draw_img1
                    print(
                        "getting bitwise and when there is one finger after palm"
                    )
                    self.last_select = temp_result
                    self.mode = 3
                else:
                    # Nothing to select; notify the peer with a sentinel value.
                    netsend([777, 888], need_unpack=False, flag=-19)
            '''
            one hand in the view
            '''
            if num_hand_view == 1:
                center = result[0][0]
                tips = result[0][1]
                radius = result[0][2]
                box = result[0][3]
                fake_tip, fake_center = result[0][4]
                app = result[0][5]
                # Draw the hand contour and fingertip-to-center rays for the GUI.
                cv2.drawContours(draw_img1, [app], -1, (255, 0, 0), 1)
                for k in range(len(tips)):
                    cv2.circle(draw_img1, tips[k], 10, (255, 0, 0), 2)
                    cv2.line(draw_img1, tips[k], center, (255, 0, 0), 2)
                num_tips = len(tips)
                label = self.test(box, draw_img1)
                self.onehand_center = center
                # '''
                # one hand and one finger, flag == 1
                # '''

                if len(self.hand_mask) > 0 and self.after_trigger:
                    # Same brushed-selection logic as the zero-hand case above,
                    # but also reports the active color filter to the peer.
                    if color_flag is not None:
                        object_mask = get_objectmask(deepcopy(self.image))
                        if color_flag == "yellow":
                            color_mask = get_yellow_objectmask(
                                deepcopy(self.image))
                            netsend([777, 888], need_unpack=False, flag=-200)
                        elif color_flag == "blue":
                            color_mask = get_blue_objectmask(
                                deepcopy(self.image))
                            netsend([777, 888], need_unpack=False, flag=-100)
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i], mask)
                        mask = cv2.bitwise_and(mask, color_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    else:
                        object_mask = get_objectmask(deepcopy(self.image))
                        mask = self.hand_mask[0]
                        for i in range(1, len(self.hand_mask), 1):
                            mask = cv2.bitwise_or(self.hand_mask[i], mask)
                        #print(mask.dtype, object_mask.dtype)
                        mask = cv2.bitwise_and(mask, object_mask)
                        temp_result = []
                        for cx, cy in self.surfacels:
                            if mask[cy, cx] == 255:
                                temp_result.append((cx, cy))
                    '''
                    multihand
                    '''
                    self.draw = draw_img1
                    print(
                        "getting bitwise and when there is one finger after palm"
                    )
                    if len(tips) == 0:
                        rospy.logwarn("no finger tips")
                    else:
                        self.last_select = temp_result
                        self.mode = 3
                        return [temp_result, tips[0], center, 3]

                if len(self.boxls) > 0 and num_tips == 1 and label != 4:
                    if len(self.hand_mask) == 0 or not self.after_trigger:
                        point = tips[0]
                        self.tip_deque.appendleft(point)
                        #
                        # Rank object boxes by distance from the finger ray
                        # (fingertip through hand center) and keep candidates
                        # in front of the fingertip within a 15px corridor.
                        length_ls = []
                        for x, y, w, h in self.boxls:
                            length_ls.append((get_k_dis(
                                (point[0], point[1]), (center[0], center[1]),
                                (x + w / 2, y + h / 2)), (x + w / 2,
                                                          y + h / 2)))
                        length_ls = filter(
                            lambda x: (point[1] - x[1][1]) *
                            (point[1] - center[1]) <= 0, length_ls)
                        length_ls = filter(lambda x: x[1][1] - point[1] < 0,
                                           length_ls)
                        length_ls = filter(lambda x: x[0] < 15, length_ls)
                        if len(length_ls) > 0:
                            x, y = min(length_ls,
                                       key=lambda x: distant(
                                           (x[1][0], x[1][1]),
                                           (point[0], point[1])))[1]
                            ind = test_insdie((x, y), self.boxls)
                            x, y, w, h = self.boxls[ind]
                            cx, cy = self.surfacels[ind]
                            cv2.rectangle(draw_img1, (x, y), (x + w, y + h),
                                          (0, 0, 255), 2)
                            '''
                            flag is 1
                            '''
                            if self.trigger:
                                self.pick_tip = tuple([point[0], point[1]])
                            self.draw = draw_img1
                            self.last_select = [(cx, cy)]
                            netsend([cx, cy], need_unpack=False)
                            self.mode = 1
                            self.pick_center = center
                            return [[point[0], point[1]], (cx, cy), center, 1]
                        else:
                            # Pointing, but no object under the finger ray.
                            self.draw = draw_img1
                            self.mode = 1
                            self.pick_center = center
                            return [[point[0], point[1]], center, 1]
            #  '''
            # one hand and two finger, flag == 2
            # '''
                elif num_tips == 2 and len(self.boxls) > 0 and label != 4:
                    # Two fingers: match the right and left fingertips to two
                    # distinct objects (the first match is removed from the
                    # candidate list before matching the second).
                    boxls = deepcopy(self.boxls)
                    length_lsr = []
                    length_lsl = []
                    rpoint, lpoint = tips
                    for x, y, w, h in self.boxls:
                        length_lsr.append((get_k_dis(
                            (rpoint[0], rpoint[1]), (center[0], center[1]),
                            (x + w / 2, y + h / 2)), (x + w / 2, y + h / 2)))
                    length_lsr = filter(
                        lambda x: (rpoint[1] - x[1][1]) *
                        (rpoint[1] - center[1]) <= 0, length_lsr)
                    length_lsr = filter(lambda x: x[0] < 20, length_lsr)
                    if len(length_lsr) > 0:
                        rx, ry = min(length_lsr,
                                     key=lambda x: distant(
                                         (x[1][0], x[1][1]),
                                         (rpoint[0], rpoint[1])))[1]
                        rind = test_insdie((rx, ry), self.boxls)
                        rx, ry = self.surfacels[rind]
                        x, y, w, h = self.boxls[rind]
                        del boxls[rind]
                        cv2.rectangle(draw_img1, (x, y), (x + w, y + h),
                                      (0, 0, 255), 2)
                        if len(boxls) > 0:
                            for x, y, w, h in boxls:
                                length_lsl.append(
                                    (get_k_dis((lpoint[0], lpoint[1]),
                                               (center[0], center[1]),
                                               (x + w / 2, y + h / 2)),
                                     (x + w / 2, y + h / 2)))
                            length_lsl = filter(
                                lambda x: (lpoint[1] - x[1][1]) *
                                (lpoint[1] - center[1]) <= 0, length_lsl)
                            length_lsl = filter(lambda x: x[0] < 20,
                                                length_lsl)
                            if len(length_lsl) > 0:
                                lx, ly = min(length_lsl,
                                             key=lambda x: distant(
                                                 (x[1][0], x[1][1]),
                                                 (lpoint[0], lpoint[1])))[1]
                                lind = test_insdie((lx, ly), boxls)
                                lx, ly = self.surfacels[lind]
                                x, y, w, h = boxls[lind]
                                cv2.rectangle(draw_img1, (x, y),
                                              (x + w, y + h), (0, 0, 255), 2)
                                '''
                                flag is 2
                                '''
                                self.draw = draw_img1
                                self.last_select = [[rx, ry], [lx, ly]]
                                netsend([[rx, ry], [lx, ly]])
                                self.mode = 2
                                self.pick_center = center
                                return [[tips[0][0], tips[0][1]],
                                        [tips[1][0], tips[1][1]], [rx, ry],
                                        [lx, ly], center, 2]

                # '''
                # one hand and multi finger, flag == 3
                # '''
                elif num_tips > 0 and label == 3:
                    # Open palm: on trigger, capture the hand mask ("brush")
                    # and report the palm center to the peer.
                    temp_center = (center[0], center[1] - 30)
                    if not self.trigger:
                        netsend(list(temp_center), need_unpack=False, flag=-18)
                    elif self.trigger:
                        self.hand_mask = []
                        self.hand_mask.append(
                            get_handmask(deepcopy(self.image), center))
                        rospy.loginfo("get brushed")
                        self.draw = draw_img1
                        self.trigger = False
                        self.mode = 3
                        rospy.loginfo("send center information :{}".format(
                            list(temp_center)))
                        netsend(list(temp_center), need_unpack=False, flag=-8)
                        self.pick_center = center
                        return [temp_center, 3]

                elif label == 4 and len(
                        self.boxls) > 0 and len(tips) > 0 and len(tips) < 4:
                    # Gesture label 4: use the estimated ("fake") tip/center to
                    # select every object near the ray, optionally filtered by
                    # the active color flag; mode 6 = multi-object selection.
                    point = fake_tip
                    center = fake_center
                    length_ls = []
                    for x, y, w, h in self.boxls:
                        length_ls.append((get_k_dis(
                            (point[0], point[1]), (center[0], center[1]),
                            (x + w / 2, y + h / 2)), (x + w / 2, y + h / 2)))
                    length_ls = filter(lambda x: x[1][1] - point[1] < 0,
                                       length_ls)
                    length_ls = filter(lambda x: x[0] < 50, length_ls)
                    sub_result = []
                    if color_flag is not None:
                        object_mask = get_objectmask(deepcopy(self.image))
                        if color_flag == "yellow":
                            color_mask = get_yellow_objectmask(
                                deepcopy(self.image))
                        elif color_flag == "blue":
                            color_mask = get_blue_objectmask(
                                deepcopy(self.image))
                        if len(length_ls) > 0:
                            for i in range(len(length_ls)):
                                x, y = length_ls[i][1]
                                ind = test_insdie((x, y), self.boxls)
                                x, y, w, h = self.boxls[ind]
                                cx, cy = self.surfacels[ind]
                                if color_mask[cy, cx] == 255:
                                    sub_result.append((cx, cy))
                                cv2.rectangle(draw_img1, (x, y),
                                              (x + w, y + h), (0, 0, 255), 2)
                            '''
                            flag is 1
                            '''
                            self.draw = draw_img1
                            self.last_select = sub_result
                            self.mode = 6
                            self.pick_center = center
                            #self.center = center
                            return [sub_result, center, 6]
                        else:
                            self.draw = draw_img1
                            return None

                    else:
                        if len(length_ls) > 0:
                            for i in range(len(length_ls)):
                                x, y = length_ls[i][1]
                                ind = test_insdie((x, y), self.boxls)
                                x, y, w, h = self.boxls[ind]
                                cx, cy = self.surfacels[ind]
                                sub_result.append((cx, cy))
                                cv2.rectangle(draw_img1, (x, y),
                                              (x + w, y + h), (0, 0, 255), 2)
                            netsend(sub_result, need_unpack=True)
                            self.draw = draw_img1
                            self.last_select = sub_result
                            self.mode = 6
                            self.pick_center = center
                            #self.center = center
                            return [sub_result, center, 6]
                        else:
                            self.draw = draw_img1
                            return None
            '''
            two hand in the view
            '''
            if num_hand_view == 2:
                # Unpack both detected hands and draw their contours/tips.
                lcenter = result[0][0]
                ltips = result[0][1]
                lnum_tips = len(ltips)
                lradius = result[0][2]
                lbox = result[0][3]
                llabel = self.test(lbox, draw_img1)
                app = result[0][5]
                cv2.drawContours(draw_img1, [app], -1, (255, 0, 0), 1)
                for k in range(len(ltips)):
                    cv2.circle(draw_img1, ltips[k], 10, (255, 0, 0), 2)
                    cv2.line(draw_img1, ltips[k], lcenter, (255, 0, 0), 2)

                rcenter = result[1][0]
                rtips = result[1][1]
                rnum_tips = len(rtips)
                rradius = result[1][2]
                rbox = result[1][3]
                rlabel = self.test(rbox, draw_img1)
                lapp = result[1][5]
                cv2.drawContours(draw_img1, [lapp], -1, (255, 0, 0), 1)
                for k in range(len(rtips)):
                    cv2.circle(draw_img1, rtips[k], 10, (255, 0, 0), 2)
                    cv2.line(draw_img1, rtips[k], rcenter, (255, 0, 0), 2)
                # '''
                # two hand is both one finger pointing, ONLY PLACE
                # '''
                if set([lnum_tips, rnum_tips]) == set(
                    [1, 1]) and len(self.boxls) > 0 and set([llabel, rlabel
                                                             ]) == set([1, 1]):
                    self.draw = draw_img1
                    '''
                    flag is 4
                    '''
                    self.mode = 4
                    self.two_hand_mode = 4
                    self.tip_deque1.appendleft((ltips[0][0], ltips[0][1]))
                    self.tip_deque2.appendleft((rtips[0][0], rtips[0][1]))
                    self.center = [list(lcenter), list(rcenter)]
                    return [[rtips[0][0], rtips[0][1]],
                            [ltips[0][0], ltips[0][1]],
                            [list(rcenter), list(lcenter)], 4]

                elif max(set([lnum_tips, rnum_tips])) >= 2 and min(
                        set([lnum_tips, rnum_tips])) == 1 and max(
                            set([llabel, rlabel])) < 4 and self.onehand_center:
                    # Mixed finger counts: take the hand farther from the
                    # remembered one-hand center as the active pointer.
                    sub_result = max(
                        [[rcenter, rtips], [lcenter, ltips]],
                        key=lambda x: distant(x[0], self.onehand_center))
                    center = sub_result[0]
                    tips = sub_result[1]
                    self.tip_deque.appendleft((tips[0][0], tips[0][1]))
                    self.draw = draw_img1

                    if max(set([lnum_tips, rnum_tips])) == 2 and set(
                        [lnum_tips, rnum_tips]) == set([1, 2]):
                        self.mode = 1
                        self.two_hand_mode = 1
                        return [[tips[0][0], tips[0][1]], 1]
                    else:
                        self.mode = 5
                        self.two_hand_mode = 5
                        return [[tips[0][0], tips[0][1]], 5]

                elif min(set([lnum_tips, rnum_tips])) == 1 and max(
                        set([llabel, rlabel])) == 4 and self.onehand_center:
                    sub_result = max(
                        [[rcenter, rtips], [lcenter, ltips]],
                        key=lambda x: distant(x[0], self.onehand_center))
                    center = sub_result[0]
                    tips = sub_result[1]
                    self.tip_deque.appendleft((tips[0][0], tips[0][1]))
                    self.draw = draw_img1
                    self.mode = 1
                    self.two_hand_mode = 1
                    return [[tips[0][0], tips[0][1]], 1]
        # NOTE(review): draw_img1 is only bound inside the `if OK:` branch;
        # if the camera read fails this line raises NameError -- confirm
        # whether self.cap.read() can ever return OK == False here.
        self.draw = draw_img1
示例#14
0
def not_found(url):
    """Respond with HTTP 404 and the cached "not found" HTML page.

    The *url* argument is accepted for handler-signature compatibility
    but is not used.
    """
    status, mime = 404, "text/html"
    server.set_code(status)
    server.set_header("Content-Type", mime)
    return utils.cache("web/not_found.html")
示例#15
0
 def teacherCode(self, data):
     """Run the reference ``binom`` on *data*, logging the cache size.

     Writes the number of entries in ``utils.cache()`` to the
     postprocess stream and returns the computed coefficient.
     """
     utils.init()
     result = binom(*data)
     cache_entries = len(utils.cache())
     self.__postprocess.write('{}\n'.format(cache_entries))
     return result
def taxibj_evaluation():
    """Evaluate the pretrained TaxiBJ model and save per-step CSV results.

    Loads (or builds and caches) the TaxiBJ dataset with the configured
    closeness/period/trend lengths, restores the best saved weights,
    runs multi-step evaluation and writes one CSV per prediction step
    under ``results/``.
    """
    # parameters
    DATAPATH = '../data'
    T = 48  # number of time intervals in one day
    CACHEDATA = True  # cache data or NOT

    len_closeness = 4  # length of closeness dependent sequence
    len_period = 0  # length of period dependent sequence
    len_trend = 0  # length of trend dependent sequence

    nb_flow = 2  # there are two types of flows: new-flow and end-flow
    days_test = 4 * 7  # 4 weeks
    len_test = T * days_test
    len_val = 2 * len_test

    map_height, map_width = 32, 32  # grid size

    cache_folder = 'Autoencoder/model3'
    path_cache = os.path.join(DATAPATH, 'CACHE', cache_folder)  # cache path
    if CACHEDATA:
        # makedirs (not mkdir) so missing intermediate folders such as
        # DATAPATH/CACHE are created too; exist_ok replaces the racy
        # isdir() pre-check.
        os.makedirs(path_cache, exist_ok=True)

    # load data
    print("loading data...")
    fname = os.path.join(
        path_cache,
        'TaxiBJ_withMeteo_C{}_P{}_T{}.h5'.format(len_closeness, len_period,
                                                 len_trend))
    if os.path.exists(fname) and CACHEDATA:
        # Fast path: reuse the preprocessed arrays cached on disk.
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = read_cache(
            fname, 'preprocessing_bj.pkl')
        print("load %s successfully" % fname)
    else:
        # Slow path: build the dataset from raw files, then cache it.
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = TaxiBJ3d.load_data(
            T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
            len_val=len_val, preprocess_name='preprocessing_bj.pkl', meta_data=True, meteorol_data=True, holiday_data=True, datapath=DATAPATH)
        if CACHEDATA:
            cache(fname, X_train_all, Y_train_all, X_train, Y_train, X_val,
                  Y_val, X_test, Y_test, external_dim, timestamp_train_all,
                  timestamp_train, timestamp_val, timestamp_test)

    print(external_dim)
    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])

    # build model
    model = build_model(len_closeness,
                        len_period,
                        len_trend,
                        nb_flow,
                        map_height,
                        map_width,
                        external_dim=external_dim,
                        encoder_blocks=3,
                        filters=[64, 64, 64, 64, 16],
                        kernel_size=3,
                        num_res=2)

    model_fname = 'model3resunit_doppia_attention.TaxiBJ9.c4.p0.t0.encoderblocks_3.kernel_size_3.lr_0.0007.batchsize_16.best.h5'
    model.load_weights(os.path.join('../best_models', 'model3', model_fname))

    # evaluate and save results
    dict_multi_score = multi_step_2D(model,
                                     X_test,
                                     Y_test,
                                     mmn,
                                     len_closeness,
                                     step=5)

    # One CSV per prediction step; assumes the 'results' directory exists
    # (save_to_csv may create it -- TODO confirm).
    for i in range(len(dict_multi_score)):
        csv_name = os.path.join('results', f'taxibj_step{i+1}.csv')
        save_to_csv(dict_multi_score[i], csv_name)
def bikenyc_evaluation():
    """Evaluate the pretrained BikeNYC model and save per-step CSV results.

    Loads (or builds and caches) the BikeNYC dataset, restores the best
    saved weights, runs multi-step evaluation and writes one CSV per
    prediction step under ``results/``.
    """
    DATAPATH = '../data'
    T = 24  # number of time intervals in one day
    CACHEDATA = True  # cache data or NOT

    len_closeness = 4  # length of closeness dependent sequence
    len_period = 0  # length of period dependent sequence
    len_trend = 0  # length of trend dependent sequence

    nb_flow = 2  # two flow types (new-flow / end-flow)
    days_test = 10
    len_test = T * days_test
    len_val = 2 * len_test

    map_height, map_width = 16, 8  # grid size

    cache_folder = 'Autoencoder/model3'
    path_cache = os.path.join(DATAPATH, 'CACHE', cache_folder)
    if CACHEDATA:
        # makedirs (not mkdir) so missing intermediate folders such as
        # DATAPATH/CACHE are created too; exist_ok replaces the racy
        # isdir() pre-check.
        os.makedirs(path_cache, exist_ok=True)

    # load data
    print("loading data...")
    fname = os.path.join(
        path_cache, 'BikeNYC_C{}_P{}_T{}.h5'.format(len_closeness, len_period,
                                                    len_trend))
    if os.path.exists(fname) and CACHEDATA:
        # Fast path: reuse the preprocessed arrays cached on disk.
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = read_cache(
            fname, 'preprocessing_bikenyc.pkl')
        print("load %s successfully" % fname)
    else:
        # Slow path: build the dataset from raw files, then cache it.
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = BikeNYC3d.load_data(
            T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend,
            len_test=len_test,
            len_val=len_val, preprocess_name='preprocessing_bikenyc.pkl', meta_data=True, datapath=DATAPATH)
        if CACHEDATA:
            cache(fname, X_train_all, Y_train_all, X_train, Y_train, X_val,
                  Y_val, X_test, Y_test, external_dim, timestamp_train_all,
                  timestamp_train, timestamp_val, timestamp_test)

    # build model
    model = build_model(len_closeness,
                        len_period,
                        len_trend,
                        nb_flow,
                        map_height,
                        map_width,
                        external_dim=external_dim,
                        encoder_blocks=2,
                        filters=[64, 64, 64, 16],
                        kernel_size=3,
                        num_res=2)

    model_fname = 'model3resunit_doppia_attention.BikeNYC6.c4.p0.t0.encoderblocks_2.kernel_size_3.lr_0.0001.batchsize_16.best2.h5'
    model.load_weights(os.path.join('../best_models', 'model3', model_fname))

    # evaluate and save results
    dict_multi_score = multi_step_2D(model,
                                     X_test,
                                     Y_test,
                                     mmn,
                                     len_closeness,
                                     step=5)

    # One CSV per prediction step; assumes the 'results' directory exists
    # (save_to_csv may create it -- TODO confirm).
    for i in range(len(dict_multi_score)):
        csv_name = os.path.join('results', f'bikenyc_step{i+1}.csv')
        save_to_csv(dict_multi_score[i], csv_name)
示例#18
0
def index(url):
    """Serve the landing page from the resource cache.

    The ``url`` argument is accepted for routing-API compatibility but is
    not consulted; this handler always returns ``web/index.html`` with an
    aggressive immutable cache policy.
    """
    set_header = server.set_header
    server.set_code(200)
    set_header("Content-Type", "text/html")
    set_header("Cache-Control", "public,max-age=31536000,immutable")
    return utils.cache("web/index.html")
示例#19
0
import csv,argparse,math
import numpy as np
from utils import cache,split_add
parser = argparse.ArgumentParser(description='Simulate a cache')
parser.add_argument('-t', '--trace_file', help='Tracefile containing instructions', required=True)
parser.add_argument('-w', '--ways', help='Number of Ways', required=True,type=int)
parser.add_argument('-size', '--cache_size', help='Size of Cache in KB', required=True,type=int)
parser.add_argument('-s', '--set', help='Number of set', required=True,type=int)
parser.add_argument('-type', '--c_type', help='associativity type', required=True)
arguments = vars(parser.parse_args())


cache_1=cache(arguments['set'],arguments['ways'],arguments['cache_size'],str(arguments['c_type']))
with open(arguments['trace_file'], 'rb') as csvfile:
	param_read = csv.reader(csvfile, delimiter=' ', quotechar='|')
	time=0
	for row in param_read:
		optype=row[0] #typr of Operation
		add=str(row[1])[2:] #memory address of cache
		idx=row[2] #number of non-memory operation before this one
		blockNum,indexNum,tagNum=split_add(add,arguments['set'],arguments['ways'],arguments['cache_size'])
		cache_1.read_write(blockNum,indexNum,tagNum)

print "miss: ",cache_1.missed,"Hits:",cache_1.hits
print "Hit Rate: %d/%d"%(cache_1.missed,cache_1.time)
print "Stall Cycles: %d"%(cache_1.missed)
示例#20
0
 def studentCode(self, data):
     """Evaluate ``q1.binom`` on the unpacked data and log cache growth.

     Initializes the shared ``utils`` state, computes the coefficient via
     ``q1.binom(*data)``, writes the current size of ``utils.cache()`` to
     the post-processing stream, and returns the coefficient.
     """
     utils.init()
     result = q1.binom(*data)
     entries = len(utils.cache())
     self.__postprocess.write('{}\n'.format(entries))
     return result
示例#21
0
# Parse the per-level configuration: each CLI value is an underscore-separated
# list with one entry per cache level (L1, L2, L3).
sizes = [int(part) for part in arguments['cache_size'].split('_')]
ways = [int(part) for part in arguments['ways'].split('_')]
sets = [int(part) for part in arguments['sets'].split('_')]
a_type = arguments['c_type'].split('_')

# Instantiate one cache model per level of the three-level hierarchy.
L1 = cache(sets[0], ways[0], sizes[0], a_type[0])
L2 = cache(sets[1], ways[1], sizes[1], a_type[1])
L3 = cache(sets[2], ways[2], sizes[2], a_type[2])


with open(arguments['trace_file'], 'rb') as csvfile:
	param_read = csv.reader(csvfile, delimiter=' ', quotechar='|')
	time=0
	for row in param_read:
		optype=row[0] #typr of Operation
		add=str(row[1])[2:] #memory address of cache
		idx=row[2] #number of non-memory operation before this one
		L1_ind=split_add(add,sets[0],ways[0],sizes[0])
		L2_ind=split_add(add,sets[1],ways[1],sizes[1])
		L3_ind=split_add(add,sets[2],ways[2],sizes[2])
		l1_hit=L1.hits