Example #1
 def test_valid_email(self, email, log_file, expected_result):
     func_result = valid_email(email)
     log(
         log_file,
         f"{email} -> expected_result is {expected_result} -> actual_result is {func_result}\n"
     )
     assert func_result == expected_result
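The log helper itself is not shown on this page; a minimal sketch consistent with the call above (hypothetical, the real fixture may differ) would append each message to the given file:

def log(log_file, message):
    # Append one message to the log file provided by the test fixture (hypothetical sketch).
    with open(log_file, "a") as f:
        f.write(message)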
Example #2
def valid_move(string):
    # good example = "a1b3"
    if len(string) != 4:
        print("invalid move - string size invalid")
        return None
    # as (y, x)
    _from = (ord(string[0]) - ord('a'), ord(string[1]) - ord('1'))
    _to = (ord(string[2]) - ord('a'), ord(string[3]) - ord('1'))
    print("from = " + str(_from))
    print("to   = " + str(_to))
    valid = range(0, 8)
    if not ((_from[0] in valid) and (_from[1] in valid)):
        print("invalid move - from characters invalid")
        return None
    if not ((_to[0] in valid) and (_to[1] in valid)):
        print("invalid move - to charaters invalid")
        return None

    ret = position_board[_from[0]][_from[1]]
    if game_board[_from[0]][_from[1]] == PAWN and _to[0] in (0, 7):  # y is 0 or 7
        diff = _from[1] - _to[1]  # difference in x
        if diff == -1:
            ret += turn_queen_right
        elif diff == 0:
            ret += turn_queen_normal
        elif diff == 1:
            ret += turn_queen_left
        else:
            log("invalid turn queen ??")
            print("invalid turn queen ??")
    else:
        ret += position_board[_to[0]][_to[1]]
    return ret
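A hedged usage sketch, assuming game_board, position_board and the turn_queen_* constants are initialised by the surrounding module; valid_move() yields the encoded move or None for a rejected string:

encoded = valid_move("a1b3")
if encoded is None:
    print("move rejected")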
Example #3
 def __init__(self, request, response):
     """ constructor """
     
     main.log('Initializing base page.')      
     self.initialize(request, response)
     self.session_store = sessions.get_store(request=request)
     self._initialize_login()
Example #4
def run(args):
	detector = dlib.get_frontal_face_detector()
	front = frontalization.Front(args)
	global predictions, running, arguments
	predictions, running = [], True
	start_time = time.clock()
	q = []

	videoDevice = ALProxy('ALVideoDevice', args.ip, args.port)
	captureDevice = videoDevice.subscribeCamera('Camera', 0, 2, 13, 10)
	image = np.zeros((480, 640, 3), np.uint8)

	classifier = Classifier(args, start_time, 3, args.resource_dir, [88, 88, 3], args.scope, '/'+args.model+'/')
	classifier.load_model()

	while running:
		image = stream2cv(image, videoDevice.getImageRemote(captureDevice))
		if image is not None:  # skip frames where capture failed
			detections = detector(image, 1)
			if detections:
				face = detections[0]  # first detected face rectangle
				rgb_image = image[face.top():face.bottom(), face.left():face.right()]
				rgb_image = cv2.resize(rgb_image, (88, 88))
				front_image = front.frontalized(rgb_image)
				prediction = classifier.classify(front_image)
				predictions.append(prediction)
				main.log(args, '\n{:.5f}s'.format(time.clock() - start_time) + ' Prediction: ' + str(prediction))
				main.log(args, '\n{:.5f}s'.format(time.clock() - start_time) + ' Averages: ' + str(predictions))
Example #5
def normalize(args, start_time, dirs=None):
    if not dirs:  # avoid a mutable default argument, which would keep old paths across calls
        dirs = []
        for i in [args.training_dir, args.testing_dir]:
            for j in ['rgb/', 'lbp/', 'frgb/', 'flbp/']:
                dirs.append(i + '/' + j)
    for dir in dirs:
        minimum, num_files = 1000000, []
        for folder in os.listdir(dir):
            path = dir + '/' + folder
            files = len([
                f for f in os.listdir(path)
                if os.path.isfile(os.path.join(path, f))
            ])
            num_files.append([files, path])
            main.log(
                args, '{:.5f}'.format(time.clock() - start_time) + 's ' +
                path + ' has ' + str(files) + ' files')
            if files < minimum:
                minimum = files
        main.log(
            args, '{:.5f}'.format(time.clock() - start_time) + 's minimum = ' +
            str(minimum))

        for i in range(len(num_files)):
            while num_files[i][0] > minimum:
                os.remove(num_files[i][1] + '/' +
                          random.choice(os.listdir(num_files[i][1])))
                num_files[i][0] -= 1
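A possible invocation, assuming args carries training_dir and testing_dir as in the neighbouring examples; the function deletes random files until every class folder matches the smallest one:

import time

start_time = time.clock()
normalize(args, start_time)  # balances the rgb/, lbp/, frgb/ and flbp/ class folders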
Example #6
def run(args):
    detector = dlib.get_frontal_face_detector()
    front = frontalization.Front(args)
    global predictions, running, arguments
    predictions, running = [], True
    start_time = time.clock()
    q = []

    video = cv2.VideoCapture()

    classifier = Classifier(args, start_time, 3, args.resource_dir,
                            [88, 88, 3], args.scope, '/' + args.model + '/')
    classifier.load_model()

    if video.grab():
        while running:
            _, image = video.read()
            detections = detector(image, 1)
            if detections:
                face = detections[0]  # first detected face rectangle
                rgb_image = image[face.top():face.bottom(),
                                  face.left():face.right()]
                rgb_image = cv2.resize(rgb_image, (88, 88))
                front_image = front.frontalized(rgb_image)
                prediction = classifier.classify(front_image)
                predictions.append(prediction)
                main.log(
                    args, '\n{:.5f}s'.format(time.clock() - start_time) +
                    ' Prediction: ' + str(prediction))
                main.log(
                    args, '\n{:.5f}s'.format(time.clock() - start_time) +
                    ' Averages: ' + str(predictions))
Example #7
 def handle_error():
     text = "".join(cached)[:100000]
     if text:
         text = "An unexpected error has occurred:\n\n%s" % text
         main.log(text)
         report_error(text)
     del cached[:]
Example #8
def run(args):
	global averages, running
	averages, running = [], True
	start_time = time.clock()
	video = cv2.VideoCapture()
	q = []

	detector = dlib.get_frontal_face_detector()
	front = frontalization.Front(args)

	classifiers = []
	classifiers.append(Classifier(args, start_time, args.classes, args.resource_dir, (150,150,3), colour=True, local=False))
	classifiers.append(Classifier(args, start_time, args.classes, args.resource_dir, (150, 150), colour=False, local=False))
	classifiers.append(Classifier(args, start_time, args.classes, args.resource_dir, (150,150,3), colour=True, local=False))
	classifiers.append(Classifier(args, start_time, args.classes, args.resource_dir, (150,150,3), colour=True, local=True))
	classifiers.append(Classifier(args, start_time, args.classes, args.resource_dir, (150, 150), colour=False, local=False))
	classifiers.append(Classifier(args, start_time, args.classes, args.resource_dir, (150, 150), colour=False, local=True))

	if video.grab():
		while running:
			_, frame = video.read()
			image = cv2.resize(frame, (150, 150))  # cv2.resize expects a (width, height) pair
			detections = detector(image, 1)
			if detections:
				face = detections[0]  # first detected face rectangle
				rgb_image = frame[face.top():face.bottom(), face.left():face.right()]
				lbp_image = feature.local_binary_pattern(cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY).astype(np.float64), 8, 1, 'uniform')
				frgb_image = front.frontalized(image)
				flbp_image = feature.local_binary_pattern(cv2.cvtColor(frgb_image, cv2.COLOR_BGR2GRAY).astype(np.float64), 8, 1, 'uniform')

				classifications = []
				classifications.append(classifiers[0].classify(rgb_image, args.log + 'rgb'))
				classifications.append(classifiers[1].classify(lbp_image, args.log + 'lbp'))
				classifications.append(classifiers[2].classify(frgb_image, args.log + 'frgb'))
				classifications.append(classifiers[3].classify(frgb_image, args.log + 'lfrgb'))
				classifications.append(classifiers[4].classify(flbp_image, args.log + 'flbp'))
				classifications.append(classifiers[5].classify(flbp_image, args.log + 'lflbp'))

				result = np.zeros(args.classes)
				for classification in classifications:
					for i in range(len(classification)):
						result[i] += classification[i] / 6

				temp = []
				if len(q) < 10:
					q.insert(0, result)
				else:
					q.pop()
					q.insert(0, result)
				for i in range(len(q[0])):
					average = 0
					for j in range(len(q)):
						average += q[j][i] / 10
					temp.append(average)
				averages = temp
				main.log(args, averages)
			else:
				main.log(args, 'No Face Found', True)
			if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
				raw_input()
				break
Example #9
def board_move(string):
    global game_board
    # y, x
    _from = str_to_yx(string[0])
    _to = str_to_yx(string[1])
    move_side = -playing_as if game_board[_from[0]][
        _from[1]] < 0 else playing_as
    # if 1 if its the same as mine, else -1
    if _to is None:
        if string[1] == turn_queen_normal:
            _to = (_from[0] - move_side, _from[1])
            game_board[_from[0]][_from[1]] = QUEEN * move_side * playing_as
        elif string[1] == turn_queen_left:
            _to = (_from[0] - move_side, _from[1] - move_side)
            game_board[_from[0]][_from[1]] = QUEEN * move_side * playing_as
        elif string[1] == turn_queen_right:
            _to = (_from[0] - move_side, _from[1] + move_side)
            game_board[_from[0]][_from[1]] = QUEEN * move_side * playing_as
        else:
            log("someone choose to not take queen" + string)
            print("someone choose to not take queen" + string)
    if abs(game_board[_from[0]][_from[1]]) == KING:
        if abs(_to[1] - _from[1]) == 2:
            sgn = -1 if (_to[1] - _from[1]) < 0 else 1
            if _to[1] <= 2:
                _from_y = 0
            elif 5 <= _to[1]:
                _from_y = 7
            board_quick_move((_to[0], _from_y), (_to[0], _to[1] - sgn))

    board_quick_move(_from, _to)
Example #10
def export_chats(chats, path, format, db, messages=None, skip=True, progress=None):
    """
    Exports the specified chats from the database under path.

    @param   chats     list of chat dicts, as returned from SkypeDatabase
    @param   path      full path of directory where to save
    @param   format    export format (html|txt|xlsx|csv|filename.ext).
                       If format is filename.ext, a single file is created:
                       for single chat exports and multi chat XLSX exports
                       (for multi-file exports, filenames are named by chats).
    @param   db        SkypeDatabase instance
    @param   messages  list of messages to export if a single chat
    @param   skip      whether to skip chats with no messages
    @param   progress  function called before exporting each chat, with the
                       number of messages exported so far
    @return            (list of exported filenames, number of chats exported)
    """
    files, count = [], 0
    def make_filename(chat):
        if len(format) > 4: # Filename already given in format
            filename = os.path.join(path, format)
        else:
            args = collections.defaultdict(str); args.update(chat)
            filename = "%s.%s" % (conf.ExportChatTemplate % args, format)
            filename = os.path.join(path, util.safe_filename(filename))
            filename = util.unique_path(filename)
        return filename
    main.logstatus("Exporting %s from %s %sto %s.",
                   util.plural("chat", chats), db.filename,
                   "" if len(format) > 4 else "as %s " % format.upper(),
                   format if len(format) > 4 else path)

    if format.lower().endswith(".xlsx"):
        filename = make_filename(chats[0])
        count = export_chats_xlsx(chats, filename, db, messages, skip, progress)
        files.append(filename)
    else:
        if not os.path.exists(path):
            os.makedirs(path)
        export_func = (export_chats_xlsx if format.lower().endswith("xlsx")
                       else export_chat_csv if format.lower().endswith("csv")
                       else export_chat_template)
        message_count = 0
        for chat in chats:
            if skip and not messages and not chat["message_count"]:
                main.log("Skipping exporting %s: no messages.",
                         chat["title_long_lc"])
                if progress: progress(message_count)
                continue # continue for chat in chats
            main.status("Exporting %s.", chat["title_long_lc"])
            if progress: progress(message_count)
            filename = make_filename(chat)
            msgs = messages or db.get_messages(chat)
            chatarg = [chat] if "xlsx" == format.lower() else chat
            export_func(chatarg, filename, db, msgs)
            message_count += chat["message_count"]
            files.append(filename)
        count = len(files)
    return (files, count)
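A usage sketch for the two export modes the docstring describes (chats and db are assumed to come from an open SkypeDatabase, as above): a bare format yields one file per chat, while a filename.ext format funnels everything into a single file.

files, count = export_chats(chats, "/tmp/export", "html", db)      # one HTML file per chat
files, count = export_chats(chats, "/tmp/export", "all.xlsx", db)  # one workbook, chats as sheets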
Example #11
 def session(self):
     # Returns a session using the default cookie key.
     session = None
     if self.session_store:
         session = self.session_store.get_session()
     else:
         main.log('Session store not found.')       
     return session
Example #12
 def get(self):
     email = self.request.get('email')
     name = self.request.get('name')
     nick = self.request.get('nick')
     main.log('User ' + nick + ' wants to login: ' + email)
     self.session['user_email'] = email
     self.redirect('/')
Example #13
    def get(self):
        # self.response.headers['Content-Type'] = 'text/plain'
        uid = self.request.get("uid",None)
        dim_two_level_one = self.request.get("id",None)
        
        ##### FOR LOAD TESTING ONLY #######
        # generates the uid and dim2l1 id at random
        if not uid:
            uid = uuid.uuid4()
        if not dim_two_level_one:
            dim_two_level_one = DimensionTwoLevelOne.key_from_key_name("k:%02d:%02d"%(random.randint(0,9),random.randint(0,9)))
        
        # v1 = self.response.get("k1")
        # v2 = self.response.get("k1")

        logging.info("instance_id: %s"%instance_id)
        t = time.time()
        
        request_id = "%s:%s:%s"%(t,uid,dim_two_level_one)
        
        # 90 percent of the time we attach information to the request
        if random.randint(0,9) != 0:
            # level3:level2:level1
            dim_one_level_one_key_name = "k:%s:%s:%s"%(random.randint(0,9),random.randint(0,9),random.randint(0,9)) 
            dim_one_level_one = str(DimensionOneLevelOne.key_from_key_name(dim_one_level_one_key_name))
        else:
            dim_one_level_one_key_name = None
            dim_one_level_one = None
                
        
        logging_params = {"uid":uid,"dim2l1":dim_two_level_one,"t":t,"req":request_id}
        if dim_one_level_one:
            imp_id = request_id + ":" + str(dim_one_level_one)
            logging_params.update(dim1l1=dim_one_level_one_key_name,imp=imp_id)
            
        if self.request.get("local","0") == "0":
            deadline = self.request.get("deadline")
            deadline = float(deadline) if deadline else None    
            rpc = urlfetch.create_rpc(deadline=deadline) 
            data = urllib.urlencode(logging_params)
            url = "http://eventrackerscaletest.appspot.com/log/one"
            url = "http://localhost:8081/log/one"
            logging.info("URL: %s with deadline: %s"%(url+"?"+data,deadline))
            urlfetch.make_fetch_call(rpc,url+"?"+data)
            try:
                result = rpc.get_result()
                if result.status_code == 200:
                    html = result.content
                else:
                    html = "ERROR"    
            except urlfetch.DownloadError:
                html = None
            html = "WHO CARES"
        else:
            log(**logging_params)        
            html = "LOGGED"
            
        self.response.out.write('<html><head/><body><b>Hello, webapp World! %s %s %s %s <br/> %s <br/> %s </b></body></html>'%(request_id,dim_two_level_one,uid,t,logging_params, html))
Example #14
def test_log(mock_datetime, mock_sensor, mock_controller):
    mock_sensor.value = 10
    mock_controller.value = 0
    t = datetime.datetime.now()
    mock_datetime.datetime.now.return_value = t
    with mock.patch("main.open", mock.mock_open()) as mock_open:
        main.log(mock_sensor, mock_controller)

        mock_open().write.assert_called_with(f"{t},10,0\n")
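A main.log implementation that would satisfy this test, as a hypothetical sketch inferred from the mocked calls (LOG_PATH is an assumed name; the real module may differ):

import datetime

LOG_PATH = "log.csv"  # assumed log location

def log(sensor, controller):
    # Append one CSV row: timestamp, sensor reading, controller output.
    with open(LOG_PATH, "a") as f:
        f.write(f"{datetime.datetime.now()},{sensor.value},{controller.value}\n")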
Example #15
	def train(self, training_data, testing_data):
		x, y = [m[0] for m in training_data], [n[1] for n in training_data]
		monitor = MonitorCallback(self.args, self.start_time)
		self.model.fit(x, y, n_epoch=self.args.epochs, validation_set=0.1, shuffle=True, show_metric=True, batch_size=self.args.batch_size, snapshot_step=2000, snapshot_epoch=True,
					   run_id=self.args.model_name, callbacks=monitor)
		main.log(self.args, '{:.5f}s '.format(time.clock() - self.start_time) + str(self.count_trainable_vars()) + ' trainable parameters')
		self.model.save(self.save_path + 'model.model')
		predictions, labels = self.evaluate(testing_data)
		self.confusion_matrix(self.args, predictions, labels)
Example #16
def run_network(args):
	global running
	try:
		ws = websocket.WebSocketApp(args.address, on_message=on_data, on_error=on_error, on_close=on_close)
		ws.on_open = on_open
		ws.run_forever()
		running = False
	except Exception as err:
		main.log(args, err.message, True)
Example #17
 def save_best(self, eid):
     data_path = os.path.join(parent_path, "training_data")
     folder = os.path.join(data_path, self.name + "_data")
     filename = "{}/epoch".format(folder)
     self.saver.save(self.session, filename, global_step=eid)
     os.unlink("%s-%d.meta" % (filename, eid))
     file_name = os.path.join(parent_path,
                              self.name + "-best-evaluation.txt")
     log("best meta-optimizer up to now: eid = {}".format(eid), file_name)
     print(self.name + " saved.")
Example #18
def send_report(content, type, screenshot=""):
    """Posts feedback or error data to the report web service."""
    global url_opener
    try:
        data = {"content": content.encode("utf-8"), "type": type,
                "screenshot": base64.b64encode(screenshot),
                "version": "%s-%s" % (conf.Version, get_install_type())}
        url_opener.open(conf.ReportURL, urllib.urlencode(data))
        main.log("Sent %s report to %s (%s).", type, conf.ReportURL, content)
    except Exception:
        main.log("Failed to send %s to %s.\n\n%s", type, conf.ReportURL,
                 traceback.format_exc())
Example #19
    def updateSliders(self):
        power = self.main.serialGet("power?\n", 150)
        degrees = self.main.serialGet("degrees?\n", 150)

        if not (power and degrees):
            main.log("Error getting values")
            return
        power = int(power)
        degrees = int(degrees)
        self.horizontalSlider_power.setValue(power)
        self.horizontalSlider_degrees.setValue(degrees)
        self.label_power.setNum(power)
        self.label_range.setNum(degrees)
Example #20
def download_and_install(url):
    """
    Downloads and launches the specified file.
    """
    global update_window, url_opener
    try:
        is_cancelled = False
        parent = wx.GetApp().TopWindow
        filename, tmp_dir = os.path.split(url)[-1], tempfile.mkdtemp()
        dlg_progress = \
            controls.ProgressWindow(parent, "Downloading %s" % filename)
        dlg_progress.SetGaugeForegroundColour(conf.GaugeColour)
        dlg_progress.Position = (parent.Position.x + parent.Size.width -
                                 dlg_progress.Size.width, parent.Position.y +
                                 parent.Size.height - dlg_progress.Size.height)
        update_window = dlg_progress
        urlfile = url_opener.open(url)
        filepath = os.path.join(tmp_dir, filename)
        main.log("Downloading %s to %s.", url, filepath)
        filesize = int(urlfile.headers.get("content-length", sys.maxint))
        with open(filepath, "wb") as f:
            BLOCKSIZE = 65536
            bytes_downloaded = 0
            buf = urlfile.read(BLOCKSIZE)
            while len(buf):
                f.write(buf)
                bytes_downloaded += len(buf)
                percent = 100 * bytes_downloaded / filesize
                msg = "%d%% of %s" % (percent, util.format_bytes(filesize))
                is_cancelled = not dlg_progress.Update(percent, msg)
                if is_cancelled:
                    break  # break while len(buf)
                wx.YieldIfNeeded()
                buf = urlfile.read(BLOCKSIZE)
        dlg_progress.Destroy()
        update_window = None
        if is_cancelled:
            main.log("Upgrade cancelled, erasing temporary file %s.", filepath)
            util.try_until(lambda: os.unlink(filepath))
            util.try_until(lambda: os.rmdir(tmp_dir))
        else:
            main.log("Successfully downloaded %s of %s.",
                     util.format_bytes(filesize), filename)
            dlg_proceed = controls.NonModalOKDialog(
                parent, "Update information",
                "Ready to open %s. You should close %s before upgrading." %
                (filename, conf.Title))

            def proceed_handler(event):
                global update_window
                update_window = None
                dlg_proceed.Destroy()
                util.start_file(filepath)

            update_window = dlg_proceed
            dlg_proceed.Bind(wx.EVT_CLOSE, proceed_handler)
    except Exception:
        main.log("Failed to download new version from %s.\n\n%s", url,
                 traceback.format_exc())
Example #21
def export_chats_xlsx(chats,
                      filename,
                      db,
                      messages=None,
                      skip=True,
                      progress=None):
    """
    Exports the chats to a single XLSX file with chats on separate worksheets.

    @param   chats     list of chat data dicts, as returned from SkypeDatabase
    @param   filename  full path and filename of resulting file
    @param   db        SkypeDatabase instance
    @param   messages  list of messages to export if a single chat
    @param   skip      whether to skip chats with no messages
    @param   progress  function called before exporting each chat, with the
                       number of messages exported so far
    @return            number of chats exported
    """
    count, style = 0, {0: "timestamp", 2: "wrap", 3: "hidden"}

    writer = xlsx_writer(filename, autowrap=[2])
    message_count = 0
    for chat in chats:
        if skip and not messages and not chat["message_count"]:
            main.log("Skipping exporting %s: no messages.",
                     chat["title_long_lc"])
            continue  # continue for chat in chats
        main.status("Exporting %s.", chat["title_long_lc"])
        if progress: progress(message_count)
        parser = skypedata.MessageParser(db, chat=chat, stats=False)
        writer.add_sheet(chat["title"])
        writer.set_header(True)
        writer.writerow(["Time", "Author", "Message", "Skype Name"],
                        {3: "boldhidden"})
        writer.set_header(False)
        msgs = messages or db.get_messages(chat)
        for m in msgs:
            text = parser.parse(m, output={"format": "text"})
            try:
                text = text.decode("utf-8")
            except UnicodeError:
                pass
            values = [m["datetime"], m["from_dispname"], text, m["author"]]
            style[1] = "local" if db.id == m["author"] else "remote"
            writer.writerow(values, style)
        message_count += chat["message_count"]
        count += 1
    writer.close()
    return count
Example #22
 def _initialize_login(self):
     main.log('Initializing login from session.')   
         
     try:
         login_email = self.session['user_email']
         self.logged = webuser.WebUser.fromEmail(login_email)
         main.log('Login object built from email: ' + str(self.logged))
     except KeyError:
         main.log('Login object NOT FOUND')
         self.logged = webuser.WebUser()
         
     if self.logged.is_logged:
         main.log('Logged in with ' + self.logged.nick)
     else:
         main.log('User not logged in')
Example #23
def save_image(args, start_time, save, data, type, images=True):
	detector, count = dlib.get_frontal_face_detector(), 0
	front = frontalization.Front(args)
	for image_file in data:
		if images:
			image = cv2.imread(image_file[0], cv2.IMREAD_COLOR)
		else:
			image = image_file[0]
		detections = detector(image, 1)
		for _, detection in enumerate(detections):
			#try:
			face = image[detection.top():detection.bottom(), detection.left():detection.right()]
			if face.shape[0] < 10 or face.shape[1] < 10:
				continue
			face = cv2.resize(face, (88, 88))
			# Augment each face with hue shifts and salt-and-pepper/Gaussian noise before saving
			# (named variants so the boolean images flag above is not shadowed).
			variants = [face,
			            hue(face, 5), hue(face, -5),
			            noisy('sp', face), noisy('gauss', face),
			            hue(noisy('sp', face), 5), hue(noisy('sp', face), -5),
			            hue(noisy('gauss', face), 5), hue(noisy('gauss', face), -5)]
			for _image in variants:
				cv2.imwrite(save + '/rgb/' + str(image_file[1]) + '/' + str(count) + '.jpg', _image)
				#lbp_image = feature.local_binary_pattern(cv2.cvtColor(_image, cv2.COLOR_BGR2GRAY).astype(np.float64), 8, 1, 'uniform').astype(np.uint8)
				#lbp_image *= (255 / lbp_image.max())
				#cv2.imwrite(save + '/lbp/' + str(image_file[1]) + '/' + str(count + 1) + '.jpg', lbp_image)
				frgb_image = front.frontalized(image)
				if frgb_image is None:
					continue
				if frgb_image.shape[0] < 10 or frgb_image.shape[1] < 10:
					continue
				cv2.imwrite(save + '/frgb/' + str(image_file[1]) + '/' + str(count + 2) + '.jpg', cv2.resize(frgb_image, (88,88)))
				#flbp_image = feature.local_binary_pattern(cv2.cvtColor(frgb_image, cv2.COLOR_BGR2GRAY).astype(np.float64), 8, 1, 'uniform').astype(np.uint8)
				#flbp_image *= (255 / flbp_image.max())
				#cv2.imwrite(save + '/flbp/' + str(image_file[1]) + '/' + str(count + 3) + '.jpg', flbp_image)
				count += 4
				if count % 100 == 0:
					main.log(args, '{:.5f}'.format(time.clock() - start_time) + 's ' + str(count) + ' ' + type +' images extracted')
			#except:
				#pass
	main.log(args, str(time.clock() - start_time) + ' ' + type + ' Images Extracted')
Example #24
def download_and_install(url):
    """
    Downloads and launches the specified file.
    """
    global update_window, url_opener
    try:
        is_cancelled = False
        parent = wx.GetApp().TopWindow
        filename, tmp_dir = os.path.split(url)[-1], tempfile.mkdtemp()
        dlg_progress = \
            controls.ProgressWindow(parent, "Downloading %s" % filename)
        dlg_progress.SetGaugeForegroundColour(conf.GaugeColour)
        dlg_progress.Position = (
            parent.Position.x + parent.Size.width  - dlg_progress.Size.width,
            parent.Position.y + parent.Size.height - dlg_progress.Size.height)
        update_window = dlg_progress
        urlfile = url_opener.open(url)
        filepath = os.path.join(tmp_dir, filename)
        main.log("Downloading %s to %s.", url, filepath)
        filesize = int(urlfile.headers.get("content-length", sys.maxint))
        with open(filepath, "wb") as f:
            BLOCKSIZE = 65536
            bytes_downloaded = 0
            buf = urlfile.read(BLOCKSIZE)
            while len(buf):
                f.write(buf)
                bytes_downloaded += len(buf)
                percent = 100 * bytes_downloaded / filesize
                msg = "%d%% of %s" % (percent, util.format_bytes(filesize))
                is_cancelled = not dlg_progress.Update(percent, msg)
                if is_cancelled:
                    break # break while len(buf)
                wx.YieldIfNeeded()
                buf = urlfile.read(BLOCKSIZE)
        dlg_progress.Destroy()
        update_window = None
        if is_cancelled:
            main.log("Upgrade cancelled, erasing temporary file %s.", filepath)
            util.try_until(lambda: os.unlink(filepath))
            util.try_until(lambda: os.rmdir(tmp_dir))
        else:
            main.log("Successfully downloaded %s of %s.",
                     util.format_bytes(filesize), filename)
            dlg_proceed = controls.NonModalOKDialog(parent,
                "Update information",
                "Ready to open %s. You should close %s before upgrading."
                % (filename, conf.Title))
            def proceed_handler(event):
                global update_window
                update_window = None
                dlg_proceed.Destroy()
                util.start_file(filepath)
            update_window = dlg_proceed
            dlg_proceed.Bind(wx.EVT_CLOSE, proceed_handler)
    except Exception:
        main.log("Failed to download new version from %s.\n\n%s", url,
                 traceback.format_exc())
Example #25
def grid_search():
    # get gold rankings and store in structure
    with open(GOLD_RANKINGS_FILENAME) as goldFile:
        gold_rankings = getSystemRankings(goldFile)

    # make sure the necessary files are loaded
    main.load_the_files(opt=True)

    # get the normalization coefficients
    if NORMALIZE:
        swiki_max = max(main.unigram_frequencies.values())
        #ilen_max = max(list(map(len, unigram_frequencies.keys())))
        ilen_max = 28   # too much garbage, hard to normalize
                        # set to 28, for "antidisestablishmentarianism"
        #wnet_max = max([get_wordnet_score(w) for w in unigram_frequencies])
        wnet_max = 75   # for "break", given by the above expression
    else:
        swiki_max = ilen_max = wnet_max = 1.0

    ilen_range  = [1.0, 10.0, 100.0, 1000.0, 100000.0, 1000000.0, 10000000.0]
    wnet_range  = [1.0, 10.0, 100.0, 1000.0, 100000.0, 1000000.0, 10000000.0]
    swiki_range = [1.0, 10.0, 100.0, 1000.0, 100000.0, 1000000.0, 10000000.0]
    cv_range    = [1.0, 10.0, 100.0, 1000.0, 100000.0, 1000000.0, 10000000.0]
    results = Counter()

    i = 0
    for ilen_weight in ilen_range:
        for wnet_weight in wnet_range:
            for swiki_weight in swiki_range:

                i += 1
                log("iteration %d/%d"%(i,7*7*7))
                weights = [ilen_weight/ilen_max,
                           wnet_weight/wnet_max,
                           swiki_weight/swiki_max,
                           0.0]
                stream = io.StringIO()
                main.main( weights=weights, output_stream=stream, opt=True )
                stream.seek(0)
                system_rankings = getSystemRankings(stream)
                score = getScore(system_rankings, gold_rankings)
                results[tuple(weights)] = score
                print("Normalized system score for "+str(weights)+":", score)

    log("done")
    for res in results.most_common(10):
        print(str(res[0]) + "\t\t" + str(res[1]))
Example #26
	def run(self):
		if self.id == 0:
			main.log(self.args, 'Starting ' + str(self.id))
			run(self.args)
			main.log(self.args, 'Exiting ' + str(self.id))
		elif self.id == 1:
			main.log(self.args, 'Starting ' + str(self.id))
			run_network(self.args)
			main.log(self.args, 'Exiting ' + str(self.id))
Example #27
def build_structure(args, start_time, classes):
	for folder in [args.training_dir, args.testing_dir]:
		if not os.path.exists(folder):
			os.makedirs(folder)
			os.makedirs(folder + '/rgb')
			for i in range(classes):
				os.makedirs(folder + '/rgb/' + str(i))
			# os.makedirs(folder + '/lbp')
			# for i in range(classes):
			# 	os.makedirs(folder + '/lbp/' + str(i))
			os.makedirs(folder + '/frgb')
			for i in range(classes):
				os.makedirs(folder + '/frgb/' + str(i))
			# os.makedirs(folder + '/flbp')
			# for i in range(classes):
			# 	os.makedirs(folder + '/flbp/' + str(i))
	main.log(args, '{:.5f}'.format(time.clock() - start_time) + 's Folder Structure Built')
Example #28
def export_chats_xlsx(chats, filename, db, messages=None, skip=True, progress=None):
    """
    Exports the chats to a single XLSX file with chats on separate worksheets.

    @param   chats     list of chat data dicts, as returned from SkypeDatabase
    @param   filename  full path and filename of resulting file
    @param   db        SkypeDatabase instance
    @param   messages  list of messages to export if a single chat
    @param   skip      whether to skip chats with no messages
    @param   progress  function called before exporting each chat, with the
                       number of messages exported so far
    @return            number of chats exported
    """
    count, style = 0, {0: "timestamp", 2: "wrap", 3: "hidden"}

    writer = xlsx_writer(filename, autowrap=[2])
    message_count = 0
    for chat in chats:
        if skip and not messages and not chat["message_count"]:
            main.log("Skipping exporting %s: no messages.",
                     chat["title_long_lc"])
            continue # continue for chat in chats
        main.status("Exporting %s.", chat["title_long_lc"])
        if progress: progress(message_count)
        parser = skypedata.MessageParser(db, chat=chat, stats=False)
        writer.add_sheet(chat["title"])
        writer.set_header(True)
        writer.writerow(["Time", "Author", "Message", "Skype Name"],
                        {3: "boldhidden"})
        writer.set_header(False)
        msgs = messages or db.get_messages(chat)
        for m in msgs:
            text = parser.parse(m, output={"format": "text"})
            try:
                text = text.decode("utf-8")
            except UnicodeError: pass
            values = [m["datetime"], m["from_dispname"], text, m["author"]]
            style[1] = "local" if db.id == m["author"] else "remote"
            writer.writerow(values, style)
        message_count += chat["message_count"]
        count += 1
    writer.close()
    return count
Example #29
def send_report(content, type, screenshot=""):
    """
    Posts feedback or error data to the report web service.
    
    @return    True on success, False on failure
    """
    global url_opener
    try:
        data = {"content": content.encode("utf-8"), "type": type,
                "screenshot": base64.b64encode(screenshot),
                "version": "%s-%s" % (conf.Version, get_install_type())}
        url_opener.open(conf.ReportURL, urllib.urlencode(data))
        main.log("Sent %s report to %s (%s).", type, conf.ReportURL, content)
        result = True
    except Exception:
        main.log("Failed to send %s to %s.\n\n%s", type, conf.ReportURL,
                 traceback.format_exc())
        result = False
    return result
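Because this variant reports success, callers can branch on the result; a hedged example with illustrative arguments:

if not send_report("Unexpected crash in exporter", "error"):
    print("report delivery failed, see application log")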
Example #30
    def run(self):
        self._is_running = True
        while self._is_running:
            search = self._queue.get()
            self._stop_work = False
            self._drop_results = False
            found = {}  # { Skype handle: 1, }
            result = {"search": search, "results": []}
            if search and search["handler"]:
                for value in search["values"]:
                    main.log("Searching Skype contact directory for '%s'.",
                             value)

                    try:
                        for user in search["handler"].search_users(value):
                            if user.Handle not in found:
                                result["results"].append(user)
                                found[user.Handle] = 1

                            if not (self._drop_results
                                    or len(result["results"]) %
                                    conf.SearchContactsChunk):
                                self.postback(result)
                                result = {"search": search, "results": []}

                            if self._stop_work:
                                break  # break for user in search["handler"].searc..
                    except Exception:
                        main.log("Error searching Skype contacts:\n\n%s",
                                 traceback.format_exc())

                    if result["results"] and not self._drop_results:
                        self.postback(result)
                        result = {"search": search, "results": []}

                    if self._stop_work:
                        break  # break for value in search["values"]

                if not self._drop_results:
                    result["done"] = True
                    self.postback(result)
Example #31
def adminParam(param, state):
    if param == "rem":
        main.Clear_Old_Record_in_DB()
        files_utils.RemoveAllDocs()
    elif param == "ret":
        main.Clear_Old_Record_in_DB()
        files_utils.RemoveAllDocs()
        main.Insert_New_Docs()
    else:
        if state == "ignore":
            main.log("ignoring: " + param)
            main.Ignore_Document(param)
            print("done")
        elif state == "unignore":
            main.log("Unignoring: " + param)
            main.UnIgnore_Document(param)
            print("done")
        else:
            print("wait")

    return redirect("/admin.html")
Example #32
    def run(self):
        self._is_running = True
        while self._is_running:
            search = self._queue.get()
            self._stop_work = False
            self._drop_results = False
            found = {} # { Skype handle: 1, }
            result = {"search": search, "results": []}
            if search and search["handler"]:
                for i, value in enumerate(search["values"]):
                    main.log("Searching Skype contact directory for '%s'.",
                             value)

                    try:
                        for user in search["handler"].search_users(value):
                            if user.Handle not in found:
                                result["results"].append(user)
                                found[user.Handle] = 1

                            if not (self._drop_results
                                    or len(result["results"])
                                    % conf.SearchContactsChunk):
                                self.postback(result)
                                result = {"search": search, "results": []}

                            if self._stop_work:
                                break # break for user in search["handler"].searc..
                    except Exception as e:
                        main.log("Error searching Skype contacts:\n\n%s",
                                 traceback.format_exc())

                    if result["results"] and not self._drop_results:
                        self.postback(result)
                        result = {"search": search, "results": []}

                    if self._stop_work:
                        break # break for i, value in enumerate(search_values)

                if not self._drop_results:
                    result["done"] = True
                    self.postback(result)
Example #33
	def confusion_matrix(args, predictions, labels):
		y_actu = np.zeros(len(labels))
		for i in range(len(labels)):
			for j in range(len(labels[i])):
				if labels[i][j] == 1.00:
					y_actu[i] = j
		y_pred = np.zeros(len(predictions))
		for i in range(len(predictions)):
			y_pred[i] = np.argmax(predictions[i])

		p_labels = pd.Series(y_pred)
		t_labels = pd.Series(y_actu)
		df_confusion = pd.crosstab(t_labels, p_labels, rownames=['Actual'], colnames=['Predicted'], margins=True)
		main.log(args, '\nAccuracy = ' + str(accuracy_score(y_true=y_actu, y_pred=y_pred, normalize=True)) + '\n')
		main.log(args, df_confusion)
		main.log(args, ' ')
		main.log(args, classification_report(y_actu, y_pred))
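Illustrative inputs for the method above, assuming one-hot labels and per-class probability predictions as the loops imply:

import numpy as np

labels = np.array([[1.0, 0.0], [0.0, 1.0]])       # one-hot ground truth: classes 0 and 1
predictions = np.array([[0.9, 0.1], [0.2, 0.8]])  # argmax picks classes 0 and 1
# confusion_matrix(args, predictions, labels) would then log an accuracy of 1.0
# and a 2x2 crosstab with both counts on the diagonal.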
Example #34
    def run(self):
        self._is_running = True
        while self._is_running:
            search = self._queue.get()
            self._stop_work = False
            self._drop_results = False
            found = {}  # { Skype handle: 1, }
            result = {"search": search, "results": []}
            if search and search["handler"]:
                for i, value in enumerate(search["values"]):
                    main.log("Searching Skype contact directory for '%s'.",
                             value)

                    for user in search["handler"].search_users(value):
                        if user.Handle not in found:
                            result["results"].append(user)
                            found[user.Handle] = 1

                        if not (self._drop_results or len(result["results"]) %
                                conf.ContactResultsChunk):
                            self.postback(result)
                            result = {"search": search, "results": []}

                        if self._stop_work:
                            break  # break for user in search["handler"].searc..

                    if result["results"] and not self._drop_results:
                        self.postback(result)
                        result = {"search": search, "results": []}

                    if self._stop_work:
                        break  # break for i, value in enumerate(search_values)

                if not self._drop_results:
                    result["done"] = True
                    self.postback(result)
Example #35
def train(args):
    start_time = time.clock()
    #rgb_data = [retrieve_data(args.training_dir + '/rgb'), retrieve_data(args.testing_dir + '/rgb')]
    front_data = [
        retrieve_data(args.training_dir + '/frgb'),
        retrieve_data(args.testing_dir + '/frgb')
    ]

    #main.log(args, '\n---------- DeXpression, No Frontalization ----------')
    #Dexrgb = Classifier(args, start_time, len(rgb_data[0][0][1]), args.resource_dir, rgb_data[0][0][0].shape, '/Dexrgb/', 'DeXpression')
    #Dexrgb.train(rgb_data[0], rgb_data[1])

    main.log(args, '\n---------- DeXpression, Frontalization ----------')
    Dexfront = Classifier(args, start_time, len(front_data[0][0][1]),
                          args.resource_dir, front_data[0][0][0].shape,
                          '/Dexfront/', 'DeXpression')
    Dexfront.train(front_data[0], front_data[1])

    #main.log(args, '\n---------- DeepFace, No Frontalization ----------')
    #Deeprgb = Classifier(args, start_time, len(rgb_data[0][0][1]), args.resource_dir, rgb_data[0][0][0].shape, '/Deeprgb/', 'DeepFace')
    #Deeprgb.train(rgb_data[0], rgb_data[1])

    main.log(args, '\n---------- DeepFace, Frontalization ----------')
    Deepfront = Classifier(args, start_time, len(front_data[0][0][1]),
                           args.resource_dir, front_data[0][0][0].shape,
                           '/Deepfront/', 'DeepFace')
    Deepfront.train(front_data[0], front_data[1])

    #main.log(args, '\n---------- Song, No Frontalization ----------')
    #Songrgb = Classifier(args, start_time, len(rgb_data[0][0][1]), args.resource_dir, rgb_data[0][0][0].shape, '/Songrgb/', 'Song')
    #Songrgb.train(rgb_data[0], rgb_data[1])

    main.log(args, '\n---------- Song, Frontalization ----------')
    Songfront = Classifier(args, start_time, len(front_data[0][0][1]),
                           args.resource_dir, front_data[0][0][0].shape,
                           '/Songfront/', 'Song')
    Songfront.train(front_data[0], front_data[1])

    main.log(
        args,
        '\n Completed Comparison in ' + str(time.clock() - start_time) + 's\n')
Example #36
 def queue_events(self, timeout):
   time.sleep(timeout)
   if not _paused():
     new_snapshot = self._make_snapshot(self.watch.path)
     created, deleted, modified = self._snapshot.diff(new_snapshot)
     self._snapshot = new_snapshot
     
     if deleted:
       log("poller: delete event appeared in %s" % deleted)
     if created:
       log("poller: create event appeared in %s" % created)
     if modified and not (deleted or created):
       log("poller: modify event appeared in %s" % modified)
     
     if modified or deleted:
       self.queue_event(DirDeletedEvent(self.watch.path + '*'))
     if modified or created:
       self.queue_event(DirCreatedEvent(self.watch.path + '*'))
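The snapshot helper is not shown here; a minimal sketch of what _make_snapshot could return for this poller (hypothetical, keyed on modification times) is:

import os

class _Snapshot:
    def __init__(self, root):
        # Record the modification time of every entry directly under root.
        self.mtimes = {}
        for name in os.listdir(root):
            path = os.path.join(root, name)
            self.mtimes[path] = os.path.getmtime(path)

    def diff(self, new):
        # Compare against a newer snapshot, mirroring the (created, deleted, modified) unpacking above.
        created = [p for p in new.mtimes if p not in self.mtimes]
        deleted = [p for p in self.mtimes if p not in new.mtimes]
        modified = [p for p in new.mtimes
                    if p in self.mtimes and new.mtimes[p] != self.mtimes[p]]
        return created, deleted, modified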
Example #37
def hill_climbing_search():

    log("Started at %s"%time.ctime())
    # get gold rankings and store in structure
    with open(GOLD_RANKINGS_FILENAME) as goldFile:
        global gold_rankings
        gold_rankings = getSystemRankings(goldFile)

    # make sure the necessary files are loaded
    main.load_the_files(opt=True)

    # get the normalization coefficients
    if NORMALIZE:
        swiki_max = max(main.unigram_frequencies.values())
        #ilen_max = max(list(map(len, unigram_frequencies.keys())))
        ilen_max = 28   # too much garbage, hard to normalize
                        # set to 28, for "antidisestablishmentarianism"
        #wnet_max = max([get_wordnet_score(w) for w in unigram_frequencies])
        wnet_max = 75   # for "break", given by the above expression
    else:
        swiki_max = ilen_max = wnet_max = 1.0

    global check_range, norm_coefficients
    check_range = [1.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, 10000000.0]
    norm_coefficients = [ilen_max, wnet_max, swiki_max, 1.0]

    results = Counter()
    old_position = (0,0,0,0)

    score = evaluate_solution(old_position)
    results[old_position] = score
    print("\t"+str(old_position)+":", score)
    i = 0; j = 1

    while True:
        i += 1
        log("%d positions done, %d calculated"%(i,j))

        # look at the neighbors
        neighbors = get_neighbors(results, old_position)

        # evaluate them
        for pos in neighbors:
            j += 1
            score = evaluate_solution(pos)
            results[pos] = score
            print("\t"+str(pos)+":", score)
            log("%d positions done, %d calculated"%(i,j))

        # see if any one of them is better
        best = results[old_position]
        new_position = None
        for pos in neighbors:
            if results[pos] > best:
                best = results[pos]
                new_position = pos

        if new_position:
            old_position = new_position
        else:
            break

    log("done") 
    print("best result: %s, score is %s" \
            %(str(old_position), str(results[old_position])))
    for res in results.most_common(10):
        print("position %s\t\tscore:%s\n"%(str(res[0]), str(res[1])))
Example #38
    def run(self):
        self._is_running = True
        # For identifying "chat:xxx" and "from:xxx" keywords
        query_parser = searchparser.SearchQueryParser()
        result = None
        while self._is_running:
            try:
                search = self._queue.get()
                if not search:
                    continue # continue while self._is_running

                is_text_output = ("text" == search.get("output"))
                wrap_html = None # MessageParser wrap function, for HTML output
                if is_text_output:
                    TEMPLATES = {
                        "chat":    templates.SEARCH_ROW_CHAT_TXT, 
                        "contact": templates.SEARCH_ROW_CONTACT_TXT,
                        "message": templates.SEARCH_ROW_MESSAGE_TXT,
                        "table":   templates.SEARCH_ROW_TABLE_HEADER_TXT,
                        "row":     templates.SEARCH_ROW_TABLE_TXT, }
                    wrap_b = lambda x: "**%s**" % x.group(0)
                    output = {"format": "text"}
                else:
                    TEMPLATES = {
                        "chat":    templates.SEARCH_ROW_CHAT_HTML, 
                        "contact": templates.SEARCH_ROW_CONTACT_HTML,
                        "message": templates.SEARCH_ROW_MESSAGE_HTML,
                        "table":   templates.SEARCH_ROW_TABLE_HEADER_HTML,
                        "row":     templates.SEARCH_ROW_TABLE_HTML, }
                    wrap_b = lambda x: "<b>%s</b>" % x.group(0)
                    output = {"format": "html"}
                    width = search.get("width", -1)
                    if width > 0:
                        dc = wx.MemoryDC()
                        dc.SetFont(wx.Font(8, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, 
                            wx.FONTWEIGHT_NORMAL, face=conf.HistoryFontName))
                        wrap_html = lambda x: wx.lib.wordwrap.wordwrap(x, width, dc)
                        output["wrap"] = True
                main.log("Searching for \"%(text)s\" in %(table)s (%(db)s)." %
                         search)
                self._stop_work = False
                self._drop_results = False

                parser = skypedata.MessageParser(search["db"],
                                                 wrapfunc=wrap_html)
                # {"output": text with results, "map": link data map}
                # map data: {"contact:666": {"contact": {contact data}}, }
                result_type, result_count, count = None, 0, 0
                result = {"output": "", "map": {},
                          "search": search, "count": 0}
                sql, params, match_words = query_parser.Parse(search["text"])

                # Turn wildcard characters * into regex-compatible .*
                match_words_re = [".*".join(map(re.escape, w.split("*")))
                                  for w in match_words]
                patt = "(%s)" % "|".join(match_words_re)
                # For replacing matching words with <b>words</b>
                pattern_replace = re.compile(patt, re.IGNORECASE)

                # Find chats with a matching title or matching participants
                chats = []
                if search["table"] in ["conversations", "messages"]:
                    chats = search["db"].get_conversations()
                    chats.sort(key=lambda x: x["title"])
                    chat_map = {} # {chat id: {chat data}}
                    template_chat = step.Template(TEMPLATES["chat"])
                for chat in chats:
                    chat_map[chat["id"]] = chat
                    if "conversations" == search["table"] and match_words:
                        title_matches = False
                        matching_authors = []
                        if self.match_all(chat["title"], match_words):
                            title_matches = True
                        for participant in chat["participants"]:
                            contact = participant["contact"]
                            if contact:
                                for n in filter(None, [contact["fullname"],
                                contact["displayname"], contact["identity"]]):
                                    if self.match_all(n, match_words) \
                                    and contact not in matching_authors:
                                        matching_authors.append(contact)

                        if title_matches or matching_authors:
                            count += 1
                            result_count += 1
                            result["output"] += template_chat.expand(locals())
                            key = "chat:%s" % chat["id"]
                            result["map"][key] = {"chat": chat["id"]}
                            if not count % conf.SearchResultsChunk \
                            and not self._drop_results:
                                result["count"] = result_count
                                self.postback(result)
                                result = {"output": "", "map": {},
                                          "search": search, "count": 0}
                    if self._stop_work:
                        break # break for chat in chats
                if result["output"] and not self._drop_results:
                    result["count"] = result_count
                    self.postback(result)
                    result = {"output": "", "map": {}, "search": search,
                              "count": 0}

                # Find contacts with a matching name
                if not self._stop_work and "contacts" == search["table"] \
                and match_words:
                    count = 0
                    contacts = search["db"].get_contacts()
                    # Possibly more: country (ISO code, need map), birthday
                    # (base has YYYYMMDD in integer field).
                    match_fields = [
                        "displayname", "skypename", "province", "city",
                        "pstnnumber", "phone_home", "phone_office",
                        "phone_mobile", "homepage", "emails", "about",
                        "mood_text",
                    ]
                    template_contact = step.Template(TEMPLATES["contact"])
                    for contact in contacts:
                        match = False
                        fields_filled = {}
                        for field in match_fields:
                            if contact[field]:
                                val = contact[field]
                                if self.match_all(val, match_words):
                                    match = True
                                    val = pattern_replace.sub(wrap_b, val)
                                fields_filled[field] = val
                        if match:
                            count += 1
                            result_count += 1
                            result["output"] += template_contact.expand(locals())
                            if not (self._drop_results
                            or count % conf.SearchResultsChunk):
                                result["count"] = result_count
                                self.postback(result)
                                result = {"output": "", "map": {},
                                          "search": search, "count": 0}
                        if self._stop_work:
                            break # break for contact in contacts
                if result["output"] and not self._drop_results:
                    result["count"] = result_count
                    self.postback(result)
                    result = {"output": "", "map": {},
                              "search": search, "count": 0}

                # Find messages with a matching body
                if not self._stop_work and "messages" == search["table"]:
                    template_message = step.Template(TEMPLATES["message"])
                    count, result_type = 0, "messages"
                    chat_messages = {} # {chat id: [message, ]}
                    chat_order = []    # [chat id, ]
                    messages = search["db"].get_messages(
                        additional_sql=sql, additional_params=params,
                        ascending=False, use_cache=False)
                    for m in messages:
                        chat = chat_map.get(m["convo_id"])
                        body = parser.parse(m, pattern_replace if match_words 
                                            else None, output)
                        count += 1
                        result_count += 1
                        result["output"] += template_message.expand(locals())
                        key = "message:%s" % m["id"]
                        result["map"][key] = {"chat": chat["id"],
                                              "message": m["id"]}
                        if is_text_output or (not self._drop_results
                        and not count % conf.SearchResultsChunk):
                            result["count"] = result_count
                            self.postback(result)
                            result = {"output": "", "map": {},
                                      "search": search, "count": 0}
                        if self._stop_work or (not is_text_output
                        and count >= conf.MaxSearchMessages):
                            break # break for m in messages

                infotext = search["table"]
                if not self._stop_work and "all tables" == search["table"]:
                    infotext, result_type = "", "table row"
                    # Search over all fields of all tables.
                    template_table = step.Template(TEMPLATES["table"])
                    template_row = step.Template(TEMPLATES["row"])
                    for table in search["db"].get_tables():
                        table["columns"] = search["db"].get_table_columns(
                            table["name"])
                        sql, params, words = \
                            query_parser.Parse(search["text"], table)
                        if not sql:
                            continue # continue for table in search["db"]..
                        infotext += (", " if infotext else "") + table["name"]
                        rows = search["db"].execute(sql, params)
                        row = rows.fetchone()
                        if not row:
                            continue # continue for table in search["db"]..
                        result["output"] = template_table.expand(locals())
                        count = 0
                        while row:
                            count += 1
                            result_count += 1
                            result["output"] += template_row.expand(locals())
                            key = "table:%s:%s" % (table["name"], count)
                            result["map"][key] = {"table": table["name"],
                                                  "row": row}
                            if not count % conf.SearchResultsChunk \
                            and not self._drop_results:
                                result["count"] = result_count
                                self.postback(result)
                                result = {"output": "", "map": {},
                                          "search": search, "count": 0}
                            if self._stop_work or (not is_text_output
                            and result_count >= conf.MaxSearchTableRows):
                                break # break while row
                            row = rows.fetchone()
                        if not self._drop_results:
                            if not is_text_output:
                                result["output"] += "</table>"
                            result["count"] = result_count
                            self.postback(result)
                            result = {"output": "", "map": {},
                                      "search": search, "count": 0}
                        infotext += " (%s)" % util.plural("result", count)
                        if self._stop_work or (not is_text_output
                        and result_count >= conf.MaxSearchTableRows):
                            break # break for table in search["db"]..
                    single_table = ("," not in infotext)
                    infotext = "table%s: %s" % \
                               ("" if single_table else "s", infotext)
                    if not single_table:
                        infotext += "; %s in total" % \
                                    util.plural("result", result_count)
                final_text = "No matches found."
                if self._drop_results:
                    result["output"] = ""
                if result_count:
                    final_text = "Finished searching %s." % infotext

                if self._stop_work:
                    final_text += " Stopped by user."
                elif "messages" == result_type and not is_text_output \
                and count >= conf.MaxSearchMessages:
                    final_text += " Stopped at %s limit %s." % \
                                  (result_type, conf.MaxSearchMessages)
                elif "table row" == result_type and not is_text_output \
                and count >= conf.MaxSearchTableRows:
                    final_text += " Stopped at %s limit %s." % \
                                  (result_type, conf.MaxSearchTableRows)

                result["output"] += "</table><br /><br />%s</font>" % final_text
                if is_text_output: result["output"] = ""
                result["done"] = True
                result["count"] = result_count
                self.postback(result)
                main.log("Search found %(count)s results." % result)
            except Exception as e:
                if not result:
                    result = {}
                result["done"], result["error"] = True, traceback.format_exc()
                result["error_short"] = "%s: %s" % (type(e).__name__, e.message)
                self.postback(result)
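The worker above streams matches back in chunks: every conf.SearchResultsChunk rows it posts a dict carrying the accumulated HTML output, a map from result keys to database ids, and a running count, then resets the dict; the final post sets "done" (and "error" on failure). A receiving side might consume those dicts roughly like this (a sketch only; append_to_view stands in for whatever UI hook the postback callback drives):

def on_search_result(result):
    # Illustrative consumer for the chunked result dicts posted above.
    if result.get("error"):
        print("Search failed: %s" % result.get("error_short", result["error"]))
        return
    if result.get("output"):
        append_to_view(result["output"])  # hypothetical UI hook
    if result.get("done"):
        print("Search finished with %s results." % result.get("count", 0))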
Example #39
0
def build_dataset(args):
	start_time = time.clock()
	if args.dataset == 'CK+':
		build_structure(args, start_time, 8)
		image_files = []
		for outer_folder in os.listdir(args.data_dir):
			if os.path.isdir(args.data_dir + '/' + outer_folder):
				for inner_folder in os.listdir(args.data_dir + '/' + outer_folder):
					if os.path.isdir(args.data_dir + '/' + outer_folder + '/' + inner_folder):
						for input_file in os.listdir(args.data_dir + '/' + outer_folder + '/' + inner_folder):
							data_type = input_file.split('.')[1].lower()
							if not(data_type == 'png' or data_type == 'jpg' or data_type == 'tiff'):
								break
							label_file = args.label_dir + '/' + outer_folder + '/' + inner_folder + '/' + input_file[:-4] + '_emotion.txt'
							if os.path.isfile(label_file):
								with open(label_file, 'r') as read_file:
									label = int(float(read_file.readline()))
								frames = sorted(os.listdir(args.data_dir + '/' + outer_folder + '/' + inner_folder))
								# the last five frames hold the peak expression; keep them with the parsed label
								for i in range(-1, -6, -1):
									image_file = frames[i]
									data_type = image_file.split('.')[1].lower()
									if (data_type == 'png' or data_type == 'jpg' or data_type == 'tiff'):
										image_files.append((args.data_dir + '/' + outer_folder + '/' + inner_folder + '/' + image_file, label))
								# the first frame is the neutral face; keep it with label 0
								neutral_file = frames[0]
								data_type = neutral_file.split('.')[1].lower()
								if not(data_type == 'png' or data_type == 'jpg' or data_type == 'tiff'):
									neutral_file = frames[1]
								image_files.append((args.data_dir + '/' + outer_folder + '/' + inner_folder + '/' + neutral_file, 0))
		main.log(args, '{:.5f}'.format(time.clock() - start_time) + 's ' + str(len(image_files)) + ' image files collected')
		extract_images(args, start_time, image_files)

	elif args.dataset == 'KDEF':
		build_structure(args, start_time, 7)
		image_files = []
		for folder in os.listdir(args.data_dir):
			if os.path.isdir(args.data_dir + '/' + folder):
				for file in os.listdir(args.data_dir + '/' + folder):
					data_type = file.split('.')[1].lower()
					if (data_type == 'png' or data_type == 'jpg' or data_type == 'tiff') and file[6] != 'F':
						label = 0
						if file[4:6] == 'AF':
							label = 3
						elif file[4:6] == 'AN':
							label = 1
						elif file[4:6] == 'DI':
							label = 2
						elif file[4:6] == 'HA':
							label = 4
						elif file[4:6] == 'NE':
							label = 0
						elif file[4:6] == 'SA':
							label = 5
						elif file[4:6] == 'SU':
							label = 6
						image_files.append((args.data_dir + '/' + folder + '/' + file, label))
		main.log(args, '{:.5f}'.format(time.clock() - start_time) + 's ' + str(len(image_files)) + ' image files collected')
		extract_images(args, start_time, image_files)

	elif args.dataset == 'AMFED':
		build_structure(args, start_time, 3)
		images = []
		for video in os.listdir(args.data_dir):
			if video.split('.')[1] == 'avi':
				filename = args.data_dir + video
				#print filename
				try:
					cap = skvideo.io.vread(filename)
				except Exception as e:
					print 'Error with ' + filename + ': ' + str(e)
					continue  # skip videos that cannot be read
				with open(args.label_dir + '/' + video[:-4] + '-label.csv', 'rU') as csvfile:
					reader = csv.reader(csvfile, delimiter=',', dialect=csv.excel_tab)
					skip = True
					for row in reader:
						if skip:
							skip = False
							continue
						frame_no = int(float(row[0]) * 14)
						if float(row[1]) > 33:
							images.append((cap[frame_no], 2))
						elif float(row[3]) > 33:
							images.append((cap[frame_no], 1))
						else:
							images.append((cap[frame_no], 0))
		main.log(args, '{:.5f}'.format(time.clock() - start_time) + 's ' + str(len(images)) + ' images collected')
		extract_images(args, start_time, images, False)

	else:
		main.log(args, 'Please specify a dataset with \'--dataset\'', True)
	if args.split_dir != 'none':
		splits = split_images(args, start_time)
		main.log(args, '{:.5f}'.format(time.clock() - start_time) + 's Images have been split')
	if args.normalize:
		normalize(args, start_time)
		if args.split_dir != 'none':
			normalize(args, start_time, dirs=splits)
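The KDEF branch above derives the label from the two-letter emotion code at characters 4-6 of the filename via an elif chain. An equivalent table-driven sketch (codes and label values copied from the branches above; the helper name is illustrative):

KDEF_LABELS = {'NE': 0, 'AN': 1, 'DI': 2, 'AF': 3, 'HA': 4, 'SA': 5, 'SU': 6}

def kdef_label(filename):
	# filename[4:6] holds the two-letter emotion code, e.g. 'AF01ANS.JPG' -> 'AN'
	return KDEF_LABELS.get(filename[4:6], 0)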
Example #40
0
                    m["from_dispname"].encode("utf-8"),
                    parsed_text.encode("utf-8")
                ]
                csv_writer.writerow(values)
        if is_html:
            f.write("</table>\r\n</td></tr></table>\r\n<div class='footer'>" \
                "Exported with %(app)s on %(now)s.</div>\r\n" \
                "</body>\r\n</html>" % header_data
            )
        f.close()
        result = True
    except Exception:
        if f:
            f.close()
        main.log("Export cannot access %s.\n%s", filename,
            traceback.format_exc()
        )
    return result


def export_grid(grid, filename, title, db, sql="", table=""):
    """
    Exports the current contents of the specified wx.Grid to file.

    @param   grid      a wx.Grid object
    @param   filename  full path and filename of resulting file, file extension
                       .html|.csv|.sql determines file format
    @param   title     title used in HTML
    @param   db        SkypeDatabase instance
    @param   sql       the SQL query producing the grid contents, if any
    @param   table     name of the table producing the grid contents, if any
Example #41
0
    if args.block.lower() == "server":
        main.change_nginx_config(
            domain=str(args.domain),
            block_type="server",
            prev_value=args.prev_value,
            new_value=args.new_value,
            directive=args.directive,
            attribute=args.attribute,
        )
    elif args.block.lower() == "upstream":
        main.change_nginx_config(
            domain=args.domain,
            block_type="upstream",
            prev_value=args.prev_value,
            new_value=args.new_value,
            attribute=args.attribute,
        )
        main.log("Total Execution Time: {:.4f} Secounds :)".format(
            time.time() - start_time), "info")

    else:
        exit("cant Identify Block Name")
else:
    main.add_subdomain(domain=args.domain, sub_name=args.new_subdomain,)

# answer = args.square**2
# if args.verbose:
#     print("the square of {} equals {}".format(args.square, answer))
# else:
#     print(answer)
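The snippet assumes an argparse namespace with block, domain, directive, attribute, prev_value, new_value and new_subdomain attributes. A minimal parser wiring of that shape could look like this (an assumption for illustration; the real CLI definition is not part of the snippet):

import argparse

parser = argparse.ArgumentParser(description="Edit nginx config blocks")
parser.add_argument("--block", default="", help="block to edit: server or upstream")
parser.add_argument("--domain", required=True)
parser.add_argument("--directive")
parser.add_argument("--attribute")
parser.add_argument("--prev-value", dest="prev_value")
parser.add_argument("--new-value", dest="new_value")
parser.add_argument("--new-subdomain", dest="new_subdomain")
args = parser.parse_args()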
Example #42
0
 def performLogin(self, name, email, nick):
     main.log('User '+ nick +' wants to login: '******'user_email'] = email
Example #43
0
 def on_train_end(self, state):
     main.log(
         self.args, '\n{:.5f}s'.format(time.clock() - self.start) +
         ' Validation Loss = {:.5f}'.format(state.val_loss) +
         ' Validation Accuracy = ' + str(state.val_acc))
Example #44
0
 def test_logarithm(self):
     self.assertAlmostEqual(main.log(5), 0.6989700043360189, msg='log10 fail')
Example #45
0
import socket, thread, sys, time
import main

host = ""   #ip of device
port = 1998	#port to connect to



s = socket.socket()
s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
s.bind((host,port))
##s.settimeout(20)
main.log("ready for connections")
print "Ready for connections"
s.listen(5)



while(True):
	try:
		c,addr = s.accept()
		c.settimeout(20)
		address = addr[0]
		main.log("Got connection from "+str(address))
		print "Got a connection from ",str(address)
		
		thread.start_new_thread(main.grabConnection,(c,addr))
	except Exception as e:
		main.log("Error for connection thread(Still running) "+str(e))
		print "Error at step 1 .. "+str(e)
		try:
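main.grabConnection is referenced but not shown; a per-connection handler of the same shape might look like the following sketch (Python 2 to match the snippet; the echo behaviour is purely illustrative):

def grabConnection(c, addr):
	# Illustrative handler: echo bytes back until the client disconnects.
	try:
		while True:
			data = c.recv(1024)
			if not data:
				break
			c.sendall(data)
	except socket.timeout:
		main.log("Connection from " + str(addr[0]) + " timed out")
	finally:
		c.close()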
Example #46
0
def check_newest_version(callback=None):
    """
    Queries the Skyperious download page for available newer releases.

    @param   callback  function to call with check result, if any
             @result   (version, url, changes) if new version up,
                       () if up-to-date, None if query failed
    """
    global update_window, url_opener
    result = ()
    update_window = True
    try:
        main.log("Checking for new version at %s.", conf.DownloadURL)
        html = url_opener.open(conf.DownloadURL).read()
        links = re.findall("<a[^>]*\shref=['\"](.+)['\"][^>]*>", html, re.I)
        links = [urllib.basejoin(conf.DownloadURL, x) for x in links[:3]]
        if links:
            # Determine release types
            linkmap = {} # {"src": link, "x86": link, "x64": link}
            for link in links:
                link_text = link.lower()
                if link_text.endswith(".zip"):
                    linkmap["src"] = link
                elif link_text.endswith(".exe") and "_x64" in link_text:
                    linkmap["x64"] = link
                elif link_text.endswith(".exe"):
                    linkmap["x86"] = link

            install_type = get_install_type()
            link = linkmap[install_type]
            # Extract version number like 1.3.2a from skyperious_1.3.2a_x64.exe
            version = (re.findall("(\d[\da-z.]+)", link) + [None])[0]
            main.log("Newest %s version is %s.", install_type, version)
            try:
                if (version != conf.Version
                and canonic_version(conf.Version) >= canonic_version(version)):
                    version = None
            except Exception: pass
            if version and version != conf.Version:
                changes = ""
                try:
                    main.log("Reading changelog from %s.", conf.ChangelogURL)
                    html = url_opener.open(conf.ChangelogURL).read()
                    match = re.search("<h4[^>]*>(v%s,.*)</h4\s*>" % version,
                                      html, re.I)
                    if match:
                        ul = html[match.end(0):html.find("</ul", match.end(0))]
                        lis = re.findall("(<li[^>]*>(.+)</li\s*>)+", ul, re.I)
                        items = [re.sub("<[^>]+>", "", x[1]) for x in lis]
                        items = map(HTMLParser.HTMLParser().unescape, items)
                        changes = "\n".join("- " + i.strip() for i in items)
                        if changes:
                            title = match.group(1)
                            changes = "Changes in %s\n\n%s" % (title, changes)
                except Exception:
                    main.log("Failed to read changelog.\n\n%s.",
                             traceback.format_exc())
                url = urllib.basejoin(conf.DownloadURL, link)
                result = (version, url, changes)
    except Exception:
        main.log("Failed to retrieve new version from %s.\n\n%s",
                 conf.DownloadURL, traceback.format_exc())
        result = None
    update_window = None
    if callback:
        callback(result)
    return result
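Per the docstring, the result is (version, url, changes) when a newer release exists, () when up to date, and None if the query failed; a small usage sketch (the callback body is illustrative):

def on_version_check(result):
    # result: (version, url, changes) if newer, () if up-to-date, None on failure
    if result is None:
        print("Version check failed.")
    elif not result:
        print("Already up to date.")
    else:
        version, url, changes = result
        print("New version %s at %s:\n%s" % (version, url, changes))

check_newest_version(on_version_check)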
Example #47
0
 def get(self):     
     who = self.request.get('user')
     main.log('Asked to delete user: '******'/users')
Example #48
0
 def handle_error():
     text = "".join(cached)[:100000]
     if text:
         main.log(text)
         report_error(text)
     cached[:] = []
Example #50
0
def train_optimizer(task):
    '''Train an RNN optimizer.

    Args:
        task: A dictionary in task_list.py which specifies the model of the optimizer and
            the tricks and the optimizee to use when training the optimizer.
    '''
    flags = tf.app.flags.FLAGS
    session = tf.get_default_session()

    curr_path = os.getcwd()
    parent_path = os.path.split(curr_path)[0]

    task['optimizee']['train'].build()

    n_steps = task.get('n_steps', flags.n_steps)
    n_bptt_steps = task.get('n_bptt_steps', flags.n_bptt_steps)
    use_avg_loss = task.get('use_avg_loss', False)
    options = task.get('options', {})
    lr_decay_name = task.get('lr_decay_name')
    assert n_steps % n_bptt_steps == 0

    model = task['model'](name=task_id(),
                          optimizee=task['optimizee']['train'],
                          n_bptt_steps=n_bptt_steps,
                          n_steps=n_steps,
                          decay_step=20,
                          lr_decay_name=lr_decay_name,
                          decay_rate=0.96,
                          stair_case=False,
                          lr=task['training lr'],
                          use_avg_loss=use_avg_loss,
                          **options)
    model.prepare_train_optimizee(task['optimizee']['tests'])

    log_path = os.path.join(parent_path, "log")
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_filename = os.path.join(log_path, model.name + "_training.txt")

    fig_path = os.path.join(parent_path, "training_data", model.name + "_data")
    if not os.path.exists(fig_path):
        os.makedirs(fig_path)
    fig_filename = os.path.join(fig_path, model.name + ".png")

    session.run(tf.global_variables_initializer())

    eid = flags.eid
    if eid > 0:
        model.restore(flags.eid)
        model.bid = flags.eid * flags.n_batches

        file_name = os.path.join(fig_path, "loss_curve.pkl")
        with open(file_name, "rb") as pickle_file:
            loss_curve = pickle.load(pickle_file)
        train_loss = loss_curve[:eid]
        val_loss = loss_curve[eid:2 * eid]
        val_gd_loss = loss_curve[2 * eid:3 * eid]
        val_loss1 = loss_curve[3 * eid:4 * eid]
        val_gd_loss1 = loss_curve[4 * eid:5 * eid]
    elif eid == 0:
        train_loss = []
        val_loss = []
        val_gd_loss = []
        val_loss1 = []
        val_gd_loss1 = []
    else:
        print("check eid: flags.eid must be non-negative")

    best_evaluation = float("inf")

    while flags.n_epochs == 0 or eid < flags.n_epochs:
        eid += 1

        loss_values = []
        for i in range(flags.n_batches):
            ret = model.train_one_iteration(n_steps)

            loss_value = ret['loss']

            loss_values.append(loss_value)

            # sys.stdout.write("\r\033[K")
            msg = "\riteration #%d" % i
            msg += ": loss = %.5f avg loss = %.5f" % (
                loss_value, np.mean(loss_values))  # mean loss of a batch
            sys.stdout.write(msg)
            sys.stdout.flush()

        # sys.stdout.write("\r\033[K")
        msg = "\repoch #%d" % eid
        msg += ": loss = %.5f" % np.mean(loss_values)
        log(msg, log_filename)
        log(str(loss_values), log_filename)
        train_loss.append(np.mean(loss_values))

        if eid % 10 == 0:
            model.save(eid)

        test_loss_values = {}
        for i in range(flags.n_tests):  # flag.n_tests: batch size in test
            for name, avg_loss_value, gd_avg_loss_value, test_loss, gd_test_loss in model.test(
                    eid):
                if name not in test_loss_values:
                    test_loss_values[name] = {
                        'nn': [],
                        'gd': [],
                        'nn1': [],
                        'gd1': []
                    }
                test_loss_values[name]['nn'].append(avg_loss_value)
                test_loss_values[name]['gd'].append(gd_avg_loss_value)
                test_loss_values[name]['nn1'].append(np.mean(test_loss))
                test_loss_values[name]['gd1'].append(np.mean(gd_test_loss))
        for name in test_loss_values:
            log(
                "epoch #%d test %s: loss = %.5f gd_loss = %.5f" %
                (eid, name, np.mean(test_loss_values[name]['nn']),
                 np.mean(test_loss_values[name]['gd'])), log_filename)
            val_loss.append(np.mean(test_loss_values[name]['nn']))
            val_gd_loss.append(np.mean(test_loss_values[name]['gd']))
            val_loss1.append(np.mean(test_loss_values[name]['nn1']))
            val_gd_loss1.append(np.mean(test_loss_values[name]['gd1']))
            fig_legend = "val_gd_" + task['optimizee']['tests'][name]['gdAlgo']

        if val_loss[-1] < best_evaluation:
            model.save_best(eid)
            best_evaluation = val_loss[-1]

        if eid % 10 == 0:
            file_name = os.path.join(fig_path, "loss_curve.pkl")
            with open(file_name, "wb") as pickle_file:
                pickle.dump(
                    train_loss + val_loss + val_gd_loss + val_loss1 + val_gd_loss1,
                    pickle_file)

        plt.figure(1)
        plt.plot(np.arange(1, eid + 1, 1), train_loss, "r-")
        plt.plot(np.arange(1, eid + 1, 1), val_loss, "b-")
        plt.plot(np.arange(1, eid + 1, 1), val_gd_loss, "k-")

        plt.plot(np.arange(1, eid + 1, 1), val_loss1, "b--")
        plt.plot(np.arange(1, eid + 1, 1), val_gd_loss1, "k--")

        plt.legend(["train", "val", fig_legend, 'val_nn', 'val_gd'])
        plt.xlabel("epochs")
        plt.ylabel("loss")
        plt.draw()
        plt.pause(0.0001)
        plt.savefig(fig_filename)
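Note the checkpoint convention used above: the five loss curves are pickled as one flat concatenated list and recovered on restore by slicing at multiples of eid. A minimal round-trip sketch of that layout (values are illustrative):

import pickle

eid = 2  # epochs completed so far
train_loss, val_loss = [0.9, 0.7], [1.0, 0.8]
val_gd_loss, val_loss1, val_gd_loss1 = [1.1, 0.9], [1.0, 0.8], [1.2, 1.0]

# save: concatenate the five equal-length curves into one flat list
with open("loss_curve.pkl", "wb") as f:
    pickle.dump(train_loss + val_loss + val_gd_loss + val_loss1 + val_gd_loss1, f)

# restore: slice the flat list back into curves, as train_optimizer does
with open("loss_curve.pkl", "rb") as f:
    loss_curve = pickle.load(f)
assert loss_curve[:eid] == train_loss
assert loss_curve[eid:2 * eid] == val_loss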
Example #51
0
def export_grid(grid, filename, title, db, sql="", table=""):
    """
    Exports the current contents of the specified wx.Grid to file.

    @param   grid      a wx.Grid object
    @param   filename  full path and filename of resulting file, file extension
                       .html|.csv|.sql determines file format
    @param   title     title used in HTML
    @param   db        SkypeDatabase instance
    @param   sql       the SQL query producing the grid contents, if any
    @param   table     name of the table producing the grid contents, if any
    """
    result = False
    f = None
    # @todo do with BeautifulSoup?
    try:
        if sql: # not related to is_sql
            sql = sql.encode("utf-8")
        with open(filename, "w") as f:
            is_html = filename.lower().endswith(".html")
            is_csv  = filename.lower().endswith(".csv")
            is_sql  = filename.lower().endswith(".sql")
            columns = \
                [col["name"].encode("utf-8") for col in grid.Table.columns]
            main_data = {
                "title": title,
                "db": db.filename,
                "app": conf.Title,
                "now": datetime.datetime.now().strftime("%d.%m.%Y %H:%M"),
            }
            if is_html:
                main_data = dict([(k, escape(v)) for k,v in main_data.items()])
                # Write HTML header and table header
                info = "<b>SQL:</b> %s" % escape(sql) if sql \
                       else ""
                f.write(GRID_HTML_HEADER % {"info": info,
                    "title": escape(title), "db": escape(db.filename),
                    "count": grid.NumberRows
                })
                f.write("<tr><th>#</th><th>%s</th></tr>" \
                    % "</th><th>".join(columns)
                )
            elif is_csv:
                # Initialize CSV writer and write header row
                dialect = csv.excel
                # Default is "," which is actually not Excel
                dialect.delimiter = ";"
                # Default is "\r\n", which causes another "\r" to be written
                dialect.lineterminator = "\r"
                csv_writer = csv.writer(f, dialect)
                csv_writer.writerow(["%(title)s, source: %(db)s. " \
                    "Exported with %(app)s on %(now)s." % main_data
                ])
                if sql:
                    csv_writer.writerow(["SQL: %s" \
                        % sql.replace("\r", " ").replace("\n", " ")
                    ])
                csv_writer.writerow(columns)
            elif is_sql:
                f.write("-- %(title)s.\n-- Source: %(db)s.\n" \
                    "-- Exported with %(app)s on %(now)s.\n" % main_data
                )
                if sql:
                    f.write("# SQL: %s\n" % sql)
                str_cols = ", ".join(columns)
                str_vals = "%(" + ")s, %(".join(columns) + ")s"
                if table:
                    # Add CREATE TABLE statement.
                    create_sql = db.tables[table.lower()]["sql"]
                    re_sql = re.compile(
                        "^(CREATE\s+TABLE\s+)", re.IGNORECASE | re.MULTILINE
                    )
                    create_sql = re_sql.sub(
                        lambda m: "%sIF NOT EXISTS " % m.group(1), create_sql
                    )
                    f.write("%s;\n\n" % create_sql)

            for i in range(grid.NumberRows):
                data = grid.Table.GetRow(i)
                values = []
                if is_sql:
                    for col_name in columns:
                        value = data[col_name]
                        if isinstance(value, unicode):
                            value = value.encode("utf-8")
                        if isinstance(value, basestring):
                            value = "\"%s\"" % value.encode("string-escape") \
                                                    .replace("\"", "\"\"")
                        elif value is None:
                            value = "NULL"
                        else:
                            value = str(value)
                        values.append(value)
                    stmt =  u"INSERT INTO %s (%s) VALUES (" % (table, str_cols)
                    stmt += ", ".join(values) + ");\n"
                    f.write(stmt)
                else:
                    for col_name in columns:
                        if isinstance(data[col_name], unicode):
                            values.append(data[col_name].encode("utf-8"))
                        elif data[col_name] is None:
                            values.append("")
                        else:
                            values.append(str(data[col_name]))
                if is_html:
                    # Some values can contain HTML, need to make it safe
                    values = map(lambda x: escape(x, utf=False), values)
                    values.insert(0, str(i + 1))
                    f.write("<tr><td>%s</td></tr>" % "</td><td>".join(values))
                elif is_csv:
                    csv_writer.writerow(values)

            if is_html:
                # Write HTML footer
                f.write("</table>\r\n</td></tr></table>\r\n" \
                    "<div class='footer'>Exported with %(app)s on %(now)s." \
                    "</div>\r\n</body>\r\n</html>" % main_data
                )
            f.close()
            result = True
    except Exception:
        if f:
            f.close()
        main.log("Export cannot access %s.\n%s", filename,
            traceback.format_exc()
        )