示例#1
0
 def on_key_press(self, symbol, modifier):
     """
     Keyboard handler for the experiment window.

     ESCAPE aborts: OpenGL state is torn down and, unless running in
     stimuli-only mode, the inclinometer is closed and the collected
     data is written out before exiting.
     SPACE records the participant's response (reaction time and slant
     reading) while a stimulus is displayed, then advances the state
     machine to the next state.
     """
     if symbol == key.ESCAPE:
         self.render.disable_GL_STATE()
         self.render.unbind_all()
         if not STIMULI_ONLY:
             self.inclinometer.close()
             self.__compile_data()
         exit(1)
     elif symbol == key.SPACE:
         # Ignore SPACE during a break, unless the upcoming state is the
         # break message itself.
         if not self.on_break or self.states[0] == self.__display_message:
             if self.run_state == self.__display_stimuli:
                 # Reaction time measured from stimulus onset.
                 if hasattr(self, 'reaction_times'):
                     self.reaction_times.append(time() - self.stimuli_presented)
                 press_time = time()
                 # NOTE(review): +90.0 presumably re-bases the
                 # inclinometer's Y reading to a slant angle — confirm.
                 self.slants.append(self.inclinometer.get_Y() + 90.0)
                 print get_time(press_time, time())
                 # Audible confirmation (winsound-style Beep:
                 # frequency Hz, duration ms — assumed).
                 Beep(2500, 500)
             # If there are still some states to be run
             # We continue onto them
             if self.states:
                 if not self.run_state == self.__display_fix:
                     # Move to the next state
                     self.run_state = self.states.pop(0)
                     # And run it
                     self.run_state()
             # Else we exit the experiment
             else:
                 exit(1)
示例#2
0
 def on_key_press(self, symbol, modifier):
     """
     Keyboard handler for the experiment window.

     ESCAPE aborts: OpenGL state is torn down and, unless running in
     stimuli-only mode, the inclinometer is closed and the collected
     data is written out before exiting.
     SPACE records the participant's response (reaction time and slant
     reading) while a stimulus is displayed, then advances the state
     machine to the next state.
     """
     if symbol == key.ESCAPE:
         self.render.disable_GL_STATE()
         self.render.unbind_all()
         if not STIMULI_ONLY:
             self.inclinometer.close()
             self.__compile_data()
         exit(1)
     elif symbol == key.SPACE:
         # Ignore SPACE during a break, unless the upcoming state is the
         # break message itself.
         if not self.on_break or self.states[0] == self.__display_message:
             if self.run_state == self.__display_stimuli:
                 # Reaction time measured from stimulus onset.
                 if hasattr(self, 'reaction_times'):
                     self.reaction_times.append(time() -
                                                self.stimuli_presented)
                 press_time = time()
                 # NOTE(review): +90.0 presumably re-bases the
                 # inclinometer's Y reading to a slant angle — confirm.
                 self.slants.append(self.inclinometer.get_Y() + 90.0)
                 print get_time(press_time, time())
                 # Audible confirmation (winsound-style Beep:
                 # frequency Hz, duration ms — assumed).
                 Beep(2500, 500)
             # If there are still some states to be run
             # We continue onto them
             if self.states:
                 if not self.run_state == self.__display_fix:
                     # Move to the next state
                     self.run_state = self.states.pop(0)
                     # And run it
                     self.run_state()
             # Else we exit the experiment
             else:
                 exit(1)
示例#3
0
    def test(self):
        """Rate the entered password and show its strength and the
        estimated time needed to crack it."""
        entered = self.password_entry.get()

        # Empty entry: pop an error dialog and stop.
        if not entered:
            msg.showerror(title='Void entry error',
                          message='The entry can\'t be void')
            return

        # Too short: show an inline warning and stop.
        if len(entered) < 7:
            self.out.configure(
                text='The password must be at least 7 characters long!',
                fg='red')
            return

        factor = tools.pw_strength(entered)
        crack_seconds = tools.get_break_time(entered)

        # Display the strength verdict and the crack-time estimate.
        self.password_s.configure(
            text='Your password is: {}'.format(
                settings.SECURITY_STATES[factor]),
            fg=settings.COLORS_STATES[factor])
        duration = tools.get_time(crack_seconds)
        self.time.configure(
            text='Time to break your password: {0}{1}.'.format(
                duration[0], duration[1]))
示例#4
0
def save_blog():
    """Persist a new blog post authored by the logged-in session user.

    Falls back to an anonymous/expired-session rendering of the editor
    when no valid login is present.
    """
    # No recognised login at all.
    if 'user' not in session or escape(session['user']) == 'stranger':
        return render_template('md_editor_edit.html', user='******')

    # Login expired: reset the stored credentials.
    if time.time() - int(escape(session['time'])) >= remember_login_time:
        session['user'] = '******'
        session['password'] = '******'
        return render_template('md_editor_edit.html',
                               user=escape(session['user']))

    # Valid session: store the post and its markdown file.
    content = request.form['content']
    title = request.form['title']
    user = escape(session['user'])
    parsed = get_content(content)
    created = get_time()
    md_path = save_md(content, user)
    Database.connect()
    Database.add_blog(title=title,
                      auth=user,
                      create_date=created,
                      content=parsed,
                      path=md_path)
    Database.close()
    return render_template('md_editor_edit.html',
                           user=escape(session['user']))
示例#5
0
文件: server.py 项目: Fisab/wom
 def garbage_grabber(self):  #delete timeout users
     """Drop clients whose last request is older than the timeout.

     No-op when the timeout feature is disabled.  Mutates
     ``self.clients`` in place (slice assignment) so any external
     references to the list stay valid.
     """
     if not self.timeout:
         return
     cur_time = tools.get_time()
     # Single O(n) pass instead of collect-then-remove (which was O(n^2)
     # because each list.remove() rescans the list).
     kept = []
     for client in self.clients:
         if cur_time - client['last_req'] > self.timeout_time:
             print('drop timeout - ', client['color'])
         else:
             kept.append(client)
     self.clients[:] = kept
示例#6
0
    def __stimuli_VBO(self, scaleX, scaleY):
        """
        Create the Vertex Buffer Objects for stimuli display
        """
        # Initiate the VBOs pertaining to the stimuli texture
        self.stimuli_VBO['vertex'] = []
        self.stimuli_VBO['texcoord'] = []
        self.stimuli_VBO['normal'] = []

        t0 = time()
        # Iterate through the points, moving up by the preset value:
        # STEP_SIZE
        for X in xrange(0, WIDTH - STEP_SIZE + 1, STEP_SIZE):
            for Y in xrange(0, HEIGHT - STEP_SIZE + 1, STEP_SIZE):

                # BOTTOM LEFT CORNER
                x, y, vertX, vertY = get_coords(X, Y, scaleX, scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                # BOTTOM RIGHT CORNER
                x, y, vertX, vertY = get_coords(X + STEP_SIZE, Y,
                                                scaleX, scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                # TOP RIGHT CORNER
                x, y, vertX, vertY = get_coords(X + STEP_SIZE,
                                                Y + STEP_SIZE,
                                                scaleX, scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                # TOP LEFT CORNER
                x, y, vertX, vertY = get_coords(X, Y + STEP_SIZE,
                                                scaleX, scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                for i in range(0, 4):
                    self.stimuli_VBO['normal'].append([0.0, 0.0, 1.0])

        # Instatiate the Vertex Buffer Objects using VertexBuffer
        self.stimuli_VBO['vertex'] = \
        VertexBuffer(array(self.stimuli_VBO['vertex'], dtype=float32))
        self.stimuli_VBO['texcoord'] = \
        VertexBuffer(array(self.stimuli_VBO['texcoord'], dtype=float32))
        self.stimuli_VBO['normal'] = \
        VertexBuffer(array(self.stimuli_VBO['normal'], dtype=float32))

        print 'Preparing the VBOs took %s seconds' % get_time(t0, time())
示例#7
0
    def __stimuli_VBO(self, scaleX, scaleY):
        """
        Create the Vertex Buffer Objects for stimuli display
        """
        # Initiate the VBOs pertaining to the stimuli texture
        self.stimuli_VBO['vertex'] = []
        self.stimuli_VBO['texcoord'] = []
        self.stimuli_VBO['normal'] = []

        t0 = time()
        # Iterate through the points, moving up by the preset value:
        # STEP_SIZE
        for X in xrange(0, WIDTH - STEP_SIZE + 1, STEP_SIZE):
            for Y in xrange(0, HEIGHT - STEP_SIZE + 1, STEP_SIZE):

                # BOTTOM LEFT CORNER
                x, y, vertX, vertY = get_coords(X, Y, scaleX, scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                # BOTTOM RIGHT CORNER
                x, y, vertX, vertY = get_coords(X + STEP_SIZE, Y, scaleX,
                                                scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                # TOP RIGHT CORNER
                x, y, vertX, vertY = get_coords(X + STEP_SIZE, Y + STEP_SIZE,
                                                scaleX, scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                # TOP LEFT CORNER
                x, y, vertX, vertY = get_coords(X, Y + STEP_SIZE, scaleX,
                                                scaleY)
                self.stimuli_VBO['vertex'].append([vertX, vertY])
                self.stimuli_VBO['texcoord'].append([x, y])

                for i in range(0, 4):
                    self.stimuli_VBO['normal'].append([0.0, 0.0, 1.0])

        # Instatiate the Vertex Buffer Objects using VertexBuffer
        self.stimuli_VBO['vertex'] = \
        VertexBuffer(array(self.stimuli_VBO['vertex'], dtype=float32))
        self.stimuli_VBO['texcoord'] = \
        VertexBuffer(array(self.stimuli_VBO['texcoord'], dtype=float32))
        self.stimuli_VBO['normal'] = \
        VertexBuffer(array(self.stimuli_VBO['normal'], dtype=float32))

        print 'Preparing the VBOs took %s seconds' % get_time(t0, time())
示例#8
0
文件: server.py 项目: Fisab/wom
 def add_client(self, ip, port, color):
     """Register a new client record, spawned at a random map position."""
     dims = self.map.get_size()
     spawn = [randint(0, dims[0]), randint(0, dims[1])]
     client = {
         #system
         'ip': ip,
         'port': port,
         #advanced
         #'name': name,
         'color': color,
         'id': self.gen_id(),
         'pos': spawn,
         # Timestamp of the latest request, used for timeout garbage
         # collection.
         'last_req': tools.get_time()
     }
     self.clients.append(client)
示例#9
0
    def __setup(self, eyeshift=[-3.0, 3.0], stereo=False):
        """
        Initialize parameters for OpenGL and ready the
        texture for rendering

        :param eyeshift: horizontal eye offsets handed to the renderer
                         (NOTE(review): mutable default argument — safe
                         only while it is never mutated; confirm)
        :param stereo: whether the renderer runs in stereo mode
        """
        # Set the projection settings for OpenGL
        # self.projection = Projection(self.width, self.height, eyeshift)

        # Prepare OpenGL and display textures for rendering
        self.render = Render(self.width, self.height, eyeshift, stereo)

        # Create the Vertex Buffer Objects needed
        self.render.create_VBO(self.width, self.height)

        # Create the shaders as necessary
        self.render.create_shaders()

        # Initiate the variable that will hold the current image to
        # be rendered
        self.current_img = None

        # Instantiate the stimuli class and setup the order in which
        # to display the stimuli for practice or for the experimental
        # block
        self.stimulus = Stimuli()
        # Now setup the stimuli
        if hasattr(self, 'practice'):
            self.stimulus.setup(self.practice)
        else:
            self.stimulus.setup()
        # Ready the next stimuli block
        self.stimulus.run_state()

        # Keep a list of possible stimuli rotations about the Z-axis
        # self.z_rotation = [0]

        # Response hardware is only needed outside stimuli-only mode.
        if not STIMULI_ONLY:
            self.inclinometer = Inclinometer()
            self.inclinometer.set_offset_Y()
            self.slants = []

        print 'Setting up of the experiment took %s seconds'\
              % get_time(self.exp_start, time())
        # Run the state that is required
        self.run_state()
示例#10
0
    def __setup(self, eyeshift=[-3.0, 3.0], stereo=False):
        """
        Initialize parameters for OpenGL and ready the
        texture for rendering

        :param eyeshift: horizontal eye offsets handed to the renderer
                         (NOTE(review): mutable default argument — safe
                         only while it is never mutated; confirm)
        :param stereo: whether the renderer runs in stereo mode
        """
        # Set the projection settings for OpenGL
        # self.projection = Projection(self.width, self.height, eyeshift)

        # Prepare OpenGL and display textures for rendering
        self.render = Render(self.width, self.height, eyeshift, stereo)

        # Create the Vertex Buffer Objects needed
        self.render.create_VBO(self.width, self.height)

        # Create the shaders as necessary
        self.render.create_shaders()

        # Initiate the variable that will hold the current image to
        # be rendered
        self.current_img = None

        # Instantiate the stimuli class and setup the order in which
        # to display the stimuli for practice or for the experimental
        # block
        self.stimulus = Stimuli()
        # Now setup the stimuli
        if hasattr(self, 'practice'):
            self.stimulus.setup(self.practice)
        else:
            self.stimulus.setup()
        # Ready the next stimuli block
        self.stimulus.run_state()

        # Keep a list of possible stimuli rotations about the Z-axis
        # self.z_rotation = [0]

        # Response hardware is only needed outside stimuli-only mode.
        if not STIMULI_ONLY:
            self.inclinometer = Inclinometer()
            self.inclinometer.set_offset_Y()
            self.slants = []

        print 'Setting up of the experiment took %s seconds'\
              % get_time(self.exp_start, time())
        # Run the state that is required
        self.run_state()
示例#11
0
    def __compile_data(self, exp_ended=True):
        """
        Collect per-trial results into a dict and dump them as JSON to a
        unique file under ``self.directory``.

        :param exp_ended: True for a full session, False for a practice
                          run (changes the output filename prefix).
        """
        if exp_ended:
            total_exp_time = get_time(self.exp_start, time())
            print 'The experiment ran for %s seconds' % total_exp_time

        # One record per trial; truncated to the number of reaction
        # times actually recorded.
        compiled_data = {}
        for count, data in enumerate(self.data):
            if count < len(self.reaction_times):
                compiled_data[count] = {}
                compiled_data[count]['image'] = data[0]
                compiled_data[count]['height_ratio'] = data[1]
                compiled_data[count]['slant'] = data[2]
                compiled_data[count]['rt'] = self.reaction_times[count]
                compiled_data[count]['answer'] = self.slants[count]
            else:
                break

        if compiled_data:
            # Reset the buffers so the same data is not written twice.
            self.data = []
            self.reaction_times = []
            self.slants = []

            path = 'session'
            if not exp_ended:
                path = 'practice'
            # Append 'A', 'B', ... until an unused filename is found.
            ext = ''
            loop = 0
            while True:
                t = join(self.directory, '%s_%d%s.json' % (path,
                                                           self.session, ext))
                if not isfile(t):
                    path = t
                    break
                ext = chr(65 + loop)
                loop += 1

            # NOTE(review): 'wb' with json dump only works on Python 2.
            with open(path, 'wb') as f:
                dump(compiled_data, f)
示例#12
0
    def __compile_data(self, exp_ended=True):
        """
        Collect per-trial results into a dict and dump them as JSON to a
        unique file under ``self.directory``.

        :param exp_ended: True for a full session, False for a practice
                          run (changes the output filename prefix).
        """
        if exp_ended:
            total_exp_time = get_time(self.exp_start, time())
            print 'The experiment ran for %s seconds' % total_exp_time

        # One record per trial; truncated to the number of reaction
        # times actually recorded.
        compiled_data = {}
        for count, data in enumerate(self.data):
            if count < len(self.reaction_times):
                compiled_data[count] = {}
                compiled_data[count]['image'] = data[0]
                compiled_data[count]['height_ratio'] = data[1]
                compiled_data[count]['slant'] = data[2]
                compiled_data[count]['rt'] = self.reaction_times[count]
                compiled_data[count]['answer'] = self.slants[count]
            else:
                break

        if compiled_data:
            # Reset the buffers so the same data is not written twice.
            self.data = []
            self.reaction_times = []
            self.slants = []

            path = 'session'
            if not exp_ended:
                path = 'practice'
            # Append 'A', 'B', ... until an unused filename is found.
            ext = ''
            loop = 0
            while True:
                t = join(self.directory,
                         '%s_%d%s.json' % (path, self.session, ext))
                if not isfile(t):
                    path = t
                    break
                ext = chr(65 + loop)
                loop += 1

            # NOTE(review): 'wb' with json dump only works on Python 2.
            with open(path, 'wb') as f:
                dump(compiled_data, f)
示例#13
0
def reddit_save_blog():
    """Overwrite an existing blog post when the session user is allowed
    to edit it, then redirect back to the post."""
    Database.connect()
    blog_id = request.form['blogid']
    body = request.form['content']
    owner = Database.check_blog_haver(blog_id)

    # Resolve the acting user; a missing or expired login resets the
    # session credentials.
    session_alive = ("time" in session and
                     time.time() - int(escape(session['time'])) <
                     remember_login_time)
    if session_alive:
        user = escape(session['user'])
    else:
        session['user'] = '******'
        session['password'] = '******'
        user = '******'

    authority = Database.check_user(user, password='******', name_only=True)
    md_path = Database.get_blog(blog_id)

    # Only the post owner or an administrator (authority == 1) may save.
    if owner == user or authority == 1:
        with open(md_path, 'wb') as handle:
            handle.write(body.encode('utf-8'))
        date = get_time()
        parsed = get_content(body)
        Database.change_blog(blog_id, parsed, date)

    Database.close()
    return redirect('/md-editor/show/' + blog_id)
示例#14
0
文件: server.py 项目: mitcom/counter
def index():
    """Render the index page, pinning a per-client file cookie and
    processing a submitted moves form on POST."""
    form = MovesForm(request.form)
    resp = make_response(
        render_template('index.html', **{'form': form, 'ts': get_time()}))

    # resp.set_cookie('file', expires=0)
    cookie_file = request.cookies.get('file')
    print(request.remote_addr, cookie_file)

    # First visit: mint a fresh filename and remember it client-side.
    if cookie_file is None:
        cookie_file = random_filename()
        resp.set_cookie('file', cookie_file)

    target = 'static/generated/' + cookie_file

    if request.method == 'POST':
        payload = form.data
        ff_type = pop_ff(payload)
        remove_items(payload)
        # decide() returns an error response on failure, None on success.
        failure = decide(payload, ff_type, target)
        if failure is not None:
            return failure

    return resp
示例#15
0
def initparam():
    """ 初始化参数
    """
    params = OrderedDict()

    print "【从前端获得参数】 %s" % str(request.values)
    print "【正在处理参数】"

    # 【开始时间】
    params["start_time"] = request.form.get("startTime", type=str)
    if is_timestr(params['start_time']) is False:
        params['start_time'] = get_time()
    print "\t[开始时间] %s" % params["start_time"]

    # 【结束时间】
    params["end_time"] = request.form.get("endTime", type=str)
    if is_timestr(params['end_time']) is False:
        params['end_time'] = "未设置" # 不结束
    print "\t[结束时间] %s" %params["end_time"]

    # 【时间间隔】
    params["interval"] = request.form.get("interval", type=int)
    params['step'] = params['interval'] / 5  # 步长
    print "\t[时间间隔] %d" % params["interval"]
    print "\t[保存图片的步长] %d" % params["step"]

    # 【处理点】
    points = {
        'center_point' : request.form.get("centerPoint", type=str),
        'north_west_point' : request.form.get("northWestPoint", type=str),
        'north_east_point' : request.form.get("northEastPoint", type=str),
        'south_east_point' : request.form.get("southEastPoint", type=str),
        'south_west_point' : request.form.get("southWestPoint", type=str)
    }
    print '\t[点] %s' % str(points)
    for point_name,value in points.items():
        params[point_name] = get_latlng(
            value
        )

    # 【保存文件夹】
    params["save_file_dir"] = request.form.get("saveFileDir", type=str)
    print "\t[保存文件夹] %s" % params["save_file_dir"]
    save_dir = params["save_file_dir"]
    if not os.path.exists(save_dir):
        # 选择的文件夹不存在
        print "\t【WARNING】 选择的文件夹不存在,跳转到初始页面"
        print "【params】" + str(params)
        return redirect_index("文件夹路径不存在,请重新输入!")
    # # 【输出文件夹】
    out_dir = os.path.join(save_dir, params['start_time'])
    params['out_dir'] = out_dir
    print "\t[图像输出文件夹] %s" % params["out_dir"]
    if os.path.exists(out_dir):
        # 输出文件夹已经存在
        print "【params】" + str(params)
        return redirect_index("输出文件夹已经存在,%s<Br/>请重新输入文件夹!" % out_dir)
    os.makedirs(out_dir)

    # 项目备注
    params["remark"] = request.form.get("remark")
    print "\t[项目备注] %s" % params["remark"]

    img_len , request_points = init_crawler(params) #计算每次请求的中心点
    print request_points
    if img_len==0:
        return "抱歉,任务失败!<br/>框选区域没有雷达降水图。"
    else:
        # 初始化文件夹
        init_dir(params, request_points)

        # 保存爬取参数
        print "【params】" + str(params)
        save_json(out_dir, u"0 param - 爬取参数", params) #保存成json
        file_str = save_params_file(params) #保存给用户看
        html_str = file_str.replace('\n', '<br/>') #输出的HTML字符串

        # 开启任务,异步进程
        executor.submit(
            start(params, request_points)
        )

        return '任务已在后台运行!<br/>每次需要爬取{}张图片!<br/>{}'.format(img_len, html_str)
示例#16
0
        self.dev_per_batch = 5000  # 多少轮验证一次

        self.train_path = './tfrecord/train.record'
        self.dev_path = './tfrecord/dev.record'
        self.num_epochs = 1000

    def get_all_number(self):
        """Return a snapshot of all instance attributes as a dict.

        A shallow copy of ``vars(self)``, so callers can add or remove
        keys without mutating the config object's ``__dict__``.
        """
        return dict(vars(self))


if __name__ == '__main__':
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Timestamp used to name this run's checkpoint directory.
    now_tim = get_time()

    # Checkpoint to warm-start from (ALBERT-tiny pretrained weights).
    l_path = './save/albert_tiny/albert_model.ckpt'
    # l_path = None
    # l_path = './save/20191207203554/model.ckpt'

    # Where the newly trained model will be saved.
    path = './save/%s/model.ckpt' % (now_tim)

    config = Config()
    oj = Dee(config)
    # Make sure the per-run save directory exists before training.
    if not os.path.exists('./save/%s/' % (now_tim)):
        os.makedirs('./save/%s/' % (now_tim))

    # Start training.
    oj.train(l_path, path, "./data/log/", True)
示例#17
0
def main(epochs: int, learning_rate: float) -> None:
    """Train ``net`` for ``epochs`` epochs, evaluating after each one.

    Relies on module-level state: ``net``, ``train_data``, ``test_data``,
    ``config_device``, ``args``/``vis``/``env_name`` (optional visdom
    plotting) and the ``best_*`` globals that track the best checkpoint.
    """
    global best_acc
    global best_path
    global note
    global best_state

    criterion = nn.CrossEntropyLoss().to(config_device)
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    # optimizer = optim.SGD(net.parameters(), lr=learning_rate)
    # Shrink the LR by 5% whenever the test loss plateaus for 3 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode='min',
                                                     factor=0.95,
                                                     patience=3,
                                                     verbose=True,
                                                     min_lr=3e-9)
    # Warm-up: scale the LR down to 1% during the first epochs.
    warmup_scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                                   lr_lambda=lambda ep: 1e-2
                                                   if ep < 3 else 1)

    loss = 0.0
    train_loss = 0.0

    global_batch_idx = 1
    acc_all = []
    test_best_loss = 1e5

    def evaluate(now_ep) -> "tuple[float, float]":
        # Evaluate on test_data; returns (accuracy, mean batch loss).
        test_loss = 0.0
        logging('starting evaluate!')
        net.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            for (X, label) in tqdm(test_data):
                X, label = X.to(config_device), label.to(config_device)
                logits = net(X)
                test_loss += criterion(logits, label).item()
                predict = logits.argmax(dim=1)
                correct += predict.eq(label).float().sum().item()
                total += X.size(0)

        correct /= total
        test_loss /= len(test_data)
        logging(
            f'epoch {now_ep} evaluate done! test acc {correct:.5f}, best acc{best_acc:.5f}, test batch loss {test_loss:.5f}'
        )
        return correct, test_loss

    for ep in range(epochs):
        logging(f'epoch {ep} start!')
        net.train()
        train_loss = 0.0
        for batch_idx, (X, label) in enumerate(tqdm(train_data)):
            X, label = X.to(config_device), label.to(config_device)
            logits = net(X)
            loss = criterion(logits, label)

            train_loss += loss.item()
            global_batch_idx += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Periodic progress log with the running mean batch loss.
            if global_batch_idx % 2000 == 0:
                logging(
                    f'global batch {global_batch_idx}, loss is {train_loss/(batch_idx+1):.5f}'
                )

        train_loss /= len(train_data)
        logging(
            f'epoch {ep} train done! averge train loss is {train_loss:.5f}')

        ep_test_acc, test_loss = evaluate(ep)
        acc_all.append(ep_test_acc)
        # Warm-up scheduler for the first 4 epochs, then plateau-based.
        if ep < 4: warmup_scheduler.step(ep)
        else: scheduler.step(test_loss, epoch=ep)

        # Optional live plotting of losses and accuracy via visdom.
        if args.verbose:
            vis.line(X=[ep + 1],
                     Y=[train_loss],
                     env=env_name,
                     win='loss',
                     name='train_loss',
                     update='append')
            vis.line(X=[ep + 1],
                     Y=[test_loss],
                     env=env_name,
                     win='loss',
                     name='test_loss',
                     update='append')
            vis.line(X=[ep + 1],
                     Y=[ep_test_acc],
                     env=env_name,
                     win='acc',
                     name='test_acc',
                     update='append')

        # Track the best model by accuracy; test loss breaks ties.
        if ep_test_acc > best_acc or (ep_test_acc == best_acc
                                      and test_loss < test_best_loss):
            best_acc = ep_test_acc
            test_best_loss = test_loss
            best_path = config_model_save_path[dataset_name].format(
                net.model_name, best_acc, get_time(), note)
            best_state = copy.deepcopy(net.state_dict())

        # Checkpoint the best state three times over the run.
        # NOTE(review): epochs // 3 raises ZeroDivisionError when
        # epochs < 3 — confirm callers always pass epochs >= 3.
        if (ep + 1) % (epochs // 3) == 0 and best_state:
            logging(f'saving model in {best_path} best acc {best_acc:.5f}')
            torch.save(best_state, best_path)
            best_state = None

    # Average test accuracy over the second half of training.
    count = epochs - epochs // 2
    acc_all = sum(acc_all[epochs // 2:])
    acc_all /= count
    if best_state is not None: torch.save(best_state, best_path)
    # NOTE(review): this divides by len(test_data) although train_loss
    # was already averaged over len(train_data) — the reported value
    # looks wrong; confirm intent before relying on it.
    logging(
        f'train {epochs} done! The last train loss is {train_loss/len(test_data):.5f}!'
    )
    logging(
        f'The last {count} epoch test average acc is {acc_all:.5f}, best acc is {best_acc:.5f}\n'
        f'best model saved in {best_path}')
示例#18
0
from Database import Database
from tools import get_time, save_md

# Smoke-test script: insert ten dummy blog posts and list them all.
d = Database()
d.connect()

# Fixed author for every generated post.
auth = "Stan"

# Each post's title and content are "测试<i>" ("test <i>").
for i in range(10):
    c = "测试" + str(i)
    title = c
    create_date = get_time()
    # Write the markdown file and record its path in the database.
    path = save_md(c, auth)
    d.add_blog(title, auth, create_date, c, path)
d.show_all_blog()
d.close()
示例#19
0
# Optional live plotting: connect to a local Visdom server and create
# the rate chart (accuracy / attack failure / attack success).
if args.verbose:
    vis = Visdom(port=15555)
    assert vis.check_connection()
    opts = dict(title='rate',
                xlabel='nth',
                legend=['acc', 'att_failure_rate', 'att_success_rate'])
    vis.line([0], [0], win='rate', env=env_name, opts=opts)

# Per-run output folder for attack results.
output_dir = f'./static/{args.dataset}/foolresult/{args.attack}/{args.model}/'
make_dir_if_not_exist(output_dir)

dataset = args.dataset
attack_method = args.attack
sub_rate_limit = args.sub_rate_limit
# NOTE(review): get_time() is called twice, so csv_path and adv_path may
# carry different timestamps if the calls straddle a time boundary.
csv_path = output_dir + args.note + '_' + get_time() + '_log.csv'
adv_path = output_dir + args.note + '_' + get_time() + '_adv.txt'
clean_path = config_data[dataset].clean_1k_path
model = args.model

tokenizer = Tokenizer()
# Adversarially-trained models also load their adversarial training set.
train_paths = [config_data[dataset].train_data_path]
if 'adv' in model:
    train_paths.append(
        config_data[dataset].adv_train_path[model.split('_')[0]])
train_data = MyDataset(config_dataset,
                       tokenizer,
                       is_train=True,
                       data_path=train_paths)
# Vocabulary built from the training tokens, capped per dataset config.
vocab0 = Vocab(train_data.data_token,
               vocab_limit_size=config_data[dataset].vocab_limit_size)