Example #1
import time

def dostuff(num):
    # Time how long it takes to count up to num.
    start = time.time()
    x = 0
    for x in range(num):
        x = x + 1
    end = time.time()

    print("totaltime =", end - start)

    return x
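Note that the wall-clock timer in the standard library is time.time(); for timing a loop like the one above, time.perf_counter() is the recommended clock because it is monotonic and has the highest available resolution. A minimal sketch using only the standard library (the summed range is just placeholder work):

import time

start = time.perf_counter()
total = sum(range(1_000_000))   # the work being timed
elapsed = time.perf_counter() - start
print("elapsed: {:.6f} s".format(elapsed))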
Example #2
    def check_for_surge(self, bytes_received, packets_received):
        # A surge starts when the received byte rate exceeds the configured threshold.
        if bytes_received > SwitchStats.surge_threshold / SwitchStats.polling_interval:
            end_timestamp = time.time()
            surge_data = {
                'datapath': self.datapath,
                'start_timestamp': end_timestamp - SwitchStats.polling_interval,
                'end_timestamp': end_timestamp,
                'packets_received': packets_received,
                'bytes_received': bytes_received
            }
            if self.current_surge is not None:
                self.current_surge.extend(surge_data)
            else:
                self.current_surge = TrafficSurge(surge_data)

            return self.current_surge
        else:
            if self.current_surge is not None:
                # The surge just ended: close it, return a snapshot, and reset.
                self.current_surge.end()
                surge = copy.copy(self.current_surge)
                self.current_surge = None
                return surge
            else:
                return None
Example #3
def json_detection():
    """
    Returns JSON with classes found in the images
    """
    raw_images = []
    image_names = []
    images = request.files.getlist("images")
    for image in images:
        image_name = image.filename
        image_names.append(image_name)
        image.save(os.path.join(os.getcwd(), image_name))
        img_raw = tf.image.decode_image(
            open(image_name, 'rb').read(), channels=3
        )
        raw_images.append(img_raw)

    num = 0

    # list for the final response
    response = []
    for i in range(len(raw_images)):
        # list of detections for the current image
        responses = []
        raw_img = raw_images[i]
        num += 1
        img = tf.expand_dims(raw_img, 0)
        img = transform_images(img, size)

        time1 = time.time()
        boxes, scores, classes, nums = yolo(img)
        time2 = time.time()
        print("Time: {}".format(time2 - time1))

        print("Detections:")
        for j in range(nums[0]):
            print("\t{}, {}, {}".format(class_names[int(classes[0][j])],
                                        np.array(scores[0][j]),
                                        np.array(boxes[0][j])))

            responses.append({
                "class": class_names[int(classes[0][j])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][j]) * 100))
            })

        response.append({
            "image": image_names[i],
            "detections": responses
        })
Example #4
def Scan(a, m, n):
    # Time is assumed to be a project-specific timer class providing Time() and Check().
    time = Time()
    count = 0
    # Count bright pixels (value >= 200) in the window starting at (m, n);
    # note that range(m, m + 64 + 1) covers 65 indices.
    for i in range(m, m + 64 + 1):
        for j in range(n, n + 64 + 1):
            if a[i, j] >= 200:
                count += 1
    if count >= 64 * 64 / 2:
        time.Time()
        # Report a detection only if the timer check passes and the two
        # previous scans did not already report one.
        if time.Check() and not x[-1] and not x[-2]:
            x.append(True)
        else:
            x.append(False)
    else:
        x.append(False)
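The nested loops above check one pixel at a time; when a is a NumPy array, the same count can be obtained with a single vectorized expression. A sketch under that assumption, with a placeholder array:

import numpy as np

a = np.random.randint(0, 256, size=(256, 256))   # placeholder image
m, n = 10, 20

# Same inclusive window as the loops above:
# range(m, m + 64 + 1) covers 65 indices, i.e. the slice a[m:m + 65].
count = np.count_nonzero(a[m:m + 65, n:n + 65] >= 200)
print(count)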
Example #5
def handleRequest(req):
    env = pack.unpackRequest(req)
    if env is False:
        return False

    head = env.Head
    cur_time = head.Time
    appkey = head.AppKey[:8]
    #
    # The appsecret can be looked up from the game table by appkey
    #
    appsecret = appkey
    secret = binascii.unhexlify(appsecret)

    if head.Platform != common.PT_ANDROID and head.Platform != common.PT_IOS:
        env.Head.Result = common.MP_ERR_PLATFORM
        return pack.packRequest(env, "")

    key = pack.GetKey(secret, cur_time, head.Platform)
    result = ymcoder.mpDecode(env.ReqBody, key)
    if result is False:
        env.Head.Result = common.MP_ERR_DECODE
        return pack.packRequest(env, "")

    # zlib.decompress raises zlib.error on failure instead of returning False.
    try:
        body = zlib.decompress(result)
    except zlib.error:
        env.Head.Result = common.MP_ERR_UNPACK
        return pack.packRequest(env, "")

    env.HeadExt.readFrom(body)
    env.ReqBody = body[env.HeadExt.Len:]

    retenv = handleProcess(env)
    retenv.Head.Time = int(time.time())
    key = pack.GetKey(secret, retenv.Head.Time, head.Platform)
    return pack.packRequest(env, key)
Example #6
if len(contours) != 0:
    #Draw in blue the found contours
    cv2.drawContours(output, contours, -1, 255, 3)

    #Find the largest contour
    c = max(contours, key=cv2.contourArea)
    #print str(cv2.contourArea(c))

    #The contour must be at least this big to count as the ball; if the threshold is too small, noise may be detected when no ball is present
    if cv2.contourArea(c) > 525:
        #Draw contour with circle
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        diameter = radius * 2
        radius = int(radius)
        cv2.circle(output, center, radius, (0, 255, 0), 2)
        #Draw contour with a rectangle
        '''
        x, y, w, h = cv2.boundingRect(c)
        #Draw contour in green
        cv2.rectangle(output, (x,y),(x+w, y+h), (0, 255, 0),2)
        '''

#Show the images
endTime = time.time()
print("Time taken: " + str(endTime - startTime))
cv2.imshow("Ball", np.hstack([pImg, output]))
cv2.waitKey(0)  # OpenCV for Linux has a bug and needs this line
cv2.destroyAllWindows()
Example #7
def set_expstart(table):
    from astropy import time
    mjd = time.Time(table['t_min'], format='mjd')
    table['expstart'] = mjd.iso
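Unlike the other examples on this page, this one uses astropy, where time.Time is a class: the call builds Time objects from the MJD values in t_min, and .iso formats them as ISO strings. A standalone sketch of the same conversion (the value 59000.0 is only an illustrative MJD):

from astropy.time import Time

mjd = Time(59000.0, format='mjd')
print(mjd.iso)   # 2020-05-31 00:00:00.000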
Example #8
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    if args.no_partialbn:
        model.module.partialBN(False)
    else:
        model.module.partialBN(True)

    # Switch to training mode.
    # Note: this calls the model's overridden train() method so that the BN layer parameters stay frozen.
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # Measure the data loading time.
        # enumerate(train_loader) first calls DataLoader.__iter__, which in turn runs the DataLoaderIter initializer __init__.
        # Each pass of the for loop then calls DataLoaderIter.__next__, which reads self.dataset through self.collate_fn; that is when TSNDataSet.__getitem__ is invoked to load the data.
        data_time.update(time.time() - end)

        # Once the data is read, convert the Tensors to Variables and run the model's forward pass.
        # To compute on CUDA, the operands must be placed in GPU memory.
        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # Compute the output.
        # output: batch size * number of classes.
        # The model's custom forward() function is invoked automatically.
        output = model(input_var)
        loss = criterion(output, target_var)

        # Compute the accuracy and the total loss for the videos.
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # Zero the gradient buffers of all parameters.
        optimizer.zero_grad()

        # autograd.Variable is the core class of this package.
        # It wraps a Tensor and supports almost all operations defined on it; once the computation is finished, calling .backward() computes all gradients automatically.
        # Backward pass: gradients are computed automatically.
        loss.backward()

        if args.clip_gradient is not None:
            total_norm = clip_grad_norm(model.parameters(), args.clip_gradient)
            if total_norm > args.clip_gradient:
                print("clipping gradient: {} with coef {}".format(
                    total_norm, args.clip_gradient / total_norm))

        # Apply the parameter update.
        optimizer.step()

        # Measure the time spent on this batch.
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print(('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                   'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                   'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                   'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       epoch,
                       i,
                       len(train_loader),
                       batch_time=batch_time,
                       data_time=data_time,
                       loss=losses,
                       top1=top1,
                       top5=top5,
                       lr=optimizer.param_groups[-1]['lr'])))
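AverageMeter is not defined in the snippet above; it is assumed to be the usual running-average helper from the PyTorch ImageNet example, which tracks the latest value and a weighted running average. A minimal sketch of such a class:

class AverageMeter(object):
    """Stores the latest value and a running average."""

    def __init__(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count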
Example #9
    def __init__(self, surge_data):
        super(EventTrafficSurgeEnd, self).__init__()
        self.timestamp = time.time()
        self.surge_data = surge_data
Example #10
    def Updater(self):
        # Update the displayed time and reschedule this callback every 50 ms.
        self.nextTime = time.time() - self.startTime
        self.SetTime(self.nextTime)
        self.timer = self.after(50, self.Updater)
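The Updater above assumes a Tk widget: self.after(50, ...) reschedules the callback every 50 ms, and SetTime is presumably a method of the surrounding class that refreshes the display. A self-contained sketch of the same pattern (the Stopwatch class and its label are illustrative, not part of the original code):

import time
import tkinter as tk

class Stopwatch(tk.Frame):
    """Minimal stopwatch that reschedules itself with after(), as in the example above."""

    def __init__(self, master=None):
        super().__init__(master)
        self.pack()
        self.label = tk.Label(self, text="0.00")
        self.label.pack()
        self.startTime = time.time()
        self.Updater()

    def Updater(self):
        # Show the elapsed time and run again in 50 ms.
        elapsed = time.time() - self.startTime
        self.label.config(text="{:.2f}".format(elapsed))
        self.timer = self.after(50, self.Updater)

root = tk.Tk()
Stopwatch(root)
root.mainloop()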
Example #11
    def ping(self):
        lc = time.time()