def distance(self):
    """Continuously measure distance with an HC-SR04 ultrasonic sensor.

    Runs forever (intended to be driven on its own thread): each iteration
    fires a 10 µs trigger pulse, times the echo pulse, converts the
    round-trip time to centimetres, stores it in ``self.value`` and sleeps
    one second before the next measurement.  If either echo wait spins too
    long (sensor glitch / missed edge) the measurement is restarted by
    calling itself recursively.
    """
    while True:
        # 10 µs HIGH pulse on the trigger pin starts one measurement.
        # (fixed: GPIO.high -> GPIO.HIGH)
        GPIO.output(self.__trigpine, GPIO.HIGH)
        time.sleep(0.00001)
        GPIO.output(self.__trigpine, GPIO.LOW)

        startTime = time.time()
        stopTime = time.time()  # fixed: stoptTime / time.tiem typos

        cnt = 0

        # Wait for the echo pin to go HIGH: startTime = start of echo pulse.
        while GPIO.input(self.__echopin) == GPIO.LOW:
            cnt += 1
            startTime = time.time()
            if cnt > 100000:
                # Echo never arrived -- restart the measurement.
                return self.distance()  # fixed: self.distandce typo

        # Wait for the echo pin to go LOW: stopTime = end of echo pulse.
        while GPIO.input(self.__echopin) == GPIO.HIGH:
            cnt += 1
            stopTime = time.time()  # fixed: was overwriting startTime
            if cnt > 100000:
                return self.distance()

        # distance = elapsed * speed-of-sound/2 (343 m/s, there and back),
        # * 100 to convert metres to centimetres.
        during = stopTime - startTime
        dist = during * (343 / 2) * 100
        self.value = dist
        time.sleep(1)
# ===== Example #2 (示例#2) =====
# Run object detection over every batch and collect results for plotting.
imgs = []            # image paths, in batch order
img_detections = []  # per-image detections (as returned by NMS)

print('\n Performing object detection:')

prev_time = time.time()
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
    # Configure input
    input_imgs = Variable(input_imgs.type(tensor))

    # Get detections (no gradients needed at inference time)
    with torch.no_grad():
        detections = model(input_imgs)
        detections = non_max_suppression(detections, 80, opt.conf_thres, opt.nms_thres)

    # Log progress
    # (fixed: time.tiem typo; %-formatting needs a tuple, not `*batch_i`)
    current_time = time.time()
    inference_time = datetime.timedelta(seconds=current_time - prev_time)
    prev_time = current_time
    print('\t+ Batch %d,Inference Time: %s' % (batch_i, inference_time))

    # Save image paths and detections for the plotting pass below
    imgs.extend(img_paths)
    img_detections.extend(detections)

# Bounding-box colors.
# (fixed: a Matplotlib colormap object is *callable*, not subscriptable)
cmap = plt.get_cmap('tab20b')
colors = [cmap(i) for i in np.linspace(0, 1, 20)]

print('\nSaving images:')

# Iterate through images and save plot of detections
# ===== Example #3 (示例#3) =====
    # Connect to the database.
    # (fixed: pymysql >= 1.0 dropped positional connect() arguments)
    db = pymysql.connect(host='localhost', user='username',
                         password='password', database='ema')
    cursor = db.cursor()

    # Pick a survey to push based on the acoustic-pipeline outputs.
    survey_id = recommend_suid(speaker_id='123', mood_id='H', scream=1, cry=1)
    current_time = time.time()
    last_time = 0
    struct_current_time = time.localtime(current_time)  # fixed: time.tiem typo
    # Prompt at most once every 5 minutes, and only between 10:00 and 21:59.
    if current_time - last_time >= 300 and (struct_current_time[3] < 22
                                            and struct_current_time[3] > 9):
        last_time = current_time
        # based on recommended survey_id, form url to trigger phone buzz, using other parameters
        send_rec(phone_url='http://191.168.0.106:2226',
                 speaker_id='2',
                 survey_id=str(survey_id),
                 server_url='http://191.168.0.107/ema/ema.php',
                 androidid='db7d3cdb88e1a62a',
                 empathid='999|1550004755',
                 alarm=True)
        # Poll for an answer for up to 5 minutes.  (fixed: time.tiem typo)
        while time.time() - current_time < 300:
            # NOTE(review): 'empathid' and 'QID' are literal words inside the SQL
            # text here -- they look like they were meant to be bound parameters
            # (e.g. "... where primkey = %s AND variablename = %s"); confirm.
            query = "SELECT answer FROM ema_data where primkey = empathid AND variablename = QID"
            cursor.execute(query)
            data = cursor.fetchall()  # fixed: execute() returns a row count, not rows
            print(data)               # fixed: Python-2 print statement
            time.sleep(5)             # avoid hammering the DB in a hot loop

    db.close()
# Outputs from acoustic pipeline
# line 1: speaker ID. possible value: 0, 1, 2. 0 denotes speaker #1, 1 denotes speaker #2, 2 denotes un-identifiable speaker.
# line 2: mood from the audio clip. possible value: H, A, N, S, standing for happy, angry, neutral, sad respectively.
# line 3: scream. possible value: 0, 1. 0 represents that screaming is not detected. 1 represents screaming is detected.
# line 4: cry. possible value: 0, 1. 0 represents that crying is not detected. 1 represents crying is detected.
      p_soup = BeautifulSoup(p_c,'html.parser')
      p_content = p_soup.find('dis',{'class':'list-cont'})
      pageCar = []
      for car in p_content:
               carDic = {}
               carDic['picUrl'] = car.find('div',{'class':'list-cont-img'}).find('img')['src']
               carDic['name'] = car.find('div',{'class': 'list-cont-main}).find('a').txt
                try:
                       carDic['score'] = car.find('spam',{'class':'score-nu,ber'}).txt
                 except Exception as e:
                        carDic{'score'} = ''
                 pageCar.append(carDic)
      return pageCar

# Entry point: crawl all pages in parallel and count the collected cars.
if __name__ == '__main__':  # fixed: '=' -> '==' and '__mian__' -> '__main__'
    t1 = time.time()  # fixed: time.tiem typo
    cars = []  # accumulator -- was referenced below but never initialised here
    pool = mp.Pool()
    pool.map(main, [i * 10 for i in range(10)])
    # fixed: apply_async{...) syntax error; args must be a tuple
    multi_res = [pool.apply_async(crawl_page, (url,)) for url in urls]
    pageCars = [res.get() for res in multi_res]

    for pageCar in pageCars:
        # fixed: loop variable was 'cat' while the body appended 'car'
        for car in pageCar:
            cars.append(car)
    print(len(cars))
    t2 = time.time()
    print(t2 - t1)
'''
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
# ===== Example #5 (示例#5) =====
def _crop_one_dir(dirname, filenames, count, image_high, image_low):
    """Crop 33x33 high/low-resolution patch pairs from every image in one directory.

    For each image, a degraded version is simulated by bicubic down- then
    up-scaling (factor ``scaling_factor``).  Patches are taken on a grid with
    step ``stride``.  Every 50000 patches the accumulated arrays are flushed
    to numbered ``.npy`` files and reset (the leading row is a zero
    placeholder and is always dropped on save).

    Relies on module-level ``imread``, ``imresize``, ``scaling_factor``,
    ``f`` and ``stride`` -- assumed defined elsewhere in this file.

    Returns the updated ``(count, image_high, image_low)``.
    """
    for i in range(len(filenames)):
        I = imread(dirname + '\\' + filenames[i])
        h, w, c = I.shape
        # Simulated low-resolution input: downscale then upscale (bicubic).
        I_low = imresize(I, (int(h / scaling_factor), int(w / scaling_factor)),
                         'bicubic')
        I_low = imresize(I_low, (h, w), 'bicubic')
        x = int(np.floor((h - f) / stride) + 1)
        y = int(np.floor((w - f) / stride) + 1)
        for p in range(x):
            for q in range(y):
                im = I[p * stride:p * stride + 33,
                       q * stride:q * stride + 33, :].reshape(1, 33, 33, 3)
                im_low = I_low[p * stride:p * stride + 33,
                               q * stride:q * stride + 33, :].reshape(
                                   1, 33, 33, 3)
                image_high = np.concatenate((image_high, im), axis=0)
                image_low = np.concatenate((image_low, im_low), axis=0)
                count = count + 1
                if count % 10000 == 0:
                    print('we have already cropped {} pictures'.format(count))
                    if count % 50000 == 0:
                        # Flush a numbered chunk and reset the accumulators.
                        # (fixed: later copies wrote 'image_high.npy' + float index)
                        chunk = int(count / 50000)
                        np.save('image_high' + str(chunk) + '.npy',
                                image_high[1:, :, :, :])
                        np.save('image_low' + str(chunk) + '.npy',
                                image_low[1:, :, :, :])
                        # 3267 = 33 * 33 * 3: one zero placeholder patch.
                        image_high = np.zeros(3267).reshape(1, 33, 33, 3)
                        image_low = np.zeros(3267).reshape(1, 33, 33, 3)
                        print(dirname + str(i))
    return count, image_high, image_low


def crop_image(count, image_high, image_low):
    """Crop training patches from the three database directories.

    The per-directory loop was copy-pasted three times in the original;
    it is factored into ``_crop_one_dir``.  Remaining (unflushed) patches
    are saved at the end, and the elapsed wall-clock time is returned.

    Returns:
        (count, elapsed_seconds)
    """
    t0 = time.time()
    count, image_high, image_low = _crop_one_dir(
        'database', data1, count, image_high, image_low)
    count, image_high, image_low = _crop_one_dir(
        'database1', data2, count, image_high, image_low)
    count, image_high, image_low = _crop_one_dir(
        'database2', data3, count, image_high, image_low)
    # Save whatever is left over (dropping the zero placeholder row).
    np.save('image_high.npy', image_high[1:, :, :, :])
    np.save('image_low.npy', image_low[1:, :, :, :])
    t1 = time.time()  # fixed: time.tiem typo
    return count, t1 - t0
# ===== Example #6 (示例#6) =====
    def train2(self, loops):
        """Run up to `loops` two-site DMRG-style training sweeps.

        Each sweep performs a right-to-left pass and a left-to-right pass
        over neighbouring tensor pairs (merge -> gradient step -> SVD split),
        then updates the extra tensor pairs listed in ``self.links``, and
        finally evaluates the mean log-likelihood over the ``self.m`` samples
        to decide whether to continue.

        Args:
            loops: maximum number of sweeps; training stops early once the
                likelihood stops improving by more than 2e-5.
        """
        for loop in range(loops):
            t1 = time.time()  # fixed: time.tiem typo
            # Right-to-left sweep.
            self.going_righ = 0
            for i in np.arange(self.n - 2, 0, -1):

                self.current_site = i
                # Merge the two neighbouring site tensors into one.
                self.merged_tensor, self.merged_idx = contra(
                    self.tensors[self.current_site],
                    self.order_left[self.current_site],
                    self.tensors[self.current_site + 1],
                    self.order_left[self.current_site + 1])
                self.Z = self.compute_Z()
                self.psi = self.compute_psi2()
                dmerge = self.gradient_descent2()
                # Split the updated merged tensor back into two sites via SVD,
                # truncating singular values below 1e-6 / bond above max_bond.
                self.tensors[self.current_site], self.tensors[
                    self.current_site + 1] = svd_update(
                        self.tensors[self.current_site],
                        self.order[self.current_site],
                        self.tensors[self.current_site + 1],
                        self.order[self.current_site + 1], dmerge,
                        self.going_righ, 1e-6, self.max_bond)
                self.contraction_updat_twosite2()

            # Left-to-right sweep.
            self.going_righ = 1
            for i in range(self.n - 2):
                self.current_site = i
                self.merged_tensor, self.merged_idx = contra(
                    self.tensors[self.current_site],
                    self.order_left[self.current_site],
                    self.tensors[self.current_site + 1],
                    self.order_left[self.current_site + 1])
                self.Z = self.compute_Z()
                self.psi = self.compute_psi2()
                dmerge = self.gradient_descent2()
                self.tensors[self.current_site], self.tensors[
                    self.current_site + 1] = svd_update(
                        self.tensors[self.current_site],
                        self.order[self.current_site],
                        self.tensors[self.current_site + 1],
                        self.order[self.current_site + 1], dmerge,
                        self.going_righ, 1e-6, self.max_bond)
                self.contraction_updat_twosite2()
            # Update the additional linked tensor pairs (looser 2e-6 cutoff).
            for j in range(len(self.links)):
                self.Z = self.compute_Z()
                self.psi = self.compute_psi2()
                k0 = self.links[j][0]
                k1 = self.links[j][1]
                dmerge = self.gradient_descent25(k0, k1)
                self.tensors[k0], self.tensors[k1] = svd_update(
                    self.tensors[k0], self.order[k0], self.tensors[k1],
                    self.order[k1], dmerge, self.going_righ, 2e-6,
                    self.max_bond)
                self.contraction_update_all_left2()
            # Mean of log(psi^2 / Z) over samples.  NOTE(review): despite the
            # name 'nll', larger is better here (it is appended while it keeps
            # increasing) -- presumably a mean log-likelihood; confirm.
            nll = 0
            for k in range(self.m):
                nll = nll + (torch.log(self.psi[k] * self.psi[k] / self.Z))
            nll = nll / self.m
            print(nll)  # fixed: Python-2 print statement
            if nll > self.nll_history[-1] + 2e-5:
                self.nll_history.append(nll)
            else:
                break
            t2 = time.time()
            print(t2 - t1)  # fixed: Python-2 print statement