def _xml_upload_path(instance, filename):
    """upload_to callback: dated result directory + the analysis DB name + '.xml'.

    Bug fix: Django's migration serializer cannot serialize lambdas, so the
    inline ``upload_to=lambda ...`` callbacks are replaced with these
    module-level functions (same computed paths).
    """
    # st is strftime-style here: expands %Y/%m/%d with the current date.
    return st('resultfiles/%Y/%m/%d/') + instance.analysis.dbname.fulldbname() + '.xml'


def _fig_upload_path(instance, filename):
    """upload_to callback: dated result directory + the analysis DB name + '.png'."""
    return st('resultfiles/%Y/%m/%d/') + instance.analysis.dbname.fulldbname() + '.png'


class AnalysisResults(models.Model):
    """One-to-one linked with Analysis. Info for post-processing."""
    analysis = models.OneToOneField(Analysis)
    # Result artifacts written by the analysis pipeline.
    xmlFile = models.FileField(upload_to=_xml_upload_path)
    figFile = models.ImageField(upload_to=_fig_upload_path)
def main():
    """Entry point: clear the terminal, prompt for Instagram credentials,
    and start the InstaFollow bot."""
    os.system('clear')
    user_prompt = '\033[44m' + st('%H:%M:%S') + ' ' + '[!] Usuário: ' + '\033[0;0m'
    user = input(user_prompt)
    pass_prompt = '\033[41m' + st('%H:%M:%S') + ' ' + '[!] Senha: ' + '\033[0;0m'
    passwd = getpass.getpass(pass_prompt)
    bot = InstaFollow(user, passwd)
    os.system('clear')
    bot.load()
def __init__(self, X, y, task, method_list, grid, logfile):
    """Configure a model-search run.

    Inputs
    ------
    X: DataFrame or array, explanatory vars / features / predictors
    y: Series or array, target
    task: str, 'binary', 'multiclass', or 'regression'
    method_list: list, see tallant.machine_learning.grids
    grid: dict, see tallant.machine_learning.grids
    logfile: str, log file name; falsy values fall back to
        '<task>_models_<month_day_year>.log'

    Output
    ------
    DataFrame and log of every model (method, params, evaluation metrics)

    ToDo
    ----
    Time-Series holdouts. Built in CV.
    '''
    """
    self.task = task
    self.method_list = method_list
    self.grid = grid
    self._make_data(X, y)
    # Bug fix: the original accepted `logfile` but unconditionally
    # overwrote it with the generated default. Honor the caller's value
    # and only fall back to the dated default when none is given.
    self.logfile = logfile or '{}_models_{}.log'.format(self.task, st('%m_%d_%Y'))
    self.logger = self._get_logger()
    self.report = self._init_output()
def add(self, bro, page_select):
    # Follow users found on up to the first 20 profiles of `page_select`,
    # using the already-logged-in selenium driver `bro`.
    # NOTE(review): relies on hard-coded Instagram CSS class names in the
    # XPaths, which break whenever Instagram ships a new frontend build.
    pages = page_select[0:20]
    added = 1  # running counter shown in the progress line
    for page in pages:
        try:
            bro.get('https://www.instagram.com/' + page + '/')
            slp(self.tempo3 * self.lag)
            # Open the first post on the profile.
            bro.find_elements_by_xpath("//div[@class='_e3il2']")[0].click()
            slp(self.tempo2 * self.lag)
            try:
                bro.find_elements_by_xpath(
                    "//a[@class='_nzn1h _gu6vm']")[0].click()
            except:
                # Fallback for an alternate class name of the same link
                # (presumably an older/newer frontend build — confirm).
                bro.find_elements_by_xpath(
                    "//a[@class='_nzn1h']")[0].click()
            slp(self.tempo2 * self.lag)
            # All follow buttons visible in the opened list.
            users = bro.find_elements_by_xpath(
                "//button[@class='_qv64e _gexxb _4tgw8 _njrw0']")
            slp(self.tempo2 * self.lag)
            random.shuffle(users)
            for user in users:
                user.click()
                # Random 1-3s pause between clicks to look less bot-like.
                slp(random.randrange(1, 4))
                print(self.F1 + st('%H:%M:%S') + self.CE + '[' +
                      str(added) + ']' + self.C4 +
                      '[+] Adicionando usuários...', end='\r')
                added += 1
        except Exception as add_err:
            # Any failure on a page aborts the whole run after reporting
            # how many follows were clicked.
            print('\nAdicionados:', str(added))
            return
    print('\nAdicionados:', str(added))
def load(self):
    # Main bot loop: create a webdriver (retrying until it starts),
    # open Instagram and log in, then run `self.action` over a shuffled
    # profile list forever, with a countdown between rounds.
    while True:
        try:
            bro = self.driver()
            break
        except:
            # Driver startup can fail transiently; keep retrying.
            pass
    slp(self.tempo1 * self.lag)
    self.openinsta(bro)
    self.login(bro)
    while True:
        random.shuffle(self.profiles)
        page_select = self.profiles
        self.action(bro, page_select)
        # Countdown display (one tick per second) until the next round.
        for seconds in range(self.interval * self.lag + 1):
            restante = str(
                round(((self.interval * self.lag) - seconds) / 60))
            print(self.F4 + st('%H:%M:%S') + ' ' + self.C6 +
                  '[...] Processo concluído! Recomeçando em',
                  str(round(((self.interval * self.lag) - seconds) / 60)),
                  str('minuto.' if int(restante) < 2 else 'minutos.') +
                  self.CE,
                  end='\r')
            slp(1)
def take_pic(self, collection_name: str):
    # Continuously capture frames from the default webcam, detect faces,
    # save each face crop to devils_eye/target.jpeg, and recognize it via
    # Stalker against `collection_name`. First-seen names are appended to
    # self.names_list with a timestamp; press 'q' in the preview window
    # to stop and record attendance.
    capture = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier(
        'cascades/data/haarcascade_frontalface_alt2.xml')
    # Face crops are written next to this module, under 'devils_eye'.
    image_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'devils_eye')
    while True:
        ret, image = capture.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5,
                                              minNeighbors=5)
        for (x, y, w, h) in faces:
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = image[y:y + h, x:x + w]
            # Overwrites the same file each frame; Stalker reads it back.
            cv2.imwrite(os.path.join(image_dir, 'target.jpeg'), roi_color)
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            name = Stalker().run_stalker(collection_name, 'target.jpeg')
            print(name)
            if name in self.names_list:
                pass  # already recorded this person
            else:
                self.names_list.append(name)
                # NOTE(review): st('%H:%M:%S', gt()) — assuming gt is
                # time.gmtime, this records a UTC timestamp; confirm.
                self.time_list.append(st('%H:%M:%S', gt()))
        cv2.imshow('look_at_me', image)
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    Attendance(self.names_list, self.time_list).take_attendance()
def createwidgets():
    """Build the date and time labels on the global `root` window and
    start the periodic clock refresh via `update()`."""
    date_text = "DATE:- " + st("%d/%m/%Y")
    date_label = Label(root, text=date_text, font=("Helvetica", 50),
                       bg="skyblue", fg="white")
    date_label.grid(sticky='nw')
    root.date = date_label
    time_label = Label(root, font=("Helvetica", 100),
                       bg="skyblue", fg="white")
    time_label.grid(sticky='nw')
    root.time = time_label
    update()
def pngUpload(instance, filename):
    # Upload-path callback for a Django ImageField: dated 'resultfiles/'
    # directory + the analysis configuration's full DB name + '.png'.
    # NOTE(review): `st` behaves like time.strftime here — confirm import.
    return st('resultfiles/%Y/%m/%d/')+instance.analysis.configuration.fulldbname()+'.png'


class AnalysisResults(models.Model):
    """One-to-one linked with Analysis; stores post-processing result
    files. (Class body continues beyond this chunk — fields not shown.)"""
def _result_file_path(instance, extension):
    """Shared upload-path builder: dated 'resultfiles/%Y/%m/%d/' directory
    plus the analysis configuration's full DB name and `extension`.

    Factored out because xmlUpload and pngUpload duplicated the exact
    same path logic, differing only in the file extension.
    """
    return st('resultfiles/%Y/%m/%d/') + instance.analysis.configuration.fulldbname() + extension


def xmlUpload(instance, filename):
    """upload_to callback for the analysis result XML file."""
    return _result_file_path(instance, '.xml')


def pngUpload(instance, filename):
    """upload_to callback for the analysis result PNG figure."""
    return _result_file_path(instance, '.png')
avrWIFIrssi = [0 for i in range(len(timeintervals) - 1)] ############################ find time objects ################################# g = subprocess.Popen([ 'grep', '-E', 'bluetooth\|found|wifi\|connected|bluetooth\|found.*rssi|wifi\|connected.*rssi|battery\|level|timeinstates', '/usr/666Users/' + n ], stdout=subprocess.PIPE) lines = g.stdout.readlines() for i in range(len(lines)): try: tmatch = re.search('T(.+?)\.', lines[i]).group(1) except: continue try: x = st(tmatch.split(', ')[0], '%H:%M:%S') # convert the string to format timedelta time_currentline = td(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec) total_minute = int(time_currentline.total_seconds() / 60) timedelta_min = td(minutes=total_minute) except: print "failed to convert string to timedelta object for line: " + str( i) continue ############################ find Battery Level ################################# if 'battery' in lines[i]: BLmatch = re.findall( r"(?<!\d)\d{1}(?!\d)$|(?<!\d)\d{2}(?!\d)$|(?<!\d)\d{3}(?!\d)$", lines[i]) BL = int(BLmatch[0])
def pngUpload(instance, filename):
    """Return the dated upload path for an analysis result PNG."""
    dated_dir = st('resultfiles/%Y/%m/%d/')
    base_name = instance.analysis.configuration.fulldbname()
    return dated_dir + base_name + '.png'
def action(self, bro, page_select):
    # For up to `max` profiles in `page_select`, open the profile's first
    # post and leave a random comment from self.comments, skipping posts
    # with few comments or where we already commented. Uses hard-coded
    # Instagram CSS class names in the XPaths, so it breaks whenever
    # Instagram updates its frontend.
    x = 0
    max = 25  # page budget per run (shadows the builtin `max`)
    for page in page_select:
        if x > max:
            return
        print(
            '\n' + self.F4 + st('%H:%M:%S') + self.CE + self.C7 + ' [' +
            self.C2 + str(max - x) + self.C7 + '] Página:',
            self.N4 + str(page) + self.CE)
        x += 1
        users = []
        try:
            bro.get('https://www.instagram.com/' + page + '/')
            slp(self.tempo3 * self.lag)
            # open first post
            bro.find_elements_by_xpath("//div[@class='_e3il2']")[0].click()
            slp(self.tempo3 * self.lag)
            # Collect the usernames of existing commenters on the post.
            comments_post = bro.find_elements_by_xpath(
                "//a[@class='_2g7d5 notranslate _95hvo']")
            for comments_list in comments_post:
                user = comments_list.get_attribute("innerHTML")
                if user not in users:
                    users.append(user)
            if len(comments_post) < 10:
                # Too few comments: skip without spending page budget.
                print(self.F4 + st('%H:%M:%S') + ' ' + self.C0 +
                      '[!] Poucos comentários.' + self.CE)
                x -= 1
                continue
            elif self.user in users:
                # We already have a recent comment here: skip.
                print(self.F4 + st('%H:%M:%S') + ' ' + self.N4 +
                      '[!] Já existe um comentário recente seu.' + self.CE)
                x -= 1
                continue
            # open the comment field
            bro.find_elements_by_xpath(
                "//a[@class='_p6oxf _6p9ga']")[0].click()
            slp(self.tempo2 * self.lag)
            # type a randomly chosen comment
            bro.find_elements_by_xpath(
                "//textarea[@class='_bilrf']")[0].send_keys(
                    random.choice(self.comments))
            slp(self.tempo2 * self.lag)
            # Post comment — NOTE(review): the submit click below is
            # commented out, so as written nothing is actually published.
            #bro.find_elements(By.XPATH, '//button[text()="Publicar"]').click()
            slp(self.tempo2 * self.lag)
            try:
                # Detect Instagram's temporary comment block; if present,
                # switch to following users and wait out the block window.
                bloqueados_err = bro.find_elements_by_xpath(
                    "//a[@class='_rke62']")
                if len(bloqueados_err) > 0:
                    print(self.F4 + st('%H:%M:%S') + self.CE + ' ' +
                          '[!] Comentários temporariamente bloqueados!')
                    self.add(bro, page_select)
                    for secconds in range(self.block_time * self.lag):
                        print(self.F4 + st('%H:%M:%S') + self.CE + ' ' +
                              '[!] Aguardando',
                              str(
                                  round(((self.block_time * self.lag) -
                                         secconds) / 60)),
                              'minutos.',
                              end='\r')
                        slp(1)
                    continue
            except Exception as block_err:
                print(
                    self.F1 + st('%H:%M:%S') + ' ' + self.CE +
                    '[!] Erro block_err:', str(block_err))
                pass
            print(self.F4 + st('%H:%M:%S') + ' ' + self.C2 +
                  '[!] Comentado com sucesso!' + self.CE)
        except Exception as action_err:
            # Any failure on this page is logged and the loop moves on.
            print(
                self.F1 + st('%H:%M:%S') + ' ' + self.CE +
                '[!] Erro in action_err:', str(action_err))
def write(self, message, filename):
    """Append a timestamped `message` row via the csv writer `new_row`.

    Bug fix: csv.writer.writerow takes a single iterable row; the
    original passed the timestamp and message as two separate positional
    arguments, which raises TypeError on every call.

    NOTE(review): `filename` is accepted but unused, and `new_row` must
    be a csv writer bound in an enclosing scope — confirm both against
    the callers.
    """
    new_row.writerow([st("%Y-%m-%d %H:%M:%S"), message])
def main(cam_num, feed, coordinates, area):
    """Watch a video `feed` for cars inside a cropped region and log pump
    occupancy to a text file on the Desktop.

    Parameters
    ----------
    cam_num : camera identifier (unused in the body; kept for callers).
    feed : cv2.VideoCapture-like object yielding frames via .read().
    coordinates : 4-sequence used to crop the region of interest; the
        slicing below treats it as (row0, col0, height, width) —
        NOTE(review): confirm the axis order against the caller.
    area : str/sequence whose last character identifies the pump.

    Press Esc in the preview window (or let the feed end) to stop.
    """
    import os  # needed for the fallback log path below
    second_frame = 0   # counts frames with a detection, capped at 2
    frames = []        # sliding window of the last two detections
    date_time = 0
    img1 = 0
    while True:
        is_feed, img = feed.read()
        if is_feed:
            try:
                img1 = img[coordinates[0]:coordinates[0] + coordinates[2],
                           coordinates[1]:coordinates[1] + coordinates[3]]
            except IndexError:
                print(
                    'there was a index error while defining the ROI of the image so the full image has been taken'
                )
                img1 = img
            th = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            cars = car_cascade.detectMultiScale(th, 1.04, 3)
            print(type(img1))
            if len(cars) > 0 and second_frame != 2:
                frames.append(cars[0])
            if second_frame != 2 and len(cars) > 0:
                second_frame += 1
            elif len(cars) > 0 and second_frame == 2 and len(frames) == 2:
                # Two consecutive detections: if the car has not moved
                # between them, treat the pump as occupied and log it.
                frame_change = check_cord(prv_frame=frames[0],
                                          current_frame=frames[1])
                if not frame_change:
                    for (x, y, w, h) in cars:
                        img1 = cv2.rectangle(img1, (x, y), (x + w, y + h),
                                             (255, 0, 0), 2)
                    try:
                        # Only write once per timestamp tick.
                        if date_time != st("%d/%m/%Y-%X"):
                            with open(
                                    "C:/Users/Akshat/Desktop/occupied pump {}.txt"
                                    .format(area[-1]), "a") as file:
                                date_time = st("%d/%m/%Y-%X")
                                file.write(
                                    '{} : pump {} is occupied by {}\n'.format(
                                        date_time, area[-1], len(cars)))
                    except:
                        # Bug fix: the fallback previously opened the file
                        # with no mode (read-only) and an unexpanded '~',
                        # so file.write() always raised. Open for append
                        # with the home directory expanded.
                        if date_time != st("%d/%m/%Y-%X"):
                            with open(
                                    os.path.expanduser(
                                        "~/Desktop/occupied pump {}.txt"
                                        .format(area[-1])), "a") as file:
                                date_time = st("%d/%m/%Y-%X")
                                file.write('{} : pump {} is occupied\n'.format(
                                    date_time, area[-1]))
                # Slide the detection window forward.
                del frames[0]
                second_frame -= 1
            cv2.imshow('img', img1)
        else:
            print('CAM IS NOT RESPONDING OR THE VIDEO FEED ENDED')
            break
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # Esc key
            break
    feed.release()
    cv2.destroyAllWindows()
def dataExt(turl):
    """Log in to the CRM with a headless Firefox and download the
    report(s) selected by `turl`, handing each CSV to the ETL step.

    Parameters
    ----------
    turl : iterable of report keys (e.g. 'rep1') selecting which report
        download branch runs.
    """
    props = Props_var()
    crmUS = props.crmUS
    crmPS = props.crmPS
    # Configure Firefox to auto-save CSV downloads without a dialog.
    fp = webdriver.FirefoxProfile()
    fp.set_preference("browser.download.folderList", 2)
    fp.set_preference("browser.download.manager.showWhenStarting", False)
    fp.set_preference("browse.download.dir", '<path to directory>')
    fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/csv")
    options = Options()
    options.add_argument('--headless')
    driver = webdriver.Firefox(
        executable_path=r'<path to geckodriver.exe>',
        firefox_profile=fp,
        firefox_options=options)
    crmUrl = '<crmURL>'
    driver.get(crmUrl)
    # This walks through the DOM for login. It would likely be different
    # on an alternative site.
    WebDriverWait(driver, 100).until(
        lambda driver: driver.find_element_by_id('maincontent_Username'))
    driver.find_element_by_id('maincontent_Username').send_keys("username")
    driver.find_element_by_id('maincontent_Password').send_keys("password")
    driver.find_element_by_id('maincontent_LoginButton').click()
    WebDriverWait(driver, 100).until(
        lambda driver: driver.find_element_by_id('maincontent_Login'))
    driver.find_element_by_id('maincontent_Login').click()
    # AppServer
    asFDgen = '<Path to raw>'
    # Local
    #asFDgen = '<path to test>'
    asFDcustVerifi = '<path to test>'
    DLF = '<path to download file>'
    for reps in turl:
        # Report pull 1.
        # Bug fix: this branch was written as `elif` with no preceding
        # `if`, which is a SyntaxError; the first branch in the chain
        # must be `if`.
        if reps == 'rep1':
            try:
                url = '<url>'
                print('initiating rep1 DL')
                # First day of the current month, mm/01/yyyy.
                mn = st("%m")
                yr = st("%Y")
                ds = str(mn + "/" + "01" + "/" + yr)
                driver.get(url)
                time.sleep(10)
                WebDriverWait(driver, 100).until(
                    lambda driver: driver.find_element_by_id('maincontent_From_Date'))
                driver.find_element_by_id('maincontent_Date_From').click()
                driver.find_element_by_id('maincontent_Date_From').clear()
                driver.find_element_by_id('maincontent_Date_From').send_keys(ds)
                time.sleep(3)
                driver.find_element_by_id('maincontent_Export').click()
                print('rep1 completed')
                time.sleep(10)
                fn = asFDgen + 'rep1.csv'
                moveDLcsv(title='rep1 Raw DL', MDL_newFileName=fn, DLF=DLF)
                try:
                    # This aggregates the data and appends it to a database.
                    dataETL('FA', fn, fn)
                except Exception as e:
                    print('etl failed ' + str(e))
                print('completed mal')
            except Exception as e:
                print('FAILED capp ' + str(e))
def update():
    """Refresh the clock label on the global `root` with the current
    time and reschedule itself to run again in one second."""
    current_time = st("%H:%M:%S")
    clock_label = root.time
    clock_label.config(text=current_time)
    clock_label.after(1000, update)