def start(self) -> None:
    """Start generating and tweet the itemshop"""
    if self.delay > 0:
        log.info(f"Delaying process start for {self.delay}s...")
        sleep(self.delay)

    item_shop = Utility().get_url(
        "https://fortnite-api.com/v2/shop/br/combined",
        {"language": self.language},
    )

    if item_shop is not None:
        item_shop = item_shop["data"]

        # Strip the time from the timestamp (only the date is needed), then
        # translate it into the configured language with googletrans.
        date = Translator().translate(
            Utility().iso_to_human(item_shop["date"].split("T")[0]),
            src="en",
            dest=self.date_language,
        ).text
        log.info(f"Retrieved Item Shop for {date}")

        shop_image = self.generate_image(date, item_shop)

        if shop_image is True:
            if self.twitter_enabled is True:
                self.tweet(date)

def __init__(self):
    self.cannons = {}
    self.tactics = {}
    self.ips = []
    self.scaninfo = {}
    self.util = Utility()
    self.shells = {}

def test1(self):
    utility = Utility()
    testNeighbors = [(1, 0), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1),
                     (2, 2), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)]
    output = utility.getNeighboringCoordinates((0, 0))
    for neighbor in testNeighbors:
        self.assertIn(neighbor, output)
    self.assertEqual(len(output), len(testNeighbors))

def trainRaw(model, trainDir, valDir, testDir, epochs):
    model.compile(Adam(lr=0.001),
                  loss="sparse_categorical_crossentropy",
                  metrics=['acc'])

    checkpoint = ModelCheckpoint(Utility().getModelPath(), monitor='val_acc',
                                 verbose=1, save_best_only=True, mode='max')
    # val_loss is minimized, so the plateau scheduler must run in 'min' mode.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2,
                                  verbose=1, mode='min', min_lr=0.00000001)
    callbacks_list = [checkpoint, reduce_lr]

    trainDataGen = ImageDataGenerator(rescale=1. / 255)
    trainGenerator = trainDataGen.flow_from_directory(trainDir, batch_size=10,
                                                      class_mode="sparse",
                                                      target_size=(224, 224),
                                                      shuffle=True)
    valDataGen = ImageDataGenerator(rescale=1. / 255)
    valGenerator = valDataGen.flow_from_directory(valDir, batch_size=10,
                                                  class_mode="sparse",
                                                  target_size=(224, 224),
                                                  shuffle=True)
    testDataGen = ImageDataGenerator(rescale=1. / 255)
    testGenerator = testDataGen.flow_from_directory(testDir, batch_size=10,
                                                    class_mode="sparse",
                                                    target_size=(224, 224),
                                                    shuffle=False)

    history = model.fit_generator(trainGenerator, epochs=epochs, verbose=1,
                                  validation_data=valGenerator,
                                  steps_per_epoch=100, validation_steps=50,
                                  callbacks=callbacks_list)

    test_loss, test_acc = model.evaluate_generator(testGenerator, steps=772)
    print(test_loss)
    print(test_acc)
    return history

def main():
    clear()
    print(Fore.GREEN + 'Welcome to smarty.py v2.0' + Fore.RESET)
    token = raw_input('If you have a token saved, press enter to choose. Otherwise, please enter your SmartToken to begin:\n')
    global api
    if token:
        try:
            instance = Utility()._get_instance(token)
        except:
            # Authentication Error
            print(Fore.RED + 'Error 01 - Invalid SmartToken; please try again.' + Fore.RESET)
            time.sleep(2)
            main()
        else:
            # Store the new token
            api = SmartLibrary(instance)
            instance['token'] = token
            Utility()._save_token(instance['token'], instance['name'])
            menu(instance)
    else:
        tokenlist = Utility()._list_tokens()
        print(Fore.CYAN + 'Select a token from the following:' + Fore.RESET)
        for i, pair in enumerate(tokenlist):
            print(Fore.LIGHTYELLOW_EX + """
            %s. %s | %s
            """ % (i + 1, pair[0], pair[1]) + Fore.RESET)
        selection = raw_input('Selection: ')
        try:
            instance = Utility()._get_instance(tokenlist[int(selection) - 1][1])
        except:
            # Authentication Error
            print(Fore.RED + 'Error 01 - Invalid SmartToken; please try again.' + Fore.RESET)
            time.sleep(2)
            main()
        else:
            api = SmartLibrary(instance)
            instance['token'] = tokenlist[int(selection) - 1][1]
            menu(instance)

def __init__(self, ip_addr='127.0.0.1'):
    self.ip_address = ip_addr
    self.scanner = nmap.PortScanner()
    self.xml = None
    self.util = Utility()

    # Read configuration options. config.read() returns the list of files it
    # parsed, so an empty result means config.ini is missing.
    full_dir_path = os.path.dirname(os.path.abspath(__file__))
    config = configparser.ConfigParser()
    if not config.read(os.path.join(full_dir_path, 'config.ini')):
        self.util.print_message(FAIL, 'Configuration file missing. exiting...')
        sys.exit(1)
    # Keep only the arguments that follow the nmap command name.
    self.nmap_arguments = config['Nmap']['command'][5:]

def registerIP(self, ipaddr: str):
    u = Utility()
    if not u.isValidIP(ipaddr):
        raise ValueError("Invalid IP Address")
    if ipaddr not in self.ips:
        self.ips.append(ipaddr)

        elif target.status == self.utility.status_attack_sub:
            target.update(x=1,
                          status=self.utility.status_normal,
                          text_color=self.utility.color_7)
            msg = 'The {} is slightly damaged.'.format(target.hostname)
            self.DisplayText.push(msg, self.utility.color_11)
        # Probe.
        elif target.status == self.utility.status_attack_probe:
            target.update(status=self.utility.status_normal,
                          text_color=self.utility.color_7)
            msg = 'The {} is being investigated.'.format(target.hostname)
            self.DisplayText.push(msg, self.utility.color_12)
        target.origin_framecount = 0
        target.wait_framecount = 0.0


# main.
if __name__ == '__main__':
    file_name = os.path.basename(__file__)
    full_path = os.path.dirname(os.path.abspath(__file__))
    utility = Utility()
    utility.write_log(20, '[In] 8Vana [{}].'.format(file_name))

    # Start application.
    Application(utility)

    print(os.path.basename(__file__) + ' finish!!')
    utility.write_log(20, '[Out] 8Vana [{}].'.format(file_name))

def train(model, X_train, Y_train, X_test, Y_test, num_epochs, batch_size,
          data_augmentation=False):
    util = Utility()

    # Compile the model
    model.compile(optimizer=Adam(lr=util.lr_schedule(0), epsilon=1e-8),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print("Plotting model into png")
    plot_model(model, to_file='model.png')

    modelCheckpoint = ModelCheckpoint(filepath=util.getModelPath(),
                                      monitor='val_acc',
                                      verbose=1,
                                      save_best_only=True)
    learningRateScheduler = LearningRateScheduler(util.lr_schedule)
    lrReducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                  cooldown=0,
                                  patience=5,
                                  min_lr=0.5e-6)
    # earlyStop = EarlyStopping(monitor='val_loss', patience=5, min_delta=1e-3,
    #                           restore_best_weights=True, mode='auto')

    # Prepare callbacks
    callbacks = [modelCheckpoint, learningRateScheduler, lrReducer]

    if not data_augmentation:
        print('Not using data augmentation.')
        # Train the model
        model.fit(X_train, Y_train,
                  epochs=num_epochs,
                  batch_size=batch_size,
                  validation_data=(X_test, Y_test),
                  shuffle=True,
                  callbacks=callbacks)
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            # set input mean to 0 over the dataset
            featurewise_center=False,
            # set each sample mean to 0
            samplewise_center=False,
            # divide inputs by std of dataset
            featurewise_std_normalization=False,
            # divide each input by its std
            samplewise_std_normalization=False,
            # apply ZCA whitening
            zca_whitening=False,
            # epsilon for ZCA whitening
            zca_epsilon=1e-06,
            # randomly rotate images in the range (deg 0 to 180)
            rotation_range=0,
            # randomly shift images horizontally
            width_shift_range=0.1,
            # randomly shift images vertically
            height_shift_range=0.1,
            # set range for random shear
            shear_range=0.,
            # set range for random zoom
            zoom_range=0.,
            # set range for random channel shifts
            channel_shift_range=0.,
            # set mode for filling points outside the input boundaries
            fill_mode='nearest',
            # value used for fill_mode = "constant"
            cval=0.,
            # randomly flip images
            horizontal_flip=True,
            # randomly flip images
            vertical_flip=False,
            # set rescaling factor (applied before any other transformation)
            rescale=None,
            # set function that will be applied on each input
            preprocessing_function=None,
            # image data format, either "channels_first" or "channels_last"
            data_format=None,
            # fraction of images reserved for validation (strictly between 0 and 1)
            validation_split=0.0)

        # Compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(X_train)

        # Fit the model on the batches generated by datagen.flow().
        model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                            validation_data=(X_test, Y_test),
                            epochs=num_epochs, verbose=1, workers=4,
                            callbacks=callbacks,
                            steps_per_epoch=math.ceil(len(X_train) / batch_size))

    # Evaluate the model
    _, test_acc = model.evaluate(X_test, Y_test)
    print("Accuracy on the test set: " + str(test_acc * 100) + "%")

def test(self):
    utility = Utility()
    self.assertEqual(1, 1)

class SentimentAnalysis():
    ## Dictionary to store sentiment values
    senti = dict()
    ## Creates an instance of the Utility object
    util = Utility()

    ## Normalizes the sentiment values and returns the overall "average" score
    #
    # @param self The object pointer.
    # @param pos The positive total of the sentence
    # @param neg The negative total of the sentence
    # @param alpha Weighted value used for averaging
    # \returns The overall sentiment value of the sentence
    def normalize(self, pos, neg, alpha=15):
        sum = pos - neg
        norm = sum / math.sqrt((sum * sum) + alpha)
        return norm

    ## Basic Sentiment Parsing
    #
    # @param self The object pointer.
    # @param text The line of text being analyzed
    # \returns A string with Overall, Positive, Negative, and Neutral/Objective sentiment values
    def sentiment(self, text):
        words = self.util.tokenize(text)  # Runs text through the Utility tokenizer
        pos = 0.0
        neg = 0.0
        posCt = 0
        negCt = 0
        for word in words:
            # First, the SentiWordNet POS tags get appended to the word
            keys = [word + "/r", word + "/a", word + "/n", word + "/e", word + "/v"]
            for key in keys:
                # Checks to see if the new "key" exists in the dictionary
                if key in self.senti:
                    tmp = self.senti[key]
                    if tmp[0] > 0:
                        pos += float(tmp[0])  # Add positive sentiment vals to the positive total (pos)
                        posCt += 1
                    if tmp[1] > 0:
                        neg += float(tmp[1])  # Add negative sentiment vals to the negative total (neg)
                        negCt += 1
                else:
                    continue
        overall = self.normalize(pos, neg)
        if posCt > 0:
            pos /= float(posCt)  # Calculates the positive portion of sentiment
        if negCt > 0:
            neg /= float(negCt)  # Calculates the negative portion of sentiment
        obj = 1.0 - (pos + neg)  # Calculates the objective (neutral) portion of sentiment
        pos = "{0:.4f}".format(pos)
        neg = "{0:.4f}".format(neg)
        overall = "{0:.4f}".format(overall)
        obj = "{0:.4f}".format(obj)
        out = "Overall: {:>7} Pos: {:>6} Neg: {:>6} Obj: {:>6}\n".format(overall, pos, neg, obj)
        return out

    ## Processes multiple sentences in a list
    #
    # @param self The object pointer.
    # @param sentences A list of sentences to be analyzed
    def batch_proc(self, sentences):
        for sentence in sentences:
            result = self.bigram_sent(sentence)
            print(result)

    ## Processes sentences as bigrams (pairs of words)
    #
    # @param self The object pointer.
    # @param text The line/sentence being analyzed
    # \returns A string with Overall, Positive, Negative, and Neutral/Objective sentiment values
    def bigram_sent(self, text):
        bigrams = self.util.bigrams(text)  # Tokenizes text and returns bigrams
        pos = 0
        neg = 0
        posCt = 0
        negCt = 0
        flag = 0
        for bigram in bigrams:
            loc1, loc2 = self.util.intensifier(bigram)  # gets the location of the intensifier in the bigram (0 or 1)
            word1 = bigram[0]
            word2 = bigram[1]
            keys = [word1 + "/r", word1 + "/a", word1 + "/n", word1 + "/e", word1 + "/v"]
            keys += [word2 + "/r", word2 + "/a", word2 + "/n", word2 + "/e", word2 + "/v"]
            for key in keys:
                if key in self.senti:
                    tmp = self.senti[key]
                    if tmp[0] > 0:
                        # If the first word is a positive intensifier and the second is a regular word
                        if loc1 == True and loc2 == False:
                            pos += 1.25
                            neg -= 1
                        pos += float(tmp[0])
                        posCt += 1
                        flag = 0
                    if tmp[1] > 0:
                        # If the first word is a negative intensifier and the second is a regular word
                        if loc1 == True and loc2 == False:
                            neg += 1.25
                            pos -= 1
                        neg += float(tmp[1])
                        negCt += 1
                        flag = 1
                else:
                    continue
            # If the bigram contains two intensifier values
            if loc1 == True and loc2 == True:
                if flag == 1:
                    neg += 1.0
                    pos -= 0.5
                elif flag == 0:
                    pos += 1.0
                    neg -= 0.5
        # Gets the averaged overall sentiment score
        overall = self.normalize(pos, neg)
        if posCt > 0:
            pos /= float(posCt)
        if negCt > 0:
            neg /= float(negCt)
        obj = 1.0 - (pos + neg)
        out = "Overall: " + "{0:.4f}".format(overall) + " Pos: " + "{0:.4f}".format(pos)
        out += " Neg: " + "{0:.4f}".format(neg) + " Obj: " + "{0:.4f}".format(obj) + "\n"
        return out

    ## Processes multiple sentences from a file
    #
    # @param self The object pointer.
    # @param infile The file containing all of the sentences.
    # @param outfile The file to which overall values will be written.
    # @param raw_out The file to which all "raw" data will be written.
    def file_proc(self, infile, outfile, raw_out):
        # The output files are created on open, so only the input file must already exist.
        if not os.path.isfile(infile):
            return False
        with open(infile, "r") as inf:
            with open(outfile, "w") as out:
                with open(raw_out, "w") as raw:
                    while True:
                        new_lines = list(islice(inf, 30))  # reads the file 30 lines at a time
                        if not new_lines:
                            break
                        for line in new_lines:
                            result = self.bigram_sent(line)
                            raw.write(result)  # writes the raw (full) output to the raw file
                            result = str(result.split()[1]) + "\n"
                            out.write(result)  # writes only the overall score to the main output file

    ## Class Constructor
    # Loads data from a Pickle file upon object creation.
    # @param self The object pointer.
    def __init__(self):
        self.senti = pickle.load(open("simple_senti_py/data.pkl", "rb"))

def manage_candidates(instance):
    """
    CLI: Candidates Menu

    List options regarding candidate management, each tied to one or more
    smartlib functions.

    Arguments:
    instance -- a dictionary of SR instance information
    """
    clear()
    print(Fore.GREEN + 'Current Instance: %s' % (instance['name']) + Fore.RESET)
    print(Fore.CYAN + 'Candidates Menu - Select an Action' + Fore.RESET)
    print(Fore.LIGHTYELLOW_EX + """
    1. Search Candidates
    2. Add Candidate
    3. Add Candidate and Assign to Job
    4. Get Candidate Info
    5. Return to Main Menu
    """ + Fore.RESET)
    selection = raw_input('Selection: ')
    if selection == '':
        print('Invalid input. Please try again.')
        time.sleep(1)
        manage_candidates(instance)
    elif selection == '1':
        # search candidates
        clear()
        print(Fore.GREEN + 'Current Instance: %s' % (instance['name']) + Fore.RESET)
        print(Fore.CYAN + 'Candidate Search' + Fore.RESET)
        q = raw_input('Search Term: ')
        l = raw_input('Limit: ')
        o = raw_input('Offset: ')
        try:
            results = api.CandidateAPI.search({'q': q, 'limit': l, 'offset': o})
        except SmartRecruitersError:
            print(Fore.RED + """
            There are no candidates matching that search term.
            """ + Fore.RESET)
        else:
            if results:
                print(Fore.CYAN + '%s Total | Search Results:' % (results['totalFound']) + Fore.RESET)
                for r in results['content']:
                    print(Fore.MAGENTA + """
                    %s %s | %s
                    """ % (r['firstName'], r['lastName'], r['id']) + Fore.RESET)
            else:
                print(Fore.RED + """
                There are no candidates matching that search term.
                """ + Fore.RESET)
        action = raw_input('Press enter to return to Candidate Management.')
        if action == '':
            manage_candidates(instance)
    elif selection == '2':
        # add candidate
        clear()
        print(Fore.GREEN + 'Current Instance: %s' % (instance['name']) + Fore.RESET)
        print(Fore.CYAN + 'Candidate Creation' + Fore.RESET)
        number = raw_input('Number of candidates to add: ')
        for _ in range(int(number)):
            candidate = Utility(instance=instance).create_candidate_json()
            try:
                response = api.CandidateAPI.create(candidate)
            except SmartRecruitersError:
                print(Fore.RED + """
                There was an error adding that candidate.
                """ + Fore.RESET)
            else:
                print(Fore.YELLOW + """
                Candidate added: %s %s
                """ % (response['firstName'], response['lastName']) + Fore.RESET)
        action = raw_input('Press enter to return to Candidate Management.')
        if action == '':
            manage_candidates(instance)
    elif selection == '3':
        # add and assign candidate to job
        clear()
        print(Fore.GREEN + 'Current Instance: %s' % (instance['name']) + Fore.RESET)
        print(Fore.CYAN + 'Candidate Creation and Assignment' + Fore.RESET)
        job_id = raw_input('Job ID: ')
        number = raw_input('Number of candidates to add and assign: ')
        for _ in range(int(number)):
            candidate = Utility(instance=instance).create_candidate_json()
            try:
                response = api.CandidateAPI.assign(job_id, candidate)
            except SmartRecruitersError:
                print(Fore.RED + """
                There was an error adding and assigning that candidate.
                """ + Fore.RESET)
            else:
                print(Fore.YELLOW + """
                Candidate added: %s %s
                """ % (response['firstName'], response['lastName']) + Fore.RESET)
        action = raw_input('Press enter to return to Candidate Management.')
        if action == '':
            manage_candidates(instance)
    elif selection == '4':
        # get candidate details
        clear()
        print(Fore.GREEN + 'Current Instance: %s' % (instance['name']) + Fore.RESET)
        print(Fore.CYAN + 'Candidate Info' + Fore.RESET)
        candidate_id = raw_input('Candidate ID: ')
        try:
            response = api.CandidateAPI.get(candidate_id)
        except SmartRecruitersError:
            print(Fore.RED + """
            That ID does not match a candidate.
            """ + Fore.RESET)
        except ValueError:
            print(Fore.RED + """
            Invalid input; try again.
""" + Fore.RESET) else: print(Fore.YELLOW + """ %s %s %s %s """ % ( response['firstName'], response['lastName'], response['email'], 'https://www.smartrecruiters.com/app/people/candidates/' + response['id'] ) + Fore.RESET) action = raw_input('Press enter to return to Candidate Management.') if action == '': manage_candidates(instance) elif selection == '5': menu(instance)
def msgrpc_service(req):
    util = Utility()

    # Read config.ini.
    full_path = os.path.dirname(os.path.abspath(__file__))
    config = configparser.ConfigParser()
    try:
        config.read(os.path.join(full_path, 'config.ini'))
    except FileExistsError as err:
        util.print_message(FAIL, 'File exists error: {}'.format(err))
        response = {
            "request_id": req['request_id'],
            "service": "msgrpc",
            "success": False,
            "reason": "Config File not found!"
        }
        sio.emit("response", data=response)
        return

    meth = req['method']
    if meth == 'auth.login':
        req['option'] = ['auth.login',
                         str(config['Common']['msgrpc_user']),
                         str(config['Common']['msgrpc_pass'])]
    options_meta = req['option']
    uri = req['uri']
    # origin_option = req['origin_option']
    headers = req['headers']

    # if meth != 'auth.login':
    #     option = []
    #     for op in options_meta:
    #         if op['type'] == "bytes":
    #             option.append(bytes(op['value'], "utf-8"))
    #         else:
    #             option.append(op['value'])
    # else:
    option = req['option']

    params = msgpack.packb(option)
    resp = ''
    host = "172.19.0.1"
    port = int(config['Common']['server_port'])
    # client = http.client.HTTPSConnection(host, port)
    try:
        # client.request("POST", uri, params, headers)
        # resp = client.getresponse()
        resp = requests.post("http://" + host + ":" + str(port) + uri,
                             data=params, headers=headers)
        open('response.bin', "wb").write(resp.content)
        sio.emit("response", data={"request_id": req['request_id'],
                                   "service": "msgrpc",
                                   'success': True,
                                   'data': resp.content})
        return

        # NOTE: everything below is unreachable while the early return above is in place.
        res = msgpack.unpackb(resp.content, strict_map_key=False, raw=False)
        print("Response: " + str(res))
        decoded_res = []
        for key, value in res.items():
            op = []
            if type(key).__name__ == "bytes":
                op.append({'type': type(key).__name__, 'value': key.decode('utf-8')})
            else:
                op.append({'type': type(key).__name__, 'value': key})
            if type(value).__name__ == "bytes":
                op.append({'type': type(value).__name__, 'value': value.decode('utf-8')})
            else:
                op.append({'type': type(value).__name__, 'value': value})
            decoded_res.append(op)
        print("\n\nDecoded: ", str(decoded_res))
        response = {
            "request_id": req['request_id'],
            "service": "msgrpc",
            "success": True,
            "resp": decoded_res
        }
        sio.emit("response", data=response)
    except Exception as err:
        traceback.print_exc()
        response = {
            "request_id": req['request_id'],
            "service": "msgrpc",
            "success": False,
            "reason": "auth"
        }
        sio.emit("response", data=response)

from pycvesearch import CVESearch
from SystemScan import SystemScan
from MetasploitCannon import *
from __const import NOTE
from util import Utility
from CannonEngine import CannonEngine

ce = CannonEngine()
ce.registerCannonPlug(MetasploitCannon())
cve = CVESearch()
exploited = False

sc = SystemScan('172.28.128.3')
sc.startScan()
sc.fetchMSFE()
cpes = sc.cpes
util = Utility()

# target = {'host': '172.28.128.3', 'port': '6697', 'cpe': 'cpe:/a:unrealircd:unrealircd', 'msfe': ['exploit/unix/irc/unreal_ircd_3281_backdoor']}
# print(target)
# for target in cpes:
target = {
    'host': '172.28.128.3',
    'port': '6697',
    'cpe': 'cpe:/a:unrealircd:unrealircd',
    'msfe': ['exploit/unix/irc/unreal_ircd_3281_backdoor']
}
# TODO: Skip modules and payloads that belong to other operating systems using the OScpe property.
# if exploited == True:
#     break
for exploit in target['msfe']:
    if exploited == True: