def place_order(self, data, i):
    ''' Function to place an order with SL and TP parameters '''
    log.print_green('Placing an order...')
    print data
    try:
        self._instr = self.instruments_dict.get(
            data.get('instrument').upper())[0].get('instrument')
        self._unit = self.instruments_dict.get(
            data.get('instrument').upper())[3].get('tradeUnit')
        for x in range(1, (i + 1)):
            self._action = data.get('trade')[x].keys()[0]
            # fall back to 0 when no take-profit level was found
            if not data.get('trade')[x][self._action]['TP']:
                self._tp = 0
            else:
                self._tp = data.get('trade')[x][self._action]['TP']
            self._sl = data.get('trade')[x][self._action]['SL']
            log.print_warning(self._instr, self._unit, self._action,
                              self._tp, self._sl)
            self.ordr = order.MyOanda(self._instr, self._unit, self._action,
                                      self._sl, self._tp)
            try:
                self._art_data.add_oanda(self.ordr.create_order())
            except OandaError:
                log.print_error('Placing an order failed')
                return
    except (AttributeError, TypeError):
        log.print_error('AttributeError, data NoneType')
        return
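# A minimal sketch of the `data` argument place_order() expects, inferred from
# the accesses above. The concrete keys and values are hypothetical; the real
# dict is produced by tradeClass.all_data:
#
#   data = {
#       'instrument': 'eurusd',
#       'trade': {
#           1: {'buy': {'TP': 1.1050, 'SL': 1.0950}},
#       },
#   }
#
#   self.place_order(data, 1)  # i == number of trade entries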
def outer(self, u, value=str()):
    ''' Collect a numeric token (digits with '.' or ',' separators)
    starting at position u '''
    log.print_warning('!!entering with u: ', u)
    if self.find_digit(u).isdigit():
        while (self.find_digit(u).isdigit() or self.find_digit(u) == '.'
               or self.find_digit(u) == ','):
            value += self.find_digit(u)
            log.print_warning("at the start of the while loop: ",
                              self.find_digit(u))
            if self.find_digit(u) == ',':
                log.print_warning("if self.find_digit(u) == ',' ",
                                  self.find_digit(u))
                if (self.find_digit(u + 1).isdigit()
                        or self.find_digit(u + 2).isdigit()):
                    log.print_warning("if self.find_digit(u+1).isdigit() ",
                                      value)
                    u += 1
                else:
                    # trailing comma is punctuation, not a decimal
                    # separator: drop it and stop
                    return value[:-1].encode('ascii', 'ignore')
            log.print_warning("at the end of the while loop ", value)
            u += 1
        return value.encode('ascii', 'ignore')
    else:
        # no digit at this position, keep scanning forward
        return self.outer(u + 1)
def load_datasets(*sampSizes):
    x = []
    y = []
    p = []
    for ss in sampSizes:
        log.print_debug("Opening Cropped dataset " + str(ss))
        cropped_dataset_folder = path.join(CROP_FOLDER, str(ss))
        for filename in os.listdir(cropped_dataset_folder):
            try:
                img_path = path.join(cropped_dataset_folder, filename)
                # filenames are "<patient>_<class>_...": split out both parts
                img_patient = filename.split("_")[0]
                img_class = CATEGORIES.index(str(filename.split("_")[1]))
                img = Image.open(img_path).convert('RGB')
                img_array = np.asarray(img, np.uint8)
                if filter.check_valid(img_array):
                    to_append_img = np.asarray(
                        img.resize((int(INPUT_SIZE), int(INPUT_SIZE)),
                                   Image.LANCZOS))
                    x.append(to_append_img)
                    y.append(img_class)
                    p.append(img_patient)
                else:
                    log.print_warning("Img " + filename +
                                      " not loaded: too much white")
            except Exception as e:
                log.print_error("Cannot load image " + filename + ": " +
                                str(e))
    return x, y, p
def do_collect_info(self, art, pageurl):
    log.print_green("Let's start with ", art.find("h1").text.lower())
    self._art_data = tradeClass()  # internal object holding article data
    self._art_data.title = art.find("h1").text.lower().encode(
        'ascii', 'ignore')
    self._art_data.add_time(art.find("time").get("datetime"))
    self._art_data.ID = art.get("id")
    self._art_data.page_adr = pageurl
    self._art_data.instrument = self.check_instrument(self._art_data.title)
    for p in art.find('div').find_all('p'):
        self._art_data.description += p.text.encode('ascii', 'ignore')
    log.print_warning('###### description ##### ',
                      self._art_data.description, self._art_data.ID)
    self.find_tp_sl(self._art_data.description)  # find take profit and stop loss values
    self._art_data.author = art.find('section', {
        'class': 'autor'
    }).find('div', {
        'class': 'about'
    }).find('h1').text.encode('ascii', 'ignore')
    self._art_data.do_all_data()
    print 'take stop', self._art_data.takestop
    self.do_trade(self._art_data.takestop)
    if self._art_data.trade and (self._art_data.instrument != 'not found'):
        self.place_order(self._art_data.all_data,
                         len(self._art_data.trade))
    self._art_data.do_all_data()
    self.trades.update({self._art_data.ID: self._art_data.all_data})
    self.db.insert(self._art_data.all_data)  # insert all data into the database
    self.trades = {}
    return
def balance_dataset(x, y, p):
    cropped_dataset_folder = path.join(CROP_FOLDER, str(1120))
    unique, counts = np.unique(y, return_counts=True)
    max_class_size = max(counts)
    for i in range(0, unique.shape[0]):
        if counts[i] < max_class_size:
            fileList = glob.glob(
                path.join(cropped_dataset_folder,
                          "*" + CATEGORIES[unique[i]] + "*"))
            images_to_add = max_class_size - counts[i]
            # keep drawing random files until the class is balanced or no
            # candidates remain; rejected images do not count
            while images_to_add > 0:
                if len(fileList) > 0:
                    filename = random.choice(fileList)
                    fileList.remove(filename)
                    img_patient = os.path.basename(filename).split("_")[0]
                    img_class = unique[i]
                    img = Image.open(filename).convert('RGB')
                    img_array = np.asarray(img, np.uint8)
                    if filter.check_valid(img_array):
                        x.append(img_array)
                        y.append(img_class)
                        p.append(img_patient)
                        images_to_add -= 1
                    else:
                        log.print_warning("Img " + filename +
                                          " not loaded: too much white")
                else:
                    log.print_warning("No more available images for class " +
                                      CATEGORIES[unique[i]])
                    break
    return x, y, p
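# Hedged usage sketch: how load_datasets() and balance_dataset() might be
# chained. The 1120px sample size is assumed to exist under CROP_FOLDER,
# since balance_dataset() draws its extra images from that folder:
#
#   x, y, p = load_datasets(1120)
#   x, y, p = balance_dataset(x, y, p)
#   x, y = np.asarray(x), np.asarray(y)  # lists -> arrays before training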
def ask_database(self, what, artID):
    self._result = self.db.select(what, artID)
    if self._result:
        # if several rows match, only the last one is kept
        for data in self._result:
            self._result_dict = yaml.load(data)
        return self._result_dict
    else:
        log.print_warning('Nothing found!')
        return 0
def check_artid(self, page, option):
    self.soup = BeautifulSoup(self.test_page_cont, "lxml")
    self.art = self.soup.find('article', option)
    if self.ask_database('time', self.art.get("id")):
        log.print_green('Found something with the same articleID in the '
                        'database already...')
        self.check_timestamp(self.art.get("id"),
                             self.art)  # check whether the article is an update or an old one
        return
    else:
        log.print_warning('Found something new!')
        self.do_soup(page, option)
def get_output(task, prog_path):
    """
    Run program specified by prog_path and return tuple
    (output, error_output).
    """
    # check task language and set relevant interpreter
    if task.get_language() == "python2":
        interpreter = common.get_config().get_python2path()
    elif task.get_language() == "python3":
        interpreter = common.get_config().get_python3path()
    elif task.get_language() == "ruby":
        interpreter = common.get_config().get_ruby_path()
    else:
        logger.print_error("unknown task language")
        return ("", "")

    # run subprocess
    try:
        p = subprocess.Popen([interpreter, prog_path],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    except OSError:
        logger.print_error("cannot run interpreter for language " +
                           task.get_language())
        return ("", "")

    # send input to subprocess in another thread
    outputs = ["", ""]
    t = Thread(target=subprocess_communicate,
               args=(p, task.get_input(), outputs))
    t.start()

    # wait until subprocess ends or time out
    timeout = common.get_config().get_evaluator_timeout()
    start = datetime.datetime.now()
    while p.poll() is None:
        sleep(0.1)
        now = datetime.datetime.now()
        if (now - start).seconds > timeout:
            p.kill()
            logger.print_warning("subprocess with interpreter " +
                                 task.get_language() +
                                 " has been killed")
            break

    # wait until communication thread ends or time out (5 sec)
    timeout = 5
    start = datetime.datetime.now()
    while t.is_alive():
        sleep(0.1)
        now = datetime.datetime.now()
        if (now - start).seconds > timeout:
            logger.print_warning("subprocess communication thread is "
                                 "still running, interpreter: " +
                                 task.get_language())
            break

    return (outputs[0], outputs[1])
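# Hedged usage sketch for get_output(); the task folder and solution path
# below are hypothetical stand-ins:
#
#   tasks = parse_tasks("tasks/")
#   if tasks:
#       stdout, stderr = get_output(tasks[0], "solutions/prog.py")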
def check_artid(self, page, option):
    self.soup = BeautifulSoup(page.content, "lxml")
    self.art = self.soup.find('article', option)
    if not self.art:
        return
    if self.ask_database('time', self.art.get("id")):
        # an article with the same ID is already in the database
        self.check_timestamp(self.art.get("id"),
                             self.art)  # check whether the article is an update or an old one
        return
    else:
        log.print_warning('Found something new!')
        self.do_soup(page, option)
def subprocess_communicate(process, input_str, outputs):
    """ Send input_str to the process and wait until it sends its output """
    if input_str is not None:
        input_data = input_str.encode("u8")
    else:
        input_data = b""
    try:
        (output, error_output) = process.communicate(input_data)
        outputs[0] = output.decode("u8", errors="replace")
        outputs[1] = error_output.decode("u8", errors="replace")
    except IOError:
        logger.print_warning("IOError while communicating with subprocess")
        outputs[0] = "IOError"
        outputs[1] = "IOError"
def compile_model(x_train, y_train, x_test, y_test, drop_rate):
    # is_built_with_cuda is a function: without the parentheses the
    # check would always be truthy
    if tf.test.is_built_with_cuda():
        if tf.test.is_gpu_available(cuda_only=False,
                                    min_cuda_compute_capability=None):
            log.print_error("MAYBE GPU IS USED")
        else:
            log.print_warning("NO GPU IS USED")
    else:
        log.print_warning("THIS VERSION OF TENSORFLOW DOES NOT USE CUDA")

    input_tensor = tf.keras.Input(shape=x_train[0].shape)
    bayesian_model = models.Model(
        input_tensor, bayesian_cnn(inputs=input_tensor, drop_rate=drop_rate))
    opt = tf.keras.optimizers.Adam(lr=LEARNING_RATE, decay=DECAY)
    bayesian_model.compile(loss='sparse_categorical_crossentropy',
                           optimizer=opt,
                           metrics=['accuracy'])
    # encode the hyperparameters into the model file name
    model_name = str(x_train[0].shape[0]) + "_" + str(N_EPOCH) + "_" + str(BATCH_SIZE) + "_" + str(LEARNING_RATE) \
                 + "_" + str(DECAY) + "_" + str(drop_rate) + "_" + str(USE_BIAS) + "_" + str(DENSE_SIZE) + "_" \
                 + str(SEPARABLE_CONVOLUTION) + "_local.h5"
    bayesian_model.summary()

    # Save model skeleton
    if not os.path.isdir(SUMMARY_FOLDER):
        os.makedirs(SUMMARY_FOLDER)
    summary_path = os.path.join(SUMMARY_FOLDER, model_name + ".txt")
    with open(summary_path, 'w') as f:
        with redirect_stdout(f):
            bayesian_model.summary()

    bayesian_train = bayesian_model.fit(x_train, y_train,
                                        batch_size=BATCH_SIZE,
                                        epochs=N_EPOCH,
                                        validation_data=(x_test, y_test),
                                        shuffle=True)

    # Save model and weights
    if not os.path.isdir(MODEL_FOLDER):
        os.makedirs(MODEL_FOLDER)
    model_path = os.path.join(MODEL_FOLDER, model_name)
    bayesian_model.save_weights(model_path)
    log.print_info('Saved trained model at %s ' % model_path)

    # Score trained model.
    scores = bayesian_model.evaluate(x_test, y_test, verbose=1)
    log.print_info('Test loss : ' + str(scores[0]))
    log.print_info('Test accuracy : ' + str(scores[1]))
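# Hedged usage sketch for compile_model(); the x/y arrays and the naive
# 80/20 split below are hypothetical stand-ins for the real data pipeline:
#
#   x, y, p = load_datasets(1120)
#   split = int(0.8 * len(x))
#   compile_model(np.asarray(x[:split]), np.asarray(y[:split]),
#                 np.asarray(x[split:]), np.asarray(y[split:]),
#                 drop_rate=0.3)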
def balance_set(x, y, in_set_patients):
    log.print_debug("Balancing dataset")
    cropped_dataset_folder = path.join(CROP_FOLDER, str(1120))
    x_list = x.tolist()
    unique, counts = np.unique(y, return_counts=True)
    max_class_size = max(counts)
    for i in range(0, unique.shape[0]):
        if counts[i] < max_class_size:
            file_list = glob.glob(
                path.join(cropped_dataset_folder,
                          "*" + CATEGORIES[unique[i]] + "*"))
            # drop images from patients already in the set; iterate over the
            # original list while removing from the copy, otherwise items are
            # skipped as the iterated list shrinks
            cleaned_file_list = file_list.copy()
            for filename in file_list:
                img_patient = os.path.basename(filename).split("_")[0]
                if img_patient in in_set_patients:
                    cleaned_file_list.remove(filename)
            images_to_add = max_class_size - counts[i]
            # keep drawing random files until the class is balanced or no
            # candidates remain; rejected images do not count
            while images_to_add > 0:
                if len(cleaned_file_list) > 0:
                    filename = random.choice(cleaned_file_list)
                    cleaned_file_list.remove(filename)
                    img_class = unique[i]
                    img = Image.open(filename).convert('RGB')
                    img_array = np.asarray(img, np.uint8)
                    if filter.check_valid(img_array):
                        to_append_img = np.asarray(
                            img.resize((int(INPUT_SIZE), int(INPUT_SIZE)),
                                       Image.LANCZOS))
                        x_list.append(to_append_img)
                        y = np.append(y, img_class)
                        images_to_add -= 1
                    else:
                        log.print_warning("Img " + filename +
                                          " not loaded: too much white")
                else:
                    log.print_warning("No more available images for class " +
                                      CATEGORIES[unique[i]])
                    break
    return np.asarray(x_list), y
def parse_tasks(folder_path):
    """
    Parse all XML tasks from folder folder_path.
    Return list with datamodel.Task objects or None if something went wrong.
    """
    tasks = []
    try:
        for xmlfile in os.listdir(folder_path):
            file_path = os.path.join(folder_path, xmlfile)
            if file_path.endswith(".py"):
                task = parse_dynamic_task(file_path)
            elif file_path.endswith(".xml"):
                task = parse_task(file_path)
            else:
                continue
            if task is None or not task.check():
                logger.print_warning("skipped incorrect task " + file_path)
            else:
                task.set_filepath(file_path)
                tasks += [task]
    except OSError:
        return None
    return tasks
def handle(self):
    try:
        super(HTTPRequestHandler, self).handle()
    except socket.error as e:
        logger.print_warning("socket error while communicating with " +
                             self.client_address[0] + " " + str(e))