Example #1
 def test_remove_files_in_dir(self):
     """
     Test that files in a directory are successfully deleted
     """
     path = self.test_files_path
     pattern = "(\w)*\.txt"
     FileUtils.remove_files_in_dir(path, pattern)
     files = os.listdir(path)
     self.assertEqual(0, len(files), f"{path} still contains files")
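The FileUtils implementation itself is not shown on this page; a minimal sketch of what remove_files_in_dir plausibly does, inferred from the test above (the implementation details, including the fullmatch-based filtering, are assumptions):

import os
import re


def remove_files_in_dir(path, pattern):
    # hypothetical sketch: delete every regular file whose name matches `pattern`
    regex = re.compile(pattern)
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        if os.path.isfile(full_path) and regex.fullmatch(name):
            os.remove(full_path)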
Example #2
    def result_stock_data_container_list_changed(self):
        """
        Update the columns and data of the view if the stock data container list changed.
        Additionally, it adds columns that are not yet available to the MvcModel and fills unavailable columns with a dummy value.
        :return: -
        """
        tree = w.Scrolledtreeview1
        tree.delete(*tree.get_children())
        stock_data_container_list = self.model.result_stock_data_container_list.get()

        # sort the list by rank
        newlist = sorted(stock_data_container_list,
                         key=lambda x: x.get_rank(),
                         reverse=True)

        for result_container in newlist:
            try:
                is_updated = self.model.update_column_list(
                    result_container.get_names_and_values().keys())

                if is_updated:
                    init_result_table(self.view.Scrolledtreeview1,
                                      self.model.get_column_list())

                GuiUtils.insert_into_treeview(
                    self.view.Scrolledtreeview1, self.model.get_column_list(),
                    result_container.get_names_and_values(), "Stock")

                # append all COLUMNS to file --> new layout leads to new line with header
                FileUtils.append_text_list_to_file(
                    self.model.get_column_list(),
                    GlobalVariables.get_data_files_path() +
                    "ScreeningResults.csv", True, ",")

                # append VALUES to file
                values = result_container.get_names_and_values().values()
                text = ','.join(str(e) for e in values)
                FileUtils.append_textline_to_file(
                    text,
                    GlobalVariables.get_data_files_path() +
                    "ScreeningResults.csv", True)

            except Exception as e:
                logger.error("Exception: " + str(e) + "\n" +
                             str(traceback.format_exc()))
                continue

        # add sort functionality for each column when its header is clicked
        GuiUtils.advanced_sorting(self.view.Scrolledtreeview1,
                                  self.model.get_column_list(), True)

        # add a color tag for the entries in the tree view
        for color in GlobalVariables.get_row_colors().values():
            tree.tag_configure(color, background=color)
Example #3
 def test_delete_file(self):
     """
     Test that a file can be successfully deleted
     """
     path = self.test_files_path
     t_file = "text1.txt"
     FileUtils.delete_file(os.path.join(path, t_file))
     files = os.listdir(path)
     self.assertEqual(9, len(files), f"{path} still has all of its files")
     self.assertTrue(t_file not in files,
                     f"{t_file} is still inside the directory")
Example #4
    def __init__(self):
        self.image_utils = ImageUtils()
        self.mask_utils = MaskUtils()
        self.file_utils = FileUtils()
        self.coco_utils = CocoUtils()

        self.cfg = None
        self.predictor = None
        self.bladder_metadata = None

        self.configure()
Example #5
    def _method_to_execute(self, elm):
        """
        Read a single news element from the traderfox home page (dpa-afx-compact news).
        :param elm: parsed HTML element of one news entry
        :return: - (appends the parsed news to the stock data container list)
        """
        from Utils.FileUtils import FileUtils
        all_news = []

        date_time = str(elm.footer.span.get_text())  # date and time
        # TODO: split needed because of the German datetime format ("... Uhr")
        date_time = date_time.rsplit(' Uhr')[0]

        article_text = str(elm.h2.get_text(strip=True))  # h2 --> article headline
        news_text = date_time.replace(',', '.') + ", " + article_text.replace(',', '.')
        # TODO REMOVE THAT
        # THIS IS JUST FOR BACKTESTING NEWS DATA COLLECTION
        FileUtils.append_textline_to_file(
            news_text,
            GlobalVariables.get_data_files_path() + "NewsForBacktesting.txt",
            True)
        all_news.append(news_text)

        prep_news = self.text_analysis.optimize_text_for_german_tagger(
            news_text)
        name_ticker_exchange_target_price = \
            self.text_analysis.identify_stock_name_and_stock_ticker_and_target_price_from_news_nltk_german_classifier(
                news_text)

        if name_ticker_exchange_target_price is not None and \
                name_ticker_exchange_target_price.get_stock_name() != "":
            container = StockDataContainer(
                name_ticker_exchange_target_price.get_stock_name(),
                name_ticker_exchange_target_price.stock_ticker(),
                name_ticker_exchange_target_price.stock_exchange())

            if container in self.stock_data_container_list:
                idx = self.stock_data_container_list.index(container)
                container_2 = self.stock_data_container_list[idx]
                if isinstance(container_2, StockDataContainer):
                    container = container_2
                    self.stock_data_container_list.remove(container_2)

            news_dec = NewsDataContainerDecorator(
                container,
                name_ticker_exchange_target_price.stock_target_price(), 0,
                prep_news)

            self.stock_data_container_list.append(news_dec)
Example #6
def main():
    FileUtils.clear_folder("./images/")

    net = NeuralNetWork(num_neurons=20, num_layers=10, with_alpha=False)
    net.set_image_size(128, 128)

    for i in range(0, 10000, 50):
        print(f"{i}")
        colors = net.generate()
        SaveUtils.save_colors(colors, i=i)

    FileUtils.save_gif(fp_in="./images/*.png", fp_out="./animation.gif")
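The save_gif helper is not shown here; a workable sketch with Pillow, assuming the frame files sort correctly by name (e.g. zero-padded indices), could look like this:

import glob

from PIL import Image


def save_gif(fp_in, fp_out, duration_ms=100):
    # hypothetical sketch: combine all frames matching the glob into an animated GIF
    frames = [Image.open(f) for f in sorted(glob.glob(fp_in))]
    frames[0].save(fp_out, save_all=True, append_images=frames[1:],
                   duration=duration_ms, loop=0)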
Example #7
def _read_data(selected_strategies_list, strategy_parameter_dict, other_params,
               stock_data_container_list):
    reader_results = {}
    for selected_strat in selected_strategies_list:
        for data_reader in strategy_parameter_dict[selected_strat][
                'data_readers']:
            data_reader_params = strategy_parameter_dict[selected_strat][
                'data_readers'][data_reader]
            data_storage = DataReaderFactory()
            reader_type = data_reader

            # TODO: do this differently; don't import here
            from Utils.FileUtils import FileUtils
            if data_reader_params['ticker_needed']:
                if data_reader_params['reload_data'] is True:
                    stock_data_container_list.extend(
                        FileUtils.read_tickers_from_web(
                            other_params['stock_data_container_file'],
                            other_params['dict_with_stock_pages_to_read']))
                else:
                    try:
                        stock_data_container_list.extend(
                            FileUtils.read_tickers_and_data_from_file(
                                other_params['stock_data_container_file']))
                    except RecursionError as e:
                        raise RecursionError(
                            "Reload the data; the old data may be faulty"
                        ) from e

            curr_reader = data_storage.prepare(
                reader_type,
                stock_data_container_list=stock_data_container_list,
                reload_stockdata=data_reader_params['reload_data'],
                parameter_dict=data_reader_params)
            logger.info("data_reader " + reader_type + " initialised.")
            # only add, if not added by another strategy reader --> avoid duplications
            # try:
            #     if reader_results[reader_type] is None:
            #         raise Exception
            # except Exception:
            # reader_results[reader_type] = 'Read'
            # TODO stock_data_container_list.extend(readers[reader_type].read_data())
            curr_reader.read_data()

    # dump for next run, instead of reloading every time
    with open(other_params['stock_data_container_file'], "wb") as f:
        pickle.dump(stock_data_container_list, f)

    logger.info("data_reader " + reader_type + " read data.")

    return stock_data_container_list
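Since the list is pickled at the end of _read_data, the next run can restore it without hitting the web; reloading is the straightforward inverse (a sketch of what read_tickers_and_data_from_file presumably wraps):

import pickle

with open(other_params['stock_data_container_file'], "rb") as f:
    stock_data_container_list = pickle.load(f)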
Example #8
    def test_append_to_file__only_new_entries__all_entries(self):
        filename = GlobalVariables.get_data_files_path() + "TestData\\NewsForBacktesting.txt"
        text = "25.07.2018 um 08:41, ANALYSE-FLASH: Berenberg hebt Ziel für Adidas auf 207 Euro - 'Hold'"
        self.assertFalse(
            FileUtils.append_textline_to_file(text, filename, True))

        text = str(datetime.now()) + ", Test Eintrag"
        self.assertTrue(FileUtils.append_textline_to_file(
            text, filename, True))

        text = "16.07.2018 um 8:51, Test Eintrag"
        self.assertTrue(
            FileUtils.append_textline_to_file(text, filename, False))
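The assertions above pin down the return contract: True means the line was appended, False means it was skipped as a duplicate. A sketch of append_textline_to_file consistent with that behavior (hypothetical implementation):

import os


def append_textline_to_file(text, file_path, check_if_exists):
    # hypothetical sketch: optionally skip lines that are already in the file
    if check_if_exists and os.path.exists(file_path):
        with open(file_path, "r") as f:
            if any(line.rstrip("\n") == text for line in f):
                return False  # duplicate --> not appended
    with open(file_path, "a") as f:
        f.write(text + "\n")
    return True  # line was appended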
Example #9
    def test_read_tickers_from_file_or_web(self):
        stock_data_container_file_name = "stock_data_container_file.pickle"
        stock_data_container_file = GlobalVariables.get_test_data_files_path() + stock_data_container_file_name

        dict_with_stock_pages_to_read = \
            GlobalVariables.get_other_parameters_with_default_parameters()['dict_with_stock_pages_to_read']

        stock_data_container_list = FileUtils.read_tickers_from_file_or_web(
            stock_data_container_file, True, dict_with_stock_pages_to_read)
        self.assertGreater(len(stock_data_container_list), 800)

        stock_data_container_list = FileUtils.read_tickers_from_file_or_web(
            stock_data_container_file, False, dict_with_stock_pages_to_read)
        self.assertGreater(len(stock_data_container_list), 800)
Example #10
    def print_and_save_mean(self, file_name=""):
        """
        Print the mean text and save all diff values plus the mean value to the given file, if the file name is not empty.
        :param file_name: file name for saving the values; can be empty --> do not save to file
        :return: mean value as float
        """
        from Utils.FileUtils import FileUtils
        mean_value = np.mean(self.get_diff_list_seconds())
        mean_text = "Mean: " + str(mean_value).replace('.', ',')
        print(mean_text)

        if len(file_name) > 0:
            for diff in self.get_diff_list_seconds():
                FileUtils.append_text_list_to_file(
                    [str(diff).replace('.', ',')], file_name, False)
            FileUtils.append_text_list_to_file([mean_text], file_name, False)

        return mean_value
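A hypothetical call site (DiffTimer stands in for whatever class owns this method and provides get_diff_list_seconds()):

timer = DiffTimer()  # hypothetical owner class
mean_seconds = timer.print_and_save_mean("DiffTimes.csv")  # prints the mean and appends all values to the file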
Example #11
    def _save_current_order(self, order_id, stock_ticker, order_type='', action='', quantity=0,
                            limit_price=0, security_type='STK', exchange='SMART', currency='USD'):
        """
        Save the current order into file.
        :param order_id: order id by interactive broker, must be unique
        :param stock_ticker: stock ticker
        :param order_type: order type, e.g. LMT for limit orders
        :param action: BUY / SELL
        :param quantity: number of stocks to order
        :param limit_price: limit price to buy or sell
        :param security_type: STK for stocks
        :param exchange: exchange to trade on, e.g. SMART
        :param currency: USD / EUR
        :return: nothing
        """
        FileUtils.check_file_exists_or_create(self.file_path, GlobalVariables.get_order_file_header())

        text_line = ",".join(
            str(x) for x in (datetime.now(), stock_ticker, order_id,
                             order_type, action, quantity, limit_price,
                             security_type, exchange, currency))
        print(text_line)
        FileUtils.append_textline_to_file(text_line, self.file_path, False)
Example #12
    def test_append_text_list_to_file__write_and_read_again__2x_only_new_data__1x_all_data(
            self):
        filename = GlobalVariables.get_data_files_path() + "TestData\\ScreeningResults.csv"
        test_1 = "Test 1"
        text_list = [test_1, "Test2", "Test3"]
        self.assertFalse(
            FileUtils.append_text_list_to_file(text_list, filename, True))

        text_list_2 = [str(datetime.now()) + ", Test Eintrag"]
        self.assertTrue(
            FileUtils.append_text_list_to_file(text_list_2, filename, True))

        self.assertTrue(
            FileUtils.append_text_list_to_file([test_1], filename, False))

        text_to_find = text_list
        text_to_find.extend(text_list_2)

        with open(filename, 'r') as myfile:
            file_content = myfile.read()
            for text in text_to_find:
                self.assertTrue(str(text) in file_content)
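Example #2 passed an explicit "," separator to write a CSV header line, so the list is presumably joined into a single line before being appended; a sketch consistent with these tests (hypothetical implementation, and the default separator is a guess):

import os


def append_text_list_to_file(text_list, file_path, check_if_exists, separator=","):
    # hypothetical sketch: join the entries into one line and append it if it is new
    line = separator.join(str(e) for e in text_list)
    if check_if_exists and os.path.exists(file_path):
        with open(file_path, "r") as f:
            if any(existing.rstrip("\n") == line for existing in f):
                return False  # already present --> skipped
    with open(file_path, "a") as f:
        f.write(line + "\n")
    return True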
Example #13
    def read_data(self):
        from Utils.FileUtils import FileUtils
        if self.reload_stockdata:
            FileUtils.check_file_exists_and_delete(
                self._parameter_dict['last_check_date_file'])

        # self._read_news_from_traderfox(self._parameter_dict['last_check_date_file'])
        # TODO enable for enhanced info
        # url = "https://traderfox.de/nachrichten/dpa-afx-compact/kategorie-2-5-8-12/"  # analysen, ad hoc, unternehmen, pflichtmitteilungen
        start_time = datetime.now()

        self.text_analysis = GermanTaggerAnalyseNews(
            self.stock_data_container_list, None,
            self._parameter_dict['german_tagger'])

        # ex: #news = "27.02. 10:41 dpa-AFX: ANALYSE-FLASH: Bryan Garnier hebt Morphosys auf 'Buy' - Ziel 91 Euro"
        all_news = []
        all_articles = self._read_news_from_traderfox(
            self._parameter_dict['last_check_date_file'])

        # all_news.extend(self.map_list(all_articles))
        self.map_list(all_articles)

        print("Time diff map_list:" + (str(datetime.now() - start_time)))
Example #14
    def read_orders(self):
        """
        Read the current order id from file
        :return: orders list
        """
        orders = []
        # the order file is created with its header on first use;
        # only read it if it already existed
        if FileUtils.check_file_exists_or_create(self.file_path, GlobalVariables.get_order_file_header()):
            data = pd.read_csv(self.file_path)

            if len(data) > 0:
                # datetime, stock_ticker
                orders = data

        return orders
Example #15
    def _read_current_order_id(self):
        """
        Read the current order id from file
        :return: order id
        """

        # Create an order ID which is 'global' for this session. This
        # will need incrementing once new orders are submitted.
        last_order_id = np.random.randint(1000, 90000)  # pd.np was removed in newer pandas; assumes "import numpy as np"

        if FileUtils.check_file_exists_or_create(self.file_path, GlobalVariables.get_order_file_header()):
            data = pd.read_csv(self.file_path)

            if len(data) > 0:
                last_order_id = data['order_id'][len(data) - 1]

        order_id = last_order_id + 1
        return order_id
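Examples #11, #14 and #15 together imply the contract of check_file_exists_or_create: create the file with a header line when it is missing, and return True only if it already existed. A sketch consistent with those call sites (hypothetical):

import os


def check_file_exists_or_create(file_path, header):
    # hypothetical sketch: True --> file existed; False --> file was just created
    if os.path.exists(file_path):
        return True
    with open(file_path, "w") as f:
        f.write(header + "\n")
    return False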
Example #16
def runExportJobs(inDirectory, tempDirectory):
    listJob = [
        o for o in os.listdir(tempDirectory) if
        (os.path.isfile(os.path.join(tempDirectory, o)) and o.endswith('.fej'))
    ]
    allArray = []
    if len(listJob) > 0:
        allArray = ["All"]
    dirChoiceList = allArray + listJob + ["Return to Main menu"]
    theRunDirectoryChoice = 0
    while (theRunDirectoryChoice != -1 and theRunDirectoryChoice !=
           (len(dirChoiceList) - 1)):
        targetedJob = []
        theRunDirectoryChoice = InteractionUtils.query_choice_list(
            "Choose the targeted directory(ies).", dirChoiceList, True)
        if (theRunDirectoryChoice > 0 and theRunDirectoryChoice !=
            (len(dirChoiceList) - 1)):
            targetedJob.append(listJob[theRunDirectoryChoice - 1])
        elif theRunDirectoryChoice == 0:
            targetedJob = listJob
        elif (theRunDirectoryChoice == -1
              or theRunDirectoryChoice == (len(dirChoiceList) - 1)):
            break
        for aJobFile in targetedJob:
            excludedEpisodePath = os.path.join(tempDirectory,
                                               aJobFile[:-4] + '.ejd')
            jobListPath = os.path.join(tempDirectory, aJobFile)
            excludedEpisode = FileUtils.importFileOrCreateArray(
                excludedEpisodePath)
            jobList = FileUtils.importFileOrCreateDict(jobListPath)
            print(jobList)
            jobListBis = copy.deepcopy(jobList)
            indexJobList = 1
            for src, dest in jobList.items():
                print("[" + str(indexJobList) + "/" +
                      str(len(jobList.items())) + "]Move '" + src[14:] +
                      "' to '" + dest + "'.")
                newDest = os.path.join(dest, src[14:])
                oldDest = os.path.join(inDirectory, src)
                if os.path.exists(newDest) and not filecmp.cmp(
                        oldDest, newDest):
                    os.remove(newDest)
                if not os.path.exists(newDest):
                    shutil.copy(oldDest, dest)
                excludedEpisode.append(src[14:])
                del jobListBis[src]
                FileUtils.writeFile(jobListPath, jobListBis)
                FileUtils.writeFile(excludedEpisodePath, excludedEpisode)
                indexJobList += 1
Example #17
    def test_read_data_all(self):
        dict_with_stock_pages_to_read = \
            GlobalVariables.get_other_parameters_with_default_parameters()['dict_with_stock_pages_to_read']

        stock_data_container_list = FileUtils.read_tickers_from_file_or_web(
            stock_data_container_file, True, dict_with_stock_pages_to_read)

        # stock_data_container_list = self.split_list(stock_data_container_list, 3)
        # stock_data_container_list = stock_data_container_list[0]

        # TODO abstract factory: http://python-3-patterns-idioms-test.readthedocs.io/en/latest/Factory.html
        # TODO eventuell als return statt als call by reference: stock_data_container_list = data_storage.read_data("HistoricalDataReader", stock_data_container_list, weeks_delta, GlobalVariables.get_data_files_path() + 'stock_dfs')
        # TODO reload data
        data_storage = DataReaderFactory()
        strategy_parameter_dict = {
            'Name': 'HistoricalDataReader',
            'weeks_delta': 52,
            'data_source': 'iex'
        }
        stock_data_reader = data_storage.prepare("HistoricalDataReader",
                                                 stock_data_container_list,
                                                 stock_data_container_file,
                                                 True, strategy_parameter_dict)
        stock_data_reader.read_data()

        failed_reads = 0
        for stock_data_container in stock_data_container_list:
            if len(stock_data_container.historical_stock_data()) <= 0:
                failed_reads += 1

        self.assertGreater(30, failed_reads)
        logger.info("Failed reads: " + str(failed_reads))

        # TODO German stocks no longer work: self.assertEqual(len(stock_data_container_list), 818)
        self.assertEqual(len(stock_data_container_list), 505)
        self.assertGreater(
            len(stock_data_container_list[0].historical_stock_data()), 200)
        self.assertGreater(
            len(stock_data_container_list[1].historical_stock_data()), 200)
Example #18
    def __init__(self):
        self.image_utils = ImageUtils()
        self.file_utils = FileUtils()

        self.detectron_bladder = DetectronBladder()
        self.detectron_tumor = DetectronBladderTumor()
Example #19
class DetectronWrapper():
    def __init__(self):
        self.image_utils = ImageUtils()
        self.file_utils = FileUtils()

        self.detectron_bladder = DetectronBladder()
        self.detectron_tumor = DetectronBladderTumor()

    def create_result_folders(self, userID, imageDate):
        user_folder = self.file_utils.create_folder("./Data/Server/" +
                                                    str(userID) + "/")

        out = self.file_utils.create_folder(user_folder + imageDate + "/")

        return out

    def save_image(self, image_bytes, path, skipped):
        # skip the data-URL style prefix, then decode the base64 payload
        npbytes = base64.b64decode(image_bytes[skipped:])
        with open(path, "wb") as file:
            file.write(npbytes)
        return path

    def save_png_images(self, output_folder, readdata):
        k = 1

        png_images = []
        png_image_paths = []

        for x in readdata:  # iterate over the 2D slices of the volume
            file_path = str(k) + ".png"
            plt.imsave(output_folder + file_path, x, cmap="gray")
            png_images.append(k)
            png_image_paths.append(file_path)
            k += 1

        return png_images, png_image_paths

    def obtain_png_images(self, output_folder, imageName, imageBytes):
        path = self.save_image(imageBytes, output_folder + imageName, 37)

        readdata, header = nrrd.read(path, index_order='C')

        return self.save_png_images(output_folder, readdata)

    def detect_bladder(self, png_images, output_folder):
        bladder_out = self.file_utils.create_folder(output_folder + "Bladder/")

        bladder_images = []
        bladder_masks = []

        for p in png_images:
            file_path = output_folder + str(p) + ".png"
            out_path = bladder_out + str(p) + ".png"
            found, mask, err = self.detectron_bladder.detect_bladder(
                file_path, out_path)
            if found:
                bladder_images.append(p)
                bladder_masks.append(mask)

        bladder_image_paths, bladder_masks = self.image_utils.get_longest_sequence_paths(
            bladder_images, bladder_masks)

        return bladder_image_paths, bladder_masks, bladder_out

    def detect_tumor(self, bladder_image_paths, bladder_folder):
        # tumor_out = self.file_utils.create_folder(output_folder + "Tumor/")

        tumor_images = []
        tumor_masks = []

        print("Tumor")

        for p in bladder_image_paths:
            file_path = bladder_folder + str(p)
            print(file_path)
            # out_path = tumor_out + str(p)
            mask, err = self.detectron_tumor.detect_tumor(file_path)  # , out_path)
            if len(mask) > 0:
                tumor_images.append(p)
                tumor_masks.append(mask)

        # tumor_images, tumor_masks = self.image_utils.get_longest_sequence_paths(tumor_images, tumor_masks)

        return tumor_images, tumor_masks

    def get_bladder_tumor_indices(self, bladder_image_paths, tumor_images,
                                  name):
        bladder_i = -1
        tumor_i = -1
        try:
            bladder_i = bladder_image_paths.index(name)
            tumor_i = tumor_images.index(name)
        except ValueError:
            # the name is missing from one of the lists; keep the -1 sentinels
            pass

        return bladder_i, tumor_i

    def save_output_image(self, im, masks, output_path, input_path):
        instances = Instances((512, 512))
        instances.pred_masks = masks

        if len(instances.pred_masks) > 0:
            v = Visualizer(
                np.flip(im[:, :, ::-1], 1),
                metadata=self.detectron_tumor.tumor_metadata,
                scale=0.8,
                instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels
            )
            v = v.draw_instance_predictions(instances)
            plt.figure(figsize=(14, 10))
            img = np.flip(v.get_image()[:, :, ::-1], 1)
            img = Image.fromarray(img, 'RGB')
            img.save(output_path)
        else:
            copyfile(input_path, output_path)

    def get_output_image_path(self, final_output, output_folder, png_path,
                              bladder_image_paths, tumor_images, tumor_masks):
        output_path = final_output + png_path
        input_path = output_folder + png_path
        im = cv2.imread(input_path)

        bladder_i, tumor_i = self.get_bladder_tumor_indices(
            bladder_image_paths, tumor_images, str(png_path))

        if bladder_i != -1 and tumor_i != -1 and len(tumor_masks[tumor_i]) > 0:
            self.save_output_image(im, tumor_masks[tumor_i], output_path,
                                   input_path)
        else:
            copyfile(input_path, output_path)

        return output_path

    def get_output_bytes(self, output_folder, png_image_paths,
                         bladder_image_paths, tumor_images, tumor_masks):
        final_output = self.file_utils.create_folder(output_folder + "Output/")

        resultBytes = []

        for i in range(len(png_image_paths)):
            output_path = self.get_output_image_path(final_output,
                                                     output_folder,
                                                     png_image_paths[i],
                                                     bladder_image_paths,
                                                     tumor_images, tumor_masks)

            resultBytes.append(self.image_utils.image_to_bytes(output_path))

        return resultBytes

    def clean_up(self, userID, bladder_folder, output_folder):
        if userID == "guest":
            self.file_utils.delete_folder(output_folder)
        else:
            self.file_utils.delete_folder(bladder_folder)
            self.file_utils.delete_files(output_folder, ".png")
            self.file_utils.delete_files(output_folder, ".nrrd")

    def obtain_png_from_png(self, output_folder, imageBytes):
        png_images = [1]
        png_image_paths = ["1.png"]

        self.save_image(imageBytes, output_folder + "1.png", 22)

        return png_images, png_image_paths

    def detect_file(self, userID, imageDate, imageName, imageBytes):
        output_folder = self.create_result_folders(userID, imageDate)

        if self.file_utils.is_of_type(imageName, ".nrrd"):
            png_images, png_image_paths = self.obtain_png_images(
                output_folder, imageName, imageBytes)
        else:
            png_images, png_image_paths = self.obtain_png_from_png(
                output_folder, imageBytes)

        print(png_image_paths)
        print(png_images)

        bladder_image_paths, bladder_masks, bladder_folder = self.detect_bladder(
            png_images, output_folder)

        print(bladder_image_paths)

        tumor_images, tumor_masks = self.detect_tumor(bladder_image_paths,
                                                      bladder_folder)

        resultBytes = self.get_output_bytes(output_folder, png_image_paths,
                                            bladder_image_paths, tumor_images,
                                            tumor_masks)

        self.clean_up(userID, bladder_folder, output_folder)

        return resultBytes

    def get_record_images(self, userID, imageDate):
        user_folder = "Data/Server/" + userID + "/"
        if not self.file_utils.file_exists(user_folder):
            return []

        record_folder = user_folder + imageDate + "/"
        if not self.file_utils.file_exists(record_folder):
            return []

        images_folder = record_folder + "Output/"
        if not self.file_utils.file_exists(images_folder):
            return []

        resultBytes = []
        for file in self.file_utils.get_files_in_folder(images_folder):
            file_path = images_folder + file
            resultBytes.append(self.image_utils.image_to_bytes(file_path))

        return resultBytes
Example #20
class DetectronBladderTumor:
    def __init__(self):
        self.image_utils = ImageUtils()
        self.mask_utils = MaskUtils()
        self.file_utils = FileUtils()
        self.coco_utils = CocoUtils()

        self.cfg = None
        self.predictor = None
        self.tumor_metadata = None

        self.configure()

    def configure(self):
        self.cfg = get_cfg()
        self.cfg.OUTPUT_DIR = "./Output/Tumor"
        self.cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
        self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.OUTPUT_DIR, "model_final.pth")
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        self.predictor = DefaultPredictor(self.cfg)
        self.tumor_metadata = MetadataCatalog.get("tumor_test")

    def train(self):
        os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)
        trainer = DefaultTrainer(self.cfg)
        trainer.resume_or_load(resume=False)
        trainer.train()

    def save_resulted_image(self, im, instances, output_path):
        v = Visualizer(im[:, :, ::-1],
                       metadata=self.tumor_metadata,
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE_BW
                       )
        v = v.draw_instance_predictions(instances)
        plt.figure(figsize=(14, 10))
        img = v.get_image()[:, :, ::-1]

        plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

        if output_path != "":
            resized = cv2.resize(img, (512, 512), interpolation=cv2.INTER_AREA)
            cv2.imwrite(output_path, resized)

        plt.close()

    def process_result(self, outputs):
        instances = outputs["instances"].to("cpu")

        # self.save_resulted_image(im, instances, output_path)

        if instances.has("pred_masks") and len(instances.pred_masks) > 0:
            return instances.pred_masks, ""
        else:
            return [], ""

    def detect_tumor(self, image_path):
        im = cv2.imread(image_path)

        outputs = self.predictor(im)

        return self.process_result(outputs)

    def test_detect(self, images_folder, output_folder, test_files, width, height):
        ious = []
        dscs = []
        for (image_path, json_path) in test_files:
            output_path = output_folder + image_path
            pred_masks, err = self.detect_tumor(images_folder + image_path)

            if json_path is not None:
                masks = self.coco_utils.json_annotation_to_masks(images_folder + json_path)
            else:
                masks = None
            output_mask = self.mask_utils.combine_masks(pred_masks, width, height)
            self.mask_utils.draw_mask(output_mask, output_path[:-4] + "-m.png")
            expected_mask = self.mask_utils.combine_masks(masks, width, height)
            self.mask_utils.draw_mask(expected_mask, output_path[:-4] + "-m2.png")

            intersection, union, mask_sum = self.mask_utils.get_intersection_union_sum(expected_mask, output_mask)

            # an empty union means both masks are empty --> perfect agreement
            if union == 0:
                if intersection == 0:
                    iou = 1
                else:
                    iou = 0
            else:
                iou = intersection / union

            # log images with very poor overlap
            if iou <= 0.1:
                print(image_path)

            if mask_sum == 0:
                dsc = 1
            else:
                dsc = (2 * intersection) / mask_sum

            ious.append(iou)
            dscs.append(dsc)

        return ious, dscs

    def get_IOU_DSC(self, images_folder, output_folder):
        test_files = self.file_utils.get_test_files(images_folder)
        ious, dscs = self.test_detect(images_folder, output_folder, test_files, 512, 512)
        return sum(ious) / len(ious), sum(dscs) / len(dscs)
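For reference, these are the standard overlap metrics: IoU = |A ∩ B| / |A ∪ B| and DSC = 2 * |A ∩ B| / (|A| + |B|). For example, two 100-pixel masks that overlap in 80 pixels give IoU = 80 / 120 ≈ 0.67 and DSC = 160 / 200 = 0.8; the code above treats two empty masks as a perfect score of 1.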
Example #21
class DetectronBladder:
    def __init__(self):
        self.image_utils = ImageUtils()
        self.mask_utils = MaskUtils()
        self.file_utils = FileUtils()
        self.coco_utils = CocoUtils()

        self.cfg = None
        self.predictor = None
        self.bladder_metadata = None

        self.configure()

    def configure(self):
        self.cfg = get_cfg()
        self.cfg.OUTPUT_DIR = "./Output/Bladder"
        self.cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
        self.cfg.MODEL.WEIGHTS = os.path.join(self.cfg.OUTPUT_DIR,
                                              "model_final.pth")
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        self.predictor = DefaultPredictor(self.cfg)
        self.bladder_metadata = MetadataCatalog.get("bladder_test")

    def train(self):
        os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)
        trainer = DefaultTrainer(self.cfg)
        trainer.resume_or_load(resume=False)
        trainer.train()

    def register_dataset(self):
        for d in ["train", "test"]:
            register_coco_instances(f"bladder_{d}", {},
                                    f"Data/Vezica/{d}.json",
                                    f"Data/Vezica/{d}")

    def process_result(self, image_path, outputs, output):
        try:
            instances = outputs["instances"].to("cpu")
            if instances.has("pred_boxes") and len(instances.pred_boxes) > 0:
                if output != "":
                    im2 = Image.open(image_path)
                    img = self.image_utils.get_masked_image(
                        im2, instances.pred_masks[0])
                    img.save(output)
                return True, instances.pred_masks[:1], ""
            else:
                return False, [], ""
        except Exception as e:
            return False, [], str(e)

    def test_detect(self, images_folder, output_folder, test_files, width,
                    height):
        ious = []
        dscs = []
        for (image_path, json_path) in test_files:
            output_path = output_folder + image_path
            is_bladder, pred_masks, err = self.detect_bladder(
                images_folder + image_path, output_path)

            if json_path is not None:
                masks = self.coco_utils.json_annotation_to_masks(
                    images_folder + json_path)
            else:
                masks = None
            output_mask = self.mask_utils.combine_masks(
                pred_masks, width, height)
            self.mask_utils.draw_mask(output_mask, output_path[:-4] + "-m.png")
            expected_mask = self.mask_utils.combine_masks(masks, width, height)
            self.mask_utils.draw_mask(expected_mask,
                                      output_path[:-4] + "-m2.png")

            intersection, union, mask_sum = self.mask_utils.get_intersection_union_sum(
                expected_mask, output_mask)

            # an empty union means both masks are empty --> perfect agreement
            if union == 0:
                if intersection == 0:
                    iou = 1
                else:
                    iou = 0
            else:
                iou = intersection / union

            # log images with very poor overlap
            if iou <= 0.1:
                print(image_path)

            if mask_sum == 0:
                dsc = 1
            else:
                dsc = (2 * intersection) / mask_sum

            ious.append(iou)
            dscs.append(dsc)

        return ious, dscs

    def get_IOU_DSC(self, images_folder, output_folder):
        test_files = self.file_utils.get_test_files(images_folder)
        ious, dscs = self.test_detect(images_folder, output_folder, test_files,
                                      512, 512)
        return sum(ious) / len(ious), sum(dscs) / len(dscs)

    def detect_bladder(self, image_path, output_path):
        im = cv2.imread(image_path)

        outputs = self.predictor(im)

        return self.process_result(image_path, outputs, output_path)
Example #22
    def test_dump_params(self):
        global val, w, root
        root = Tk()
        top = ASTA_Framework(root)
        controller = MvcController.init(root, top)
        data_file_path = GlobalVariables.get_data_files_path()

        strat_param_file = GlobalVariables.get_data_files_path() + '\\TestData\\ParameterFile_test_dump_and_load_strat_params.pickle'
        FileUtils.check_file_exists_and_delete(strat_param_file)

        strategy_parameter_dict = {
            'SimplePatternNewsStrategy': {
                'news_threshold': 0.7,
                'german_tagger':
                data_file_path + 'nltk_german_classifier_data.pickle',
                'data_readers': {
                    'TraderfoxNewsDataReader': {
                        'last_check_date_file':
                        data_file_path + 'TestData\\last_date_time.csv',
                        'german_tagger':
                        data_file_path + 'nltk_german_classifier_data.pickle',
                        'reload_data': True,
                        'ticker_needed': False
                    },
                    'HistoricalDataReader': {
                        'weeks_delta': 52,
                        'data_source': 'iex',
                        'reload_data': True,
                        'ticker_needed': False
                    }
                }
            }
        }
        stock_data_file = data_file_path + 'TestData\\stock_data_container_file.pickle'
        other_params = {
            'stock_data_container_file': stock_data_file,
            'dict_with_stock_pages_to_read': {
                'SP500': {
                    'websource_address':
                    "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies",
                    'find_name': 'table',
                    'class_name': 'class',
                    'table_class': 'wikitable sortable',
                    'ticker_column_to_read': 0,
                    'name_column_to_read': 1,
                    'stock_exchange': 'en'
                }
            },
            'RiskModels': {
                'FixedSizeRiskModel': {
                    'OrderTarget': 'order_target_value',
                    'TargetValue': 2500
                }
            }
        }

        params = {
            'Strategies': strategy_parameter_dict,
            'OtherParameters': other_params
        }
        req_params = StrategyFactory.get_required_parameters_with_default_parameters()
        import _pickle as pickle
        with open(strat_param_file, "wb") as f:
            pickle.dump(params, f)
Example #23
from Algorithm import Model
from Utils.Preprocess import Preprocess
from Algorithm.ThreadManager import ThreadManager
from Utils.Log import writer
from Utils.FileUtils import FileUtils
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression

if __name__ == '__main__':
    # create our data set

    headers = ['Id', 'OverallQual', 'YearBuilt', 'OverallCond', 'OpenPorchSF']
    df1 = FileUtils.read_data_frame_from_path('Data/train_1.csv', headers)
    df2 = FileUtils.read_data_frame_from_path('Data/train_2.xlsx', headers)
    df3 = FileUtils.read_data_frame_from_path('Data/train_targets.csv')
    # DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent
    df_total = pd.concat([df1, df2]).merge(df3, on='Id', how='left')
    pp = Preprocess(df_total, 'SalePrice')
    pp.replace_nan()
    X_train, X_test, y_train, y_test = pp.split_train_test_by_pandas()

    # send to train
    params = [{
        'model': SVC,
        "C": 0.4
    }, {
        'model': LinearRegression,
        'normalize': False,
        "fit_intercept": False
    }]
Example #24
# -*- coding: utf-8 -*-

__author__ = "sun hai lang"
__date__ = "2019-12-20"

from Utils.FileUtils import FileUtils
from Utils.VersionData import VersionData

fileUtils = FileUtils()

__versionList__ = []


def AddVersionList(verlist):
    print(type(verlist))
    for ver in verlist:
        version = VersionData()
        version.__dict__ = ver
        __versionList__.append(version)


def GetVersion(platform):
    pfList = [item for item in __versionList__ if item.m_key == platform]
    if len(pfList) > 0:
        return pfList[0]
    else:
        return VersionData()
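A hypothetical usage of this small version registry (the dict keys become VersionData attributes via __dict__; only m_key appears in this snippet):

AddVersionList([{"m_key": "android"}, {"m_key": "ios"}])
current = GetVersion("android")   # returns the matching VersionData
fallback = GetVersion("windows")  # no match --> returns a fresh VersionData()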
Example #25
    def test_dump_and_load_other_parameter_to_file(self):
        global val, w, root
        root = Tk()
        top = ASTA_Framework(root)
        controller = MvcController.init(root, top)

        data_file_path = GlobalVariables.get_data_files_path()

        strat_param_file = data_file_path + '\\TestData\\ParameterFile_test_dump_and_load_strat_params.pickle'
        FileUtils.check_file_exists_and_delete(strat_param_file)

        strategy_parameter_dict = {
            'SimplePatternNewsStrategy': {
                'news_threshold': 0.7,
                'german_tagger':
                data_file_path + 'nltk_german_classifier_data.pickle',
                'data_readers': {
                    'TraderfoxNewsDataReader': {
                        'last_check_date_file':
                        data_file_path + 'TestData\\last_date_time.csv',
                        'german_tagger':
                        data_file_path + 'nltk_german_classifier_data.pickle',
                        'reload_data': True,
                        'ticker_needed': False
                    },
                    'HistoricalDataReader': {
                        'weeks_delta': 52,
                        'data_source': 'iex',
                        'reload_data': True,
                        'ticker_needed': False
                    }
                }
            },
            'W52HighTechnicalStrategy': {
                'check_days': 7,
                'min_cnt': 3,
                'min_vol_dev_fact': 1.2,
                'within52w_high_fact': 0.98,
                'data_readers': {
                    'HistoricalDataReader': {
                        'weeks_delta': 52,
                        'data_source': 'iex',
                        'reload_data': False,
                        'ticker_needed': True
                    }
                }
            }
        }
        stock_data_file = data_file_path + 'TestData\\stock_data_container_file.pickle'
        other_params = {
            'stock_data_container_file': stock_data_file,
            'dict_with_stock_pages_to_read': {
                'SP500': {
                    'websource_address':
                    "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies",
                    'find_name': 'table',
                    'class_name': 'class',
                    'table_class': 'wikitable sortable',
                    'ticker_column_to_read': 0,
                    'name_column_to_read': 1,
                    'stock_exchange': 'en'
                }
            },
            'RiskModels': {
                'FixedSizeRiskModel': {
                    'OrderTarget': 'order_target_value',
                    'TargetValue': 2500
                }
            }
        }

        params = {
            'Strategies': strategy_parameter_dict,
            'OtherParameters': other_params
        }
        req_params = StrategyFactory.get_required_parameters_with_default_parameters()
        controller.dump_analysis_parameters_to_file(strat_param_file, params,
                                                    req_params)

        controller.load_analysis_parameters_from_file(strat_param_file,
                                                      req_params)
        # the reloaded parameters must equal the dumped ones
        # (ast.literal_eval expects a string, not a dict, so it is dropped here)
        self.assertEqual(params,
                         controller.model.analysis_parameters.get(),
                         req_params)
Example #26
def buildExportJobs(inDirectory, outDirectory, tempDirectory):
    listDir = [
        o for o in os.listdir(inDirectory)
        if (os.path.isdir(os.path.join(inDirectory, o)) and re.search(
            "[0-9][0-9][0-9][0-9][0-9][0-9]_[0-9][0-9][0-9][0-9][0-9][0-9]", o)
            )
    ]
    allArray = []
    targetedArray = []
    if len(listDir) > 0:
        allArray = ["All"]
    dirChoiceList = allArray + listDir + ["Return to Main menu"]
    theBuildDirectoryChoice = 0
    while (theBuildDirectoryChoice != -1 and theBuildDirectoryChoice !=
           (len(dirChoiceList) - 1)):
        theBuildDirectoryChoice = InteractionUtils.query_choice_list(
            "Choose the targeted directory(ies).", dirChoiceList, True)
        if (theBuildDirectoryChoice > 0 and theBuildDirectoryChoice !=
            (len(dirChoiceList) - 1)):
            targetedArray.append(listDir[theBuildDirectoryChoice - 1])
        elif theBuildDirectoryChoice == 0:
            targetedArray = listDir
        elif (theBuildDirectoryChoice == -1
              or theBuildDirectoryChoice == (len(dirChoiceList) - 1)):
            break
        for aDir in targetedArray:
            print(
                "##########################################################################"
            )
            print("We build the export Jobs for directory '" + aDir +
                  "' with file '" + aDir + ".fej'")
            exportJobDoneFileName = os.path.join(tempDirectory, aDir + ".ejd")
            jobOrderFileName = os.path.join(tempDirectory, aDir + ".fej")
            associationTable = FileUtils.importFileOrCreateDict(
                jobOrderFileName)
            excludedArray = FileUtils.importFileOrCreateArray(
                exportJobDoneFileName)
            aWeekDirPath = os.path.join(inDirectory, aDir)
            listEpisode = [
                o for o in os.listdir(aWeekDirPath)
                if (os.path.isfile(os.path.join(aWeekDirPath, o))
                    and not o.endswith('.srt'))
            ]
            for anEpisode in listEpisode:
                if (anEpisode not in excludedArray and os.path.join(
                        aDir, anEpisode) not in associationTable.keys()):
                    episodeChoiceList = [
                        "Build export for this episode", "Skip the episode",
                        "Ignore the episode", "Return to folder choice"
                    ]
                    exportAnswer = int(
                        InteractionUtils.query_choice_list(
                            "Choose your action for '" + anEpisode + "'.",
                            episodeChoiceList, True))
                    if exportAnswer == 0:
                        buildJobForEpisode(aDir, anEpisode, associationTable,
                                           outDirectory)
                    elif exportAnswer == 2:
                        excludedAnEpisode(anEpisode, excludedArray,
                                          aWeekDirPath)
                    elif exportAnswer == 3:
                        break
            FileUtils.writeFile(jobOrderFileName, associationTable)
            FileUtils.writeFile(exportJobDoneFileName, excludedArray)
Example #27
 def _delete_music_cache(self):
     path = "./music_cache"
     pattern = r".*\..*"
     FileUtils.remove_files_in_dir(path, pattern)