Example 1
import os


class Coupon:

    def __init__(self, couponDirectory):
        self.couponDirectory = couponDirectory
        self.couponDatas = []  # instance list instead of a shared class attribute
        self.csvReader = CSVReader()
        self.__generateCouponDatas()

    def __generateCouponDatas(self):
        filesDirectory = self.couponDirectory
        for dirname, dirnames, filenames in os.walk(filesDirectory):
            # collect data from every file under the coupon directory
            for filename in filenames:
                fileNamePath = os.path.join(dirname, filename)
                fileNamePath = os.path.normpath(fileNamePath)
                filename = filename.split('.')[0]
                fileDatas = self.__getCSVData(fileNamePath)
                couponData = []
                couponData.append(filename)
                couponData.append(fileDatas)
                self.couponDatas.append(couponData)

    def __getCSVData(self,fileNamePath):
        self.csvReader.setFileName(fileNamePath)
        return self.csvReader.readCsv()

    def getCouponDatas(self):
        return self.couponDatas
Example 2
def main(filename="./sample.csv"):
    """
    Main method for testing
    """
    reader = CSVReader()

    generator = KeywordGenerator(reader.read_file(filename))
    for word in generator.generate():
        print(word + ",", end="", flush=True)
Example 3
 def extract(self):
     logger.info('extracting csvs from shared drive')
     csv_reader = CSVReader(Config.directory)
     csv_reader.targets = [Target(*params) for params in Config.targets]
     received, shipped, history, picked = csv_reader.read()
     received.to_csv('data/received.csv')
     shipped.to_csv('data/shipped.csv')
     history.to_csv('data/history.csv')
     picked.to_csv('data/picked.csv')
     return received, shipped, history, picked
Example 4
def model_test():
    print("here")
    sentiments = np.load('sentiments.npy', allow_pickle=True)
    texts = np.load('texts.npy', allow_pickle=True)
    all_texts = np.load('text_cache.npy', allow_pickle=True)
    neutral = []

    _, X_test, _, Y_test = train_test_split(texts, sentiments, test_size=0.01)
    print("here ",len(Y_test))
    airline_data = CSVReader.dataframe_from_file("Tweets.csv",['airline_sentiment','text'])
    airline_text = np.array(airline_data.text)
    airline_sentiment = np.array(airline_data.airline_sentiment)
    count = 0
    for i in range(len(airline_text)):
        if(count > 1000):
            break
        if(airline_sentiment[i] == "neutral"):
             neutral = np.append(neutral,airline_text[i])
             count+=1
    X_test = np.append(X_test,neutral)
    Y_test = np.append(Y_test,[0]*len(neutral))
    Y_test[Y_test==-1] = 4
    Y_test[Y_test==-2] = 3
    # categ_test = to_categorical(Y_test,num_classes=5)
    tokenizer = Tokenizer(num_words=300000)
    tokenizer.fit_on_texts(all_texts)
    model = load_model("savedModel2/saved-model3-60.h5")
    result = model.predict_on_batch(pad_sequences(tokenizer.texts_to_sequences(X_test),maxlen=75))
    result = np.argmax(result,axis=-1)
    # cat_result = to_categorical(result,num_classes=5)
    print("f1 ",precision_score(Y_test,result, average=None))
Example 5
    def read(self):
        if not os.path.exists(self.filepath):
            raise Exception("File not found {}".format(self.filepath))
        self.filetype = os.path.splitext(self.filepath)[1]
        if self.filetype == '.xls':
            # read excel sheets, these are expected to be there, TODO: error if not
            ingredient_reader = ExcelReader(self.filepath, 'ingredients')
            step_reader = ExcelReader(self.filepath, 'steps')
            data_reader = ExcelReader(self.filepath, 'data')
            # get as dataframes
            ingredients = ingredient_reader.read_xl()
            self.ingredients = ingredients.fillna(0)
            steps = step_reader.read_xl()
            self.steps = steps.fillna(0)
            self.data = data_reader.read_xl()
        elif self.filetype == '.txt':
            reader = TextReader(self.filepath)
            self.data = reader.read()
        elif self.filetype == '.csv':
            reader = CSVReader(self.filepath)
            self.data = reader.read()
        else:
            raise Exception("Incorrect file type {}".format(self.filetype))

        return self.data
Example 6
    def _packet_in_handler(self, ev):
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        # get Datapath ID to identify OpenFlow switches.
        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        # analyse the received packets using the packet library.
        pkt = packet.Packet(msg.data)
        eth_pkt = pkt.get_protocol(ethernet.ethernet)
        dst = eth_pkt.dst
        src = eth_pkt.src

        # get the received port number from packet_in message.
        in_port = msg.match['in_port']
        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        # read policies from csv file
        policies = CSVReader().read()
        out_port = 0

        if (dst in policies[src]) or dst == 'ff:ff:ff:ff:ff:ff':
            self.logger.info("packet is permitted")
            if dst in self.mac_to_port[dpid]:
                out_port = self.mac_to_port[dpid][dst]
                self.logger.info("packet is knowingly forwarded to port %s",
                                 out_port)
            else:
                out_port = ofproto.OFPP_FLOOD
                self.logger.info("packet is flooded.")

            actions = [parser.OFPActionOutput(out_port)]

            # install a flow to avoid packet_in next time.
            if out_port != ofproto.OFPP_FLOOD:
                self.logger.info("installing flood...")
                match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
                self.add_flow(datapath, 1, match, actions)

            # construct packet_out message and send it.
            out = parser.OFPPacketOut(datapath=datapath,
                                      buffer_id=ofproto.OFP_NO_BUFFER,
                                      in_port=in_port,
                                      actions=actions,
                                      data=msg.data)

            datapath.send_msg(out)
        else:
            self.logger.info("packet has no policy.")
Example 7
def run_script():

    user_id = np.random.randint(low=1000000000,
                                high=9999999999,
                                size=DATASET_SIZE)

    category = np.random.choice(CATEGORY, DATASET_SIZE)
    file1_data = {'user_id': user_id, 'category': category}
    # creating file1.csv
    CSVReader().create_csv_file(file_name='file1.csv',
                                data=file1_data,
                                columns_name=['user_id', 'category'])

    age = np.random.randint(low=10, high=100, size=DATASET_SIZE)
    height = np.random.uniform(low=3.5, high=6.5, size=DATASET_SIZE)
    file2_data = {'user_id': user_id, 'age': age, 'height': height}
    # creating file2.csv
    CSVReader().create_csv_file(file_name='file2.csv',
                                data=file2_data,
                                columns_name=['user_id', 'age', 'height'])
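Example 7 only shows the calling side. A minimal sketch of what a create_csv_file helper like this might do, assuming pandas is available (hypothetical; the project's actual implementation is not shown here):

import pandas as pd


class CSVReader:

    def create_csv_file(self, file_name, data, columns_name):
        # data is a dict of equal-length arrays; columns_name fixes the column order
        pd.DataFrame(data).to_csv(file_name, columns=columns_name, index=False)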
Example 8
    def testSquare(self) -> None:

        value1_index,result_index,test_data = CSVReader.callCreateCSVReader("TwoFieldsCSV",
                                                                            "../csv_files/",
                                                                            "Unit Test Square.csv"
        )
        #pprint(test_data)
        #print()
        for row in test_data:
            
            value1 = int(row[value1_index])
            result = int(row[result_index])
            
            self.assertEqual(self.calculator.sqr(value1), result)
            self.assertEqual(self.calculator.result, result)
Example 9
    def testDivision(self) -> None:

        value1_index,value2_index,result_index,test_data = CSVReader.callCreateCSVReader("ThreeFieldsCSV",
                                                                                       "../csv_files/",
                                                                                       "Unit Test Division.csv"
        )
        #pprint(test_data)
        #print()
        for row in test_data:
            
            value1 = int(row[value1_index])
            value2 = int(row[value2_index])
            result = float(row[result_index])
            
            self.assertEqual(self.calculator.divide(value1, value2), result)
            self.assertEqual(self.calculator.result, result)
Example 10
 def testFileNotExist(self):
     expectedValue = []
     self.assertEqual(CSVReader("rafiar.csv").readCsv, expectedValue)
Example 11
 def testFileExists(self):
     expectedValue = [[
         'rafiar', 'rahmansyah', 'borneo', 'tangerang', 'pegipegi'
     ], ['nafiar', 'rahmansyah', 'borneo', 'tangerang', 'tokopedia']]
     self.assertEqual(CSVReader("rafiar.csv").readCsv, expectedValue)
Example 12
 def __init__(self):
     #self.emotion_data = CSVReader.dataframe_from_file("VentDataset/emotions.csv", ['id', 'emotion_category_id'])
     # self.emotion_data = self.emotion_data[self.emotion_data.enabled == 'TRUE']
     self.vent_data = CSVReader.dataframe_from_file("VentDataset/vents.csv",
                                                    ['emotion_id', 'text'])
     self.textPreProcessing = TextPreprocessing()
Example 13
def train_network(network, device):

    csv_reader = CSVReader()

    # Load the training data from the csv files
    training_loader = csv_reader.get_training_loader(
        Config.batch_size, Config.shuffle, Config.load_data_from_file,
        Config.preprocess_data)
    validation_loader = csv_reader.get_validation_loader(
        Config.batch_size, Config.shuffle, Config.load_data_from_file,
        Config.preprocess_data)

    # Converts data into 3D renderings
    if (Config.use_image_generator):
        from skeleton_visualizer import SkeletonVisualizer

        # You need to give the visualizer 2 skeleton instances to display
        skeleton1 = training_loader.dataset.features[0 + 12].numpy()
        skeleton2 = training_loader.dataset.features[6261 + 12].numpy()
        render = SkeletonVisualizer(skeleton1, skeleton2)
        return

    optimizer = optim.SGD(network.parameters(), lr=0.01)
    criterion_ce = nn.CrossEntropyLoss(size_average=True)

    num_classes = RecognitionDataset.num_classifications
    class_names = RecognitionDataset.classification_names

    # This plotter is a class that I have created
    visdom_env_name = 'Plots'
    if (Config.use_visdom):
        plotter = visdom_utils.VisdomLinePlotter(env_name=visdom_env_name)
        confusion_logger_training = torchnet.logger.VisdomLogger(
            'heatmap',
            env=visdom_env_name,
            opts={
                'title': 'Training Confusion Matrix',
                'columnnames': class_names,
                'rownames': class_names
            })
        confusion_logger_validation = torchnet.logger.VisdomLogger(
            'heatmap',
            env=visdom_env_name,
            opts={
                'title': 'Validation Confusion Matrix',
                'columnnames': class_names,
                'rownames': class_names
            })
    confusion_graph_training = torchnet.meter.ConfusionMeter(num_classes)
    confusion_graph_validation = torchnet.meter.ConfusionMeter(num_classes)

    batch_count = 1

    total_confusion = np.zeros((num_classes, num_classes))

    for epoch in range(Config.max_num_epochs):
        total = 0
        total_correct = 0
        network.train()

        if (Config.use_visdom):
            confusion_graph_training.reset()
            confusion_graph_validation.reset()

        for data in training_loader:
            features, labels = data
            #print((features.min(), features.max()))

            loss_ce, target_np, predicted_np = run_network(
                network, optimizer, criterion_ce, device, features, labels,
                batch_count, True)

            confusion = np.swapaxes(
                confusion_matrix(target_np,
                                 predicted_np,
                                 labels=np.arange(0, num_classes)), 0, 1)
            total_confusion = np.add(total_confusion, confusion)

            # The predicted and target are swapped so the data ends up on the correct axis
            #confusion_graph_training.add(torch.from_numpy(target_np), torch.from_numpy(predicted_np))
            if (Config.use_visdom):
                confusion_graph_training.add(torch.from_numpy(predicted_np),
                                             torch.from_numpy(target_np))

            total_correct += (predicted_np == target_np).sum()
            total += features.size(0)

            batch_count += 1

        # Calculate the network's accuracy every epoch
        validation_correct, validation_total = get_validation_accuracy(
            network, optimizer, criterion_ce, device, validation_loader,
            confusion_graph_validation)

        accuracy_validation = 100 * validation_correct / validation_total
        accuracy_training = 100 * total_correct / total
        print(
            'loss={}   TRAINING: #total={}   #correct={}  VALIDATION: #total={}   #correct={}'
            .format(loss_ce, total, total_correct, validation_total,
                    validation_correct))

        # Plot our results to visdom
        if (Config.use_visdom):
            plotter.plot('loss', 'train', 'Class Loss', epoch, loss_ce.item())
            plotter.plot('accuracy', 'train', 'Class Accuracy', epoch,
                         accuracy_training)
            plotter.plot('accuracy', 'validation', 'Class Accuracy', epoch,
                         accuracy_validation)
            confusion_logger_training.log(confusion_graph_training.value())
            confusion_logger_validation.log(confusion_graph_validation.value())
Example 14
 def __init__(self, couponDirectory):
     self.couponDirectory = couponDirectory
     self.csvReader = CSVReader()
     self.__generateCouponDatas()
Example 15
            "everything is great, i have lost some weight",
            "awesome, really cool", "should I play cards",
            "I am full and inshape", "is it okay to be that hungry at night?"
        ]),
                      maxlen=75))
    print("result: ", np.argmax(result, axis=-1), "\n")


if __name__ == "__main__":
    embeddings = np.load('text_embedding.npy', allow_pickle=True)
    sentiments = np.load('sentiments.npy', allow_pickle=True)
    texts = np.load('texts.npy', allow_pickle=True)
    all_texts = np.load('text_cache.npy', allow_pickle=True)
    _, X_test, _, Y_test = train_test_split(texts, sentiments, test_size=0.01)

    airline_data = CSVReader.dataframe_from_file("Tweets.csv",
                                                 ['airline_sentiment', 'text'])
    airline_text = np.array(airline_data.text)
    airline_sentiment = np.array(airline_data.airline_sentiment)
    count = 0
    for i in range(len(airline_text)):
        if (count > 1000):
            break
        if (airline_sentiment[i] == "neutral"):
            X_test = np.append(X_test, airline_text[i])
            Y_test = np.append(Y_test, [0])
            count += 1
    models = []
    models = np.append(models, load_model("ensemble_bgru.h5"))
    models = np.append(models, load_model("ensemble_gru.h5"))
    models = np.append(models, load_model("ensemble_gru.h5"))
    models = np.append(models, load_model("ensemble_lstm.h5"))
Example 16
import sys
from os.path import join, dirname, realpath
from csv_reader import CSVReader
# (imports for the presenter classes are not shown in this snippet)
from hand_selector import HandSelector
from body_box_extractor import BodyBoxExtractor
from bspline_preprocessor import BSplinePreprocessor
from dtw_processor import DTWProcessor
from task_runner import TaskRunner

ROOT_DIRECTORY = join(dirname(realpath(sys.argv[0])), '..', '..')

left_hand = False
right_hand = True
smoothing = 0.01
sampling_rate = 100

hand_presenter = HandPresenter("Hand Presenter")
bspline_presenter = BSplinePresenter("B-Spline Presenter")
body_box_presenter = BodyBoxPresenter("Body Box Presenter")
dtw_presenter = DTWPresenter("DTW Presenter")

csv_reader = CSVReader(ROOT_DIRECTORY)
hand_selector = HandSelector("Hand Selector", left_hand, right_hand, [hand_presenter])
body_box_extractor = BodyBoxExtractor("Body Box Extractor", [body_box_presenter])
bspline_preprocessor = BSplinePreprocessor("B-Spline Preprocessor", smoothing, sampling_rate, [bspline_presenter])
dtw_processor = DTWProcessor("DTW Processor", 2, [dtw_presenter])
task_runner = TaskRunner(csv_reader, [hand_selector, body_box_extractor, bspline_preprocessor], dtw_processor)

input_files = ['res/HKG_001_a_0002 Aaron 41/HKG_001_a_0002 Aaron 41.csv', 'res/HKG_001_a_0001 Aaron 22/HKG_001_a_0001 Aaron 22.csv']
for input_file in input_files:
    csv_reader.set_file(input_file)
    task_runner.add_data()
task_runner.process()
Example 17
def main():

    # store downloaded CSV files
    download_path = f'{pathlib.Path().absolute()}\\download\\'
    print(f'path: {download_path}')

    # iframe url zacks.com
    iframe_url = 'https://screener-api.zacks.com/?scr_type=stock&c_id=zacks&c_key=0675466c5b74cfac34f6be7dc37d4fe6a008e212e2ef73bdcd7e9f1f9a9bd377&ecv=2ITM2QTOyQDO&ref=screening#'

    # path to web-browser
    wb_path = "C:\\Program Files\\Mozilla Firefox\\firefox.exe"

    # path to web-driver
    firefox_dPath = f'{pathlib.Path().absolute()}\\drivers\\Firefox\\geckodriver.exe'

    if len(sys.argv) >= 2:
        order = "no-sorting"
        output_fn = "all-no-sorting.csv"

        # test if user provided a sorting method
        # ---------------------------------------------------------------------------------- #
        if sys.argv[1] != "ascending" and sys.argv[1] != "descending":
            print(f'Default sorting for final report: {order}')
        elif sys.argv[1] == "ascending" or sys.argv[1] == "descending":
            order = sys.argv[1]  # user should provide a sorting name
            print(f'Sorting for final report: {order}')
        # ---------------------------------------------------------------------------------- #

        # sort report
        # ---------------------------------------------------------------------------------- #
        if order == "ascending":
            output_fn = "all-ascending.csv"
        if order == "descending":
            output_fn = "all-descending.csv"
        # ---------------------------------------------------------------------------------- #

        # Init FireFox Run
        # ---------------------------------------------------------------------------------- #
        run_firefox = RunFirefox(download_path, iframe_url, wb_path,
                                 firefox_dPath)
        # ---------------------------------------------------------------------------------- #

        # init CSV Reader
        # ---------------------------------------------------------------------------------- #
        csv_dir = "download/"
        csv_dir_out = "output/"
        csv_reader = CSVReader(csv_dir, csv_dir_out)  # init CSV reader
        # remove downloaded reports before getting new files
        csv_reader.clean_download()
        # ---------------------------------------------------------------------------------- #

        # get csv files that have tickers and its values
        # ---------------------------------------------------------------------------------- #
        s0 = 10  # time in seconds --> system time
        page_load_time = 180  # time in seconds to wait for page to load
        wait_time = 1  # Explicit wait time
        driver_time = 60  # implicitly wait time --> driver time
        # Run firefox for zacks.com
        run_firefox.run_firefox(s0, page_load_time, wait_time,
                                driver_time)  # using a url of the iframe
        # ---------------------------------------------------------------------------------- #

        # working with csv files
        # ---------------------------------------------------------------------------------- #
        print('Building final report for zacks.com tickers.')
        csv_reader.empty_reports("output/all-ascending.csv")  # empty doc
        csv_reader.empty_reports("output/all-descending.csv")  # empty doc
        csv_reader.empty_reports("output/all-no-sorting.csv")  # empty doc
        csv_reader.empty_reports("output.html")  # empty doc
        print(f'Sorting final report: {order}')
        # arg: ascending, descending, no-sorting; overwrites existing file
        csv_reader.write_csv(order)
        csv_reader.clean_download()  # remove downloaded reports
        # load final report into dictionary and this report will not include the header from CSV file
        final_report = csv_reader.read_final_report(output_fn)
        num_of_records = len(final_report)
        print('Report is ready.')
        # ---------------------------------------------------------------------------------- #

        # Using Selenium WebDriver
        # ---------------------------------------------------------------------------------- #
        s0 = 2  # time in seconds --> system time
        page_load_time = 180  # time in seconds to wait for page to load
        wait_time = 1  # Explicit wait time
        driver_time = 60  # implicitly wait time --> driver time

        # singlethread: get charts
        # singlethread(final_report, s0, page_load_time, wait_time, driver_time)

        # multithreading: get charts
        # multithreading(final_report, csv_reader, num_of_records, s0, page_load_time, wait_time, driver_time)
        # ---------------------------------------------------------------------------------- #

        # Using BeautifulSoup and urllib3
        # ---------------------------------------------------------------------------------- #
        # singlethread: get charts
        pc = PageCrawler()
        pc.run(final_report)

        # multithreading: get charts

        # ---------------------------------------------------------------------------------- #

    else:
        print(
            "Please, provide one of the sorting names: ascending, descending, no-sorting as an argument."
        )
Example 18
import sys
from os import listdir
from os.path import join,dirname,realpath,isdir
import json
from csv_reader import CSVReader
from body_box_extractor import BodyBoxExtractor

ROOT_DIRECTORY = join(dirname(realpath(sys.argv[0])), '..', '..', 'res')
data_directories = [f for f in listdir(ROOT_DIRECTORY) if isdir(join(ROOT_DIRECTORY, f))]
csv_reader = CSVReader(ROOT_DIRECTORY)
body_box_extractor = BodyBoxExtractor('', None)
check_results = {}

for data_directory in data_directories:
    try:
        type_json_file = open(join(ROOT_DIRECTORY, data_directory, 'type.json'))
        type_data = json.load(type_json_file)

        csv_reader.set_file(join(data_directory, data_directory+'.csv'))
        skeleton_data = {'raw_data': csv_reader.read()}
        data = body_box_extractor.process(skeleton_data)

        check_results[data_directory] = type_data
        check_results[data_directory]['has_shoulder'] = all(abs(data['body_box']['shoulder_left']['3d']['mean']) > 0.001) and all(abs(data['body_box']['shoulder_right']['3d']['mean']) > 0.001)
        check_results[data_directory]['has_hip'] = all(abs(data['body_box']['hip_left']['3d']['mean']) > 0.001) and all(abs(data['body_box']['hip_right']['3d']['mean']) > 0.001)

    except IOError:
        continue

selected_signs = [data_name for data_name, data in check_results.items() if data['has_hip'] and data['oneOrTwo'] == 1 and data['handRecognized']]
print(sorted(selected_signs))
Example 19
app = Flask(__name__)
utils = Application()


@app.route("/")
def main():
    return "Welcome!"


@app.route("/getCityHotels", methods=['POST'])
@check_rate_limit('api_key')
def get_hotels():
    try:
        request_body = json.loads(request.data)
        city_id = request_body.get('city_id')
        sort_type_value = request_body.get('sort_type')
        meta_data = utils.get_city_hotels(city_id, sort_type=sort_type_value)
        format_data = utils.format_data(meta_data, config.CSV_DATA.headers)
        response = json.dumps(format_data)
        return response
    except KeyError:
        response = "Key not found for {}".format(city_id)
        abort(make_response(str(response), 404))
    except Exception as e:
        response = e.args
        abort(make_response(str(response), 400))


if __name__ == "__main__":
    config.CSV_DATA = CSVReader()  # reads the CSV when starting the server
    app.run()
Example 20
from data_reconfiguration import reconfigure_contestants_data as rc
from data_reconfiguration import reconfigure_question_data as rcq
from data_reconfiguration import reconfigure_game_contestant_location_data as rc_game_player_loc

if __name__ == '__main__':
    # Parse command line arguments
    input_config, output_config = ap.argument_parser()

    # Retrieve csv locations from the config
    contestants_csv_location = input_config.get('files', 'contestants')
    game_contestant_csv_location = input_config.get('files', 'locations')
    questions_csv_location = input_config.get('files', 'questions')
    trend_csv_location = input_config.get('files', 'trend')
    final_results_csv_location = input_config.get('files', 'final_results')

    # Request data-frame from CSVs
    contestants_df = CSVReader.get_dataframe(contestants_csv_location)
    game_contestant_df = CSVReader.get_dataframe(game_contestant_csv_location)
    questions_df = CSVReader.get_dataframe(questions_csv_location)
    trend_df = CSVReader.get_dataframe(trend_csv_location)
    final_results_df = CSVReader.get_dataframe(final_results_csv_location)

    # Reconfiguration
    rc.generate_contestant_and_occupation(contestants_df, input_config,
                                          output_config)
    df_player_loc = rc_game_player_loc.generate_sql_statements(
        game_contestant_df, input_config, output_config, rc)

    # Reconfiguration of questions
    rcq.generate_sql_statements(questions_df, trend_df, final_results_df,
                                df_player_loc, input_config, output_config)
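For reference, the get_dataframe call used above could be as small as a pandas wrapper; this is a hypothetical sketch, not the project's code:

import pandas as pd


class CSVReader:

    @staticmethod
    def get_dataframe(csv_location):
        # load one CSV file into a DataFrame
        return pd.read_csv(csv_location)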
Example 21
from datetime import datetime


def convert_data(rows):
    def convert(t):
        return datetime.strptime(t, '%I:%M%p').strftime('%H:%M')

    start_of_day = '09:00'
    end_of_day = '17:00'
    array = []
    for row in rows:
        current_row = []
        for column in row:
            if column != '':
                column = convert(column)
            current_row.append(column)
        if '' in current_row:
            if current_row[0] == '':
                end_of_day = current_row[1]
            else:
                start_of_day = current_row[0]
        else:
            array.append(current_row)
    return start_of_day, end_of_day, array


data = CSVReader('times')
start, end, data = convert_data(data)
meetings = Meetings(start, end, data)
print(meetings)
conflicts, out_of_hours = meetings.report_anomalies()
print('conflicting:', conflicts)
print('out of hours:', out_of_hours)
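A quick illustration of how convert_data behaves, using invented sample rows (not from the source data): a row with an empty cell overrides the start or end of the working day, every other row becomes a meeting.

sample_rows = [['9:00AM', '9:30AM'], ['', '5:30PM'], ['10:00AM', '']]
start, end, meeting_rows = convert_data(sample_rows)
print(start, end, meeting_rows)  # 10:00 17:30 [['09:00', '09:30']]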
Example 22
import numpy as np
from tensorflow.keras.models import load_model, Sequential, Model
from models import Models
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from text_preprocessing import TextPreprocessing
import json
import os
from csv_reader import CSVReader
from datetime import datetime
from tensorflow.keras import backend as K
from sklearn.metrics import classification_report
textPreProcessing = TextPreprocessing()
if __name__ == "__main__":
    embeddings = np.load('text_embedding.npy', allow_pickle=True)
    texts = []
    sentiments = []
    good = CSVReader.dataframe_from_txt("WordNetAffectEmotionLists/joy.txt")
    good = good.texts
    texts = np.append(texts, np.array(good))
    sentiments = np.append(sentiments, [1] * len(good))
    surprise = CSVReader.dataframe_from_txt(
        "WordNetAffectEmotionLists/surprise.txt")
    surprise = surprise.texts
    texts = np.append(texts, np.array(surprise))
    sentiments = np.append(sentiments, [2] * len(surprise))
    sad = CSVReader.dataframe_from_txt("WordNetAffectEmotionLists/sadness.txt")
    sad = sad.texts
    texts = np.append(texts, np.array(sad))
    sentiments = np.append(sentiments, [-1] * len(sad))
    fear = CSVReader.dataframe_from_txt("WordNetAffectEmotionLists/fear.txt")
    fear = fear.texts
    texts = np.append(texts, np.array(fear))
Example 23
from docopt import docopt
from autoencoder import TimeDistributedAutoEncoder
from csv_reader import CSVReader
from data_generator import DataGenerator

if __name__ == "__main__":
    conf = docopt(__doc__, version='LSTM Anomaly Detector 0.1')
    ae = TimeDistributedAutoEncoder(conf)
    p = Plot(conf, ae)

    # determine whether to pull in fake data or a csv file
    if conf['synthetic']:
        #print 'generating synthetic data....'
        source = DataGenerator(conf, p)
    else:
        #print 'reading from csv file...'
        source = CSVReader(conf, p)

    # pull in the data
    (x_train, y_train), (x_test, y_test) = source.split_data()
    #print 'X_train.shape %s | Y_train.shape: %s' % (x_train.shape, y_train.shape)
    #print 'X_test.shape %s  | Y_test.shape: %s' % (x_test.shape, y_test.shape)

    # build an LSTM or a regular autoencoder
    #print 'building %s autoencoder...' % conf['--model_type']

    # Add the required layers
    model_type = conf['--model_type'].strip().lower()
    if model_type == 'lstm':
        # Deep autoencoder
        # ae.add_lstm_autoencoder([int(conf['--input_dim']), int(conf['--hidden_dim'])
        #                         , int(conf['--hidden_dim'])/2, int(conf['--hidden_dim']) / 4]
Example 24
import sys
from csv_reader import CSVReader
from csv_writer import CSVWriter
from trustpilot_url_encrypt import TrustPilotURLEncryption
from config import Config

if len(sys.argv) < 2:
    print('Requires at least one filename')
    exit()

for filename in sys.argv[1:]:
    input_file = filename
    output_file = filename.replace('.csv', '_links.csv')
    print('reading', input_file)
    config = Config('config.yaml')
    reader = CSVReader(input_file)
    writer = CSVWriter(output_file)
    # write the header only once in the output file
    write_header = True
    # go through the source csv
    for row in reader():
        # build the Trust Pilot record from the configured field mapping
        record = {}
        for field in ('name', 'email', 'ref'):
            record[field] = row[config['fields'][field]]
        url = TrustPilotURLEncryption.encrypt(record)
        new_row = []
        for key in reader.header:
            new_row.append(row[key])
        new_row.append(url)
        if write_header:
Example 25
from linear_regression import LinearRegression
from train_test_split import TrainTestSplit
import matplotlib.pyplot as plt
from csv_reader import CSVReader

f = CSVReader()
f.read(csv="autos_prepared.csv")

x_train, x_test, y_train, y_test = TrainTestSplit().split(x=f.data["powerPS"],
                                                          y=f.data["price"])

model = LinearRegression()
model.train(x=x_train, y=y_train)

print("Fehler              |", model.error)
print("Durchschnittsfehler |", model.avg_error)
print("a                   |", model.a)
print("b                   |", model.b)
print("Bestimmtheitsmaß    |", model.score(xs=x_test, ys=y_test))

predicted = model.predict(x_test)

plt.scatter(x_train, y_train)
plt.scatter(x_test, y_test, color="green")
plt.plot(x_test, predicted, color="red")
plt.show()
Example 26
import sys
from os import listdir
from os.path import join, dirname, realpath, isdir
import json
from csv_reader import CSVReader
from body_box_extractor import BodyBoxExtractor

ROOT_DIRECTORY = join(dirname(realpath(sys.argv[0])), '..', '..', 'res')
data_directories = [
    f for f in listdir(ROOT_DIRECTORY) if isdir(join(ROOT_DIRECTORY, f))
]
csv_reader = CSVReader(ROOT_DIRECTORY)
body_box_extractor = BodyBoxExtractor('', None)
check_results = {}

for data_directory in data_directories:
    try:
        type_json_file = open(join(ROOT_DIRECTORY, data_directory,
                                   'type.json'))
        type_data = json.load(type_json_file)

        csv_reader.set_file(join(data_directory, data_directory + '.csv'))
        skeleton_data = {'raw_data': csv_reader.read()}
        data = body_box_extractor.process(skeleton_data)

        check_results[data_directory] = type_data
        check_results[data_directory]['has_shoulder'] = all(
            abs(data['body_box']['shoulder_left']['3d']['mean']) > 0.001
        ) and all(
            abs(data['body_box']['shoulder_right']['3d']['mean']) > 0.001)
        check_results[data_directory]['has_hip'] = all(