Example #1
    def test_decrypt_files(self):
        sleep(1)
        P = PyHasher(PATH=getcwd(), pattern="**/TU/_CRYPT_*.txt", passw=_TEST_PASS, salt=_TEST_SALT, mode="decrypt")
        P.routine()
        B = Aggregator(pattern="**/TU/*DECRYPTED_1__CRYPT_1*.txt", path_to_files=getcwd())
        C = Aggregator(pattern="**/TU/test_file.txt", path_to_files=getcwd())
        self.assertIn("_DECRYPTED_1__CRYPT_1_test_file.txt", B.matching_list[0])
        with open(B.matching_list[0], "r") as b, open(C.matching_list[0], "r") as c:
            self.assertEqual(b.read(), c.read())
Example #2
    def test_get_users(self):
        gm_client = gearman.GearmanClient(['localhost:4730'])
        agg = Aggregator(gm_client, None)

        user_data = agg.get_users()
        for i in range(len(user_data)):
            del user_data[i]['_id']
            if 'words' in user_data[i]:
                del user_data[i]['words']

        expected_response = [
            {
                u'username': u'iandioch',
                u'subscribed_feeds': [
                    u'https://news.ycombinator.com/rss'
                    ],
            },
            {
                u'username': u'sully',
                u'subscribed_feeds': [
                    u'https://news.ycombinator.com/rss',
                    u'http://spritesmods.com/rss.php',
                    u'http://dave.cheney.net/feed'
                    ],
            },
            {
                u'username': u'theotherguys',
                u'subscribed_feeds': [
                    u'https://news.ycombinator.com/rss',
                    u'http://spritesmods.com/rss.php'
                    ],
            }]

        self.assertEqual(user_data, expected_response)
Example #3
def test_find_near():
    db = DB()
    aggregator = Aggregator(db)
    car_id_1 = aggregator.login_car()
    car_id_2 = aggregator.login_car()
    car_id_3 = aggregator.login_car()
    car_1 = db.get_car(car_id_1)
    car_2 = db.get_car(car_id_2)
    car_3 = db.get_car(car_id_3)

    aggregator.new_position_car(car_id_1, 10, 10)
    aggregator.new_position_car(car_id_2, 20, 20)
    aggregator.new_position_car(car_id_3, 30, 30)

    assert set(db.find_near(Point(5, 5), 2)) == set([car_1, car_2])

    aggregator.logout_car(car_id_1, "Test reason")
    aggregator.logout_car(car_id_2, "Test reason")
    aggregator.logout_car(car_id_3, "Test reason")
    with pytest.raises(CarNotFound):
        db.get_car(car_id_1)
    with pytest.raises(CarNotFound):
        db.get_car(car_id_2)
    with pytest.raises(CarNotFound):
        db.get_car(car_id_3)
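A note on the assertion above: the second argument to db.find_near appears to be a result count rather than a radius, since only the two cars closest to Point(5, 5) are expected back. Below is a minimal sketch of that interpretation; the free-function form and the x/y attributes on cars are assumptions for illustration, not the project's actual API.

import math

def find_near(cars, point, count):
    # Illustrative only: return the `count` cars closest to `point`,
    # assuming each car exposes numeric `x` and `y` attributes.
    def distance(car):
        return math.hypot(car.x - point.x, car.y - point.y)
    return sorted(cars, key=distance)[:count]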
Example #4
    def test_put_g2g(self):
        gm_client = gearman.GearmanClient(['localhost:4730'])
        agg = Aggregator(gm_client, None)

#        add_request = bson.BSON.encode({
#            'database':'feedlark',
#            'collecion':'g2g',
#            'data':{
#                'username':'******',
#                'test_parameter':'NOLO'
#                }
#            })
#        gm_client.submit_job('db-add',str(add_request))

        test_document = {
            'username': '******',
            'test_parameter': 'YOLO',
            }
        agg.put_g2g('iandioch', test_document)

        get_request = bson.BSON.encode({
            'database': 'feedlark',
            'collection': 'g2g',
            'query': {
                'username': '******',
                },
            'projection': {
                'test_parameter': 1,
                },
            })
        g2g_data = gm_client.submit_job('db-get', str(get_request)).result
        self.assertEqual(
            bson.BSON(g2g_data).decode()['docs'][0]['test_parameter'], 'YOLO')
Example #5
    def group_by(self, time_unit):
        """
        Get results by time slice ('days', 'hours', 'minutes', 'seconds').
        Returns a generator of times and tweet-generators, like this:

        Example:
        ########
        for time, tweets in collection.group_by('minutes'):
            for tweet in tweets:
                # do something

        ---------------------------------------------------

        Also supports top_x() methods like this:

        Example:
        ########

        collection.since(datetime(2015,6,1)).group_by('days').top_user_locations(n=5)
        >
        #            London  London, UK  Manchester  Scotland  UK
        # 2015-06-1       4           2           1         1   2
        # 2015-06-2      11           4           9         3   3
        # 2015-06-3      14           1           5       NaN   4
        # 2015-06-4      17           1           5         1   6
        # 2015-06-5      10           3           3         3   3
        """
        return Aggregator(self, time_unit=time_unit)
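For readers without the surrounding source, here is a minimal sketch of what the returned Aggregator could look like to support the iteration shown in the docstring; the created_at attribute, the truncation rules, and the class name are assumptions for illustration, and the real Aggregator also backs the top_x() helpers.

import itertools

class MinimalAggregator:
    # Illustrative only: yields (time, tweets) pairs bucketed by the chosen unit,
    # assuming each tweet exposes a datetime attribute named `created_at`.
    def __init__(self, collection, time_unit='minutes'):
        self.collection = collection
        self.time_unit = time_unit

    def _bucket(self, tweet):
        dt = tweet.created_at
        if self.time_unit == 'days':
            return dt.replace(hour=0, minute=0, second=0, microsecond=0)
        if self.time_unit == 'hours':
            return dt.replace(minute=0, second=0, microsecond=0)
        if self.time_unit == 'minutes':
            return dt.replace(second=0, microsecond=0)
        return dt.replace(microsecond=0)

    def __iter__(self):
        ordered = sorted(self.collection, key=self._bucket)
        return iter(itertools.groupby(ordered, key=self._bucket))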
Example #6
    def __init__(self):
        self.aggregator = Aggregator()  # to retrieve server logs
        self.server_set = set(util.retrieve_server_list())  # [server_ip, ]
        self.content_set = set()
        self.replica_map = {}  # { file_uuid: servers that store the file }
        self.replication_task = []
        self.last_timestamp = 0  # the timestamp of the last update
Example #7
def test_car_login_logout():
    db = DB()
    aggregator = Aggregator(db)
    car_id = aggregator.login_car()
    car = db.get_car(car_id)
    aggregator.logout_car(car_id, "Test reason")
    with pytest.raises(CarNotFound):
        db.get_car(car_id)
Example #8
    def __init__(self, dataset):
        self.aggregator = Aggregator()
        self.dataset = dataset
        self.target_index, target = self.__choose_random_sample(dataset)
        self.target = copy.deepcopy(target)
        # self.target.set_mesh_and_parts_colour([1, 0, 0])
        self.set_colour_coded_sources(dataset)  # TODO: add to reset target?
Example #9
def main():
    initialize_log()
    logging.info("Starting aggregator")
    config_params = parse_config_params()
    aggregator = Aggregator(config_params['source_queue'],
                            config_params['reducer_queue'],
                            config_params['key'])
    aggregator.start()
Example #10
    def run_experiment(self):
        with open('{}/anomaly.json'.format(DATA_FOLDER), 'r') as f:
            an_data = json.load(f)
            agg = Aggregator(an_data)
            incidents, relevance = agg.build_incidents_report()
            metrics_df = pd.read_csv(
                '{}/metrics_0_filter.csv'.format(DATA_FOLDER))
            for key, item in incidents.items():
                self.__do_report(metrics_df, an_data, key, item)
Example #11
    def __init__(self, **kwargs):
        self.reportData = None
        # Insert your model class names here, replacing the classes below with your own AI model classes.
        self.models_aggregator = Aggregator(
            [nnModel.NNModel, foodRandomForest.FoodRandomForest],
            [['nnmodels/nnmodel{}.pkl'.format(i) for i in range(31)],
             ['rf-models/randomforest{}.joblib'.format(i) for i in range(30)]])
        super(UltraProcessedFoodApp, self).__init__(**kwargs)
Example #12
async def update_mocks():
    directory = Path(__file__).parent
    aggr = Aggregator(rapidapi_key=RAPIDAPI_KEY)

    for source in aggr._sources:
        source_name = type(source).__name__
        file_path = directory / f'mock_response_{source_name}.txt'
        response = await source.load_data()
        with file_path.open('w') as file:
            file.write(response)
Example #13
    def __init__(self):
        self.aggregator = Aggregator()  # to retrieve server logs
        self.client_set = set()  # [client_ip, ]
        self.server_set = set(util.retrieve_server_list())  # [server_ip, ]
        self.content_set = set()  # [uuid, ]
        self.access_map = {}  # {uuid: {client_ip: num_request}}
        self.replica_map = {}  # {uuid: {server_ip: num_replica}}
        self.last_timestamp = 0  # the timestamp of the last update
        self.requests_per_replica = 3
        self.uuid_to_server = None
Example #14
async def test_update():
    aggr = Aggregator()
    await aggr.update()
    aggr._rapidapi.update.assert_called_once()
    aggr._github.update.assert_called_once()
    aggr._stopcorona.update.assert_called_once()

    with patch.object(RapidapiSource, 'update', _async_raise(ReadTimeout)):
        aggr = Aggregator()
        await aggr.update()
        logging.error.assert_called_with('ReadTimeout in source update')

    with patch.object(GithubSource, 'update', _async_raise(ConnectTimeout)):
        aggr = Aggregator()
        await aggr.update()
        logging.error.assert_called_with('ConnectTimeout in source update')

    with patch.object(GithubSource, 'update', _async_raise(ZeroDivisionError)):
        aggr = Aggregator()
        await aggr.update()
        logging.exception.assert_called_once()
Example #15
    def __init__(self, line, debug):
        reader = EffectReader(line)
        # reader = TraceReader(line)
        reader.parse_trace()
        self.code_size = len(reader.code)

        signal.signal(signal.SIGALRM, handler)
        signal.alarm(15)
        # print(reader.signature)
        optimizer = Aggregator(reader.code)
        ExpressionExecutor(reader, optimizer, debug)
        signal.alarm(0)
Example #16
def main():
    set_logging()

    bot = aiogram.Bot(BOT_TOKEN)
    dispatcher = aiogram.Dispatcher(bot)

    aggregator = Aggregator(rapidapi_key=RAPIDAPI_KEY)
    asyncio.run(aggregator.load_sources())
    dispatcher.loop.create_task(aggregator.update_periodically())

    communicator = Communicator(aggregator)
    register_handlers(communicator, dispatcher)

    run(dispatcher)
Example #17
def main() -> None:
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument("--cache-dir",
                             dest="cache_dir",
                             metavar="DIR",
                             type=str,
                             default=".cache")
    args_parser.add_argument("--export-dir",
                             dest="export_dir",
                             metavar="DIR",
                             type=str,
                             default=".export")
    args_parser.add_argument(
        "--templates-dir",
        dest="templates_dir",
        metavar="DIR",
        type=str,
        default="templates",
    )
    args_parser.add_argument("--base-url",
                             dest="base_url",
                             metavar="URL",
                             type=str)
    args_parser.add_argument(
        "--podcasts-json",
        dest="podcasts_json",
        metavar="FILE",
        type=str,
        default="podcasts.json",
    )
    args_parser.add_argument("--clear-cache",
                             dest="clear_cache",
                             action="store_true")
    args_parser.add_argument("--keep-feeds",
                             dest="keep_feeds",
                             action="store_true")
    args = args_parser.parse_args()

    a = Aggregator(
        podcasts_json=args.podcasts_json,
        cache_dir=args.cache_dir,
        export_dir=args.export_dir,
        templates_dir=args.templates_dir,
        base_url=args.base_url if args.base_url else "file://{}".format(
            os.path.abspath(args.export_dir)),
    )
    if args.clear_cache:
        a.clear_cache()
    a.sync(keep_feeds=args.keep_feeds)
    a.export()
Example #18
def main():
    nasdaq = StockMarket("NASDAQ", ["AAPL", "CSCO", "MSFT", "GOOG"])
    newyork = StockMarket("NYSE", ["IBM", "HPQ", "BP"])

    agg = Aggregator()
    agg.add_market(nasdaq)
    agg.add_symbols(nasdaq.symbols)
    agg.add_market(newyork)
    agg.add_symbols(newyork.symbols)
    print("aggregated symbols:", agg.symbols)

    view = Viewer()
    view.aggregator(agg, ["IBM", "AAPL", "MSFT"])
    view.print_quotes()
Example #19
    def run_experiment(self):
        agg = Aggregator(255, 10)
        with open('{}/anomaly.json'.format(DATA_FOLDER), 'r') as f:
            an_data = json.load(f)
        incidents, relevance = agg.build_incidents_report(an_data)

        metrics_df = pd.read_csv('{}/metrics_0_filter.csv'.format(DATA_FOLDER))
        for key, item in incidents.items():
            image_file = '{}_viz.png'.format(key)
            visualisation = VisualizeReports(metrics_df, an_data, item)
            visualisation.visualize_with_siblings('{}/{}'.format(SAMPLES_FOLDER, image_file))

            self.__upload_file('{}/{}'.format(SAMPLES_FOLDER, image_file), image_file)
            self.__run_incident_report_buttons(key, image_file)
Example #20
    def go(b1, b2, b3, p, output):
        """
        Launches the triangulation for the given input file and writes result in new CSV
        :return:
        """
        tab = Aggregator().aggregate(b1, b2, b3, p)

        with open(output, 'w') as csvfile:
            csv_writer = csv.writer(csvfile,
                                    delimiter=',',
                                    quotechar='|',
                                    quoting=csv.QUOTE_MINIMAL)
            for row in tab:
                csv_writer.writerow(row)

        return 0
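A hedged usage sketch for the method above, assuming b1, b2, and b3 are the three beacon CSV paths and p is the measurement file; the class name and file names below are placeholders, not taken from the project.

# Hypothetical call site; Triangulation and the file names are illustrative only.
Triangulation.go('beacon_1.csv', 'beacon_2.csv', 'beacon_3.csv',
                 'measurements.csv', 'triangulated.csv')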
Example #21
    def __init__(self, args, n_entity, n_relation, adj_entity, adj_relation):  # initialized with args, n_entity, n_relation
        super(RippleNet, self).__init__()

        self._parse_args(args, n_entity, n_relation)

        self.entity_emb = nn.Embedding(self.n_entity, self.dim)
        self.relation_emb = nn.Embedding(self.n_relation, self.dim * self.dim)
        self.relation_emb_GCN = nn.Embedding(self.n_relation, self.dim)
        self.transform_matrix = nn.Linear(self.dim, self.dim, bias=False)
        self.criterion = nn.BCELoss()
        self.transformerEncoderLayer = nn.TransformerEncoderLayer(d_model=self.dim, nhead=self.n_head, dim_feedforward=self.feed_f_dim)
        self.transformerEncoder = nn.TransformerEncoder(num_layers=1, encoder_layer=self.transformerEncoderLayer)
        self.pooling = nn.AvgPool2d
        self.linear = nn.Linear(in_features=2 * self.dim, out_features=self.dim)
        self.adj_entity = adj_entity
        self.adj_relation = adj_relation
        self.aggregator = Aggregator(self.batch_size, self.dim, args.aggregator)
Example #22
def main():
    nasdaq = StockMarket("NASDAQ", ["AAPL", "CSCO", "MSFT", "GOOG"])
    newyork = StockMarket("NYSE", ["IBM", "HPQ", "BP"])

    agg = Aggregator()
    agg.add_symbols(nasdaq.symbols())
    agg.add_symbols(newyork.symbols())
    print("aggregated symbols:", agg.available_symbols())

    nasdaq.listener(agg)
    newyork.listener(agg)

    view = Viewer()
    agg.view(view, ["IBM", "AAPL", "MSFT"])
    print("")
    while True:
        nasdaq.generate()
        newyork.generate()
        time.sleep(0.5)
Example #23
    def __init__(self, num_user, num_ent, num_rel, kg, args, device):
        super(KGCN, self).__init__()
        self.num_user = num_user
        self.num_ent = num_ent
        self.num_rel = num_rel
        self.n_iter = args.n_iter
        self.batch_size = args.batch_size
        self.dim = args.dim
        self.n_neighbor = args.neighbor_sample_size
        self.kg = kg
        self.device = device
        self.aggregator = Aggregator(self.batch_size, self.dim,
                                     args.aggregator)

        self._gen_adj()

        self.usr = torch.nn.Embedding(num_user, args.dim)
        self.ent = torch.nn.Embedding(num_ent, args.dim)
        self.rel = torch.nn.Embedding(num_rel, args.dim)
Example #24
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except Exception:
        print("missing or invalid arguments")
        exit(1)

    # create the experiments dirs
    create_dirs([config['result_dir'], config['checkpoint_dir'], config['checkpoint_dir_lstm']])
    # save the config in a txt file
    save_config(config)
    # create tensorflow session
    sessions = []
    data = []
    model_vaes = []
    vae_trainers = []
    lstm_models = []
    model_vae_global = VAEmodel(config, "Global")
    sess_global = tf.Session(config=tf.ConfigProto())
    for i in range(1, 10):
        sess = tf.Session(config=tf.ConfigProto())
        sessions.append(sess)
        data.append(generator_fl(config, i))
        model_vaes.append(VAEmodel(config, "Client{}".format(i)))
        model_vaes[-1].load(sessions[-1])
        vae_trainers.append(vaeTrainer(sessions[-1], model_vaes[-1], data[-1], config))
        lstm_models.append(lstmKerasModel("Client{}".format(i), config))
    model_vae_global.load(sess_global)
    trainer_vae_global = vaeTrainer(sess_global, model_vae_global, data[0], config)
    lstm_model_global = lstmKerasModel("Global", config)
    client_weights = [0.1] * 8
    client_weights.append(0.2)
    aggregator = Aggregator(vae_trainers, trainer_vae_global, lstm_models, lstm_model_global, config, client_weights)
    aggregator.aggregate_vae()
    aggregator.aggregate_lstm()
Example #25
    def test_check_operator(self):
        """
        This test does not currently work: the expected exception is not caught by the assertRaises context. To be corrected.
        :return: None
        """
        with self.assertRaises(ValueError):
            Aggregator(self.comp_parent, [self.comp_1, self.comp_3], 'Invalid_Code')
Example #26
    def test_div(self):
        agg = Aggregator(self.comp_parent, [self.comp_1, self.comp_3], Operator.DIV)
        agg.aggregate()
        self.assertTrue(self.comp_parent.data.equals(pd.Series([0.5, 1, 1.5, 2, 2.5], index=self.idx_1)))
Example #27
    def test_mul(self):
        agg = Aggregator(self.comp_parent, [self.comp_1, self.comp_2, self.comp_3], Operator.MULT)
        agg.aggregate()
        self.assertTrue(self.comp_parent.data.equals(pd.Series([10, 16, 18, 16, 10], index=self.idx_1)))
Example #28
    def test_sub(self):
        agg = Aggregator(self.comp_parent, [self.comp_2, self.comp_3], Operator.SUB)
        agg.aggregate()
        self.assertTrue(self.comp_parent.data.equals(pd.Series([3, 2, 1, 0, -1], index=self.idx_1)))
Example #29
    def test_add(self):
        print("Operator is of type:", type(Operator))
        agg = Aggregator(self.comp_parent, [self.comp_1, self.comp_2, self.comp_3], Operator.ADD)
        agg.aggregate()
        self.assertTrue(self.comp_parent.data.equals(pd.Series([8, 8, 8, 8, 8], index=self.idx_1)))
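From the five tests above, the Aggregator in this project appears to fold the child components' pandas Series into the parent using the chosen Operator. A minimal sketch of that folding step follows; the Operator values and the aggregate_series helper are assumptions for illustration, not the project's actual implementation.

from enum import Enum
from functools import reduce

import pandas as pd


class Operator(Enum):
    # Each value names the pandas Series method used to combine two children.
    ADD = 'add'
    SUB = 'sub'
    MULT = 'mul'
    DIV = 'div'


def aggregate_series(children, operator):
    # Fold the child Series together, left to right, with the selected method.
    return reduce(lambda left, right: getattr(left, operator.value)(right), children)


# e.g. aggregate_series([pd.Series([1, 2, 3]), pd.Series([5, 4, 3])], Operator.ADD)
# -> pd.Series([6, 6, 6])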
Example #30
from flask import Flask
from aggregator import Aggregator
import asyncio
import websockets
from flask_cors import CORS
from flask_socketio import SocketIO, send, emit
from reddit_client import RedditClient

PERIOD = 5
TOP = 50

app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*")
CORS(app)
aggregator = Aggregator(PERIOD, TOP)
client = RedditClient(aggregator, socketio)
client.start_streaming()


@socketio.on('test')
def handle_message(data):
    emit("res", "from the server")


if __name__ == '__main__':
    socketio.run(app)