Example #1
import csv
import itertools
import time
import traceback


def get_aucs(data_list):
    # SLURM_JOB_ID, NUM_TO_AVG, FLAGS, alphas, model, T and AnomalyDetectionRunner
    # are module-level names defined elsewhere in the project.
    START = time.time()
    FILE = 'output/{}-{}-epochs-{}-avg.csv'.format(SLURM_JOB_ID,
                                                   FLAGS.iterations,
                                                   NUM_TO_AVG)

    settings = {
        'data_name': None,
        'alpha': None,
        'iterations': FLAGS.iterations,
        'model': model
    }

    with open(FILE, 'a') as f:
        w = csv.writer(f)
        # One run per (dataset, alpha) pair, repeated NUM_TO_AVG times for averaging.
        for dataname, alpha in itertools.product(data_list, alphas):
            settings['data_name'] = dataname
            settings['alpha'] = alpha
            for _ in range(NUM_TO_AVG):
                runner = AnomalyDetectionRunner(settings)
                try:
                    r = runner.erun()
                    r.append(T(START))  # append elapsed time since START
                    w.writerow(r)
                    f.flush()  # flush each row so partial results survive a crash
                except Exception:
                    # Report the failure and continue with the remaining runs.
                    traceback.print_exc()
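
The example above depends on several module-level names that are not shown (SLURM_JOB_ID, NUM_TO_AVG, alphas, T). A minimal sketch of how they might look; every value and the body of T is an assumption made only to keep the example self-contained:

import os
import time

SLURM_JOB_ID = os.environ.get('SLURM_JOB_ID', 'local')  # assumed: read from the environment
NUM_TO_AVG = 5                                           # hypothetical number of repeats
alphas = [0.2, 0.4, 0.6, 0.8]                            # hypothetical grid of balance parameters


def T(start):
    # Hypothetical helper: wall-clock seconds elapsed since `start`.
    return round(time.time() - start, 2)
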
Example #2
import numpy as np
import tensorflow as tf

# TF1-style flag setup; the original module is assumed to define `flags` and `FLAGS`
# this way (tf.app.flags) or via an equivalent import.
flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_float('weight_decay', 0.,
                   'Weight for L2 loss on embedding matrix.')
flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
flags.DEFINE_integer('features', 1, 'Whether to use features (1) or not (0).')
flags.DEFINE_integer('seed', 50, 'Seed for fixing the results.')
flags.DEFINE_integer('iterations', 300, 'Number of iterations.')
flags.DEFINE_float('alpha', 0.8, 'Balance parameter.')
'''
We did not set a seed for the experiments described in the paper;
a seed is fixed here so that the better performance of ARGA is reproduced consistently.
'''
seed = 7
np.random.seed(seed)
tf.set_random_seed(seed)

data_list = ['twitter', 'BlogCatalog', 'Amazon']
dataname = data_list[0]
model = 'gcn_ae'
task = 'anomaly_detection'
settings = {
    'data_name': dataname,
    'iterations': FLAGS.iterations,
    'model': model
}

# AnomalyDetectionRunner is defined elsewhere in the project.
runner = None
if task == 'anomaly_detection':
    runner = AnomalyDetectionRunner(settings)

runner.erun()
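
Because these are TF1-style flags, values such as iterations or alpha can be overridden from the command line once the flags are parsed. A small sketch of one way to do that with tf.app.run; wrapping the script in a main function is an assumption, not something the original example does:

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('iterations', 300, 'Number of iterations.')
flags.DEFINE_float('alpha', 0.8, 'Balance parameter.')


def main(_):
    # Flags are parsed before main runs, so e.g.
    #   python run.py --iterations=500 --alpha=0.5
    # overrides the defaults defined above.
    print('iterations:', FLAGS.iterations)
    print('alpha:', FLAGS.alpha)


if __name__ == '__main__':
    tf.app.run(main)
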
Example #3
                    # This fragment runs inside nested loops: dataset_str, eta, theta, alpha,
                    # embed_dim, decoder_act and k are loop variables, while FLAGS, model, task,
                    # AnomalyDetectionRunner and SummaryWriter come from the enclosing module.
                    settings = {
                        'data_name': dataset_str,
                        'iterations': FLAGS.iterations,
                        'model': model,
                        'decoder_act': decoder_act
                    }

                    results_dir = os.path.join('results', dataset_str, task, model)
                    log_dir = os.path.join(
                        'logs', dataset_str, task, model,
                        '{}_{}_{}'.format(eta, theta, alpha))

                    os.makedirs(results_dir, exist_ok=True)
                    os.makedirs(log_dir, exist_ok=True)

                    file2print = '{}/{}_{}_{}_{}_{}.json'.format(
                        results_dir, dataset_str, eta, theta, alpha, embed_dim)

                    runner = None
                    if task == 'anomaly_detection':
                        runner = AnomalyDetectionRunner(settings, k)

                    # Write TensorBoard logs to a per-configuration directory.
                    writer = SummaryWriter(log_dir)

                    runner.erun(writer)
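
The deep indentation shows that this fragment sits inside a hyperparameter grid search. Purely as a hypothetical reconstruction of what the enclosing loops could look like (the loop variable names match the fragment, but the dataset list, value grids and fixed settings are assumptions):

import itertools

task = 'anomaly_detection'
model = 'gcn_ae'
decoder_act = 'sigmoid'   # assumed
k = 10                    # assumed

for dataset_str in ['BlogCatalog', 'Flickr']:                        # assumed datasets
    for eta, theta, alpha, embed_dim in itertools.product(
            [1, 3, 5], [10, 40, 90], [0.2, 0.5, 0.8], [32, 64]):     # assumed grids
        # ... the Example #3 body runs here for every configuration ...
        pass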