Example #1
def generate_header(module):
    if not Args().skip_header_generation:
        text = json.dumps(HeaderGenerator().visit(module), separators=(',', ':'))
        with open(Args().get_header_path(), 'w') as file:
            file.write(text)

    return handle_next_stage(module, generate_code)
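Every snippet on this page obtains its configuration through a parameterless Args() call, i.e. a process-wide singleton. The concrete class differs from project to project; the following is a minimal sketch of the pattern these snippets assume (the constructor handling is hypothetical):

class Args:
    _instance = None

    def __new__(cls, parsed=None):
        # The first call may hand over parsed CLI arguments; every later
        # call returns the same shared instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        if parsed is not None:
            cls._instance.args = parsed
        return cls._instance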
Example #2
    def test_header_inclusion(self):
        self.reset_module()

        a_code = '''module a
        
                    type a<`a, `b> = {
                        A,
                        B,
                        C = `a * `b,
                        D = a<`a, a<bool, string>> * `b
                    }
                    
                    let foo = fun(a, b) -> { a + b }
                    let bar = fun(a, b) -> { foo(a, b) != 0 }
                    let baz = fun(f, x, y) -> { f(x) = y } '''
        b_code = '''module b

                    type b<`a> = {
                        A,
                        B = `a * int
                    }

                    let foo = fun(a, b) -> { a = b }
                    let bar = fun(a) -> { foo(a, 0) }
                    let baz = fun(f, g, x) -> { f(g(x)) }'''
        c_code = '''module c

                    import "a"
                    open "b"

                    let test1 = fun(a, b, c) -> { foo(a.baz(bar, a, b), c) }
                    let test2 = baz
                    let test3 = a.bar'''

        Args().source = 'a.tml'
        parse_source_code(a_code)
        self.reset_module()

        Args().source = 'b.tml'
        parse_source_code(b_code)
        self.reset_module()

        Args().source = 'c.tml'
        Args().skip_header_generation = True
        parse_source_code(c_code)

        assert_let_types(
            self, {
                'test1': fun_type([t_int, t_bool, t_bool], t_bool),
                'test2': fun_type([fun_type([t_a], t_b),
                                   fun_type([t_c], t_a), t_c], t_b),
                'test3': fun_type([t_int, t_int], t_bool)
            })
Example #3
    def test_header_to_json_and_back(self):
        Args().skip_header_generation = True

        code = '''  module test
        
                    type list<`a> = {
                       Empty,
                       Cons = `a * list<`a>
                    }
                    
                    type a<`x, `y> = {
                       B = ref<ref<ref<`x>>> * `y * `x * ref<ref<ref<`y>>>,
                       A = a<bool, a<int, float>>
                    }
                    
                    let a: `a -> `a -> bool = fun(a, b) -> {True}
                    let b = fun(f, x, y) -> {f(x) = y}
                    let c: string -> a<int, string> -> unit = fun(p, q) -> {()}
                    let d = fun() -> { let a = 4; a + 4 }
                    
                    let foo = fun(a, b) -> { a + b }
                    let bar = fun(a, b) -> { foo(a, b) != 0 }
                    let baz = fun(f, x, y) -> { f(x) = y }'''

        parse_source_code(code)
        dic = HeaderGenerator().visit(GlobalModule())
        module2 = HeaderReader().read_module(dic)
        dic2 = HeaderGenerator().visit(module2)

        self.assertEqual(json.dumps(dic), json.dumps(dic2))
Example #4
def infer_types(module):
    if Args().stop_before_type_inferring:
        print(GlobalTypeInferer().dump())
        exit(0)

    GlobalTypeInferer().infer()
    return handle_next_stage(module, generate_header)
Example #5
def main():
    args = Args().get_args()
    kwargs = vars(args)
    checkpoint = torch.load(args.checkpoint)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    attacker = SmoothAttack(base_classifier)
    smoothed_classifier = Smooth(base_classifier,
                                 get_num_classes(args.dataset), args.sigma)

    dataset = get_dataset(args.dataset, 'test')
    average_nat = []
    average_adv = []

    j_header('index', 'nat_y', 'adv_y', 'nat_rad', 'adv_rad', 'success')
    figure = FigureSaver()
    for i in range(0, len(dataset), args.skip):
        (x, label) = dataset[i]
        x = x.cuda()
        first_x = x.data

        nat_pred, nat_rad = smoothed_classifier.certify(
            x, args.N0, args.N, args.alpha, args.batch)
        if nat_pred == -1:
            continue
        if args.dataset == DATASETS[0]:  # ImageNet
            targets = [j for j in range(0, 1000, 100) if j != label]
        else:
            targets = [j for j in range(10) if j != label]
        best_rad = -10.0
        best_image = None
        best_target = -1

        for target in targets:
            adv_x = attacker.perturb(x=first_x, y=target, **kwargs)
            # If you want to do wasserstein attack, uncomment the following and change the attacker to wasserstein
            # adv_x = attacker.perturb(x=first_x, y=target, eps=args.sigma, steps=args.steps, batch=args.batch)
            adv_pred, adv_rad = smoothed_classifier.certify(
                adv_x, args.N0, 2 * args.N0, args.alpha, args.batch)
            adv_suc = (adv_pred != label) and (adv_pred != -1) and (nat_pred != -1)
            adv_rad = adv_rad if adv_suc else -adv_rad

            if adv_rad > best_rad:
                best_rad = adv_rad
                best_image = adv_x.data
                best_target = target

        figure.save(best_image, i, 'best={}'.format(best_target))
        figure.save(first_x, i, 'natural')
        best_pred, best_rad = smoothed_classifier.certify(
            best_image, args.N0, args.N, args.alpha, args.batch)
        j_print(i, label, best_target, nat_rad, best_rad)
        average_adv.append(best_rad)
        average_nat.append(nat_rad)
    average_nat = np.array(average_nat)
    average_adv = np.array(average_adv)
    print('Average nat radii {}, Average adv radii {}'.format(
        average_nat.mean(), average_adv.mean()))
Example #6
def run_test_code(self, source_path: str, expect_fail: bool):
    args = Args()
    self.init_args(args)
    args.source_path = source_path
    args.expect_fail = expect_fail
    io = TestIo()
    run(args, io)
Example #7
def parse_source_code(text: str):
    ast = parser.parse(text, tracking=True)

    if Args().stop_after_parsing and Errors().is_ok():
        print(json.dumps(AstToDictVisitor().visit(ast)))
        exit(0)

    return handle_next_stage(ast, visit_ast)
Example #8
def get_exp_name() -> str:
    args = Args()
    name_list = [str(get_exp_no()), _time()]
    exp_name = args.get_args().experiment
    if str(exp_name) != 'None':  # also covers the default None from argparse
        name_list.append(exp_name)
    name_list.append(args.get_name())
    return '_'.join(name_list)
Example #9
def main():
    time1 = time.time()
    print('Start:',
          time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(time1)))
    args = Args()
    print(args)
    '''
    Read the args.reference_output file
    search_level (find the levels below and above Fermi)
    difference between below and above (reference gap)
    '''
    below, above = utils.search_level(args.reference_output)
    ref_gap = abs(above - below)
    print(ref_gap)

    particle = utils.read_particle(args.particle_path)
    print('>' + str(particle).strip() + '<')

    print(args.specie_type_1)
    print(args.specie_nat_1)

    print(args.specie_type_2)
    print(args.specie_nat_2)

    # Build the alloy at the fraction given by the user
    # Random draw of positions
    from random import sample
    at_species = [args.specie_type_1] * args.n_atoms
    random_indices = sample(list(range(args.n_atoms)), args.specie_nat_2)
    for idx in random_indices:
        at_species[idx] = args.specie_type_2
    print(at_species)
    particle.at_species = at_species
    print('>' + str(particle).strip() + '<')

    template_file = open('input_template.in')
    content = template_file.read()
    template_file.close()

    content = content.replace('<#NAT#>', str(args.n_atoms))
    content = content.replace('<#NTYP#>', str(args.n_species))
    content = content.replace('<#PARTICLE#>', str(particle).strip())

    espresso_input_file = open('input.in', 'w')
    espresso_input_file.write(content)  # presumably intended: save the filled-in template
    espresso_input_file.close()

    print(content)
    '''
    Put into the template:

    content = ""

    content = content.replace('<#NAT#>', args.n_atoms)
    content = content.replace('<#NTYP#>', args.n_species)
    content = content.replace('<#PARTICLE#>', conteudo_particula)
    # save file -> new_content
    '''
Example #10
File: __main__.py  Project: chinatsu/genres
def main():
    args = Args()

    lfm = lastfm.init()
    user = lfm.get_user(args.user)

    artists = get_artists(user, args)
    genres = get_genres(artists, args)
    chart.save(args, genres)
Example #11
def main():
    """
    Main function
    """

    args = Args(sys.argv)
    if args.parse_args() == macro.ERROR:
        return macro.ERROR
    zappy = Zappy(args)
    return zappy.launch()
Example #12
def get_args():
    # create args
    # You just need to change the parameters here
    at = False  # TODO: change here
    if not at:  # for normal training
        return Args(
            dataset="imdb",
            model_short_name="cnn",
            batch_size=32,
            epochs=75,
            adversarial_training=False,
            orig_model_prefix="cnn-imdb",
            max_length=2500,

            # for evaluate
            attack_class_for_testing=attack_classes[1],  # which attack to test robustness against
            num_attack_samples=50,  # how many samples to test robustness with

            # for pre-generate
            attack_class_for_training=attack_classes[1],  # which attack generates adv. samples on the trained model
            adv_sample_file="cnn-imdb-bae.csv",  # file name of where to save adv. samples
            adversarial_samples_to_train=2000,  # how many samples in adv_sample_file
        )
    else:  # for adversarial training
        return Args(
            dataset="kaggle-toxic-comment",
            model_short_name="lstm",
            batch_size=32,
            epochs=75,
            adversarial_training=True,
            at_model_prefix="lstm-at-tb-imdb",
            adv_sample_file="lstm-kaggle-tb.csv",  # from which file the program will read adv. samples

            # for evaluate
            attack_class_for_testing=attack_classes[0],
            num_attack_samples=250,
        )
Example #13
def CAN(DX, c, k=15, r=-1, islocal=True):
    """

    :param DX: a temporal sequence of n x n distance matrices for n data points
    :param c: number of clusters
    :param k: number of neighbors used to build the initial graph, and to set r when r <= 0
    :param r: parameter; can be set to a large enough value. If r < 0, it is determined by the algorithm from k
    :param islocal:
        1: only update the similarities of the k neighbor pairs, faster
        0: update all the similarities
    :return:
        A: num*num learned symmetric similarity matrix
        evs: eigenvalues of learned graph Laplacian in the iterations
    """

    arg = Args()
    N_ITER = arg.n_iter
    SDX = []

    if arg.debug:
        print('\nInitial Parameters:', '\nc = ', c, '\nk = ', k, '\nr = ', r,
              '\nislocal = ', islocal, '\nNITER = ', N_ITER)

    # Initialization
    for distX in DX:
        num = distX.shape[0]
        distX1 = np.sort(distX, axis=1)
        idx = np.argsort(distX, axis=1)
        A = np.zeros((num, num))
        rr = get_gamma(distX, k)

        eps = 10e-10
        for i in range(0, num):
            A[i, idx[i, 1:k + 2]] = 2 * (distX1[i, k + 1] -
                                         distX1[i, 1:k + 2]) / (rr[i] + eps)

        SDX.append(A)
        if r < 0:
            r = np.mean(rr)

        lmd = np.mean(rr)

    # Equal weight for every time point. The undefined names args.c_dim and
    # connectome_list are assumed to mean arg.c_dim and DX here.
    wt_local = [np.ones(arg.c_dim) / len(DX) for _ in range(0, len(DX))]

    for itr in range(0, N_ITER):
        for t in range(0, len(DX)):
            distX = DX[t]
            A = SDX[t]
            # (the per-time-point similarity update is omitted in this excerpt)

    return A
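A hypothetical call of the function above, assuming get_gamma and Args are available as in the snippet; the inputs are symmetric distance matrices with zero diagonal:

import numpy as np

DX = []
for _ in range(3):                # three time points
    d = np.random.rand(50, 50)
    d = (d + d.T) / 2             # symmetrize
    np.fill_diagonal(d, 0)        # zero self-distance
    DX.append(d)

A = CAN(DX, c=5, k=15, islocal=True)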
Example #14
def fetch_resource(resource_type):
    args = Args(resource_type)
    db_manager = DbManager(args)
    scraper = get_scraper(args)
    if args.refetch:
        db_manager.delete_resource(args.db_key)
    if not db_manager.resource_exists(args.db_key):
        resource_data = scraper.get_resource(args.query_params)
        if scraper.driver:
            scraper.driver.quit()
        db_manager.save_resource(args.db_key, resource_data)
    return db_manager.fetch_resource(args.db_key)
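A hypothetical call; the resource-type string and the query parameters it implies depend entirely on what get_scraper and DbManager support in this project:

albums = fetch_resource('albums')  # scraped once, then served from the database cache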
Example #15
def create_brain_net_node_files(sub, tentative_label=[]):
    """

    :param sub: subject name
    :param tentative_label: optional label list which can influence the label numbers
    :return:
    """
    args = Args()
    data_dir = os.path.join(args.root_directory, os.pardir, 'AD-Data_Organized')
    file_parcellation_table = os.path.join(data_dir, sub,
                                           'helper_files/parcellationTable.json')

    # Reading parcellation table to get the coordinates of each region
    with open(file_parcellation_table) as f:
        table = json.load(f)

    connectomes = readMatricesFromDirectory(os.path.join(
        data_dir, sub))  # Reading connectomes
    n_components, connectome_labels = get_number_of_components(connectomes)
    if len(tentative_label) > 0:
        print(len(tentative_label[0]))
        connectome_labels = [
            replace_labels(connectome_labels[t], tentative_label[0])
            for t in range(0, len(connectome_labels))
        ]

    T = len(connectomes)  # Number of time points
    N = len(table)  # Number of regions
    node_size = 4

    output_dir = os.path.join(data_dir,
                              sub + '/helper_files/brain_net/node_file')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for t in range(1, T + 1):
        print("\n----", t, "----\n")
        print(len(connectome_labels[t - 1]), N)
        assert len(connectome_labels[t - 1]) == N, "Incorrect number of regions"
        with open(os.path.join(output_dir, sub + "_t" + str(t) + ".node"),
                  'w') as node_file:
            print("Writing: ", node_file.name)
            for i in range(0, N):
                node_file.write(
                    str(table[i]["coord"][0]) + " " +
                    str(table[i]["coord"][1]) + " " +
                    str(table[i]["coord"][2]) + " " +
                    str(connectome_labels[t - 1][i] * 10) + " " +
                    str(node_size) + " " + table[i]["name"] + "\n")
Example #16
def main():
    args = Args()
    app = web.Application()
    app.add_routes([
        web.post("/connection", routes.connection.connection),
        web.get("/printer", routes.get_status.get_status),
        web.get("/job", routes.get_job.get_job),
        web.post("/printer/command", routes.post_command.post_command),
        web.post("/files/local/{file}", routes.print_file.print_file),
        web.post("/job", routes.cancel_print.cancel_print)
    ])
    get_printer(args.path)
    web.run_app(app, host='0.0.0.0', port=args.port)
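With the server running, the endpoints can be exercised with curl; the port below is illustrative, the real one comes from args.port:

# curl http://localhost:8080/printer
# curl http://localhost:8080/job
# curl -X POST http://localhost:8080/printer/command -d '{"command": "..."}'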
Example #17
def main():

    args = Args()

    with Display(device=args.device, driver=args.driver) as display:
        monitor = display.connect_monitor(args.monitor)

        if isinstance(monitor, Virtual):
            monitor.orientation = args.orientation

        display.scale = 8

        now = time.monotonic()

        rotori = 0

        try:
            while True:
                start = now
                while now - start < 1.0:
                    now = time.monotonic()

                    if display.channels == 3:
                        display.buffer[:] = intro_effect(
                            display.width, display.height)(now) * 0.75

                        display.pixels[0:2, 0:2] = np.array((0xff, 0, 0))
                        display.pixels[2:6, 0] = np.array((0, 0xff, 0))
                        display.pixels[2:6, 1] = np.array((0, 0, 0))
                        display.pixels[0, 2:8] = np.array((0, 0, 0xff))
                        display.pixels[1, 2:8] = np.array((0, 0, 0))

                    elif display.channels == 1:
                        display.buffer[:, :, 0] = (
                            intro_effect(display.width, display.height)(now) *
                            0.75)[:, :, 1]

                    display.show()
                    time.sleep(0.01)
                rotori += 1
                display.rotation = rotori & 3
                if isinstance(display, Virtual):
                    display.orientation = (rotori >> 2) & 3
                display.flip_horizontal = (rotori >> 4) & 1
                display.flip_vertical = (rotori >> 5) & 1

        except KeyboardInterrupt:
            pass
Example #18
def parse_args(_=None):
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('source', help='Path to the file with the source code.')
    arg_parser.add_argument('-p', '--stop-after-parsing',
                            help='Stop after parsing and print the AST as JSON.',
                            action='store_true')
    arg_parser.add_argument('-i', '--stop-before-type-inferring',
                            help='Stop before type inference and print the system of equations.',
                            action='store_true')
    arg_parser.add_argument('-g', '--skip-header-saving',
                            help='Skip saving the module header file.',
                            action='store_true')
                            action='store_true')

    args = arg_parser.parse_args()
    Args(args)

    return handle_next_stage(None, read_source_code)
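Assuming the module is run as a script (the entry-point name below is hypothetical), the three flags map onto the pipeline stages seen in the other examples:

# Print the AST as JSON and stop:
#   python compiler.py source.tml --stop-after-parsing
# Print the system of type equations and stop:
#   python compiler.py source.tml --stop-before-type-inferring
# Compile without saving the module header:
#   python compiler.py source.tml --skip-header-saving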
Example #19
def get_graph_data(dataset, isModelDataset=False):
    """
        Given a dataset name, loads in the graphs of that dataset.
        Creates a specific argument object for that dataset to allow
        for different datasets to be created.
        
        return: If isModelDataset: returns args, train, val, test split
                else: returgn args, data
    """
    args_data = Args()
    args_data.change_dataset(dataset)

    # Load the graph data (consider using presaved datasets with a graph load list)
    graphs = create(args_data)
    graphs_len = len(graphs)
    random.seed(123)
    shuffle(graphs)

    # Display some graph stats
    graph_node_avg = 0
    for graph in graphs:
        graph_node_avg += graph.number_of_nodes()
    graph_node_avg /= graphs_len
    print('Average num nodes', graph_node_avg)

    args_data.max_num_node = max(
        [graphs[i].number_of_nodes() for i in range(graphs_len)])
    max_num_edge = max(
        [graphs[i].number_of_edges() for i in range(graphs_len)])
    min_num_edge = min(
        [graphs[i].number_of_edges() for i in range(graphs_len)])

    # show graphs statistics
    print('total graph num: {}'.format(graphs_len))
    print('max number node: {}'.format(args_data.max_num_node))
    print('max/min number edge: {}; {}'.format(max_num_edge, min_num_edge))
    print('max previous node: {}'.format(args_data.max_prev_node))

    if isModelDataset:
        # split datasets
        graphs_len = len(graphs)
        graphs_test = graphs[int(0.8 * graphs_len):]
        graphs_train = graphs[0:int(0.8 * graphs_len)]
        graphs_validate = graphs[0:int(0.2 * graphs_len)]
        return args_data, graphs_train, graphs_validate, graphs_test

    return args_data, graphs
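A hypothetical call, assuming 'grid' is one of the dataset names Args.change_dataset accepts:

args_data, train, val, test = get_graph_data('grid', isModelDataset=True)
print(len(train), len(val), len(test))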
Example #20
def main():
    logger = set_log()
    logger.info('Program started')

    args = Args().get_all_args()
    set_seed(args)
    # Train the model
    # Choose the trainer according to the args
    Trainer = getattr(trainers, args.trainer_name)
    trainer = Trainer(args)
    args.do_train = True
    if args.do_train:
        trainer.train()

    # Before testing, the previously saved best model should be loaded
    if args.do_test:
        trainer.test()
Example #21
File: main.py  Project: wmww/bfstack
def main() -> None:
    args = Args()
    args.parse(sys.argv[1:]) # strip off the first argument (program name)
    if args.show_info:
        logging.basicConfig(level=logging.INFO)
    success = False
    try:
        io = UserIo()
        run(args, io)
        success = True
    except FileNotFoundError as e:
        logger.error(e)
    except ParseError as e:
        logger.error('Syntax error: ' + str(e))
    except ProgramError as e:
        logger.error('Program failed: ' + str(e))
    if not success:
        exit(1)
Example #22
File: main.py  Project: flixpar/AlphaTSP
def main(args):
    a = Args()
    if args.experiment == "nearestneighbor":
        nearestneighbor.run(a)
    elif args.experiment == "mcts":
        mcts.run(a)
    elif args.experiment == "exact":
        exact.run(a)
    elif args.experiment == "gurobi":
        gurobi.run(a)
    elif args.experiment == "insertion":
        insertion.run(a)
    elif args.experiment == "policy":
        policy.run(a)
    elif args.experiment == "parallel":
        parallel.run(a)
    elif args.experiment == "selfplay":
        selfplay.run(a)
    else:
        raise ValueError("Invalid experiment selection.")
Example #23
class Logger():
    args = Args().args
    cwd = os.getcwd()
    with open(cwd + "/logging.json", 'r') as logging_configuration_file:
        config_dict = json.load(logging_configuration_file)
    if 'console' in config_dict['handlers']:
        config_dict['handlers']['console']['level'] = args.verbosity
        config_dict['handlers']['console']['formatter'] = args.verbosity
    # if 'level' in config_dict['root']:
    #     config_dict['root']['level'] = args.verbosity
    for handler in config_dict['handlers']:
        # config_dict['handlers'][handler]['formatter'] = args.verbosity
        if 'filename' in config_dict['handlers'][handler]:
            if path.isfile(config_dict['handlers'][handler]['filename']):
                print("Deleting logfile %s" %
                      config_dict['handlers'][handler]['filename'])
                remove(config_dict['handlers'][handler]['filename'])

    logging.config.dictConfig(config_dict)
    logger = logging.getLogger(__name__)
    logger.info("Logger Initialized")
    logger.debug("DEBUG Logger Initialized")
Example #24
from debug import Debug  # noqa
from recovery import Recovery  # noqa
from util import Util  # noqa

# Sikuli settings
sikuli.Settings.MinSimilarity = Globals.DEFAULT_SIMILARITY
sikuli.Settings.WaitScanRate = Globals.SIKULI_SCANRATE
sikuli.Settings.ObserveScanRate = Globals.SIKULI_SCANRATE
sikuli.Settings.OcrTextRead = True
sikuli.Settings.AutoWaitTimeout = 1
sikuli.Settings.RepeatWaitTime = 0

# check run-time args
args = None
if len(sys.argv) > 1:
    args = Args(sys.argv)

# check args, and if none provided, load default config
if args and args.mode == 'cfg':
    config = Config(args.cfg)
elif args and args.mode == 'debug':
    Debug.find(args.window, args.target, args.similarity)
    sys.exit(0)
elif args and args.mode == 'debugc':
    Debug.continuously_find(args.window, args.target, args.similarity)
    sys.exit(0)  # never actually reached
else:
    config = Config('config.ini')

kcauto = KCAuto(config)
Example #25
    def parse_args(self):
        """Parse CLI args."""
        Args(self.tcex.parser)
        self.args = self.tcex.args
Example #26
    def correction(self, inconnue):
        # in our case there is only one set of inputs
        self.datasets = [Args(inconnue)]

        def check(inconnue):
            return self.connue + inconnue + self.connue

        return ExerciceFunction.correction(self, check)
Example #27
    def parse_args(self):
        """Parse CLI args."""
        self.tcex.log.info('Parsing Args.')
        Args(self.tcex)
        self.args = self.tcex.args
Example #28
def CAN(distX, c, k=15, r=-1, islocal=True):
    """

    :param distX: n x n distance matrix for n data points
    :param c: number of clusters
    :param k: number of neighbors used to build the initial graph, and to set r when r <= 0
    :param r: parameter; can be set to a large enough value. If r < 0, it is determined by the algorithm from k
    :param islocal:
        1: only update the similarities of the k neighbor pairs, faster
        0: update all the similarities
    :return:
        A: num*num learned symmetric similarity matrix
        evs: eigenvalues of learned graph Laplacian in the iterations
    """

    num = distX.shape[0]
    arg = Args()
    NITER = arg.n_iter

    if arg.debug:
        print('\nInitial Parameters:',
              '\nc = ', c,
              '\nk = ', k,
              '\nr = ', r,
              '\nislocal = ', islocal,
              '\ndimension = ', num,
              '\nNITER = ', NITER)

    distX1 = np.sort(distX, axis=1)
    idx = np.argsort(distX, axis=1)
    A = np.zeros((num, num))
    rr = get_gamma(distX, k)

    if r < 0:
        r = np.mean(rr)

    lmd = np.mean(rr)

    eps = 10e-10
    for i in range(0, num):
        A[i, idx[i, 1: k + 2]] = 2 * (distX1[i, k + 1] - distX1[i, 1: k + 2]) / (r + eps)

    print("\nAverage number of non zero elements per row before optimizing: ", (A > 0).sum() / 148,
          "\nNumber of negative elements: ", (A < 0).sum())

    A0 = (A + A.T) / 2
    A0 = row_normalize(A0)
    D0 = np.diag(A0.sum(axis=1))
    L0 = D0 - A0
    evs, F = get_eigen(L0, c + 1)  # Taking c + 1 eig values
    F = F[:, 0:c]  # removing last one
    ev = []
    if sum(evs) < 10e-10:
        raise Exception(
            'The number of connected components in the graph is greater than {}'.format(c))

    for itr in range(0, NITER):
        distF = L2_distance(F.T, F.T)
        distF = np.sqrt((distF >= 0) * distF)
        A = np.zeros((num, num))

        for i in range(0, num):
            if islocal:
                idxa0 = idx[i, 1:k+1]
            else:
                idxa0 = np.arange(num)

            dfi = distF[i, idxa0]
            dxi = distX[i, idxa0]
            d = dxi + lmd * dfi
            ad = -d / (2 * r)
            res, _ = EProjSimplex(ad)
            A[i, idxa0] = res

        np.fill_diagonal(A, 0)
        A = (A + A.T) / 2
        A = row_normalize(A)
        D = np.diag(A.sum(axis=1))
        L = D - A
        F_old = F

        evs, F = get_eigen(L, c + 1)
        ev.append(evs)
        F = F[:, 0:c]  # removing last one

        if sum(evs[0:c]) > 10e-10:
            lmd = 2 * lmd
        elif sum(evs) < 10e-10:
            lmd = lmd / 2
            F = F_old
        else:
            break
        print("Iter:", iter,
              "lambda = ", lmd,
              "Regularization Parameter r = ", r)

    return A, ev


if __name__ == '__main__':
    # Read data
    data_dir = os.path.join(os.path.dirname(os.getcwd()), 'AD-Data_Organized')
    sub = '027_S_2336'
    connectome_list = readMatricesFromDirectory(os.path.join(data_dir, sub))
    args = Args()

    output_dir = os.path.join(data_dir, sub + '_smoothed')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    smoothed_connectomes = []
    for t in range(0, len(connectome_list)):
        A = connectome_list[t]
        dX = np.sqrt(1 - A)
        np.fill_diagonal(dX, 2)
        S, _ = CAN(dX, args.n_module, k=args.k, islocal=True)
        #S = (S.T + S)/2
        np.fill_diagonal(S, 0)
        S = row_normalize(S)
        smoothed_connectomes.append(S)
Example #30
File: __init__.py  Project: zhangqb/rs
def main():

    args = Args().get_args()
    Core(args).recommend()