Example no. 1
def main():
    """ Main routine of PINK data preconditioning """

    parser = argparse.ArgumentParser(description='PINK data preconditioning')
    parser.add_argument('data',
                        help='Data input file (.npy or .bin)',
                        action=tools.check_extension({'npy', 'bin'}))
    parser.add_argument('-o', '--output', help='Data output file')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Be talkative')
    parser.add_argument(
        '-s',
        '--scale',
        action='store_true',
        help='Scale the input data to be within the range [0, 1]')
    args = parser.parse_args()

    if os.path.splitext(args.data)[1][1:] == "npy":
        data = np.load(args.data).astype(np.float32)
    elif os.path.splitext(args.data)[1][1:] == "bin":
        data = tools.load_data(args.data)

    print('shape:             ', np.shape(data))
    print('size:              ', data.size)
    print('min value:         ', np.amin(data))
    print('max value:         ', np.amax(data))
    print('non-zero elements: ', np.count_nonzero(data))
    print('sparsity:          ', np.count_nonzero(data) / data.size)

    if args.scale:

        print('Data will be linearly scaled to be within the range [0.0, 1.0]')

        min_element = np.amin(data)
        max_element = np.amax(data)
        factor = 1 / (max_element - min_element)

        print('min value: ', min_element)
        print('max value: ', max_element)
        print('factor: ', factor)

        data = (data - min_element) * factor

        print('min value: ', np.amin(data))
        print('max value: ', np.amax(data))

    if args.output:
        print('Output file written at', args.output)
        if os.path.splitext(args.output)[1][1:] == "npy":
            np.save(args.output, data)
        elif os.path.splitext(args.output)[1][1:] == "bin":
            tools.save_data(args.output, data)
        else:
            raise RuntimeError('Unsupported output file extension: ',
                               os.path.splitext(args.output)[1][1:])

    print('All done.')
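
Example no. 1 passes tools.check_extension({'npy', 'bin'}) as the argparse action; that helper is not shown on this page. Below is a minimal sketch of how such an action factory could be written; the class name and error message are assumptions, not the project's actual code.

import argparse
import os


def check_extension(choices):
    """Build an argparse.Action subclass that rejects unexpected file extensions (hypothetical sketch)."""

    class CheckExtension(argparse.Action):
        def __call__(self, parser, namespace, value, option_string=None):
            ext = os.path.splitext(value)[1][1:]
            if ext not in choices:
                parser.error("unsupported extension '{0}', expected one of: {1}"
                             .format(ext, ", ".join(sorted(choices))))
            setattr(namespace, self.dest, value)

    return CheckExtension

argparse instantiates the returned class itself, so the factory only needs to return it.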
Example no. 2
    def source_data(self, redis_data):
        order_data = json.loads(redis_data)  # order number, push API, credit-report API
        try:
            # Get the data
            orderNo = order_data['orderNo']
            reportUrl = order_data['reportUrl']
            operatorUrl = order_data['operatorUrl']
            baseUrl = order_data['baseUrl']

            my_sign = generateMD5('HZRESK001A20181122huichuanghuichuang666!')
            try:
                # Merge the data
                new_data = mergeData(orderNo, baseUrl, my_sign, reportUrl,
                                     operatorUrl)
                # Save the data
                logger.info('save {0} original data ...'.format(
                    str(new_data['orderNo'])))

                save_data(new_data)
                logger.info('{0}-SUCCESS'.format(str(new_data['orderNo'])))

            except Exception as e:
                msg = '{0} failed to save data {1}'.format(
                    str(order_data['orderNo']), datetime.datetime.now())
                logger.info(msg)
                result_data = {
                    "desc": '-1',
                    "status": "internal error",
                    "orderNo": order_data['orderNo'],
                    "mode_type": '',
                    "comprehensive_score": "0"
                }
                save_score(result_data)
                # send_mail('msg')

            else:

                # Compute the scores

                try:
                    model_type = 3
                    r_data = MyThread(rule_data, [
                        new_data,
                    ])
                    p_data = MyThread(param_generate, [new_data, model_type])
                    r_data.start()
                    p_data.start()
                    r_data.join()
                    p_data.join()
                    logger.info('{0}-get ruleData:'.format(
                        order_data['orderNo']))

                    ruleData = r_data.get_result()
                    logger.info('{0}-get model_data:'.format(
                        order_data['orderNo']))
                    model_data = p_data.get_result()

                    # Save the rule and model scores to the database
                    logger.info('{0}-save ruleData and model_data:'.format(
                        order_data['orderNo']))
                    ths = Thread(target=saveRuleModelScore,
                                 args=(json.loads(model_data), ruleData))
                    ths.start()
                    score = final_score(model_data, ruleData)
                    result_data = {
                        "merchantId": new_data['merchantId'],
                        "appId": new_data['appId'],
                        "channel": new_data['data']['channel'],
                        "Time": datetime.datetime.now()
                    }
                    result_data.update(json.loads(score))
                    logger.info('{0}-save finally score...:'.format(
                        order_data['orderNo']))
                    save_score(result_data)
                    logger.info('{0}-save finally score SUCCESS:'.format(
                        order_data['orderNo']))

                except Exception as e:
                    msg1 = '{0} param_generate or rule_data ERROR'.format(
                        new_data['orderNo'])
                    logger.info(msg1)
                    result_data = {
                        "merchantId": "",
                        "appId": "",
                        "desc": '-3',
                        "status": "内部错误",
                        "orderNo": order_data['orderNo'],
                        "mode_type": '',
                        "comprehensive_score": "0"
                    }
                    save_score(result_data)
                    # send_mail(msg1)
        except Exception as e:
            # Return a model score of 0
            msg2 = '{0}-source_data Error:'.format(order_data['orderNo'])
            logger.info(msg2)
            logger.info(traceback.format_exc())
            # Save the error locally
            error_msg = str(traceback.format_exc()).replace('"', "'").replace(
                '\n', '')
            error_msg_dict = {
                'orderNo': order_data['orderNo'],
                "error_msg": str(error_msg),
                'error_type': 2
            }
            save_error(error_msg_dict)
            result_data = {
                "merchantId": "",
                "appId": "",
                "desc": '-2',
                "status": "内部错误",
                "orderNo": order_data['orderNo'],
                "mode_type": '',
                "comprehensive_score": "0"
            }
            save_score(result_data)
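
Example no. 2 runs rule_data and param_generate in parallel through MyThread objects and reads their return values with get_result(). That wrapper is not shown here; a plausible minimal sketch, assuming it simply stores the target's return value:

from threading import Thread


class MyThread(Thread):
    """Thread that keeps the return value of the callable it runs (hypothetical sketch)."""

    def __init__(self, func, args):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args
        self.result = None

    def run(self):
        self.result = self.func(*self.args)

    def get_result(self):
        # Call join() first so the result is guaranteed to be set.
        return self.result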
Example no. 3
def run_experiment(arglist):

    # Get the experiment parameters
    p = tools.Params("context_dmc")
    p.set_by_cmdline(arglist)

    # Open up the stimulus window
    m = tools.WindowInfo(p)
    win = visual.Window(**m.window_kwargs)

    # Set up the stimulus objects
    fix = visual.PatchStim(win, tex=None, mask="circle",
                           color=p.fix_color, size=p.fix_size)
    r_fix = visual.PatchStim(win, tex=None, mask="circle",
                             color=p.fix_resp_color, size=p.fix_size)
    grate = visual.PatchStim(win, "sin", "circle", sf=p.stim_sf,
                             size=p.stim_size, opacity=1)
    color = visual.PatchStim(win, None, "circle",
                             size=p.stim_size, opacity=.4)
    disk = visual.PatchStim(win, tex=None, mask="circle",
                            color=win.color, size=p.stim_size / 6)
    stims = [grate, color, disk, fix]

    # TODO more options in params.py
    color_text = visual.TextStim(win, text="color")
    orient_text = visual.TextStim(win, text="orient")
    cue_stims = dict(color=color_text, orient=orient_text)

    # Draw the instructions and wait to go
    instruct = dedent("""
    Look at some things and do some stuff""")  # TODO
    tools.WaitText(win, instruct, height=.7)()

    # Start a data file and write the params to it
    f, fname = tools.start_data_file(p.subject, "context_dmc")
    p.to_text_header(f)

    # TODO log by stim, add total time
    header = ["trial", "block", "context",
              "samp_color", "samp_orient",
              "samp_color_cat", "samp_orient_cat",
              "targ_color", "targ_orient",
              "targ_color_cat", "targ_orient_cat",
              "delay", "response", "rt", "acc", "elapsed"]
    tools.save_data(f, *header)

    # Set up output variable
    save_name = op.join("./data", op.splitext(fname)[0])

    # Start a clock and flush the event buffer
    exp_clock = core.Clock()
    trial_clock = core.Clock()
    event.clearEvents()

    # Get the schedule for this run
    sched_file = "schedules/run_%02d.csv" % p.run
    s = pandas.read_csv(sched_file)

    context_map = ["color", "orient"]

    # Main experiment loop
    # --------------------
    try:

        # Dummy scans
        fix.draw()
        win.flip()
        core.wait(p.dummy_trs * p.tr)

        for t in s.trial:

            context = context_map[s.context[t]]

            # Cue period
            cue_stims[context].draw()
            win.flip()
            core.wait(p.cue_dur)

            # Pre-stim fixation (PSI)
            fix.draw()
            win.flip()
            core.wait(s.psi_tr[t] * p.tr)

            # Sample stimulus
            a_cat = s.attend_cat[t]
            a_exemp = s.attend_exemp[t]

            i_cat = s.ignore_cat[t]
            i_exemp = s.ignore_exemp[t]

            if context == "color":
                stim_color = p.cat_colors[a_cat][a_exemp]
                stim_orient = p.cat_orients[i_cat][i_exemp]
            else:
                stim_color = p.cat_colors[i_cat][i_exemp]
                stim_orient = p.cat_orients[a_cat][a_exemp]

            color.setColor(stim_color)
            grate.setOri(stim_orient)

            draw_all(*stims)
            win.flip()
            core.wait(p.stim_samp_dur)

            # Post stim fix and ISI
            fix.draw()
            win.flip()
            total_isi = p.stim_sfix_dur + s.isi_tr[t] * p.tr
            core.wait(total_isi)

            # Target stimulus
            # TODO ugh this logic sucks
            # TODO also check that it works in general
            match = s.match[t]
            idx2 = randint(2)
            idx3 = randint(3)
            if match:
                if context == "color":
                    stim_color = p.cat_colors[a_cat][idx3]
                    stim_orient = p.cat_orients[idx2][idx3]
                elif context == "orient":
                    stim_color = p.cat_colors[idx2][idx3]
                    stim_orient = p.cat_orients[a_cat][idx3]
            else:
                if context == "color":
                    stim_color = p.cat_colors[int(not a_cat)][idx3]
                    stim_orient = p.cat_orients[idx2][idx3]
                elif context == "orient":
                    stim_color = p.cat_colors[idx2][idx3]
                    stim_orient = p.cat_orients[int(not a_cat)][idx3]

            color.setColor(stim_color)
            grate.setOri(stim_orient)

            draw_all(*stims)
            win.flip()
            core.wait(p.stim_targ_dur)

            # Response
            r_fix.draw()
            win.flip()
            core.wait(p.resp_dur)

            # Collect the response
            keys = event.getKeys(timeStamped=trial_clock)
            corr, response, resp_rt = 0, 0, -1
            for key, stamp in keys:
                if key in p.quit_keys:
                    core.quit()
                elif key in p.match_keys:
                    corr = 1 if match else 0
                    response = 1
                    resp_rt = stamp
                    break
                elif key in p.nonmatch_keys:
                    corr = 0 if match else 1
                    response = 2
                    resp_rt = stamp
                    break

            # ITI interval
            fix.draw()
            win.flip()
            core.wait(s.iti_tr[t] * p.tr)

    finally:
        # Clean up
        f.close()
        win.close()
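
Examples no. 3, 4 and 8 call draw_all(*stims) before each win.flip() without defining it. It is presumably just a loop over the stimuli's draw() methods; a sketch under that assumption:

def draw_all(*stims):
    """Draw every PsychoPy stimulus passed in; the next win.flip() shows them together."""
    for stim in stims:
        stim.draw()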
Example no. 4
def run_experiment(arglist):

    # Get the experiment parameters
    p = tools.Params("gape")
    p.set_by_cmdline(arglist)

    # Sequence categories
    cat_list = [[0, 1, 0, 1], [0, 0, 1, 1], [0, 1, 1, 0]]
    cat_names = ["alternated", "paired", "reflected"]

    # Get this run's schedule in a manner that is consistent
    # within and random between subjects
    if p.train:
        letter = letters[p.run - 1]
        p.sched_id = "train_%s" % letter
        sched_file = "sched/schedule_%s.csv" % p.sched_id

    else:
        state = RandomState(abs(hash(p.subject)))
        choices = list(letters[:p.total_schedules])
        p.sched_id = state.permutation(choices)[p.run - 1]
        sched_file = "sched/schedule_%s.csv" % p.sched_id

    # Read in this run's schedule
    s = read_csv(sched_file)

    # Max the screen brightness
    tools.max_brightness(p.monitor_name)

    # Open up the stimulus window
    calib.monitorFolder = "./calib"
    mon = calib.Monitor(p.monitor_name)
    m = tools.WindowInfo(p, mon)
    win = visual.Window(**m.window_kwargs)

    # Set up the stimulus objects
    fix = visual.PatchStim(win, tex=None, mask="circle",
                           color=p.fix_color, size=p.fix_size)
    a_fix = visual.PatchStim(win, tex=None, mask="circle",
                             color=p.fix_antic_color, size=p.fix_size)
    r_fix = visual.PatchStim(win, tex=None, mask="circle",
                             color=p.fix_resp_color, size=p.fix_size)
    d_fix = visual.PatchStim(win, tex=None, mask="circle",
                             color=p.fix_demo_color, size=p.fix_size)
    c_fix = visual.PatchStim(win, tex=None, mask="circle",
                             color=p.fix_catch_color, size=p.fix_size)
    b_fix = visual.PatchStim(win, tex=None, mask="circle",
                             color=p.fix_break_color, size=p.fix_size)
    halo = visual.PatchStim(win, tex=None, mask=p.demo_halo_mask,
                            opacity=p.demo_halo_opacity,
                            color=p.demo_halo_color,
                            size=p.demo_halo_size)
    grate = visual.PatchStim(win, "sin", p.stim_mask, size=p.stim_size,
                             contrast=p.stim_contrast, sf=p.stim_sf,
                             opacity=p.stim_opacity)
    disk = visual.PatchStim(win, tex=None, mask=p.stim_mask,
                            color=win.color, size=p.stim_disk_ratio)
    stims = [grate, disk, fix]

    # Set up some timing variables
    running_time = 0
    antic_secs = p.tr
    demo_secs = 4 * p.demo_stim_dur + 3 * p.demo_stim_isi + p.tr
    seq_secs = p.tr + 4 * p.stim_dur + 3 * p.stim_isi
    catch_secs = p.tr
    rest_secs = p.rest_trs * p.tr

    # Draw the instructions and wait to go
    instruct = dedent("""
    Watch the sample sequence and say if the target sequences match

    Blue dot: sample sequence
    Red dot: get ready
    Orange dot: relax
    Green dot: say if sequence matched the sample
    Button 1: same    Button 2: different

    Grey dot: quick break


    Experimenter: Press space to prep for scan""")  # TODO
    # Draw the instructions and wait to go
    tools.WaitText(win, instruct, height=.7)(check_keys=["space"])

    # Possibly wait for the scanner
    if p.fmri:
        tools.wait_for_trigger(win, p)

    # Start a data file and write the params to it
    f, fname = tools.start_data_file(p.subject, p.experiment_name,
                                     p.run, train=p.train)
    p.to_text_header(f)

    # Save run params to JSON
    save_name = op.join("./data", op.splitext(fname)[0])
    p.to_json(save_name)

    # Write the datafile header
    header = ["trial", "block",
              "cat_id", "cat_name",
              "event_type",
              "event_sched", "event_time",
              "ori_a", "ori_b",
              "oddball", "odd_item", "odd_orient",
              "iti", "response", "rt", "acc"]
    tools.save_data(f, *header)

    # Start a clock and flush the event buffer
    exp_clock = core.Clock()
    trial_clock = core.Clock()
    event.clearEvents()

    # Main experiment loop
    # --------------------
    try:

        # Dummy scans
        fix.draw()
        win.flip()
        dummy_secs = p.dummy_trs * p.tr
        running_time += dummy_secs
        wait_check_quit(dummy_secs, p.quit_keys)

        for t in s.trial:

            cat_seq = cat_list[s.cat_id[t]]
            block_ori_list = np.array([s.ori_a[t], s.ori_b[t]])[cat_seq]

            # Set up some defaults for variables that aren't always set
            oddball_seq = [0, 0, 0, 0]
            odd_item, odd_ori = -1, -1
            acc, response, resp_rt = -1, -1, -1

            # Possibly rest and then bail out of the rest of the loop
            if s.ev_type[t] == "rest":
                if p.train and not p.fmri:
                    b_fix.draw()
                    win.flip()
                    wait_check_quit(2)
                    before = exp_clock.getTime()
                    msg = "Quick break! Press space to continue."
                    tools.WaitText(win, msg, height=.7)(check_keys=["space"])
                    b_fix.draw()
                    win.flip()
                    wait_check_quit(2)
                    after = exp_clock.getTime()
                    rest_time = after - before
                    running_time += rest_time
                    continue
                else:
                    b_fix.draw()
                    win.flip()
                    wait_check_quit(rest_secs)
                    running_time += rest_secs
                    continue
 
            # Otherwise, we always get an anticipation
            if p.antic_fix_dur <= p.tr:  # possibly problematic
                fix.draw()
                win.flip()
                core.wait(p.tr - p.antic_fix_dur)
            if s.ev_type[t] == "demo":
                stim = d_fix
            else:
                stim = a_fix
            end_time = running_time + p.antic_fix_dur
            tools.precise_wait(win, exp_clock, end_time, stim)
            running_time += antic_secs

            # The event is about to happen so stamp that time
            event_sched = running_time
            event_time = exp_clock.getTime()

            # Demo sequence
            if s.ev_type[t] == "demo":

                for i, ori in enumerate(block_ori_list):
                    # Draw each stim
                    grate.setOri(ori)
                    halo.draw()
                    draw_all(*stims)
                    d_fix.draw()
                    win.flip()
                    core.wait(p.demo_stim_dur)

                    # Short isi fix
                    if i < 3:
                        d_fix.draw()
                        win.flip()
                        core.wait(p.demo_stim_isi)
                    check_quit()

                # Demo always has >1 TR fixation
                fix.draw()
                win.flip()
                wait_check_quit(p.tr)

                # Update timing
                running_time += demo_secs

            # Proper test sequence
            if s.ev_type[t] == "seq":

                # If this is an oddball, figure out where
                if s.oddball[t]:
                    oddball_seq = multinomial(1, [.25] * 4).tolist()
                    odd_item = oddball_seq.index(1)

                # Iterate through each element in the sequence
                for i, ori in enumerate(block_ori_list):

                    # Set the grating attributes
                    if oddball_seq[i]:
                        ori_choices = [o for o in p.stim_orients
                                       if not o == ori]
                        odd_ori = ori_choices[randint(3)]
                        grate.setOri(odd_ori)
                    else:
                        grate.setOri(ori)
                    grate.setPhase(uniform())

                    # Draw the grating set
                    draw_all(*stims)
                    win.flip()
                    core.wait(p.stim_dur)

                    # ISI Fix (on all but last stim)
                    if i < 3:
                        fix.draw()
                        win.flip()
                        core.wait(p.stim_isi)
                    check_quit()

                # Response fixation
                r_fix.draw()
                trial_clock.reset()
                event.clearEvents()
                win.flip()
                acc, response, resp_rt = wait_get_response(p,
                                                           trial_clock,
                                                           s.oddball[t],
                                                           p.resp_dur)

                # Update timing
                running_time += seq_secs

            # Catch trial
            if s.ev_type[t] == "catch":
                c_fix.draw()
                win.flip()
                wait_check_quit(p.tr)
                running_time += catch_secs

            # Save data to the datafile
            data = [t, s.block[t],
                    s.cat_id[t], cat_names[s.cat_id[t]],
                    s.ev_type[t],
                    event_sched, event_time,
                    s.ori_a[t], s.ori_b[t],
                    s.oddball[t],
                    odd_item, odd_ori, s.iti[t],
                    response, resp_rt, acc]
            tools.save_data(f, *data)

            # ITI interval
            # Go by screen refreshes for precise timing
            this_iti = s.iti[t] * p.tr
            end_time = running_time + this_iti
            tools.precise_wait(win, exp_clock, end_time, fix)
            running_time += this_iti

            
    finally:
        # Clean up
        f.close()
        win.close()

    # Good execution, print out some info
    try:
        data_file = op.join("data", fname)
        with open(data_file, "r") as fid:
            lines = fid.readlines()
            n_comments = len([l for l in lines if l.startswith("#")])
        df = read_csv(data_file, skiprows=n_comments, na_values=["-1"])

        info = dict()
        time_error = df.event_sched - df.event_time
        info["run"] = p.run
        info["acc"] = df.acc.mean()
        info["mean_rt"] =  df.rt.mean()
        info["missed_resp"] = (df.response == 0).sum()
        info["time_error_mean"] = abs(time_error).mean()
        info["time_error_max"] = max(time_error)

        print dedent("""Performance summary for run %(run)d:

        Accuracy: %(acc).3f
        Mean RT: %(mean_rt).3f
        Missed responses: %(missed_resp)d

        Mean timing error: %(time_error_mean).4f
        Max timing error: %(time_error_max).4f
        """ % info)

    except Exception as err:
        print "Could not read data file for summary"
        print err
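
Example no. 4 relies on wait_check_quit() and check_quit() to keep polling for the quit keys during long fixation periods. Neither helper is shown on this page; the sketch below is one way they could be written with the PsychoPy core and event modules (the default key list is an assumption):

from psychopy import core, event


def check_quit(quit_keys=("escape", "q")):
    """Quit the experiment cleanly if one of the quit keys was pressed."""
    if event.getKeys(keyList=list(quit_keys)):
        core.quit()


def wait_check_quit(seconds, quit_keys=("escape", "q"), poll=0.5):
    """Wait for the given duration while checking the keyboard so quit presses are not missed."""
    start = core.getTime()
    while True:
        remaining = seconds - (core.getTime() - start)
        if remaining <= 0:
            break
        check_quit(quit_keys)
        core.wait(min(poll, remaining))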
Example no. 5
if __name__ == "__main__":
    start_time = time.time()
    time_budget = float("inf")
    parser = argparse.ArgumentParser()
    parser.add_argument("--index", type=int)
    parser.add_argument("--file_param", type=str)
    parser.add_argument("--file_ready", type=str)
    parser.add_argument("--file_result", type=str)
    parser.add_argument("--file_lock", type=str)
    args = parser.parse_args()
    aoe_data = None
    if torch.cuda.is_available():
        torch.zeros(1).cuda()
    with FileLock(args.file_lock):
        save_data(args.file_ready, os.getpid())
    while True:
        if aoe_data is None and os.path.exists(file_path("AOE.data")):
            with FileLock(file_path("AOE.ready")):
                aoe_data = load_data(file_path("AOE.data"))
        if os.path.exists(args.file_param):
            if aoe_data is None and os.path.exists(file_path("AOE.data")):
                with FileLock(file_path("AOE.ready")):
                    aoe_data = load_data(file_path("AOE.data"))
            start_time = time.time()  # reset the start time
            with FileLock(args.file_lock):
                param = load_data(args.file_param)
            time_budget = param.time_budget  # reset the time budget
            assert param.model == "ModelEnsemble"
            result = main_ensemble(data=aoe_data,
                                   num_features=param.num_features,
Example no. 6
    def train_predict(data, time_budget, n_class, schema):
        start_time = time.time()
        LOGGER.info("Start!")
        LOGGER.info("time_budget: {0}".format(time_budget))

        data = generate_data(data, LOGGER)
        LOGGER.info("Num of features: {0}".format(data.num_features))
        LOGGER.info("Num of classes: {0}".format(data.num_class))
        params = [
            Param("ModelGCN", [1, [16, 16], "leaky_relu"]),
            Param("ModelGCN", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT4", [1, [32, 32], "leaky_relu"]),
            Param("ModelGCN", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT4", [1, [64, 64], "leaky_relu"]),
            Param("ModelGCN", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT4", [1, [128, 128], "leaky_relu"]),
            Param("ModelGCN", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT4", [1, [256, 256], "leaky_relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [1, [16, 16], "relu"]),
            Param("ModelGCN", [1, [32, 32], "relu"]),
            Param("ModelGAT", [1, [32, 32], "relu"]),
            Param("ModelGAT4", [1, [32, 32], "relu"]),
            Param("ModelGCN", [1, [64, 64], "relu"]),
            Param("ModelGAT", [1, [64, 64], "relu"]),
            Param("ModelGAT4", [1, [64, 64], "relu"]),
            Param("ModelGCN", [1, [128, 128], "relu"]),
            Param("ModelGAT", [1, [128, 128], "relu"]),
            Param("ModelGAT4", [1, [128, 128], "relu"]),
            Param("ModelGCN", [1, [256, 256], "relu"]),
            Param("ModelGAT", [1, [256, 256], "relu"]),
            Param("ModelGAT4", [1, [256, 256], "relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "relu"]),
            Param("ModelGCN", [1, [16, 16], "leaky_relu"]),
            Param("ModelGCN", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT4", [1, [32, 32], "leaky_relu"]),
            Param("ModelGCN", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT4", [1, [64, 64], "leaky_relu"]),
            Param("ModelGCN", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT4", [1, [128, 128], "leaky_relu"]),
            Param("ModelGCN", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT4", [1, [256, 256], "leaky_relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [1, [16, 16], "relu"]),
            Param("ModelGCN", [1, [32, 32], "relu"]),
            Param("ModelGAT", [1, [32, 32], "relu"]),
            Param("ModelGAT4", [1, [32, 32], "relu"]),
            Param("ModelGCN", [1, [64, 64], "relu"]),
            Param("ModelGAT", [1, [64, 64], "relu"]),
            Param("ModelGAT4", [1, [64, 64], "relu"]),
            Param("ModelGCN", [1, [128, 128], "relu"]),
            Param("ModelGAT", [1, [128, 128], "relu"]),
            Param("ModelGAT4", [1, [128, 128], "relu"]),
            Param("ModelGCN", [1, [256, 256], "relu"]),
            Param("ModelGAT", [1, [256, 256], "relu"]),
            Param("ModelGAT4", [1, [256, 256], "relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "relu"]),
            Param("ModelGCN", [1, [16, 16], "leaky_relu"]),
            Param("ModelGCN", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT4", [1, [32, 32], "leaky_relu"]),
            Param("ModelGCN", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT4", [1, [64, 64], "leaky_relu"]),
            Param("ModelGCN", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT4", [1, [128, 128], "leaky_relu"]),
            Param("ModelGCN", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT4", [1, [256, 256], "leaky_relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [1, [16, 16], "relu"]),
            Param("ModelGCN", [1, [32, 32], "relu"]),
            Param("ModelGAT", [1, [32, 32], "relu"]),
            Param("ModelGAT4", [1, [32, 32], "relu"]),
            Param("ModelGCN", [1, [64, 64], "relu"]),
            Param("ModelGAT", [1, [64, 64], "relu"]),
            Param("ModelGAT4", [1, [64, 64], "relu"]),
            Param("ModelGCN", [1, [128, 128], "relu"]),
            Param("ModelGAT", [1, [128, 128], "relu"]),
            Param("ModelGAT4", [1, [128, 128], "relu"]),
            Param("ModelGCN", [1, [256, 256], "relu"]),
            Param("ModelGAT", [1, [256, 256], "relu"]),
            Param("ModelGAT4", [1, [256, 256], "relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "relu"]),
            Param("ModelGCN", [1, [16, 16], "leaky_relu"]),
            Param("ModelGCN", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT", [1, [32, 32], "leaky_relu"]),
            Param("ModelGAT4", [1, [32, 32], "leaky_relu"]),
            Param("ModelGCN", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT", [1, [64, 64], "leaky_relu"]),
            Param("ModelGAT4", [1, [64, 64], "leaky_relu"]),
            Param("ModelGCN", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT", [1, [128, 128], "leaky_relu"]),
            Param("ModelGAT4", [1, [128, 128], "leaky_relu"]),
            Param("ModelGCN", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT", [1, [256, 256], "leaky_relu"]),
            Param("ModelGAT4", [1, [256, 256], "leaky_relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "leaky_relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "leaky_relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "leaky_relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "leaky_relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "leaky_relu"]),
            Param("ModelGCN", [1, [16, 16], "relu"]),
            Param("ModelGCN", [1, [32, 32], "relu"]),
            Param("ModelGAT", [1, [32, 32], "relu"]),
            Param("ModelGAT4", [1, [32, 32], "relu"]),
            Param("ModelGCN", [1, [64, 64], "relu"]),
            Param("ModelGAT", [1, [64, 64], "relu"]),
            Param("ModelGAT4", [1, [64, 64], "relu"]),
            Param("ModelGCN", [1, [128, 128], "relu"]),
            Param("ModelGAT", [1, [128, 128], "relu"]),
            Param("ModelGAT4", [1, [128, 128], "relu"]),
            Param("ModelGCN", [1, [256, 256], "relu"]),
            Param("ModelGAT", [1, [256, 256], "relu"]),
            Param("ModelGAT4", [1, [256, 256], "relu"]),
            Param("ModelGCN", [2, [16, 16, 16], "relu"]),
            Param("ModelGCN", [2, [32, 32, 32], "relu"]),
            Param("ModelGCN", [2, [64, 64, 64], "relu"]),
            Param("ModelGCN", [2, [128, 128, 128], "relu"]),
            Param("ModelGCN", [2, [256, 256, 256], "relu"]),
            Param("ModelGCN", [3, [16, 16, 16, 16], "relu"]),
            Param("ModelGCN", [3, [32, 32, 32, 32], "relu"]),
            Param("ModelGCN", [3, [64, 64, 64, 64], "relu"]),
            Param("ModelGCN", [3, [128, 128, 128, 128], "relu"]),
            Param("ModelGCN", [3, [256, 256, 256, 256], "relu"]),
        ]

        logger_killed_model_process = [True for _ in range(max_num_parallel)]
        params_running = [None for _ in range(max_num_parallel)]
        while True:
            for i in range(max_num_parallel):
                if time.time() - start_time >= time_budget - 5:
                    break
                if not is_subprocess_alive(pid_model[i]):
                    if logger_killed_model_process[i]:
                        LOGGER.info(
                            "Model process {0} has been killed".format(i))
                        if params_running[i]:
                            params_running[i].running = False
                            params_running[
                                i].retry = params_running[i].retry - 1
                        logger_killed_model_process[i] = False
                if os.path.exists(file_path("AOE_MODEL_{0}.result".format(i))):
                    with FileLock(file_path("AOE_MODEL_{0}.lock".format(i))):
                        temp_result = load_data(
                            file_path("AOE_MODEL_{0}.result".format(i)))
                        if temp_result.result is None:
                            params_running[i].running = False
                            params_running[
                                i].retry = params_running[i].retry - 1
                            os.remove(
                                file_path("AOE_MODEL_{0}.result".format(i)))
                            LOGGER.info("Result of Model {0} is None".format(
                                params_running[i].index))
                        else:
                            params[
                                params_running[i].index].result = temp_result
                            os.remove(
                                file_path("AOE_MODEL_{0}.result".format(i)))
                            LOGGER.info(
                                "Get result of Model {0}, {1}, {2}, {3}, {4}".
                                format(
                                    params_running[i].index,
                                    "loss_train = {0:.6f}".format(
                                        params_running[i].result.loss_train),
                                    "loss_valid = {0:.6f}".format(
                                        params_running[i].result.loss_valid),
                                    "acc_train = {0:.6f}".format(
                                        params_running[i].result.acc_train),
                                    "acc_valid = {0:.6f}".format(
                                        params_running[i].result.acc_valid)))
                if not os.path.exists(
                        file_path("AOE_MODEL_{0}.param".format(
                            i))) and not os.path.exists(
                                file_path("AOE_MODEL_{0}.result".format(i))):
                    with FileLock(file_path("AOE_MODEL_{0}.lock".format(i))):
                        for params_index in range(len(params)):
                            if not params[params_index].running and params[
                                    params_index].retry > 0:
                                params[params_index].index = params_index
                                params[params_index].running = True
                                params[
                                    params_index].time_budget = time_budget - (
                                        time.time() - start_time)
                                params_running[i] = params[params_index]
                                save_data(
                                    file_path("AOE_MODEL_{0}.param".format(i)),
                                    params[params_index])
                                LOGGER.info(
                                    "Start Model {0}".format(params_index))
                                break
            if time.time() - start_time >= time_budget - 5:
                break
            if_continue = False
            for i in range(len(params)):
                if params[i].result is None:
                    if_continue = True
                    break
            if not if_continue:
                break

        os.system(
            "kill -9 `ps -ef | grep AutoGraphModel.py | awk '{print $2}' `")
        LOGGER.info("Start merge the result")
        params_result = []
        for i in range(len(params)):
            if params[i].result is not None:
                params_result.append(params[i])
        LOGGER.info("Num of result: {0}".format(len(params_result)))
        for i in range(len(params_result)):
            for j in range(i + 1, len(params_result)):
                if params_result[i].result.acc_valid > params_result[
                        j].result.acc_valid:
                    params_result[i], params_result[j] = params_result[
                        j], params_result[i]
        params_result = params_result[-4:]
        # The block below?
        # params_result.reverse()
        # for i in range(1, len(params_result)):
        #     if params_result[i].result.acc_valid + 0.01 < params_result[0].result.acc_valid:
        #         params_result = params_result[0:i]
        #         break
        # params_result.reverse()
        # The block above?
        for param in params_result:
            LOGGER.info("Final Model {0} {1}".format(param.index, param.model))
        result = [item.result.result for item in params_result]

        # ensemble
        torch.backends.cudnn.deterministic = True
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model = ModelEnsemble(num_features=data.num_features,
                              num_class=data.num_class)
        data.split_train_valid()
        model = model.to(device)
        mask_train, mask_valid, mask_test, y = data.mask_train, data.mask_valid, data.mask_test, data.y
        mask_train = mask_train.to(device)
        mask_valid = mask_valid.to(device)
        mask_test = mask_test.to(device)
        y = y.to(device)
        for i in range(len(result)):
            result[i] = result[i].to(device)
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=0.005,
                                     weight_decay=5e-4)

        epoch = 1
        best_loss_train = float("inf")
        best_loss_valid = float("inf")
        best_result = None
        best_epoch = 0
        while best_epoch + 10 >= epoch:
            model.train()
            optimizer.zero_grad()
            predict = model(result)
            loss_train = nll_loss(predict[mask_train], y[mask_train])
            loss_valid = nll_loss(predict[mask_valid], y[mask_valid])
            loss_train.backward()
            optimizer.step()
            if loss_valid < best_loss_valid:
                best_loss_train = loss_train
                best_loss_valid = loss_valid
                best_result = predict
                best_epoch = epoch
            epoch += 1
        LOGGER.info("Finish merge the result")
        return best_result[mask_test].max(1)[1].cpu().numpy().flatten()
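
Examples no. 5 and 6 coordinate worker processes through files guarded by FileLock, with save_data() and load_data() doing the serialization. Their implementation is not shown here; a minimal pickle-based sketch that would fit the two-argument calls above (the use of pickle is an assumption):

import pickle


def save_data(path, obj):
    """Serialize obj to path; callers hold a FileLock around the write."""
    with open(path, "wb") as f:
        pickle.dump(obj, f)


def load_data(path):
    """Load an object previously written by save_data."""
    with open(path, "rb") as f:
        return pickle.load(f)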
Example no. 7
    def save(self):
        """Save perspective point fixes."""
        tools.save_data(self.pitch, self.points,
                        'calibrations/perspective.json')
Example no. 8
def run_experiment(arglist):

    # Get the experiment parameters
    p = tools.Params("context_dmc")
    p.set_by_cmdline(arglist)

    # Open up the stimulus window
    calib.monitorFolder = "./calib"
    mon = calib.Monitor(p.monitor_name)
    m = tools.WindowInfo(p, mon)
    win = visual.Window(**m.window_kwargs)

    # Set up the stimulus objects
    fix = visual.PatchStim(win, tex=None, mask="circle",
                           color=p.fix_color, size=p.fix_size)
    r_fix = visual.PatchStim(win, tex=None, mask="circle",
                             color=p.fix_resp_color, size=p.fix_size)
    grate = visual.PatchStim(win, "sin", p.stim_mask, sf=p.stim_sf,
                             size=p.stim_size, contrast=p.stim_contrast)
    color = visual.PatchStim(win, None, p.stim_mask,
                             size=p.stim_size, opacity=.4)
    disk = visual.PatchStim(win, tex=None, mask="gauss",
                            color=win.color, size=p.stim_size / 6)
    stims = [grate, color, disk, fix]

    # Set up the cue stimuli
    color_text = visual.TextStim(win, text="color")
    orient_text = visual.TextStim(win, text="orient")
    cue_stims = dict(color=color_text, orient=orient_text)

    # Get the schedule for this run
    sched_file = "schedules/run_%02d.csv" % p.run
    s = pandas.read_csv(sched_file)

    # Convenience map
    context_map = ["color", "orient"]

    # Draw the instructions and wait to go
    instruct = dedent("""
    Say whether the two stimuli in each trial match
    on the relevant category dimension

    1 = match                          2 = nonmatch

    Experimenter: press space to begin""")
    tools.WaitText(win, instruct, height=.7)(check_keys=["space"])

    # Possibly wait for the scanner
    if p.fmri:
        tools.wait_for_trigger(win, p)

    # Start a data file and write the params to it
    f, fname = tools.start_data_file(p.subject, p.experiment_name, p.run)
    p.to_text_header(f)

    # Save the params to json
    save_name = op.join("./data", op.splitext(fname)[0])
    p.to_json(save_name)

    # Write the datafile header
    header = ["trial", "context", "match",
              "samp_color", "samp_orient",
              "samp_color_cat", "samp_orient_cat",
              "targ_color", "targ_orient",
              "targ_color_cat", "targ_orient_cat",
              "cue_time", "block_time",
              "psi_time", "isi_time", "iti_time",
              "response", "rt", "acc"]
    tools.save_data(f, *header)

    # Start a clock and flush the event buffer
    total_time = 0
    exp_clock = core.Clock()
    trial_clock = core.Clock()
    block_clock = core.Clock()
    event.clearEvents()

    # Main experiment loop
    # --------------------
    try:

        # Dummy scans
        fix.draw()
        win.flip()
        dummy_secs = p.dummy_trs * p.tr
        total_time += dummy_secs
        wait_check_quit(dummy_secs, p.quit_keys)

        for t in s.trial:

            # Get this trial's context
            context = context_map[s.context[t]]

            # Get this trial's timing info
            psi_secs = s.psi_tr[t] * p.tr
            isi_secs = p.stim_sfix_dur + (s.isi_tr[t] * p.tr)
            iti_secs = s.iti_tr[t] * p.tr
            block_time = (p.cue_dur + psi_secs +
                          p.stim_samp_dur + isi_secs +
                          p.stim_targ_dur + p.resp_dur + iti_secs)
            total_time += block_time

            # Cue period
            cue_stims[context].draw()
            win.flip()
            cue_time = exp_clock.getTime()
            block_clock.reset()
            core.wait(p.cue_dur)

            # Pre-stim fixation (PSI)
            fix.draw()
            win.flip()
            wait_check_quit(psi_secs, p.quit_keys)

            # Sample stimulus
            a_cat = s.attend_cat[t]
            a_exemp = s.attend_exemp[t]

            i_cat = s.ignore_cat[t]
            i_exemp = s.ignore_exemp[t]

            c_cat = a_cat if context == "color" else i_cat
            o_cat = a_cat if context == "orient" else i_cat
            c_exemp = a_exemp if context == "color" else i_exemp
            o_exemp = a_exemp if context == "orient" else i_exemp

            if context == "color":
                samp_color = p.cat_colors[a_cat][a_exemp]
                samp_orient = p.cat_orients[i_cat][i_exemp]
            else:
                samp_color = p.cat_colors[i_cat][i_exemp]
                samp_orient = p.cat_orients[a_cat][a_exemp]

            color.setColor(samp_color)
            grate.setOri(samp_orient)

            draw_all(*stims)
            win.flip()
            core.wait(p.stim_samp_dur)

            # Post stim fix and ISI
            fix.draw()
            win.flip()
            wait_check_quit(isi_secs, p.quit_keys)

            # Target stimulus
            match = s.match[t]
            idx2 = randint(2)
            idx3 = randint(3)
            if match:
                if context == "color":
                    t_c_cat = a_cat
                    t_o_cat = idx2
                elif context == "orient":
                    t_c_cat = idx2
                    t_o_cat = a_cat
            else:
                if context == "color":
                    t_c_cat = int(not a_cat)
                    t_o_cat = idx2
                elif context == "orient":
                    t_c_cat = idx2
                    t_o_cat = int(not a_cat)
            t_c_exemp, t_o_exemp = idx3, idx3
            targ_color = p.cat_colors[t_c_cat][t_c_exemp]
            targ_orient = p.cat_orients[t_o_cat][t_o_exemp]

            color.setColor(targ_color)
            grate.setOri(targ_orient)

            draw_all(*stims)
            win.flip()
            core.wait(p.stim_targ_dur)

            # Response
            r_fix.draw()
            trial_clock.reset()
            event.clearEvents()
            win.flip()
            core.wait(p.resp_dur)

            # Collect the response
            corr, resp, resp_rt = collect_response(p, trial_clock, match)

            # ITI interval
            # Go by screen refreshes for precise timing
            iti_time = total_time - exp_clock.getTime()
            iti_frames = int(iti_time * 60) # 60 Hz monitor
            for frame in xrange(iti_frames):
                fix.draw()
                win.flip()

            # Possibly check for late response
            if resp == -1:
                corr, resp, resp_rt = collect_response(p, trial_clock, match)
            else:
                check_quit()

            # Write out the trial data
            t_data = [t, context, match,
                      c_exemp, o_exemp,
                      c_cat, o_cat,
                      t_c_exemp, t_o_exemp,
                      t_c_cat, t_o_cat,
                      cue_time, block_time,
                      psi_secs, isi_secs, iti_secs,
                      resp, resp_rt, corr]
            tools.save_data(f, *t_data)

    finally:
        # Clean up
        f.close()
        win.close()

    # Calculate some performance data and print it to the screen
    data = csv2rec(op.join("data", fname))
    accuracy = data["acc"].mean()
    rt = data["rt"][data["rt"] > 0].mean()
    missed = (data["response"] == -1).sum()

    print "Run: %d" % p.run
    print "Accuracy: %.2f" % accuracy
    print "Mean RT: %.4f" % rt
    print "Missed responses: %d" % missed