def initializeMainGui():
    g.init('gui/MotilityTracking.ui')
    g.m.trackView = Window(np.zeros((3, 3, 3)))
    g.m.histogram = Histogram(title='Mean Single Lag Distance Histogram', labels={'left': 'Count', 'bottom': 'Mean SLD Per Track (Pixels)'})
    g.m.trackPlot = TrackPlot()
    g.m.trackPlot.tracksChanged.connect(update_plots)
    g.m.trackView.imageview.addItem(g.m.trackPlot)
    g.m.trackView.imported = pg.ScatterPlotItem()
    g.m.trackView.imageview.addItem(g.m.trackView.imported)

    g.m.actionImportBin.triggered.connect(lambda: open_file_gui(lambda f: import_mat(open_bin(f)), prompt='Import .bin file of tracks', filetypes='*.bin'))
    g.m.actionImportBackground.triggered.connect(lambda: open_file_gui(set_background_image, prompt='Select tif file as background image', filetypes='*.tif *.tiff *.stk'))
    g.m.actionImportCoordinates.triggered.connect(lambda: open_file_gui(import_coords, prompt='Import coordinates from txt file', filetypes='*.txt'))
    g.m.actionSimulateDistances.triggered.connect(lambda: save_file_gui(simulate_distances, prompt='Save simulated distances', filetypes='*.txt'))
    g.m.actionExportMSD.triggered.connect(lambda: save_file_gui(exportMSD, prompt='Export Mean Squared Displacement Values', filetypes='*.txt'))
    g.m.actionExportHistogram.triggered.connect(lambda: save_file_gui(g.m.histogram.export, prompt='Export Histogram Values', filetypes='*.txt'))
    g.m.actionExportTrackLengths.triggered.connect(lambda: save_file_gui(export_track_lengths, prompt='Export Track Lengths', filetypes='*.txt'))
    g.m.actionExportOutlined.triggered.connect(g.m.trackPlot.export_gui)
    g.m.actionExportDistances.triggered.connect(lambda: save_file_gui(export_real_distances, prompt='Export Distances', filetypes='*.txt'))
    
    g.m.MSLDMinSpin.setOpts(value=0, decimals=2, maximum=1000)
    g.m.MSLDMaxSpin.setOpts(value=100, decimals=2, maximum=1000)
    g.m.neighborsSpin.setOpts(value=0, maximum=100, int=True, step=1)
    g.m.neighborDistanceSpin.setOpts(value=1, decimals=2, maximum=100)
    g.m.minLengthSpin.setOpts(value=4, maximum=1000, int=True, step=1)
    g.m.maxLengthSpin.setOpts(value=20, maximum=1000, int=True, step=1)

    g.m.MSLDGroupBox.toggled.connect(g.m.trackPlot.filter)
    g.m.additionalGroupBox.toggled.connect(g.m.trackPlot.filter)
    g.m.MSLDMaxSpin.sigValueChanged.connect(g.m.trackPlot.filter)
    g.m.MSLDMinSpin.sigValueChanged.connect(g.m.trackPlot.filter)
    g.m.neighborDistanceSpin.sigValueChanged.connect(g.m.trackPlot.filter)
    g.m.neighborsSpin.sigValueChanged.connect(g.m.trackPlot.filter)
    g.m.minLengthSpin.sigValueChanged.connect(g.m.trackPlot.filter)
    g.m.maxLengthSpin.sigValueChanged.connect(g.m.trackPlot.filter)
    g.m.hideBackgroundCheck.toggled.connect(lambda v: g.m.trackView.imageview.getImageItem().setVisible(not v))
    g.m.ignoreOutsideCheck.toggled.connect(g.m.trackPlot.filter)
    g.m.plotMeansCheck.toggled.connect(g.m.trackPlot.means.setVisible)
    g.m.plotTracksCheck.toggled.connect(g.m.trackPlot.tracks.setVisible)

    g.m.viewTab.layout().insertWidget(0, g.m.trackView)

    g.m.MSDWidget = pg.PlotWidget(title='Mean Squared Displacement Per Lag', labels={'left': 'Mean Squared Distance (p^2)', 'bottom': 'Lag Count'})
    g.m.analysisTab.layout().addWidget(g.m.MSDWidget)
    
    g.m.analysisTab.layout().addWidget(g.m.histogram)

    g.m.CDFWidget = CDFWidget()  # pg.PlotWidget(title='Cumulative Distribution Function', labels={'left': 'Cumulative Probability', 'bottom': 'Single Lag Displacement Squared'})
    # g.m.CDFPlot = pg.PlotCurveItem()
    g.m.cdfTab.layout().addWidget(g.m.CDFWidget)

    g.m.installEventFilter(mainWindowEventEater)
    g.m.setWindowTitle('Motility Tracking')
    g.m.show()
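
# All of the examples on this page revolve around the same idiom: a small
# module that owns process-wide state and exposes an init() that populates it
# exactly once. A minimal sketch of such a module (illustrative only, not
# taken from any of the repositories shown here):

# global_vars.py
def init():
    """Create the shared state; other modules import this module and read it."""
    global settings, trackbar_enabled
    settings = {}
    trackbar_enabled = False
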
Example #2
# Main script to start the game
import pygame
import random
from dungeonGen import dMap
from items import Block
from player_monster import PlayerMonster
import global_vars

global_vars.init()

PLAYER_SPEED = 3

# Pygame start
pygame.init()

screen_width = 800
screen_height = 600

screen = pygame.display.set_mode([screen_width, screen_height])

# This is a list of every sprite in the game
all_sprites_list = pygame.sprite.Group()

# items we can't pass through
all_items_list = pygame.sprite.Group()

# list of all the blocks in the game (for the dungeon)
block_list = pygame.sprite.Group()

actor_list = pygame.sprite.Group()
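
# A sketch of the event/draw loop these sprite groups would feed (standard
# pygame pattern; dungeon generation via dMap is omitted here):
clock = pygame.time.Clock()
done = False
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    all_sprites_list.update()       # move player/monsters
    screen.fill((0, 0, 0))
    all_sprites_list.draw(screen)
    pygame.display.flip()
    clock.tick(60)                  # cap at 60 FPS
pygame.quit()
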
def main():
    """Main function"""
    # construct the argument parser and parse the arguments

    # init global variables
    g.init()

    import sys
    from optparse import OptionParser

    # Configure command line options
    parser = OptionParser()
    parser.add_option("-i", "--input_file", dest="input_file",
                      help="Input video/image file")
    parser.add_option("-o", "--output_file", dest="output_file",
                      help="Output (destination) video/image file")
    parser.add_option("-m", "--method", dest="method", default='m1',
                      help="m1 -> for method 1 - m2 -> for method 2 ...")
    parser.add_option("-v", "--video", dest="video_mode", default='cv2',
                      help="video_mode choice cv2 or movepi")
    parser.add_option("-P", "--preview_only",
                      action="store_true", default=False,
                      help="preview annoted video/image")
    parser.add_option("-I", "--image_only",
                      action="store_true", dest="image_only", default=False,
                      help="Annotate image (defaults to annotating video)")
    parser.add_option("-T", "--trackbars",
                      dest="TRACKBARS", action="store_true", default=False,
                      help="Enable debug trackbars.")

    # Get and parse command line options
    options, args = parser.parse_args()
    print('options:', options)
    print('args:', args)

    input_file = options.input_file
    output_file = options.output_file
    image_only = options.image_only
    preview_only = options.preview_only
    video_mode = options.video_mode
    method = options.method
    g.trackbar_enabled = options.TRACKBARS

    if preview_only:
        if video_mode == 'movepi':
            if method == 'm1':
                annotate_video_preview_1(input_file)
            elif method == 'm2':
                annotate_video_preview_2(input_file)
            elif method == 'm3':
                annotate_video_preview_3(input_file)
            elif method == 'm4':
                annotate_video_preview_4(input_file)
            elif method == 'm5':
                remove_shadows_video_preview(input_file)
        elif video_mode == 'cv2':
            if method == 'm1':
                lane_detector_method_1(input_file)
            elif method == 'm2':
                lane_detector_method_2(input_file)
            elif method == 'm3':
                lane_detector_method_3(input_file)
            elif method == 'm4':
                lane_detector_method_4(input_file)
            elif method == 'm5':
                remove_shadows(input_file)
        sys.exit()

    if image_only:
        if method == 'm1':
            annotate_image_1(input_file, output_file)
        elif method == 'm2':
            annotate_image_2(input_file, output_file)
        elif method == 'm3':
            annotate_image_3(input_file, output_file)
        elif method == 'm4':
            annotate_image_4(input_file, output_file)
        elif method == 'm5':
            remove_shadows_image(input_file, output_file)
    else:
        if method == 'm1':
            annotate_video_1(input_file, output_file)
        elif method == 'm2':
            annotate_video_2(input_file, output_file)
        elif method == 'm3':
            annotate_video_3(input_file, output_file)
        elif method == 'm4':
            annotate_video_4(input_file, output_file)
        elif method == 'm5':
            remove_shadows_video(input_file, output_file)
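
# Example invocations of the script above (the script name is a placeholder;
# the flags come from the OptionParser configuration):
#
#   python lane_annotate.py -i project_video.mp4 -o out.mp4 -m m2 -v cv2
#   python lane_annotate.py -i test5.jpg -o out.jpg -m m1 --image_only
#   python lane_annotate.py -i project_video.mp4 -m m3 --preview_only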
Example #5
def agent(i, X_shard, Y_shard, t, gpu_id, return_dict, X_test, Y_test, lr=None):
    tf.keras.backend.set_learning_phase(1)

    args = gv.init()
    if lr is None:
        lr = args.eta
    print('Agent %s on GPU %s' % (i, gpu_id))
    # set environment
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t, allow_pickle=True)
    shard_size = len(X_shard)

    if 'theta{}'.format(gv.mal_agent_index) in return_dict.keys():
        pre_theta = return_dict['theta{}'.format(gv.mal_agent_index)]
    else:
        pre_theta = None

    # if i == 0:
    #     # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    #     eval_success, eval_loss = eval_minimal(X_test,Y_test,shared_weights)
    #     print('Global success at time {}: {}, loss {}'.format(t,eval_success,eval_loss))

    if args.steps is not None:
        num_steps = args.steps
    else:
        num_steps = int(args.E * shard_size / args.B)

    # with tf.device('/gpu:'+str(gpu_id)):
    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
        # y = tf.placeholder(dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS), dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'census':
        agent_model = census_model_1()
    elif args.dataset == 'CIFAR-10':
        agent_model = cifar10_model()
    else:
        return

    logits = agent_model(x)

    # both census and image datasets use the same sparse softmax loss
    # (a sigmoid variant for census is left commented out)
    # loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y, logits=logits))
    prediction = tf.nn.softmax(logits)

    if args.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(
            learning_rate=lr).minimize(loss)
    elif args.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=lr).minimize(loss)

    if args.k > 1:
        config = tf.ConfigProto(gpu_options=gv.gpu_options)
        # config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
    elif args.k == 1:
        sess = tf.Session()
    else:
        return
    tf.compat.v1.keras.backend.set_session(sess)
    sess.run(tf.global_variables_initializer())

    if pre_theta is not None:
        theta = pre_theta - gv.moving_rate * (pre_theta - shared_weights)
    else:
        theta = shared_weights
    agent_model.set_weights(theta)
    # print('loaded shared weights')

    start_offset = 0
    if args.steps is not None:
        start_offset = (t * args.B * args.steps) % (shard_size - args.B)

    for step in range(num_steps):
        offset = (start_offset + step * args.B) % (shard_size - args.B)
        X_batch = X_shard[offset: (offset + args.B)]
        Y_batch = Y_shard[offset: (offset + args.B)]
        Y_batch_uncat = np.argmax(Y_batch, axis=1)
        _, loss_val = sess.run([optimizer, loss], feed_dict={x: X_batch, y: Y_batch_uncat})
        if step % 1000 == 0:
            print('Agent %s, Step %s, Loss %s, offset %s' % (i, step, loss_val, offset))
            # local_weights = agent_model.get_weights()
            # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
            # print('Agent {}, Step {}: success {}, loss {}'.format(i,step,eval_success,eval_loss))

    local_weights = agent_model.get_weights()
    local_delta = local_weights - shared_weights

    # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    eval_success, eval_loss = eval_minimal(X_test, Y_test, local_weights)

    print('Agent {}: success {}, loss {}'.format(i, eval_success, eval_loss))

    return_dict[str(i)] = np.array(local_delta)
    return_dict["theta{}".format(i)] = np.array(local_weights)

    np.save(gv.dir_name + 'ben_delta_%s_t%s.npy' % (i, t), local_delta)

    return
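
# A sketch of how a driver could launch agent() above, mirroring the
# multiprocessing pattern used in the eval loop elsewhere on this page
# (the shard lists, num_agents, num_gpus and t are placeholders):
from multiprocessing import Manager, Process

manager = Manager()
return_dict = manager.dict()
processes = []
for i in range(num_agents):
    p = Process(target=agent,
                args=(i, X_shards[i], Y_shards[i], t, i % num_gpus,
                      return_dict, X_test, Y_test))
    processes.append(p)
    p.start()
for p in processes:
    p.join()
local_deltas = [return_dict[str(i)] for i in range(num_agents)]
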
import global_vars

global_vars.init()
b = global_vars.cvar.b
if b != "string b":
    raise RuntimeError("Unexpected string: " + b)
global_vars.cvar.b = "a string value"
b = global_vars.cvar.b
if b != "a string value":
    raise RuntimeError("Unexpected string: " + b)

x = global_vars.cvar.x
if x != 1234:
    raise RuntimeError("Unexpected x: " + str(x))
global_vars.cvar.x = 9876
x = global_vars.cvar.x
if x != 9876:
    raise RuntimeError("Unexpected string: " + str(x))

fail = True
try:
    global_vars.cvar.notexist = "something"
except AttributeError:
    fail = False
if fail:
    raise RuntimeError("AttributeError should have been thrown")

fail = True
try:
    g = global_vars.cvar.notexist
except AttributeError:
    fail = False
if fail:
    raise RuntimeError("AttributeError should have been thrown")
Example #7
            elif selection == 1:
                p_name = input("Select a Party name: ")
                try:
                    assert p_name + "_party.json" not in listFiles('.')

                    obj_party = party.Party(p_name)

                    while True:
                        print("Let's create our Party Members now.")
                        try:
                            m_cnt = input(
                                "How many Heroes would you like to add? ")
                            m_cnt = int(m_cnt)
                            for cnt in range(1, m_cnt + 1):
                                print(cnt)
                        except Exception as err:
                            print("[Exception] :: %s" % err)
                        break
                    printJson(obj_party)
                except AssertionError:
                    print("That Party Name already exists!!! Start again...")

        else:
            print('Invalid selection')


gv.init()  # call only once
createParty()
        return_dict = manager.dict()
        return_dict['eval_success'] = 0.0
        return_dict['eval_loss'] = 0.0
        if args.mal:
            return_dict['mal_suc_count'] = 0
        for t in range(args.T):
            if not os.path.exists(gv.dir_name + 'global_weights_t%s.npy' % t):
                print('No directory found for iteration %s' % t)
                break
            if args.mal:
                p_eval = Process(target=eval_func,
                                 args=(X_test, Y_test_uncat, t, return_dict,
                                       mal_data_X, mal_data_Y))
            else:
                p_eval = Process(target=eval_func,
                                 args=(X_test, Y_test_uncat, t, return_dict))

            p_eval.start()
            p_eval.join()

        if args.mal:
            print('Malicious agent succeeded in %s of %s iterations' %
                  (return_dict['mal_suc_count'], (t - 1) * args.mal_num))


if __name__ == "__main__":
    args = gv.init()
    tf.set_random_seed(777)
    np.random.seed(777)
    main(args)
Example #9
    crawler.signals.connect(reactor_control.remove_crawler, signal=signals.spider_closed)
    spider = crawler.spiders.create(spider_name, index=index, username=username, passwd=passwd)
    crawler.crawl(spider)
    reactor_control.add_crawler()
    crawler.start()
    log.msg('Crawler %d started...' % index, log.INFO)


if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print('usage: python %s <uid-list-path>' % sys.argv[0])
        sys.exit(0)

    uid_list_path = sys.argv[1]
    # initialize global variables
    global_vars.init(uid_list_path)

    spider_name = 'userinfo'

    reactor_control = ReactorControl()
    log.start(loglevel=log.DEBUG, logstdout=False)
    settings = get_project_settings()
    #crawler = Crawler(settings)
    # get Weibo account credentials from the project settings
    weibo_accounts = settings.getlist('WEIBO_USER_ACCOUNTS')

    for i in range(len(weibo_accounts)):
        setup_crawler(spider_name, i, weibo_accounts[i]['username'],
                      weibo_accounts[i]['passwd'])

    reactor.run()
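
# ReactorControl is not shown in this example; a plausible minimal version
# (an assumption, not the original implementation) counts live crawlers and
# stops the Twisted reactor once the last spider closes:
from twisted.internet import reactor

class ReactorControl(object):
    def __init__(self):
        self.crawlers_running = 0

    def add_crawler(self):
        self.crawlers_running += 1

    def remove_crawler(self):
        self.crawlers_running -= 1
        if self.crawlers_running == 0:
            reactor.stop()
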
Example #10
    if g.combined_filter_type == 'MAG_DIR_HLS_HSV':
        combined[(((mag_bin == 1) & (dir_bin == 1))) | ((hls_bin == 1) |
                                                        (hsv_bin == 1))] = 1

    if g.combined_filter_type == 'DEFAULT' or g.combined_filter_type == 'SOBEL_MAG_DIR_HLS_HSV':
        combined[((abs_bin == 1) | ((mag_bin == 1) & (dir_bin == 1))) |
                 ((hls_bin == 1) | (hsv_bin == 1))] = 1

    return combined, abs_bin, mag_bin, dir_bin, hls_bin, hsv_bin  # DEBUG


if __name__ == '__main__':
    # img_file = 'test_images/straight_lines1.jpg'
    img_file = 'test_images/test5.jpg'

    g.init()
    img = mpimg.imread(img_file)

    img = cv2.undistort(img, g.mtx, g.dist, None, g.mtx)

    combined, abs_bin, mag_bin, dir_bin, hls_bin, hsv_bin = combined_thresh(
        img)

    plt.subplot(2, 3, 1)
    plt.imshow(abs_bin, cmap='gray', vmin=0, vmax=1)
    plt.subplot(2, 3, 2)
    plt.imshow(mag_bin, cmap='gray', vmin=0, vmax=1)
    plt.subplot(2, 3, 3)
    plt.imshow(dir_bin, cmap='gray', vmin=0, vmax=1)
    plt.subplot(2, 3, 4)
    plt.imshow(hls_bin, cmap='gray', vmin=0, vmax=1)
    plt.subplot(2, 3, 5)
    plt.imshow(hsv_bin, cmap='gray', vmin=0, vmax=1)
    plt.subplot(2, 3, 6)
    plt.imshow(combined, cmap='gray', vmin=0, vmax=1)
    plt.show()
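
# The binaries consumed above (abs_bin, mag_bin, dir_bin, ...) come from
# standard Sobel/color thresholding; a sketch of a gradient-magnitude
# threshold (kernel size and threshold values are illustrative):
import cv2
import numpy as np

def mag_thresh(img, sobel_kernel=3, thresh=(50, 255)):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    mag = np.sqrt(sobelx ** 2 + sobely ** 2)
    scaled = np.uint8(255 * mag / np.max(mag))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return binary
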
def mal_agent(X_shard, Y_shard, mal_data_X, mal_data_Y, t, gpu_id, return_dict,
              mal_visible, X_test, Y_test):
    args = gv.init()

    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t,
                             allow_pickle=True)
    if 'theta{}'.format(gv.mal_agent_index) in return_dict.keys():
        pre_theta = return_dict['theta{}'.format(gv.mal_agent_index)]
    else:
        pre_theta = None

    holdoff_flag = 0
    if 'holdoff' in args.mal_strat:
        print('Checking holdoff')
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, shared_weights)
            if target_conf == 1:
                print('Holding off')
                holdoff_flag = 1

    # tf.reset_default_graph()

    tf.keras.backend.set_learning_phase(1)

    print('Malicious Agent on GPU %s' % gpu_id)
    # set environment
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS),
                           dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'CIFAR-10':
        agent_model = cifar10_model()
    elif args.dataset == 'census':
        agent_model = census_model_1()
    else:
        return

    logits = agent_model(x)
    prediction = tf.nn.softmax(logits)
    eval_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    config = tf.ConfigProto(gpu_options=gv.gpu_options)
    # config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    tf.keras.backend.set_session(sess)

    if t >= args.mal_delay and holdoff_flag == 0:
        if args.mal_obj == 'all':
            final_delta = mal_all_algs(x, y, logits, agent_model,
                                       shared_weights, sess, mal_data_X,
                                       mal_data_Y, t)
        elif args.mal_obj == 'single' or 'multiple' in args.mal_obj:
            final_delta, penul_delta = mal_single_algs(
                x, y, logits, agent_model, shared_weights, sess, mal_data_X,
                mal_data_Y, t, mal_visible, X_shard, Y_shard, pre_theta)
        else:
            return
    elif t < args.mal_delay or holdoff_flag == 1:
        print('Delay/Hold-off')
        final_delta, _ = benign_train(x, y, agent_model, logits, X_shard,
                                      Y_shard, sess, shared_weights)
    else:
        return

    final_weights = shared_weights + final_delta
    agent_model.set_weights(final_weights)

    print('---Eval at mal agent---')
    if 'single' in args.mal_obj:
        target, target_conf, actual, actual_conf = mal_eval_single(
            mal_data_X, mal_data_Y, final_weights)
        print(
            'Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
            % (target, target_conf, t, actual, actual_conf))
    elif 'multiple' in args.mal_obj:
        suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                            final_weights)
        print('%s of %s targets achieved' % (suc_count_local, args.mal_num))

    eval_success, eval_loss = eval_minimal(X_test, Y_test, final_weights)
    return_dict['mal_success'] = eval_success
    print('Malicious Agent: success {}, loss {}'.format(
        eval_success, eval_loss))
    write_dict = dict()
    # just to maintain ordering
    write_dict['t'] = t + 1
    write_dict['eval_success'] = eval_success
    write_dict['eval_loss'] = eval_loss
    file_write(write_dict, purpose='mal_eval_loss')

    return_dict[str(gv.mal_agent_index)] = np.array(final_delta)
    return_dict["theta{}".format(gv.mal_agent_index)] = np.array(final_weights)
    np.save(gv.dir_name + 'mal_delta_t%s.npy' % t, final_delta)

    if 'auto' in args.mal_strat or 'multiple' in args.mal_obj:
        penul_weights = shared_weights + penul_delta
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, penul_weights)
            print(
                'Penul weights ---- Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
                % (target, target_conf, t, actual, actual_conf))
        elif 'multiple' in args.mal_obj:
            suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                                penul_weights)
            print('%s of %s targets achieved' %
                  (suc_count_local, args.mal_num))

        eval_success, eval_loss = eval_minimal(X_test, Y_test, penul_weights)
        print('Penul weights ---- Malicious Agent: success {}, loss {}'.format(
            eval_success, eval_loss))

    return
def mal_single_algs(x, y, logits, agent_model, shared_weights, sess,
                    mal_data_X, mal_data_Y, t, mal_visible, X_shard, Y_shard,
                    pre_theta):
    # alg_num = 2
    args = gv.init()

    alpha_m = 1.0 / args.k

    print(mal_visible)

    if args.gar == 'avg':
        delta_other_prev = est_accuracy(mal_visible, t)

    if pre_theta is None:
        start_weights = shared_weights
        constrain_weights = shared_weights
    else:
        start_weights = pre_theta - gv.moving_rate * (pre_theta -
                                                      shared_weights)
        constrain_weights = pre_theta - gv.moving_rate * (pre_theta -
                                                          shared_weights)

    if len(mal_visible) >= 1 and 'prev_1' in args.mal_strat:
        # Starting with weights that account for other agents
        start_weights = shared_weights + delta_other_prev
        print('Alg 1: Adding benign estimate')

    if 'dist' in args.mal_strat:
        if 'dist_oth' in args.mal_strat and t >= 1:
            constrain_weights = start_weights + delta_other_prev
        else:
            final_delta, _ = benign_train(x, y, agent_model, logits, X_shard,
                                          Y_shard, sess, shared_weights)
            constrain_weights = start_weights + final_delta
            tf.keras.backend.set_session(sess)
    elif 'add_ben' in args.mal_strat or 'unlimited' in args.mal_strat:
        ben_delta, loss_val_shard = benign_train(x, y, agent_model, logits,
                                                 X_shard, Y_shard, sess,
                                                 shared_weights)

    loss1 = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    mal_loss1 = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    prediction = tf.nn.softmax(logits)

    if 'dist' in args.mal_strat:
        # Adding weight based regularization
        loss, loss2, mal_loss = weight_constrain(loss1, mal_loss1, agent_model,
                                                 constrain_weights, t)
    else:
        loss = loss1
        mal_loss = mal_loss1
        loss2 = None
        weights_pl = None

    if 'adam' in args.optimizer:
        optimizer = tf.train.AdamOptimizer(
            learning_rate=args.eta).minimize(loss)
        mal_optimizer = tf.train.AdamOptimizer(
            learning_rate=args.eta).minimize(mal_loss)
    elif 'sgd' in args.optimizer:
        mal_optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=args.eta).minimize(mal_loss)
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=args.eta).minimize(loss)

    sess.run(tf.global_variables_initializer())

    if pre_theta is None:
        agent_model.set_weights(shared_weights)
    else:
        theta = pre_theta - gv.moving_rate * (pre_theta - shared_weights)
        agent_model.set_weights(theta)

    print('loaded shared weights for malicious agent')

    mal_data_Y = mal_data_Y.reshape((len(mal_data_Y), ))
    shard_size = len(X_shard)
    delta_mal = []
    for l in range(len(start_weights)):
        layer_shape = start_weights[l].shape
        delta_mal.append(np.zeros(shape=layer_shape))
    # Not including training loss
    if 'train' not in args.mal_strat:
        num_mal_epochs = args.mal_E
        step = 0
        mal_loss_val = 100
        while mal_loss_val > 1e-6 or step < num_mal_epochs:
            step_weight_start = np.array(agent_model.get_weights())
            sess.run(mal_optimizer, feed_dict={x: mal_data_X, y: mal_data_Y})
            if 'auto' in args.mal_strat:
                step_weight_end = agent_model.get_weights()
                delta_mal += (1 / args.mal_boost) * (step_weight_end -
                                                     step_weight_start)
                agent_model.set_weights(start_weights +
                                        (1 / args.mal_boost) * delta_mal)
            if step % 100 == 0:
                mal_obj_pred, mal_loss_val = sess.run([prediction, mal_loss],
                                                      feed_dict={
                                                          x: mal_data_X,
                                                          y: mal_data_Y
                                                      })
                if 'single' in args.mal_obj:
                    print(
                        'Target:%s w conf.: %s, Curr_pred at step %s:%s, Loss: %s'
                        % (mal_data_Y, mal_obj_pred[:, mal_data_Y], step,
                           np.argmax(mal_obj_pred, axis=1), mal_loss_val))
                elif 'multiple' in args.mal_obj:
                    suc_count_local = np.sum(
                        mal_data_Y == np.argmax(mal_obj_pred, axis=1))
                    print('%s of %s targets achieved at step %s, Loss: %s' %
                          (suc_count_local, args.mal_num, step, mal_loss_val))
            step += 1

    # Including training loss
    elif 'train' in args.mal_strat:
        # mal epochs different from benign epochs
        if args.mal_E > args.E:
            num_mal_epochs = args.mal_E
        else:
            num_mal_epochs = args.E
        # fixed number of steps
        if args.steps is not None:
            num_steps = args.steps
            start_offset = (t * args.B * args.steps) % (shard_size - args.B)
        else:
            num_steps = num_mal_epochs * shard_size / args.B
            start_offset = 0

        if 'alternate' in args.mal_strat:
            if 'unlimited' not in args.mal_strat:
                delta_mal_ret = alternate_train(sess, t, optimizer, loss,
                                                mal_optimizer, mal_loss, x, y,
                                                logits, X_shard, Y_shard,
                                                mal_data_X, mal_data_Y,
                                                agent_model, num_steps,
                                                start_offset, loss1, loss2)
            elif 'unlimited' in args.mal_strat:
                # train until loss matches that of benign trained
                alternate_train_unlimited(sess, t, optimizer, loss,
                                          mal_optimizer, mal_loss, x, y,
                                          logits, X_shard, Y_shard, mal_data_X,
                                          mal_data_Y, agent_model, num_steps,
                                          start_offset, loss_val_shard, loss1,
                                          loss2)
        elif 'concat' in args.mal_strat:
            # training with concatenation
            concat_train(sess, optimizer, loss, mal_optimizer, mal_loss, x, y,
                         logits, X_shard, Y_shard, mal_data_X, mal_data_Y,
                         agent_model, num_steps, start_offset)
        elif 'data_poison' in args.mal_strat:
            num_steps += (num_mal_epochs * args.data_rep) / args.B
            data_poison_train(sess, optimizer, loss, mal_optimizer, mal_loss,
                              x, y, logits, X_shard, Y_shard, mal_data_X,
                              mal_data_Y, agent_model, num_steps, start_offset)

    if 'auto' not in args.mal_strat:
        # Explicit boosting
        delta_naive_mal = agent_model.get_weights() - start_weights
        if len(mal_visible) >= 1 and 'prev_2' in args.mal_strat:
            print('Alg 2: Deleting benign estimate')
            # Algorithm 2: Adjusting weights after optimization
            delta_mal = delta_naive_mal - delta_other_prev
        elif len(mal_visible) < 1 or 'prev_2' not in args.mal_strat:
            delta_mal = delta_naive_mal

        # Boosting weights
        if ('no_boost' in args.mal_strat or 'alternate' in args.mal_strat
                or 'concat' in args.mal_strat or 'data_poison' in args.mal_strat):
            print('No boosting')
        else:
            print('Boosting by %s' % args.mal_boost)
            delta_mal = args.mal_boost * delta_mal
            if 'add_ben' in args.mal_strat:
                print('Direct addition of benign update')
                delta_mal += ben_delta

    else:
        # Implicit boosting
        print('In auto mode')
        delta_naive_mal = alpha_m * delta_mal_ret
        delta_mal = delta_mal_ret

    return delta_mal, delta_naive_mal
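
# The explicit-boosting arithmetic in one line: with k agents averaged by the
# server, a delta scaled by ~k survives the 1/k averaging (a toy check of the
# mal_boost logic above; the numbers are illustrative):
import numpy as np

k = 10                               # number of agents (args.k)
delta_mal = np.array([0.5, -0.2])    # update the malicious agent wants applied
boosted = k * delta_mal              # what it actually submits (mal_boost = k)
server_contribution = boosted / k    # after averaging across k agents
assert np.allclose(server_contribution, delta_mal)
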
def benign_train(x, y, agent_model, logits, X_shard, Y_shard, sess,
                 shared_weights):
    args = gv.init()
    print('Training benign model at malicious agent')

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    prediction = tf.nn.softmax(logits)

    if args.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(
            learning_rate=args.eta).minimize(loss)
    elif args.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=args.eta).minimize(loss)

    if args.k > 1:
        config = tf.ConfigProto(gpu_options=gv.gpu_options)
        # config.gpu_options.allow_growth = True
        temp_sess = tf.Session(config=config)
    elif args.k == 1:
        temp_sess = tf.Session()

    tf.keras.backend.set_session(temp_sess)

    temp_sess.run(tf.global_variables_initializer())

    agent_model.set_weights(shared_weights)
    shard_size = len(X_shard)

    if args.mal_E > args.E:
        num_mal_epochs = args.mal_E
    else:
        num_mal_epochs = args.E

    for step in range(int(num_mal_epochs * shard_size / args.B)):
        offset = (step * args.B) % (shard_size - args.B)
        X_batch = X_shard[offset:(offset + args.B)]
        Y_batch = Y_shard[offset:(offset + args.B)]
        Y_batch_uncat = np.argmax(Y_batch, axis=1)
        _, loss_val = temp_sess.run([optimizer, loss],
                                    feed_dict={
                                        x: X_batch,
                                        y: Y_batch_uncat
                                    })
        # if step % 100 == 0:
        #     print loss_val
    final_weights = agent_model.get_weights()
    final_delta = final_weights - shared_weights

    agent_model.set_weights(final_weights)

    num_steps_temp = int(shard_size / args.B)
    offset_temp = 0
    loss_val_shard = 0.0
    for step_temp in range(num_steps_temp):
        offset_temp = (offset + step_temp * args.B) % (shard_size - args.B)
        X_batch = X_shard[offset_temp:(offset_temp + args.B)]
        Y_batch = Y_shard[offset_temp:(offset_temp + args.B)]
        Y_batch_uncat = np.argmax(Y_batch, axis=1)
        loss_val_shard += temp_sess.run(loss,
                                        feed_dict={
                                            x: X_batch,
                                            y: Y_batch_uncat
                                        })
    loss_val_shard = loss_val_shard / num_steps_temp
    print('Average loss on the data shard %s' % loss_val_shard)

    temp_sess.close()

    return final_delta, loss_val_shard
def alternate_train(sess,
                    t,
                    optimizer,
                    loss,
                    mal_optimizer,
                    mal_loss,
                    x,
                    y,
                    logits,
                    X_shard,
                    Y_shard,
                    mal_data_X,
                    mal_data_Y,
                    agent_model,
                    num_steps,
                    start_offset,
                    loss1=None,
                    loss2=None):
    args = gv.init()
    step = 0
    num_local_steps = args.ls
    shard_size = len(X_shard)
    curr_weights = agent_model.get_weights()
    delta_mal_local = []
    for l in range(len(curr_weights)):
        layer_shape = curr_weights[l].shape
        delta_mal_local.append(np.zeros(shape=layer_shape))

    while step < num_steps:
        offset = (start_offset + step * args.B) % (shard_size - args.B)
        # Benign
        if step < num_steps:
            for l_step in range(num_local_steps):
                # training
                # print offset
                offset = (offset + l_step * args.B) % (shard_size - args.B)
                X_batch = X_shard[offset:(offset + args.B)]
                Y_batch = Y_shard[offset:(offset + args.B)]
                Y_batch_uncat = np.argmax(Y_batch, axis=1)
                if 'dist' in args.mal_strat:
                    loss1_val, loss2_val, loss_val = sess.run(
                        [loss1, loss2, loss],
                        feed_dict={
                            x: X_batch,
                            y: Y_batch_uncat
                        })
                    sess.run([optimizer],
                             feed_dict={
                                 x: X_batch,
                                 y: Y_batch_uncat
                             })
                else:
                    loss_val = sess.run([loss],
                                        feed_dict={
                                            x: X_batch,
                                            y: Y_batch_uncat
                                        })
                    sess.run([optimizer],
                             feed_dict={
                                 x: X_batch,
                                 y: Y_batch_uncat
                             })
            mal_loss_val_bef = sess.run([mal_loss],
                                        feed_dict={
                                            x: mal_data_X,
                                            y: mal_data_Y
                                        })
        # Malicious, only if mal loss is non-zero
        print(mal_loss_val_bef)
        if step >= 0 and mal_loss_val_bef[0] > 0.0:
            # print('Boosting mal at step %s' % step)
            weights_ben_local = np.array(agent_model.get_weights())
            sess.run([mal_optimizer],
                     feed_dict={
                         x: mal_data_X,
                         y: mal_data_Y
                     })
            if 'auto' in args.mal_strat:
                step_weight_end = agent_model.get_weights()
                if 'wt_o' in args.mal_strat:
                    for l in range(len(delta_mal_local)):
                        if l % 2 == 0:
                            delta_mal_local[l] += (1 / args.mal_boost) * (
                                step_weight_end[l] - weights_ben_local[l])
                else:
                    delta_mal_local += (1 / args.mal_boost) * (
                        step_weight_end - weights_ben_local)
                agent_model.set_weights(curr_weights +
                                        (1 / args.mal_boost) * delta_mal_local)
            else:
                delta_mal_local = agent_model.get_weights() - weights_ben_local
                if 'wt_o' in args.mal_strat:
                    # Boosting only weights
                    boosted_delta = delta_mal_local.copy()
                    for l in range(len(delta_mal_local)):
                        if l % 2 == 0:
                            boosted_delta[
                                l] = args.mal_boost * delta_mal_local[l]
                    boosted_weights = weights_ben_local + boosted_delta
                else:
                    boosted_weights = weights_ben_local + args.mal_boost * delta_mal_local
                agent_model.set_weights(boosted_weights)
            mal_loss_val_aft = sess.run([mal_loss],
                                        feed_dict={
                                            x: mal_data_X,
                                            y: mal_data_Y
                                        })

        if step % 10 == 0 and 'dist' in args.mal_strat:
            print(
                'Benign: Loss1 - %s, Loss2 - %s, Loss - %s; Mal: Loss_bef - %s Loss_aft - %s'
                % (loss1_val, loss2_val, loss_val, mal_loss_val_bef,
                   mal_loss_val_aft))
        elif step % 10 == 0 and 'dist' not in args.mal_strat:
            print('Benign: Loss - %s; Mal: Loss_bef - %s, Loss_aft - %s' %
                  (loss_val, mal_loss_val_bef, mal_loss_val_aft))

        if step % 100 == 0 and t < 5:
            np.save(gv.dir_name + 'mal_delta_t%s_step%s.npy' % (t, step),
                    delta_mal_local)

        step += num_local_steps

    return delta_mal_local