Example #1
def train():
    train_list, test_list = read_train_test_stems()
    train_list = [FLAGS.data_dir + fn for fn in train_list]
    test_list = [FLAGS.data_dir + fn for fn in test_list]
    # train_list=['/home/liuyuan/data/S3DIS/sampled_train_nolimits/'+fn for fn in train_list]
    # test_list=['/home/liuyuan/data/S3DIS/sampled_test_nolimits/'+fn for fn in test_list]

    train_provider = Provider(train_list, FLAGS.batch_size, read_pkl, True)
    test_provider = Provider(test_list, FLAGS.batch_size, read_pkl, False)

    pls = build_placeholder(FLAGS.batch_size)
    batch_num_per_epoch = 2000 / FLAGS.batch_size
    ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'], pls['is_training'],
                    batch_num_per_epoch)

    feed_dict = {}
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(config=config)
    saver = tf.train.Saver(max_to_keep=500)
    if FLAGS.restore:
        saver.restore(sess, FLAGS.restore_model)
    else:
        sess.run(tf.global_variables_initializer())

    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=sess.graph)

    for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
        train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                        epoch_num, feed_dict)
        test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                       feed_dict)
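
The Provider in these training scripts is an iterable batch loader with a close() method. As a rough sketch of the interface Example #1 assumes (a hypothetical stand-in, not the project's actual implementation, which likely prefetches in background workers; later examples use a different constructor that also takes a 'train'/'test' string and a read_fn(model, filename)):

import random

class Provider(object):
    """Hypothetical stand-in: yields fixed-size batches read from a file list."""

    def __init__(self, file_list, batch_size, read_fn, shuffle):
        self.file_list = list(file_list)
        self.batch_size = batch_size
        self.read_fn = read_fn
        self.shuffle = shuffle

    def __iter__(self):
        files = self.file_list[:]
        if self.shuffle:
            random.shuffle(files)
        batch = []
        for fn in files:
            batch.append(self.read_fn(fn))
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if batch:  # trailing partial batch
            yield batch

    def close(self):
        # the real class would shut down its worker processes here
        pass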
Example #2
def get_first_and_last_date(db_root):
    source_names = get_all_provider_names(db_root)

    p = Provider(db_root, source_names[0])
    all_days = p.get_all_days()

    return utils.make_date_from_string(all_days[0]), utils.make_date_from_string(all_days[-1])
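
A hypothetical call (the db_root path is a placeholder); note that taking all_days[0] and all_days[-1] assumes get_all_days() returns days in chronological order:

first_date, last_date = get_first_and_last_date('/path/to/db_root')
print(first_date, last_date)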
Example #3
def get_statistics_from_last_update_for_all_sources(db_root):
    last_metainfo_by_day = dict()

    # fetch the summary of the last day for every source
    for source_name in get_all_provider_names(db_root):
        p = Provider(db_root, source_name)
        all_days = p.get_all_days()
        if all_days:
            last_day = utils.get_latest_day(all_days)
            last_metainfo_by_day[source_name] = (last_day, p.get_cached_metainfos_for_day(last_day))

    # not every source has data for the most recent day; find the real last day
    last_days = set([v[0] for k, v in last_metainfo_by_day.items()])
    real_last_day = utils.get_latest_day(last_days)

    # build the overall metainfos using only the sources which have data for the real last day
    overall_metainfo = defaultdict(int)
    provider_count = 0
    for name, data in last_metainfo_by_day.items():
        day, metainfos = data
        if day == real_last_day:
            provider_count += 1
            for k, v in metainfos.items():
                overall_metainfo[k] += v

    overall_metainfo.update(dict(provider_count=provider_count))
    return overall_metainfo
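
For concreteness, a small worked illustration of the aggregation above (source names and counts are invented):

# last_metainfo_by_day = {
#     'source_a': ('2012-01-31', {'articles': 40, 'errors': 1}),
#     'source_b': ('2012-01-31', {'articles': 25, 'errors': 0}),
#     'source_c': ('2012-01-30', {'articles': 33, 'errors': 2}),
# }
# real_last_day == '2012-01-31', so only source_a and source_b contribute:
# returns {'articles': 65, 'errors': 1, 'provider_count': 2}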
Example #4
def eval():
    test_list=['data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)]
    def read_fn(model,fn):
        data=read_pkl(fn)
        return data[0],data[2],data[3],data[11]

    test_provider = Provider(test_list,'test',FLAGS.batch_size*FLAGS.num_gpus,read_fn)

    try:
        pls=build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch=11000/FLAGS.num_gpus
        ops=train_ops(pls['xyzs'],pls['feats'],pls['lbls'],pls['is_training'],batch_num_per_epoch)

        feed_dict={}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess,FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,graph=sess.graph)
        test_one_epoch(ops,pls,sess,saver,test_provider,0,feed_dict,summary_writer)

    finally:
        test_provider.close()
Example #5
def eval():
    train_list, test_list = get_block_train_test_split()
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]

    def fn(model, filename):
        data = read_pkl(filename)
        return data[0], data[2], data[3], data[4], data[12]

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
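
These train/eval scripts read their hyperparameters from a module-level FLAGS object. A plausible TensorFlow 1.x definition (flag names match the usages above; the defaults are invented):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('data_dir', 'data/', 'root directory of the sampled blocks')
flags.DEFINE_integer('batch_size', 1, 'blocks per GPU per step')
flags.DEFINE_integer('num_gpus', 4, 'number of GPU towers')
flags.DEFINE_string('train_dir', 'train/', 'where summaries and checkpoints go')
flags.DEFINE_boolean('restore', False, 'resume from restore_model')
flags.DEFINE_string('restore_model', '', 'checkpoint path to restore')
flags.DEFINE_integer('restore_epoch', 0, 'epoch to resume from')
flags.DEFINE_integer('train_epoch_num', 500, 'total training epochs')
flags.DEFINE_string('eval_model', '', 'checkpoint path to evaluate')
FLAGS = flags.FLAGS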
Example #6
def eval():
    train_list, test_list = get_block_train_test_split()
    test_list = ['data/S3DIS/room_block_10_10/' + fn for fn in test_list]

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = {}
        pls['xyzs'], pls['lbls'], pls['rgbs'], pls['covars'], pls['nidxs'], \
        pls['nidxs_lens'], pls['nidxs_bgs'], pls['cidxs'] = [], [], [], [], [], [], [], []
        pls['weights'] = []
        for i in xrange(FLAGS.num_gpus):
            pls['xyzs'].append(
                tf.placeholder(tf.float32, [None, 3], 'xyz{}'.format(i)))
            pls['rgbs'].append(
                tf.placeholder(tf.float32, [None, 3], 'rgb{}'.format(i)))
            pls['covars'].append(
                tf.placeholder(tf.float32, [None, 9], 'covar{}'.format(i)))
            pls['lbls'].append(
                tf.placeholder(tf.int64, [None], 'lbl{}'.format(i)))
            pls['nidxs'].append(
                tf.placeholder(tf.int32, [None], 'nidxs{}'.format(i)))
            pls['nidxs_lens'].append(
                tf.placeholder(tf.int32, [None], 'nidxs_lens{}'.format(i)))
            pls['nidxs_bgs'].append(
                tf.placeholder(tf.int32, [None], 'nidxs_bgs{}'.format(i)))
            pls['cidxs'].append(
                tf.placeholder(tf.int32, [None], 'cidxs{}'.format(i)))
            pls['weights'].append(
                tf.placeholder(tf.float32, [None], 'weights{}'.format(i)))

        pmiu = neighbor_anchors_v2()
        pls['is_training'] = tf.placeholder(tf.bool, name='is_training')
        pls['pmiu'] = tf.placeholder(tf.float32, name='pmiu')

        batch_num_per_epoch = 2500 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['rgbs'], pls['covars'], pls['lbls'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pmiu.shape[1], pls['is_training'],
                        batch_num_per_epoch, pls['pmiu'], pls['weights'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
Example #7
    def __init__(self):
        """Init the connexion to Dropbox.

        Returns:
        The Dropbox client.
        """
        Provider.__init__(self, 'dropbox')
        self.getToken()
Example #8
    def __init__(self, resource_type):
        '''
        Constructor

        @see:  nublic_resource.Provider.__init__
        '''
        Provider.__init__(self, resource_type)
        setup_all(create_tables=True)
Example #9
    def write(self):
        mkdir_p(self.path)

        db_handler = DatabaseHandler("SampleDB", pkg=self.pkg)
        provider = Provider(classname="ItemProvider", pkg=self.pkg)

        db_triggers = DatabaseTriggers(pkg=self.pkg)
        db_triggers.add(*self.triggers)

        db_views = DatabaseViews(pkg=self.pkg)
        db_views.add(*self.views)

        # Generate dbitem files
        for table in self.tables:
            item = DBItem(table, pkg=self.pkg)
            filename = item.classname + ".java"
            fpath = os.path.join(self.path, filename)
            with open(fpath, 'w') as javafile:
                javafile.write(str(item))

            # Add to other classes
            db_handler.add_dbitems(item)
            provider.add_dbitems(item)

        # Abstract DBItem
        fpath = os.path.join(self.path,
                             "DBItem.java")
        with open(fpath, 'w') as javafile:
            javafile.write(dbitem.DBITEM_CLASS.format(pkg=self.pkg))

        # Triggers
        fpath = os.path.join(self.path,
                             "DatabaseTriggers.java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(db_triggers))

        # Views
        fpath = os.path.join(self.path,
                             "DatabaseViews.java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(db_views))

        # Database handler
        fpath = os.path.join(self.path,
                             db_handler.classname + ".java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(db_handler))

        # Provider
        fpath = os.path.join(self.path,
                             provider.classname + ".java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(provider))

        # And print manifest stuff
        self.print_manifest(provider)
Example #10
    def test_compute_elevation(self):
        provider = Provider("mongodb://localhost:27017/windmobile", "AIzaSyAK2QNa8fWYCDK1o3McUP4--qdNtzl-wsQ")

        # Le Suchet
        elevation, is_peak = provider.compute_elevation(46.7724, 6.4662)
        self.assertTrue(is_peak)

        # Mont Poupet
        elevation, is_peak = provider.compute_elevation(46.9722, 5.86472)
        self.assertFalse(is_peak)
Example #11
def get_summed_statistics_for_all_sources(db_root):
    overall_metainfo = defaultdict(int)
    for source_name in get_all_provider_names(db_root):
        p = Provider(db_root, source_name)
        source_metainfo = p.get_cached_metainfos()

        for k, v in source_metainfo.items():
            overall_metainfo[k] += v

    return overall_metainfo
Example #12
    def __init__(self):
        """Init the connexion to Google Drive.

        A credentials file is used to store the token and to renew it.

        Returns:
            The drive service.
        """

        Provider.__init__(self, 'googledrive')
Example #13
async def main(websocket, path):
    async for message in websocket:
        data = json.loads(message.replace('\n', ''))
        answer = dict()
        answer[names.STATUS] = 200
        answer[names.EVENT] = data[names.EVENT]
        USERS[data.get(names.PAYLOAD).get(names.ID_USER)] = websocket
        if data[names.EVENT] == names.GET_HISTORY:
            if data[names.PAYLOAD].get(names.ID_ROOM):
                result = Provider.get_history_room(data[names.PAYLOAD])
            else:
                result = Provider.get_history(data[names.PAYLOAD])
            answer[names.BODY] = result
            await websocket.send(json.dumps(answer))
        elif data[names.EVENT] == names.GET_ROOMS:
            if data[names.PAYLOAD].get(names.ID_USER):
                result = Provider.get_rooms_user(data[names.PAYLOAD])
            else:
                result = Provider.get_rooms(data[names.PAYLOAD])
            answer[names.BODY] = result
            await websocket.send(json.dumps(answer))
        elif data[names.EVENT] == names.JOIN:
            result = Provider.join_room(data[names.PAYLOAD])
            answer[names.BODY] = result
            await websocket.send(json.dumps(answer))
        elif data[names.EVENT] == names.LEAVE:
            Provider.leave_room(data[names.PAYLOAD])
            await websocket.send(json.dumps(answer))
        elif data[names.EVENT] == names.SEND_MESSAGE:
            Provider.send_message(data[names.PAYLOAD])
            await notify_users(websocket, data.get(names.PAYLOAD))
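
The handler signature (websocket, path) matches the classic API of the websockets library; a minimal, assumed bootstrap (host and port are placeholders, and main, USERS, names, Provider and notify_users are taken to be defined in the same module):

import asyncio
import websockets

start_server = websockets.serve(main, 'localhost', 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()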
Example #14
def get_queue_errors_for_all_sources(db_root):
    source_names = get_all_provider_names(db_root)

    all_errors = list()
    for name in source_names:
        p = Provider(db_root, name)
        errors = p.get_queue_errors()
        if len(errors):
            all_errors.append((name, errors))

    return all_errors
Example #15
def get_queue_error_count_for_all_sources(db_root, day_count=300):
    source_names = get_all_provider_names(db_root)

    all_errors = list()
    for name in source_names:
        p = Provider(db_root, name)
        error_count = p.get_queue_error_count_for_last_days(day_count)
        if error_count:
            all_errors.append((name, error_count))

    return all_errors
Example #16
def playMatchesBetweenVersions(env,
                               run_version,
                               player1version,
                               player2version,
                               EPISODES,
                               logger,
                               turns_until_tau0,
                               goes_first=0):
    env = Game()
    if player1version == -1:
        player1 = User("user1", env.state_size, env.action_size)
    else:
        player1_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE,
                                  env.input_shape, env.action_size,
                                  config.HIDDEN_CNN_LAYERS)

        if player1version > 0:
            name = env.name + "{0:0>4}".format(player1version)
            if Provider.getNetByName(name) is None:
                return
            player1_network = player1_NN.read(env.name, run_version,
                                              player1version)
            player1_NN.model.set_weights(player1_network.get_weights())
        netName = env.name + "{0:0>4}".format(player1version)
        player1 = Agent(netName, env.state_size, env.action_size,
                        config.MCTS_SIMS, config.CPUCT, player1_NN)

    if player2version == -1:
        name = input('enter username: ')
        user2 = Provider.getPersonByName(name)
        player2 = User(user2.name, env.state_size, env.action_size)
    else:
        player2_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE,
                                  env.input_shape, env.action_size,
                                  config.HIDDEN_CNN_LAYERS)

        if player2version > 0:
            name = env.name + "{0:0>4}".format(player2version)
            if Provider.getNetByName(name) is None:
                return
            player2_network = player2_NN.read(env.name, run_version,
                                              player2version)
            player2_NN.model.set_weights(player2_network.get_weights())
        net2Name = env.name + "{0:0>4}".format(player2version)
        player2 = Agent(net2Name, env.state_size, env.action_size,
                        config.MCTS_SIMS, config.CPUCT, player2_NN)

    scores, memory, points, sp_scores = playMatches(player1, player2, EPISODES,
                                                    logger, turns_until_tau0,
                                                    None, goes_first)

    return (scores, memory, points, sp_scores)
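
By the conventions above, a version of -1 selects a human player, 0 an untrained network, and a positive number loads the saved weights for that version. A hypothetical invocation (all argument values are illustrative):

scores, memory, points, sp_scores = playMatchesBetweenVersions(
    env, run_version=1, player1version=3, player2version=7,
    EPISODES=20, logger=logger, turns_until_tau0=10, goes_first=0)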
Example #17
def get_data_from_provider_update_db(connection, control):
    # Fetch the data from the provider
    p = Provider()
    data = p.get_data()
    data = build_graph(data)

    # Write the data to the database
    cur = connection.cursor()
    r = control.upsert_request(data)
    cur.execute(r)
    connection.commit()
    cur.close()
    return data
Example #18
    def write(self):
        mkdir_p(self.path)

        db_handler = DatabaseHandler("SampleDB", pkg=self.pkg)
        provider = Provider(classname="ItemProvider", pkg=self.pkg)

        db_triggers = DatabaseTriggers(pkg=self.pkg)
        db_triggers.add(*self.triggers)

        db_views = DatabaseViews(pkg=self.pkg)
        db_views.add(*self.views)

        # Generate dbitem files
        for table in self.tables:
            item = DBItem(table, pkg=self.pkg)
            filename = item.classname + ".java"
            fpath = os.path.join(self.path, filename)
            with open(fpath, 'w') as javafile:
                javafile.write(str(item))

            # Add to other classes
            db_handler.add_dbitems(item)
            provider.add_dbitems(item)

        # Abstract DBItem
        fpath = os.path.join(self.path, "DBItem.java")
        with open(fpath, 'w') as javafile:
            javafile.write(dbitem.DBITEM_CLASS.format(pkg=self.pkg))

        # Triggers
        fpath = os.path.join(self.path, "DatabaseTriggers.java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(db_triggers))

        # Views
        fpath = os.path.join(self.path, "DatabaseViews.java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(db_views))

        # Database handler
        fpath = os.path.join(self.path, db_handler.classname + ".java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(db_handler))

        # Provider
        fpath = os.path.join(self.path, provider.classname + ".java")
        with open(fpath, 'w') as javafile:
            javafile.write(str(provider))

        # And print manifest stuff
        self.print_manifest(provider)
Example #19
def test_data_iter():
    from provider import Provider, default_unpack_feats_labels
    from draw_util import output_points, get_class_colors
    import time
    import random

    train_list, test_list = get_block_train_test_split()
    random.shuffle(train_list)
    train_list = ['data/S3DIS/room_block_10_10/' + fn for fn in train_list]
    test_list = ['data/S3DIS/room_block_10_10/' + fn for fn in test_list]

    train_provider = Provider(train_list, 'train', 4, read_fn)
    test_provider = Provider(test_list, 'test', 4, read_fn)
    print len(train_list)
    try:
        begin = time.time()
        i = 0
        for data in test_provider:
            i += 1
            pass
        print 'batch_num {}'.format(i * 4)
        print 'test set cost {} s'.format(time.time() - begin)
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            pass
        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        train_provider.close()
        test_provider.close()
Example #20
def get_summary_from_last_update_for_all_sources(db_root):
    source_names = get_all_provider_names(db_root)

    last_update = list()
    for name in source_names:
        p = Provider(db_root, name)
        all_days = p.get_all_days()
        if all_days:
            last_day = utils.get_latest_day(all_days)

            summary = p.get_cached_metainfos_for_day(last_day)
            last_update.append((name, utils.make_date_from_string(last_day), summary))

    return last_update
Example #21
def test_read_semantic_dataset():
    from provider import Provider, default_unpack_feats_labels
    train_list, test_list = get_semantic3d_block_train_list()
    # print train_list
    # exit(0)
    train_list = [
        'data/Semantic3D.Net/block/sampled/merged/' + fn for fn in train_list
    ]
    test_list = [
        'data/Semantic3D.Net/block/sampled/merged/' + fn for fn in test_list
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train', 4, read_fn)
    test_provider = Provider(test_list, 'test', 4, read_fn)

    try:
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            cxyzs, rgbs, covars, lbls, = default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                print len(cxyzs[k])

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        train_provider.close()
        test_provider.close()
Example #22
def executeCommand():
	pluginHandle = int(sys.argv[1])
	success = False

	if ( mycgi.EmptyQS() ):
		success = ShowProviders()
	else:
		(providerName, clearCache, testForwardedIP) = mycgi.Params( u'provider', u'clearcache', u'testforwardedip' )

		if clearCache != u'':
			httpManager.ClearCache()
			return True
		
		elif testForwardedIP != u'':
			provider = Provider()
			provider.addon = addon

			httpManager.SetDefaultHeaders( provider.GetHeaders() )
			forwardedIP = provider.CreateForwardedForIP('0.0.0.0')
			
			return TestForwardedIP(forwardedIP)
			
		elif providerName != u'':
			log(u"providerName: " + providerName, xbmc.LOGDEBUG)
			if providerName != u'':
				provider = providerfactory.getProvider(providerName)
				
				if provider is None:
					# ProviderFactory return none for providerName: %s
					logException = LoggingException(language(30000) % providerName)
					# 'Cannot proceed', Error processing provider name
					logException.process(language(30755), language(30020), xbmc.LOGERROR)
					return False
				
				if provider.initialise(httpManager, sys.argv[0], pluginHandle, addon, language, PROFILE_DATA_FOLDER, RESOURCE_PATH):
					success = provider.ExecuteCommand(mycgi)
					log (u"executeCommand done", xbmc.LOGDEBUG)

				"""
				print cookiejar
				print 'These are the cookies we have received so far :'

				for index, cookie in enumerate(cookiejar):
					print index, '  :  ', cookie
				cookiejar.save() 
				"""

	return success
Example #23
class Reducer:
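    # NB: `provider` and `data` are class attributes created at import time;
    # every Reducer instance appends into the same shared `data` list.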
    provider = Provider()
    data = []

    def __init__(self):
        data = self.provider.data
        inputData = []
        targetData = []
        for d in data:
            inputData.append(d[0])
            targetData.append(d[1])
        X = np.array(inputData)
        pca = PCA(n_components=10)
        XX = pca.fit_transform(X)
        for i, t in enumerate(targetData):
            self.data.append([XX[i], t])

    def getLearnData(self):
        return self.data[:int(len(self.data) / 3)]

    def getValidationData(self):
        return self.data[int(len(self.data) / 3):int((2 * len(self.data)) / 3)]

    def getTestData(self):
        return self.data[int((2 * len(self.data)) / 3):]
Example #24
def ProviderCreator(n, max_mu, max_provider_bid, max_provider_skill):
  creatures = []

  for i in np.arange(n):
    creatures.append(Provider(max_mu, max_provider_bid, max_provider_skill))

  return np.array(creatures)
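
A hypothetical call, assuming numpy is imported as np and the Provider constructor shown above:

providers = ProviderCreator(100, max_mu=5.0, max_provider_bid=10.0,
                            max_provider_skill=3.0)
print(len(providers))  # 100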
Example #25
    def get_urls(self):
        """ Return the URL patterns. """
        pat = Provider.get_urls(self)
        if self.forms:
            pat.append(url(r'choicescombo.js$', self.get_choices_combo_src))
            pat.append(url(r'(?P<formname>\w+).js$', self.get_form))
        return pat
Example #26
def train():
    test_set = [
        'sg27_station4_intensity_rgb', 'bildstein_station1_xyz_intensity_rgb'
    ]
    train_list, test_list = get_context_train_test(test_set)
    train_list = [
        'data/Semantic3D.Net/context/block_avg/' + fn for fn in train_list
    ]
    test_list = [
        'data/Semantic3D.Net/context/block_avg/' + fn for fn in test_list
    ]
    read_fn = lambda model, filename: read_pkl(filename)
    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 5000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['ctx_pts'],
                        pls['ctx_idxs'], pls['lbls'], pls['is_training'],
                        batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        sess.run(tf.global_variables_initializer())
        if FLAGS.restore:
            all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            all_vars = [
                var for var in all_vars if not var.name.startswith('tower')
            ]
            restore_saver = tf.train.Saver(var_list=all_vars)
            restore_saver.restore(sess, FLAGS.restore_model)
        else:
            # redundant: variables were already initialized unconditionally above
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example #27
    def getNewToken(self):
        """ Get a new token for a new app """
        Provider.__init__(self, 'dropbox')
        print 'testing connection to ' + self.provider_name
        flow = dropbox.client.DropboxOAuth2FlowNoRedirect(
            self.app_key, self.app_secret)
        r = requests.get(flow.start())
        if r.status_code == 200:
            print "url:", flow.start()
            print "Please authorize in the browser. After you're done, press enter."
            auth_code = raw_input().strip()
            access_token, _ = flow.finish(auth_code)
            config = ConfigParser.ConfigParser()
            config.readfp(open('conf.ini'))
            config.set('dropbox', 'token', access_token)
            # NOTE: the updated config is never written back to conf.ini here
        else:
            return False
        return True
Example #28
def ProviderCreator(n, max_mu, max_provider_bid, time_unit):

    creatures = []

    for _ in range(n):
        creatures.append(Provider(max_mu, max_provider_bid, time_unit))
        #creatures.append(Provider(max_mu, max_provider_bid, max_provider_skill))

    return np.array(creatures)
Example #29
    def __init__(self, backup, *args, **kw):
        Fuse.__init__(self, *args, **kw)
        config = Config()
        self.provider = Provider.getInstance('Virgin Media', config)
        resulttype, result = self.provider.login(config.username, config.password, backup)
        if resulttype != "ERROR":
            print 'Init complete.'
        else:
            print result[0]['message']
Example #30
async def notify_users(websocket, payload):
    USERS[payload.get(names.ID_USER)] = websocket
    users = Provider.get_users_by_room(payload)
    users_send = []
    for i in users:
        user = USERS.get(i.get(names.ID_USER))
        if user is not None:
            users_send.append(user)
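    # NOTE: asyncio.wait() raises ValueError on an empty collection, so this
    # assumes at least one recipient is still connected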
    await asyncio.wait([u.send(json.dumps(payload)) for u in users_send])
Example #31
def train():
    train_list, test_list = get_block_train_test_split()
    # test_list=['data/S3DIS/sampled_train/'+fn for fn in train_list[:2]]
    train_list = ['data/S3DIS/sampled_train/' + fn for fn in train_list]
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]

    def fn(model, filename):
        data = read_pkl(filename)
        return data[0], data[2], data[3], data[4], data[12]

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

            base_var = [
                var for var in tf.trainable_variables() if
                var.name.startswith('base') or var.name.startswith('class_mlp')
            ]
            base_saver = tf.train.Saver(base_var)
            base_saver.restore(sess, FLAGS.base_restore)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example #32
def train():
    with open('cached/scannet_train_filenames.txt', 'r') as f:
        train_list = [line.strip('\n') for line in f.readlines()]
    train_list = [
        'data/ScanNet/sampled_train/{}'.format(fn) for fn in train_list
    ]
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)
    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 11000 / FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pls['weights'],
                        pmiu.shape[1], pls['is_training'], batch_num_per_epoch,
                        pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example #33
def train():
    train_list, test_list = prepare_input_list('data/S3DIS/point/fpfh/',
                                               FLAGS.batch_size)
    fetch_data_with_batch = functools.partial(fetch_data,
                                              batch_size=FLAGS.batch_size)
    train_provider = Provider(train_list,
                              1,
                              fetch_data_with_batch,
                              'train',
                              4,
                              fetch_batch,
                              max_worker_num=1)
    test_provider = Provider(test_list,
                             1,
                             fetch_data_with_batch,
                             'test',
                             4,
                             fetch_batch,
                             max_worker_num=1)
    trainset = ProviderMultiGPUWrapper(FLAGS.num_gpus, train_provider)
    testset = ProviderMultiGPUWrapper(FLAGS.num_gpus, test_provider)

    try:
        pls = {}
        pls['feats'] = tf.placeholder(tf.float32, [None, 39], 'feats')
        pls['labels'] = tf.placeholder(tf.int64, [None], 'labels')
        pls['is_training'] = tf.placeholder(tf.bool, [], 'is_training')
        ops = train_ops(pls['feats'], pls['labels'], pls['is_training'],
                        train_provider.batch_num)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=500)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.train_epoch_num):
            test_one_epoch(ops, pls, sess, saver, testset, epoch_num)
            train_one_epoch(ops, pls, sess, summary_writer, trainset,
                            epoch_num)

    finally:
        train_provider.close()
        test_provider.close()
Example #34
def test_semantic_read_pkl():
    from provider import Provider, default_unpack_feats_labels
    train_list, test_list = get_semantic3d_block_train_list()
    train_list = [
        'data/Semantic3D.Net/block/sampled/train_merge/{}.pkl'.format(i)
        for i in xrange(231)
    ]
    test_list = [
        'data/Semantic3D.Net/block/sampled/test/' + fn for fn in test_list
    ]
    simple_read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train', 4, simple_read_fn)
    # test_provider = Provider(test_list,'test',4,simple_read_fn)

    print len(train_list)
    try:
        # begin = time.time()
        # i = 0
        # for data in test_provider:
        #     i += 1
        #     pass
        # print 'batch_num {}'.format(i * 4)
        # print 'test set cost {} s'.format(time.time() - begin)

        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            if i % 2500 == 0:
                print 'cost {} s'.format(time.time() - begin)

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        train_provider.close()
        # test_provider.close()  # test_provider is never created (its construction is commented out above)
Example #35
def executeCommand():
    pluginHandle = int(sys.argv[1])
    success = False

    if (mycgi.EmptyQS()):
        success = ShowProviders()
    else:
        (providerName, clearCache,
         testForwardedIP) = mycgi.Params(u'provider', u'clearcache',
                                         u'testforwardedip')

        if clearCache != u'':
            httpManager.ClearCache()
            return True

        elif testForwardedIP != u'':
            provider = Provider()
            provider.addon = addon

            httpManager.SetDefaultHeaders(provider.GetHeaders())
            forwardedIP = provider.CreateForwardedForIP('0.0.0.0')

            return TestForwardedIP(forwardedIP)

        elif providerName != u'':
            log(u"providerName: " + providerName, xbmc.LOGDEBUG)
            if providerName != u'':
                provider = providerfactory.getProvider(providerName)

                if provider is None:
                    # ProviderFactory return none for providerName: %s
                    logException = LoggingException(
                        language(30000) % providerName)
                    # 'Cannot proceed', Error processing provider name
                    logException.process(language(30755), language(30020),
                                         xbmc.LOGERROR)
                    return False

                if provider.initialise(httpManager, sys.argv[0], pluginHandle,
                                       addon, language, PROFILE_DATA_FOLDER,
                                       RESOURCE_PATH):
                    success = provider.ExecuteCommand(mycgi)
                    log(u"executeCommand done", xbmc.LOGDEBUG)
                """
				print cookiejar
				print 'These are the cookies we have received so far :'

				for index, cookie in enumerate(cookiejar):
					print index, '  :  ', cookie
				cookiejar.save() 
				"""

    return success
Example #36
def eval():
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 11000 / FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pls['weights'],
                        pmiu.shape[1], pls['is_training'], batch_num_per_epoch,
                        pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
Example #37
def eval():
    from semantic3d_context_util import get_context_train_test
    test_set = [
        'sg27_station4_intensity_rgb', 'bildstein_station1_xyz_intensity_rgb'
    ]
    train_list, test_list = get_context_train_test(test_set)
    test_list = ['data/Semantic3D.Net/context/block/' + fn for fn in test_list]

    def read_fn(model, fn):
        xyzs, rgbs, covars, lbls, ctx_xyzs, ctx_idxs, block_mins = read_pkl(fn)
        return xyzs, rgbs, covars, lbls, block_mins

    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, read_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)

        batch_num_per_epoch = 2000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver = tf.train.Saver(max_to_keep=500)
        saver.restore(sess, FLAGS.eval_model)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)
        test_one_epoch(ops, pls, sess, saver, test_provider, 0, feed_dict,
                       summary_writer)

    finally:
        test_provider.close()
Example #38
def train():
    train_list, test_list = get_block_train_test_split()
    train_list = ['data/S3DIS/sampled_train/' + fn for fn in train_list]
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]
    fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        pmiu = neighbor_anchors_v2()

        batch_num_per_epoch = 2000 / FLAGS.num_gpus
        ops = train_ops(pls['cxyzs'], pls['dxyzs'], pls['rgbs'], pls['covars'],
                        pls['vlens'], pls['vlens_bgs'], pls['vcidxs'],
                        pls['cidxs'], pls['nidxs'], pls['nidxs_lens'],
                        pls['nidxs_bgs'], pls['lbls'], pmiu.shape[1],
                        pls['is_training'], batch_num_per_epoch, pls['pmiu'])

        feed_dict = {}
        feed_dict[pls['pmiu']] = pmiu
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example #39
class Selector:
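    # NB: `provider` is constructed once, at class-definition time, and `data`
    # is a class attribute shared by every Selector instance.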
    data = []
    provider = Provider()
    outputs = 20

    def __init__(self):
        inputData = []
        targetData = []
        for d in self.provider.data:
            inputData.append(d[0])
            targetData.append(d[1][0])
        # feature extraction
        lab_enc = preprocessing.LabelEncoder()
        encoded = lab_enc.fit_transform(targetData)
        test = SelectKBest(score_func=chi2, k=self.outputs)
        fit = test.fit(inputData, encoded)
        # summarize scores
        fields = zip(fit.scores_, self.provider.dataCols)
        self.sortedFields = sorted(fields, key=lambda x: x[0],
                                   reverse=True)[0:self.outputs]
        # print(sortedFields[0:self.outputs])
        features = fit.transform(inputData)
        for i, t in enumerate(targetData):
            self.data.append([features[i], [t]])
        self.multiplier = self.provider.multiplier

    def getLearnData(self):
        return self.data[:int(len(self.data) / 3)]

    def getValidationData(self):
        return self.data[int(len(self.data) / 3):int((2 * len(self.data)) / 3)]

    def getTestData(self):
        return self.data[int((2 * len(self.data)) / 3):]

    def getDataRanges(self):
        ranges = []
        for cel in self.data[0][0]:
            ranges.append([cel, cel])
        for row in self.data:
            for i, cel in enumerate(row[0]):
                if ranges[i][0] > cel:
                    ranges[i][0] = cel
                if ranges[i][1] < cel:
                    ranges[i][1] = cel
        return ranges

    def getInputCount(self):
        return self.outputs
Example #40
def train():
    import random
    train_list, test_list = get_block_train_test_split()
    train_list = [
        'data/S3DIS/sampled_train_nolimits/' + fn for fn in train_list
    ]
    random.shuffle(train_list)
    test_list = ['data/S3DIS/sampled_test_nolimits/' + fn for fn in test_list]

    def test_fn(model, filename):
        xyzs, rgbs, covars, lbls, block_mins = read_pkl(filename)
        return xyzs, rgbs, covars, lbls, block_mins

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, test_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, test_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        var_list = [
            var for var in tf.trainable_variables()
            if not var.name.startswith('class_mlp')
        ]
        saver = tf.train.Saver(max_to_keep=500, var_list=var_list)
        saver.restore(sess, FLAGS.restore_model)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, train_provider, epoch_num,
                            feed_dict)
            test_one_epoch(ops, pls, sess, test_provider, epoch_num, feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example #41
def train():
    train_list, test_list = get_block_train_test_split()
    train_list = [
        'data/S3DIS/sampled_train_nolimits/' + fn for fn in train_list
    ]
    # train_list=['data/S3DIS/sampled_train_no_aug/'+fn for fn in train_list]
    # with open('cached/s3dis_merged_train.txt', 'r') as f:
    #     train_list=[line.strip('\n') for line in f.readlines()]
    random.shuffle(train_list)
    test_list = ['data/S3DIS/sampled_test_nolimits/' + fn for fn in test_list]

    train_provider = Provider(train_list, 'train',
                              FLAGS.batch_size * FLAGS.num_gpus, test_fn)
    test_provider = Provider(test_list, 'test',
                             FLAGS.batch_size * FLAGS.num_gpus, test_fn)

    try:
        pls = build_placeholder(FLAGS.num_gpus)
        batch_num_per_epoch = 2000 / FLAGS.num_gpus
        ops = train_ops(pls['xyzs'], pls['feats'], pls['lbls'],
                        pls['is_training'], batch_num_per_epoch)

        feed_dict = {}
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver = tf.train.Saver(max_to_keep=500)
        if FLAGS.restore:
            saver.restore(sess, FLAGS.restore_model)
        else:
            sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for epoch_num in xrange(FLAGS.restore_epoch, FLAGS.train_epoch_num):
            train_one_epoch(ops, pls, sess, summary_writer, train_provider,
                            epoch_num, feed_dict)
            test_one_epoch(ops, pls, sess, saver, test_provider, epoch_num,
                           feed_dict)

    finally:
        train_provider.close()
        test_provider.close()
Example #42
    def loginSubmit(self):
        self.loginButton.setEnabled(False)
        global backupInstance, config
        backupInstance = Provider.getInstance(self.providerBox.currentText(), config)
        resulttype, result = backupInstance.login(str(self.usernameEdit.text()), str(self.passwordEdit.text()), config.backupName)
        if resulttype != "ERROR":
            # success
            self.hide()

            global mainwin, choosebackuplocation
            choosebackuplocation = BackupLocationWindow()
            choosebackuplocation.show()
            mainwin = choosebackuplocation
        else:
            KMessageBox.error(None, result[0]['message'])

        self.loginButton.setEnabled(True)
Example #43
    def _runAction(self, resource, action):
        utils.log("Performing action %s on %s" % (action, resource))

        providerClass = Provider.resolve(self, resource.__class__.__name__, resource.provider)
        provider = providerClass(resource)

        try:
            providerAction = getattr(provider, 'action_%s' % action)
        except AttributeError:
            raise Fail("%r does not implement action %s" % (provider, action))

        providerAction()

        if resource.isUpdated:
            for action, res in resource.subscriptions['immediate']:
                utils.log("%s sending %s action to %s (immediate)" % (resource, action, res))
                self._runAction(res, action)

            for action, res in resource.subscriptions['delayed']:
                utils.log("%s sending %s action to %s (delayed)" % (resource, action, res))

            self.delayedActions |= resource.subscriptions['delayed']
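
The dispatcher above resolves a provider class for the resource and then looks up a method named action_<verb> with getattr. A minimal, hypothetical provider compatible with that protocol (the class and action names are invented for illustration):

class PackageProvider(object):
    """Any method named action_<verb> becomes dispatchable by _runAction."""

    def __init__(self, resource):
        self.resource = resource

    def action_install(self):
        # reached via getattr(provider, 'action_install')()
        print("installing %s" % self.resource)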
Example #44
    def __init__(self, *args, **kwds):
        self._trovebox = trovebox.Trovebox()
        self._photo_count = None
        Provider.__init__(self, *args, **kwds)
Example #45
    def __init__(self):
        Provider.__init__(self)
Example #46
    def do_provider(self, _args):
        """
Interact with exported providers on the device
        """
        subconsole = Provider(self.session)
        subconsole.cmdloop()