def get_loader(image_path, train=False, val=False, test=False):
    """Return a DataLoader for the desired VQA split.

    Exactly one of train/val/test must be True.  Images are loaded from
    ``image_path``; answers are restricted to answerable questions only
    while training, and data is shuffled only while training.
    """
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    split = VQA(utils.path_for(train=train, val=val, test=test, question=True),
                utils.path_for(train=train, val=val, test=test, answer=True),
                image_path,
                answerable_only=train,
                transform=utils.get_transform(config.image_size, test, config.central_fraction))
    # The original if/else duplicated the whole DataLoader construction just to
    # switch the batch size; select the batch size once instead.
    batch_size = config.test_batch_size if test else config.batch_size
    loader = torch.utils.data.DataLoader(
        split,
        batch_size=batch_size,
        shuffle=train,  # only shuffle the data in training
        pin_memory=True,
        num_workers=config.data_workers,
        collate_fn=collate_fn,
    )
    return loader
def get_loader(train=False, val=False, test=False):
    """Return ``(dataset, loader)`` for the requested VQA split.

    Exactly one of train/val/test must be True.  During training, when
    ``config.orig_edit_equal_batch`` is set, the batch size is scaled by
    ``config.orig_amt``.
    """
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    split = VQA(
        utils.path_for(train=train, val=val, test=test, question=True),
        utils.path_for(train=train, val=val, test=test, answer=True),
        config.preprocessed_path,
        answerable_only=train)
    if train and config.orig_edit_equal_batch:
        effective_batch = int(config.batch_size * config.orig_amt)
    else:
        effective_batch = config.batch_size
    loader = torch.utils.data.DataLoader(
        split,
        batch_size=effective_batch,
        shuffle=train,  # shuffle only while training
        pin_memory=True,
        num_workers=config.data_workers,
        collate_fn=collate_fn,
    )
    return split, loader
def main():
    """Build question/answer vocabularies from the training split and save them as JSON."""
    questions_path = utils.path_for(train=True, question=True)
    answers_path = utils.path_for(train=True, answer=True)
    with open(questions_path, 'r') as fd:
        questions = json.load(fd)
    with open(answers_path, 'r') as fd:
        answers = json.load(fd)

    questions = data.prepare_questions(questions)
    answers = data.prepare_answers(answers)

    question_vocab, question_vocabi = extract_vocab(questions, start=1)
    answer_vocab, answer_vocabi = extract_vocab(answers, top_k=config.max_answers)

    with open(config.vocabulary_path, 'w') as fd:
        json.dump({'question': question_vocab, 'answer': answer_vocab}, fd)

    # Kept from the original: echoes the inverse answer vocabulary to stdout.
    print(answer_vocabi)

    with open(config.vocabularyi_path, 'w') as fd:
        json.dump({'answeri': answer_vocabi}, fd)
def get_loader(train=False, val=False, test=False, trainval=False):
    """Return a DataLoader for the desired split.

    Exactly one of train/val/test/trainval may be True.  The test split
    has no real answers (``dummy_answers``); shuffling and answerable-only
    filtering apply to the training splits.
    """
    # Consistency fix: the sibling loaders assert exactly one split flag is
    # set; this variant previously accepted ambiguous flag combinations.
    assert train + val + test + trainval == 1, \
        'need to set exactly one of {train, val, test, trainval} to True'
    split = VQA(
        utils.path_for(train=train, val=val, test=test, trainval=trainval, question=True),
        utils.path_for(train=train, val=val, test=test, trainval=trainval, answer=True),
        config.preprocessed_trainval_path if not test else config.preprocessed_test_path,
        answerable_only=train or trainval,
        dummy_answers=test,
    )
    loader = torch.utils.data.DataLoader(
        split,
        batch_size=config.batch_size,
        shuffle=train or trainval,  # only shuffle the data in training
        pin_memory=True,
        num_workers=config.data_workers,
        collate_fn=collate_fn,
    )
    return loader
def get_loader(train=False, val=False, test=False, batch_size=config.batch_size):
    """Return a DataLoader for the requested split.

    The image directory and feature-dictionary file are selected per
    split; data is shuffled only while training.
    """
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    if train:
        split_name, im_path = 'train', config.train_path
    elif val:
        split_name, im_path = 'val', config.val_path
    else:
        split_name, im_path = 'test', config.test_path
    fdict_path = config.fdict_path + split_name + '.npy'
    dataset = VQA(
        utils.path_for(train=train, val=val, test=test, question=True),
        utils.path_for(train=train, val=val, test=test, answer=True),
        im_path,
        fdict_path,
        answerable_only=train,
    )
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=train,  # shuffle only while training
        pin_memory=True,
        num_workers=config.data_workers,
        collate_fn=collate_fn,
    )
def get_loader(train=False, val=False, test=False, batch_size=128):
    """Return a DataLoader for the chosen split.

    The test split ships without answer annotations, so it is served by
    ``TestVQA``; train/val use the regular ``VQA`` dataset.
    """
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    question_path = utils.path_for(train=train, val=val, test=test, question=True)
    if test:
        # Test data doesnt have answers
        split = TestVQA(
            question_path,
            config.preprocessed_path,
            answerable_only=False,
        )
    else:
        split = VQA(
            question_path,
            utils.path_for(train=train, val=val, test=test, answer=True),
            config.preprocessed_path,
            answerable_only=train,
        )
    return torch.utils.data.DataLoader(
        split,
        batch_size=batch_size,
        shuffle=train,  # shuffle only while training
        pin_memory=True,
        num_workers=8,
        collate_fn=collate_fn,
    )
def read_playlist(self, name):
    """Retrieve playlist data from a JSON file.

    Falls back to ``default.json`` when the named playlist cannot be
    read or parsed.
    NOTE(review): the parsed playlist is neither returned nor stored on
    ``self`` here — confirm callers rely on a side effect elsewhere.
    """
    try:
        # 'with' closes the handle even on error; the original leaked it.
        with open(os.path.join(utils.path_for('data'), '%s' % name), 'r') as fd:
            playlist = json.loads(fd.read())['playlist']
    except Exception as e:  # 'except X, e' is Python-2-only syntax
        log.error('Error %s loading playlist "%s", attempting to fallback to default' % (e, name))
        with open(os.path.join(utils.path_for('data'), 'default.json'), 'r') as fd:
            playlist = json.loads(fd.read())['playlist']
def get_VQA(train=False, val=False, test=False):
    """Construct and return the VQA dataset for exactly one split."""
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    question_path = utils.path_for(train=train, val=val, test=test, question=True)
    answer_path = utils.path_for(train=train, val=val, test=test, answer=True)
    return VQA(
        question_path,
        answer_path,
        config.preprocessed_path,
        answerable_only=train,
    )
def read_playlist(self, name):
    """Retrieve playlist data from a JSON file.

    On any failure (missing file, bad JSON, missing keys) falls back to
    the default playlist file named by ``self.filename``; in both cases
    the active playlist name is recorded in ``settings.content``.
    """
    try:
        # 'with' closes the handle even on error; the original leaked it.
        with open(os.path.join(utils.path_for('etc'), '%s' % name), 'r') as fd:
            playlist = json.loads(fd.read())['playlist']
        settings.content.playlist_name = playlist['name']
    except Exception as e:  # 'except X, e' is Python-2-only syntax
        log.error('Error %s loading playlist "%s", attempting to fallback to default' % (e, name))
        with open(os.path.join(utils.path_for('etc'), self.filename), 'r') as fd:
            playlist = json.loads(fd.read())['playlist']
        settings.content.playlist_name = playlist['name']
def read_playlist(self, name):
    """Retrieve playlist data from a JSON file.

    On any failure falls back to the default playlist file named by
    ``self.filename``; records the active playlist name in
    ``settings.content`` on both paths.
    """
    try:
        # 'with' closes the handle even on error; the original leaked it.
        with open(os.path.join(utils.path_for('etc'), '%s' % name), 'r') as fd:
            playlist = json.loads(fd.read())['playlist']
        settings.content.playlist_name = playlist['name']
    except Exception as e:  # 'except X, e' is Python-2-only syntax
        log.error(
            'Error %s loading playlist "%s", attempting to fallback to default'
            % (e, name))
        with open(os.path.join(utils.path_for('etc'), self.filename), 'r') as fd:
            playlist = json.loads(fd.read())['playlist']
        settings.content.playlist_name = playlist['name']
def do_playlist(self, item):
    """Set current playlist and persist it as this player's default.

    Always returns False.
    """
    self.playlist = item['playlist']['playlist']
    # BUG FIX: json.dumps(obj, 2) passed 2 positionally as 'skipkeys', not
    # 'indent' — the intent was clearly pretty-printing.  Also use 'with'
    # so the file handle is closed (the original bound write()'s None).
    with open(os.path.join(utils.path_for('etc'), self.filename), 'w') as fd:
        fd.write(json.dumps(item['playlist'], indent=2))
    return False
def do_playlist(self, item):
    """Set current playlist and persist it as this player's default.

    Always returns False.
    """
    self.playlist = item['playlist']['playlist']
    # BUG FIX: json.dumps(obj, 2) passed 2 positionally as 'skipkeys', not
    # 'indent' — the intent was clearly pretty-printing.  Also use 'with'
    # so the file handle is closed (the original bound write()'s None).
    with open(os.path.join(utils.path_for('etc'), self.filename), 'w') as fd:
        fd.write(json.dumps(item['playlist'], indent=2))
    return False
def wiki(page):
    """Return the page named ``page`` from the content store.

    Falls back to the 'meta/EmptyPage' placeholder when lookup fails.
    """
    s = Store(path_for(settings.content.path))
    try:
        result = s.get_page(page.lower())
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt
        # and SystemExit; narrow it to ordinary errors.
        result = s.get_page('meta/EmptyPage')
    return result
def get_reduced_loader(reduced_path, train=False, val=False):
    """Return a DataLoader over a ReducedVQA dataset for the train or val split."""
    assert train + val == 1
    dataset = ReducedVQA(
        reduced_path,
        utils.path_for(train=train, val=val, test=False, question=True),
        utils.path_for(train=train, val=val, test=False, answer=True),
        config.preprocessed_path,
        answerable_only=train,
    )
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=config.batch_size,
        shuffle=train,  # shuffle only while training
        pin_memory=True,
        num_workers=config.data_workers,
        collate_fn=collate_fn,
    )
def wiki(page):
    """Render a wiki page, falling back to the empty-page placeholder on failure."""
    s = Store(path_for(settings.content.path))
    try:
        result = s.get_page(page.lower())
    except Exception as e:  # 'except X, e' is Python-2-only syntax
        log.warn("%s rendering page %s" % (e, page))
        result = s.get_page('meta/EmptyPage')
    # The original computed 'result' but never returned it; the sibling
    # wiki() handler in this codebase returns the page.
    return result
def get_loader(train=False, val=False, test=False):
    """Return a DataLoader for the desired split."""
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    # Build the VQA dataset from the question/answer paths plus the
    # preprocessed image features; keep only answerable items while training.
    dataset = VQA(
        utils.path_for(train=train, val=val, test=test, question=True),
        utils.path_for(train=train, val=val, test=test, answer=True),
        config.preprocessed_path,
        answerable_only=train,
    )
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=config.batch_size,
        shuffle=train,  # only shuffle the data in training
        pin_memory=True,  # copy tensors into CUDA pinned memory before returning them
        num_workers=config.data_workers,
        collate_fn=collate_fn,
    )
def get_loader(train=False, val=False, test=False):
    """Return a DataLoader for the desired split, logging which split is processed."""
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    print('starting dataset processing, Train : {}, Val : {}'.format(
        train, val))
    dataset = VQA(
        utils.path_for(train=train, val=val, test=test, question=True),
        utils.path_for(train=train, val=val, test=test, answer=True),
        config.preprocessed_path,
        answerable_only=train,
    )
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=config.batch_size,
        shuffle=train,  # shuffle only while training
        pin_memory=True,
        num_workers=config.data_workers,
        collate_fn=collate_fn)
def main():
    """Build question and answer vocabularies from the training split and dump them as UTF-8 JSON."""
    with open(utils.path_for(train=True, question=True), 'r', encoding='utf-8') as fd:
        questions = json.load(fd)
    with open(utils.path_for(train=True, answer=True), 'r', encoding='utf-8') as fd:
        answers = json.load(fd)

    questions = data.prepare_questions(questions)
    answers = data.prepare_answers(answers)

    vocabs = {
        'question': extract_vocab(questions, start=1),
        'answer': extract_vocab(answers, top_k=config.max_answers),
    }
    with open(config.vocabulary_path, 'w', encoding='utf-8') as fd:
        json.dump(vocabs, fd, ensure_ascii=False)
def get_loader(train=False, val=False, test=False, full_batch=False):
    """Return ``(loader, vocab_size)`` for the requested split.

    ``vocab_size`` is the question-token vocabulary size plus one.  When
    ``full_batch`` is set, the entire split is served as a single batch.
    """
    assert train + val + test == 1, 'need to set exactly one of {train, val, test} to True'
    split = VQA(
        utils.path_for(train=train, val=val, test=test, question=True),
        utils.path_for(train=train, val=val, test=test, answer=True),
        config.preprocessed_path,
        answerable_only=train,
    )
    batch_size = len(split) if full_batch else config.batch_size
    loader = torch.utils.data.DataLoader(
        split,
        batch_size=batch_size,
        shuffle=train,  # shuffle only while training
        pin_memory=True,
        num_workers=config.data_workers,
        collate_fn=collate_fn,
    )
    return loader, len(split.token_to_index) + 1
# Shared configuration bootstrap (Python 2: uses the 'print >>' statement).
import os, sys, platform, logging.config
from utils import get_config, path_for

# Load settings only once per process: probing the name with a bare
# expression raises NameError the first time through.
# NOTE(review): original indentation was lost; the logging setup is
# reconstructed inside the NameError branch — confirm against upstream.
try:
    settings
except NameError:
    try:
        settings = get_config(path_for(os.path.join('etc', 'config.json')))
    except Exception as e:
        # NOTE(review): the format string has no %(...)s placeholders, so
        # '% locals()' is a no-op here.
        print >> sys.stderr, ("Error while loading configuration file" % locals())
        sys.exit(2)
    logging.config.dictConfig(dict(settings.logging))
    log = logging.getLogger()
    log.info("Logging configured.")
import json import sys import os.path from collections import defaultdict import numpy as np import torch import utils import config q_path = utils.path_for(val=True, question=True) with open(q_path, 'r') as fd: q_json = json.load(fd) a_path = utils.path_for(val=True, answer=True) with open(a_path, 'r') as fd: a_json = json.load(fd) with open( os.path.join(config.qa_path, 'v2_mscoco_val2014_complementary_pairs.json')) as fd: pairs = json.load(fd) question_list = q_json['questions'] question_ids = [q['question_id'] for q in question_list] questions = [q['question'] for q in question_list] answer_list = a_json['annotations'] categories = [a['answer_type'] for a in answer_list] # {'yes/no', 'other', 'number'} accept_condition = { 'number': (lambda x: id_to_cat[x] == 'number'), 'yes/no': (lambda x: id_to_cat[x] == 'yes/no'),
Created by: Rui Carmo License: MIT (see LICENSE for details) """ import os, sys, time, json, logging, logging.config, itertools # Make sure our bundled libraries take precedence sys.path.insert( 0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')) import config, utils # read configuration file config.settings = utils.get_config( os.path.join(utils.path_for('etc'), 'config.json')) # Set up logging logging.config.dictConfig(dict(config.settings.logging)) log = logging.getLogger() # load modules import models, controllers models.setup() models.setup_index() def grouper(n, iterable): it = iter(iterable) while True: chunk = tuple(itertools.islice(it, n))
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Copyright (c) 2012, Rui Carmo Description: Shared configuration data License: MIT (see LICENSE.md for details) """ import os, sys, platform, logging.config from utils import get_config, path_for, tb try: settings except NameError: for host in [platform.node(), 'default']: try: settings = get_config(path_for(os.path.join('etc','%s.json' % host))) except IOError: continue except Exception as e: if sys.stderr.isatty(): tb = tb() print >> sys.stderr, ("Error while loading %(host)s.json: %(tb)s" % locals()) else: log.error("Error while loading %(host)s.json: %(tb)s" % locals()) sys.exit(2) logging.config.dictConfig(dict(settings.logging)) log = logging.getLogger() log.info("Configuration for %s loaded." % host) break
def media_asset(item):
    """Serve a page attachment as a static file from the content root."""
    content_root = path_for(settings.content.path)
    return static_file(item, root=content_root)
"""
Fetcher script

Created by: Rui Carmo
License: MIT (see LICENSE for details)
"""

import os, sys, json, time, logging, logging.config

# Make sure our bundled libraries take precedence
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib'))

import config, utils, markup.opml

# read configuration file
config.settings = utils.get_config(os.path.join(utils.path_for('etc'), 'config.json'))

# Set up logging
logging.config.dictConfig(dict(config.settings.logging))
log = logging.getLogger()

# load modules (imported late so they see the configured settings/logging)
import models, controllers

models.setup()

if __name__ == "__main__":
    log.info("Starting importer.")
    fc = controllers.FeedController()
    uc = controllers.UserController()
    # fetch the default user's subscriptions
    user = uc.get_user('default')
def static(filepath):
    """Serve any remaining static files from the active theme's static directory."""
    theme_static = os.path.join('themes', settings.theme, 'static')
    return static_file(filepath, root=path_for(theme_static))
def static(filepath):
    """Serve any remaining static files from the 'static' root."""
    static_root = path_for('static')
    return static_file(filepath, root=static_root)
sys.path.insert(0,'lib') # prefer bundled libraries to local installs import bottle, utils # read configuration file and setup various globals, including logging formats from config import settings import gtk, pygtk # Set up logging log = logging.getLogger() # validate framebuffer settings settings = utils.check_resolution(settings) # setup static file root staticroot = utils.path_for('static') # check if we have a valid IP address ip_address = utils.get_ip_address(settings.interface) # Shared data used by other modules version = '0.13.08.02.1' # Flag for controlled thread termination running = True # Screen state sent from server screen = {} # Local URI Prefix local_uri = 'http://%s:%s' % (settings.http.bind_address, settings.http.port) window = gtk.Window() screen = window.get_screen() if not settings.staging:
"""
Main signage client script

Created by: Rui Carmo
License: MIT (see LICENSE for details)
"""

# Python 2 script: urllib2/commands/urlparse are py2-only modules.
import os, sys, re, stat, glob, time, json, logging, logging.config, threading, random
import socket, urllib, urllib2, urlparse, commands, functools, subprocess

sys.path.insert(0, 'lib')  # prefer bundled libraries to local installs

import bottle, utils

# read configuration file and setup various globals
config = utils.get_config(os.path.join(utils.path_for('data'), 'config.json'))

# Set up logging
logging.config.dictConfig(dict(config.logging))
log = logging.getLogger()

# validate framebuffer settings
config = utils.validate_resolution(config)

# setup static file root
staticroot = utils.path_for('static')

# check if we have a valid IP address
ip_address = utils.get_ip_address(config.interface)

# Shared data used by other modules
version = '0.13.02.27.6'

# Flag for controlled thread termination
running = True

# Screen state sent from server
screen = {}
import os, sys, re, stat, glob, time, json, logging, logging.config, threading, random import socket, urllib, urllib2, urlparse, commands, functools, subprocess sys.path.insert(0, 'lib') # prefer bundled libraries to local installs import bottle, utils # read configuration file and setup various globals, including logging formats from config import settings # Set up logging log = logging.getLogger() # validate framebuffer settings settings = utils.check_resolution(settings) # setup static file root staticroot = utils.path_for('static') # check if we have a valid IP address ip_address = utils.get_ip_address(settings.interface) # Shared data used by other modules version = '0.13.08.02.1' # Flag for controlled thread termination running = True # Screen state sent from server screen = {} # Local URI Prefix local_uri = 'http://%s:%s' % (settings.http.bind_address, settings.http.port) # Defaults sent to templates template_vars = { 'version': version, 'ip_address': ip_address,
# Shared configuration bootstrap (Python 2: uses the 'print >>' statement).
import os, sys, platform, logging.config
from utils import get_config, path_for

# Load settings only once per process: probing the name with a bare
# expression raises NameError the first time through.
# NOTE(review): original indentation was lost; the logging setup is
# reconstructed inside the NameError branch — confirm against upstream.
try:
    settings
except NameError:
    try:
        settings = get_config(path_for(os.path.join('etc', 'config.json')))
    except Exception as e:
        # NOTE(review): the format string has no %(...)s placeholders, so
        # '% locals()' is a no-op here.
        print >> sys.stderr, ("Error while loading configuration file" % locals())
        sys.exit(2)
    logging.config.dictConfig(dict(settings.logging))
    log = logging.getLogger()
    log.info("Logging configured.")
"""
Copyright (c) 2012, Rui Carmo
Description: Shared configuration data
License: MIT (see LICENSE.md for details)
"""

import os, sys, platform, logging.config
from utils import get_config, path_for, tb

# Load settings once per process; try a host-specific config file first,
# then fall back to 'default.json'.
try:
    settings
except NameError:
    for host in [platform.node(), 'default']:
        try:
            settings = get_config(
                path_for(os.path.join('etc', '%s.json' % host)))
        except IOError:
            continue  # no config for this candidate host; try the next one
        except Exception as e:
            if sys.stderr.isatty():
                # NOTE(review): rebinding 'tb' shadows the imported tb() helper.
                tb = tb()
                print >> sys.stderr, (
                    "Error while loading %(host)s.json: %(tb)s" % locals())
            else:
                # NOTE(review): 'log' is not defined yet on this path — this
                # would raise NameError before logging anything.
                log.error("Error while loading %(host)s.json: %(tb)s" %
                          locals())
            sys.exit(2)
        logging.config.dictConfig(dict(settings.logging))
        log = logging.getLogger()
        log.info("Configuration for %s loaded." % host)
        break
"""
Copyright (c) 2012, Rui Carmo
Description: Shared configuration data
License: MIT (see LICENSE.md for details)
"""

import logging
import logging.config
import os
import platform
import sys

from utils import get_config, path_for, tb

# Try a host-specific config file first, then fall back to 'default.json'.
for host in [platform.node(), "default"]:
    try:
        settings = get_config(path_for(os.path.join("etc", "%s.json" % host)))
    except IOError:
        continue  # no config for this candidate host; try the next one
    except Exception as e:
        # NOTE(review): rebinding 'tb' shadows the imported tb() helper.
        tb = tb()
        print tb
        if sys.stderr.isatty():
            print >> sys.stderr, ("Error while loading %(host)s.json: %(tb)s" % locals())
        else:
            # NOTE(review): 'log' is not defined yet on this path — this
            # would raise NameError before logging anything.
            log.error("Error while loading %(host)s.json: %(tb)s" % locals())
        sys.exit(2)
    logging.config.dictConfig(dict(settings.logging))
    log = logging.getLogger()
    log.info("Configuration for %s loaded." % host)
    break
def static(filepath):
    """Serve any remaining static files from the active theme's static directory."""
    root = path_for(os.path.join('themes', settings.theme, 'static'))
    return static_file(filepath, root=root)
"""
Copyright (c) 2012, Rui Carmo
Description: Shared configuration data
License: MIT (see LICENSE.md for details)
"""

import logging
import logging.config
import os
import platform
import sys

from utils import get_config, path_for, tb

# Try a host-specific config file first, then fall back to 'default.json'.
for host in [platform.node(), 'default']:
    try:
        settings = get_config(path_for(os.path.join('etc', '%s.json' % host)))
    except IOError:
        continue  # no config for this candidate host; try the next one
    except Exception as e:
        # NOTE(review): rebinding 'tb' shadows the imported tb() helper.
        tb = tb()
        print tb
        if sys.stderr.isatty():
            print >> sys.stderr, ("Error while loading %(host)s.json: %(tb)s" % locals())
        else:
            # NOTE(review): 'log' is not defined yet on this path — this
            # would raise NameError before logging anything.
            log.error("Error while loading %(host)s.json: %(tb)s" % locals())
        sys.exit(2)
    logging.config.dictConfig(dict(settings.logging))
    log = logging.getLogger()
    log.info("Configuration for %s loaded." % host)
    break
# -*- coding: utf-8 -*- """ Main application script Created by: Rui Carmo """ import os, sys, json, logging, logging.config # Make sure our bundled libraries take precedence sys.path.insert(0,os.path.join(os.path.dirname(os.path.abspath(__file__)),'lib')) import config, utils, bottle # read configuration file config.settings = utils.get_config(os.path.join(utils.path_for('etc'),'wiki.json')) # Set up logging logging.config.dictConfig(dict(config.settings.logging)) log = logging.getLogger() if __name__ == "__main__": if config.settings.reloader: if 'BOTTLE_CHILD' not in os.environ: log.debug('Using reloader, spawning first child.') else: log.debug('Child spawned.') if not config.settings.reloader or ('BOTTLE_CHILD' in os.environ):
Created by: Rui Carmo """ from gevent import monkey; monkey.patch_socket() from gevent.server import StreamServer import os, sys, json, logging, logging.config # Make sure our bundled libraries take precedence sys.path.insert(0,os.path.join(os.path.dirname(os.path.abspath(__file__)),'lib')) import utils from redis.server import RedisServer # read configuration file config = utils.get_config(os.path.join(utils.path_for('etc'),'store.json')) # set up logging logging.config.dictConfig(dict(config.logging)) log = logging.getLogger() if __name__ == "__main__": s = RedisServer(config) try: if config.engine == "gevent": s.halt = False server = StreamServer((config.net.bind_address, config.net.port), s.gevent_handler) server.serve_forever() else: s.run() except KeyboardInterrupt:
"""
Main application script

Created by: Rui Carmo
License: MIT (see LICENSE for details)
"""

import os, sys, json, logging, logging.config

# Make sure our bundled libraries take precedence
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib'))

import bottle, config, utils

# read configuration file
config.settings = utils.get_config(os.path.join(utils.path_for('data'), 'config.json'))

# Set up logging
logging.config.dictConfig(dict(config.settings.logging))
log = logging.getLogger()

if __name__ == "__main__":
    # Bottle's reloader spawns a child process (marked via BOTTLE_CHILD);
    # only the child actually serves requests.
    if config.settings.debug:
        if 'BOTTLE_CHILD' not in os.environ:
            log.debug('Using reloader, spawning first child.')
        else:
            log.debug('Child spawned.')
    if not config.settings.debug or ('BOTTLE_CHILD' in os.environ):
        log.info("Setting up application.")
def static(filepath):
    """Serve static files from the active theme, logging the resolved root."""
    theme_static = os.path.join('themes', settings.theme, 'static')
    log.debug(path_for(theme_static))
    return static_file(filepath, root=path_for(theme_static))
from gevent import monkey monkey.patch_socket() from gevent.server import StreamServer import os, sys, json, logging, logging.config # Make sure our bundled libraries take precedence sys.path.insert( 0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')) import utils from redis.server import RedisServer # read configuration file config = utils.get_config(os.path.join(utils.path_for('etc'), 'store.json')) # set up logging logging.config.dictConfig(dict(config.logging)) log = logging.getLogger() if __name__ == "__main__": s = RedisServer(config) try: if config.engine == "gevent": s.halt = False server = StreamServer((config.net.bind_address, config.net.port), s.gevent_handler) server.serve_forever() else: s.run()
try: from gevent import monkey; monkey.patch_all() except: print "Could not load gevent, proceeding." import os, sys, logging, logging.config # Make sure our bundled libraries take precedence for f in ['lib','controllers']: sys.path.insert(0,os.path.join(os.path.dirname(os.path.abspath(__file__)),f)) import utils, bottle, config # read configuration file config.indexer = utils.get_config(os.path.join(utils.path_for('etc',__file__),'indexer.json')) # Set up logging logging.config.dictConfig(dict(config.indexer.logging)) log = logging.getLogger() if __name__ == "__main__": if config.indexer.reloader: if 'BOTTLE_CHILD' not in os.environ: log.debug('Using reloader, spawning first child.') else: log.debug('Child spawned.') if not config.indexer.reloader or ('BOTTLE_CHILD' in os.environ):