Example #1
def get_full_card_image(card):
    """
    Searches Scryfall for the specified card and saves it as a high-res PNG
    image to data/scryfall/full-cards/[card-name].
    Nearly identical to get_card_art; the two can probably be condensed in the future.

    PARAMETERS:
     - card: the card dictionary from the main all-cards dictionary.
       Example: dictionary["Lightning Bolt"]

    RETURNS:
     - The filepath to the image if available
    """
    assert isinstance(card, dict)

    try:
        name = parse_card_name(card["name"])
    except (KeyError, TypeError):
        # str() is needed here: concatenating the dict itself would raise TypeError
        print("Could not get the name from card " + str(card))
        return

    if path.exists("data/scryfall/full-cards/" + name + ".png"):
        # If the image already exists, just return its path
        return "data/scryfall/full-cards/" + name + ".png"

    # Otherwise, download the full-card PNG from Scryfall
    try:
        uri = card["image_uris"]["png"]
    except KeyError:
        print('Could not get ["image_uris"]["png"] from card ' + name)
        return

    Updater.request_scryfall_data(uri,
                                  "data/scryfall/full-cards/" + name + ".png",
                                  verbose=False)
    return "data/scryfall/full-cards/" + name + ".png"
Example #2
 def fit(self, X, y):
     self.X = X
     self.y = y
     n = X.shape[0]
     m = X.shape[1]
     n_class = np.unique(y).shape[0]
     self.n_class = n_class
     self.n = n
     self.m = m
     p = np.zeros((n, n_class))
     g = np.empty([n, n_class])
     self.g = g
     Trees = []
     self.Trees = Trees
     order = np.argsort(X, axis=0)
     updater = Updater(order, self.max_depth, self.eps)
     for it in range(self.max_iter):
         print('iter:', it)
         self.get_grad(p)
         trees = []
         for j in range(n_class):
             # negative grad
             tree = updater.fit(X, -g[:, j])
             trees.append(tree)
         Trees.append(trees)
         self.predict_raw(X, p)
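
get_grad is not shown; under the usual multiclass gradient-boosting setup it fills self.g with the softmax cross-entropy gradient of the raw scores. A minimal sketch of that assumption (integer labels 0..n_class-1 assumed):

import numpy as np

def get_grad(self, p):
    # Assumed helper: softmax cross-entropy gradient w.r.t. raw scores p.
    # p has shape (n, n_class); self.y is assumed to hold integer class labels.
    e = np.exp(p - p.max(axis=1, keepdims=True))  # numerically stable softmax
    prob = e / e.sum(axis=1, keepdims=True)
    onehot = np.eye(self.n_class)[self.y]
    # fill in place so the caller's local alias g sees the update
    self.g[:] = prob - onehot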
Example #3
def simpleUpdateChecker(colors):
    # run the automatic updater, but only alert if an update is available
    available = Updater.check(colors, False)
    if available is not None and available[0]:
        response = raw_input(colors.alert() + '::Update Alert::\nThere is an update available. Would you like to download it now? ' + colors.end()).lower()
        if response in ('y', 'yes', 'u', 'update', 'd', 'download', 'get', 'get it', 'install', 'install update'):
            verified = Updater.getUpdate(fastSearchDir, colors, available[1], available[2])
            if not verified:
                print colors.error() + '\nAn error occurred while trying to download and install the update. Check your internet connection and try again.\nIf the problem persists, contact the developer.\n' + colors.end()
        else:
            print colors.alert() + 'No updates were downloaded or installed.\n' + colors.end()
Example #4
 def train(self, info):
     n_class = info['n_class']
     g = info['grad']
     h = info['hess']
     lamda = info['lamda']
     min_weight = info['min_weight']
     self.Trees = []
     # each loop iteration generates one tree per class
     for i in range(n_class):
         updater = Updater(self.X, self.order, g[:, i], h[:, i], lamda,
                           min_weight)
         updater.generate()
         self.Trees.append(updater.get_tree())
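
A hypothetical call sketch; the shapes follow from the column indexing above, but the names and values below are placeholders:

import numpy as np

n, n_class = 100, 3                        # placeholder sizes
info = {
    'n_class': n_class,
    'grad': np.random.randn(n, n_class),   # per-class first-order statistics
    'hess': np.ones((n, n_class)),         # per-class second-order statistics
    'lamda': 1.0,                          # L2 regularization (spelled as in the code)
    'min_weight': 1.0,
}
booster.train(info)                        # booster: an instance of this class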
Example #5
def check_if_empty():
    """REALLY DO I NEED A DOCSTRING MANNN"""
    print(LONG_EQUAL_BARS)
    print('TEST UNIT : CHECK check_if_empty()', end="\n\n")
    print('EXPECTED VALUE : wat is dis')
    print(
        "Obtained Value : " +
        Updater.check_empty_variable('', 'wat is dis', 'test_check_if_empty'),
        end="\n\n")
    print('EXPECTED VALUE : i got a title')
    print("Obtained Value : " + Updater.check_empty_variable(
        'i got a title', 'wat is dis',
        'test_check_if_empty but str_ is not empty'),
          end="\n\n")
Example #6
    def test_get_version_from_xml(self):
        xml_string = """
<Versions>
  <Windows>1</Windows>
</Versions>
"""
        version = Updater.get_version_from_xml(xml_string)
        self.assertEqual(version, 1)
        xml_string = """
<Versions>
  <Windows>1.5</Windows>
</Versions>
"""
        version = Updater.get_version_from_xml(xml_string)
        self.assertEqual(version, 1.5)
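
The implementation under test is not shown; a minimal sketch that would satisfy both assertions, assuming the version number lives in the <Windows> element:

import xml.etree.ElementTree as ET

def get_version_from_xml(xml_string):
    # Parse <Versions><Windows>X</Windows></Versions> and return X as a number.
    root = ET.fromstring(xml_string)
    value = float(root.find('Windows').text)
    # Return an int for whole numbers (1 -> 1), a float otherwise (1.5 -> 1.5)
    return int(value) if value.is_integer() else value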
Example #7
    def __init__(self, command_args):
        super(MarkdownEditorApp, self).__init__(command_args)

        self.localisation = Localisation.Localiser()
        self.localisation.listeners.append(USER_TEXT)
        self.localisation.listeners.append(TOOL_TIP)
        self.localisation.listeners.append(self)

        logging.info("Localisation setup.")
        logging.info("Application started with arguments: " +
                     unicode(command_args))
        args = self.parse_command_args(command_args[1:])
        if args.reset_user_conf:
            Configuration.reset_options()
        self.language_changed(self.localisation.language())

        if args.locale:
            self.localisation.set_language(args.locale)
        self.setWindowIcon(QtGui.QIcon(Configuration.IMAGES["icon"]))
        self.editor = MarkdownEditor.MarkdownEditor(args.files,
                                                    self.localisation)

        if args.command:
            input_file = args.command[0]
            output_file = args.command[1]
            CommandLineMarkdownApp(input_file, output_file).run()
            self.editor.hide()
            sys.exit()

        logging.info("Updater checking for updates.")
        self.updater = Updater.Updater()
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.check_update_finished)
        self.timer.start(50)
Example #8
def main():
    parser = argparse.ArgumentParser(description="Vanilla_AE")
    parser.add_argument("--batchsize", "-b", type=int, default=128)
    parser.add_argument("--epoch", "-e", type=int, default=100)
    parser.add_argument("--gpu", "-g", type=int, default=0)
    parser.add_argument("--snapshot", "-s", type=int, default=10)
    parser.add_argument("--n_dimz", "-z", type=int, default=64)

    args = parser.parse_args()

    #print settings
    print("GPU:{}".format(args.gpu))
    print("epoch:{}".format(args.epoch))
    print("Minibatch_size:{}".format(args.batchsize))
    print('')

    batchsize = args.batchsize
    gpu_id = args.gpu
    max_epoch = args.epoch
    train_val, test = mnist.get_mnist(withlabel=False, ndim=1)
    train, valid = split_dataset_random(train_val, 50000, seed=0)
    model = Network.AE(n_dimz=args.n_dimz, n_out=784)

    #set iterator
    train_iter = iterators.SerialIterator(train, batchsize)
    valid_iter = iterators.SerialIterator(valid,
                                          batchsize,
                                          repeat=False,
                                          shuffle=False)

    #optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
        return optimizer

    opt = make_optimizer(model)
    #trainer
    updater = Updater.AEUpdater(model=model,
                                iterator=train_iter,
                                optimizer=opt,
                                device=args.gpu)

    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='result')
    #trainer.extend(extensions.ExponentialShift('lr', 0.5),trigger=(30, 'epoch'))
    trainer.extend(extensions.LogReport(log_name='log'))
    trainer.extend(
        Evaluator.AEEvaluator(iterator=valid_iter,
                              target=model,
                              device=args.gpu))
    trainer.extend(extensions.snapshot_object(
        model, filename='model_snapshot_epoch_{.updater.epoch}.npz'),
                   trigger=(args.snapshot, 'epoch'))
    #trainer.extend(extensions.snapshot_object(optimizer, filename='optimizer_snapshot_epoch_{.updater.epoch}'), trigger=(args.snapshot, 'epoch'))
    trainer.extend(
        extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss']))
    trainer.extend(extensions.ProgressBar())
    trainer.run()
    del trainer
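
Updater.AEUpdater is project-local code that is not shown; as a rough sketch under that assumption, a Chainer autoencoder updater typically subclasses StandardUpdater and reports a reconstruction loss:

import chainer
import chainer.functions as F
from chainer import training

class AEUpdater(training.StandardUpdater):
    # Assumed shape of the custom updater, not the project's actual code.
    def __init__(self, model, iterator, optimizer, device):
        super(AEUpdater, self).__init__(iterator, optimizer, device=device)
        self.model = model

    def update_core(self):
        batch = self.get_iterator('main').next()
        x = self.converter(batch, self.device)
        optimizer = self.get_optimizer('main')
        loss = F.mean_squared_error(self.model(x), x)  # reconstruction loss
        self.model.cleargrads()
        loss.backward()
        optimizer.update()
        chainer.report({'loss': loss}, self.model)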
Example #9
def startUpdate():
	global count
	global upd

	con = mdb.connect(host="mysqlsrv.cs.tau.ac.il", user="******", passwd="DbMysql15", db="DbMysql15", autocommit=True)

	upd = Updater(con)

	with con:
		cur = con.cursor()
		cur.execute("SELECT group_fb_id, group_id FROM Groups")
		ls = cur.fetchall()

		for row in ls:
			gfid = row[0]
			gid = row[1]
			getGroupFeed(gfid)
			cur.execute("UPDATE Groups SET update_date = ( SELECT MAX(publish_date) FROM JobPost,JobPostGroup WHERE JobPost.post_id = JobPostGroup.post_id AND JobPostGroup.group_id = " + str(gid) + ")" + " WHERE Groups.group_id = " + str(gid))

	if DEBUG:
		#print count
		print gid
Example #10
def runUpdater(fastSearchDir, colors):
    available = Updater.check(colors, True)
    if available is not None:
        if available[0] == True:
            response = raw_input(colors.blue() + 'There is an update available. Download? ' + colors.end()).lower()
            if response in ('y', 'yes', 'u', 'update', 'd', 'download', 'get', 'get it', 'install', 'install update'):
                verified = Updater.getUpdate(fastSearchDir, colors, available[1], available[2])
                if not verified:
                    print colors.error() + '\nAn error occurred while trying to download and install the update. Check your internet connection and ' \
                        'try again.\nIf the problem persists, try downloading the program again from fastsearch.alexlaird.net or contacting the developer.\n' + colors.end()
            else:
                print colors.alert() + 'No updates were downloaded or installed.\n' + colors.end()
        elif available[0] == -1:
            print colors.alert() + 'An update is available, but the current version of FastSearch cannot be automatically updated.\nPlease navigate to ' \
                'fastsearch.alexlaird.net in order to download and install the update.\n' + colors.end()
        else:
            print colors.alert() + 'There are no updates available at this time.\n' + colors.end()
Example #11
def mainDebug():
	global upd

	con = mdb.connect(host="mysqlsrv.cs.tau.ac.il", user="******", passwd="DbMysql15", db="DbMysql15", autocommit=True)

	upd = Updater(con)
	getGroupFeed('626511234087342')
	getGroupFeed('694416243907967')
Example #12
 def open_updater(self):
     self.config_updater()
     self.modify_ui = Updater.UpdaterUI(self.updater_win, reactor,
                                        width=0.5*SCREENWIDTH,
                                        height=0.5*SCREENHEIGHT)
     self.modify_ui.grid(row=0, column=0, sticky='nesw')
     self.updater_win.update()
     self.parent.update()
Example #13
def get_card_art_crop(card, autoproxy_format=False):
    """
  Searches scryfall for the specified card art and saves it to data/scryfall/card-art/[card-name]

  PARAMETERS:
   - card: the card dictionary from the main all-cards dictionary. Example: dictionary["Lightning Bolt"]
   - autoproxy_format: how the filename should be saved. Formats it to be <CardName> (<Artist>).png to comply
     with https://github.com/ndepaola/mtg-autoproxy

  RETURNS:
   - Returns the filepath to the image if available
  """
    assert isinstance(card, dict)

    try:
        if autoproxy_format:
            name = card["name"] + " (" + card["artist"] + ")"
            extension = ".jpg"
        else:
            name = parse_card_name(card["name"])
            extension = ".png"
    except (KeyError, TypeError):
        # str() is needed here: concatenating the dict itself would raise TypeError
        print("Could not get the name from card " + str(card))
        return

    if path.exists("data/scryfall/card-art/" + name + extension):
        # If the art exists, just return with a message
        # We can fix this protocol later
        print("Card art already loaded for " + name)
        return "data/scryfall/card-art/" + name + extension

    # Otherwise, download the cropped art from scryfall
    try:
        uri = card["image_uris"]["art_crop"]
    except KeyError:
        print('Could not get ["image_uris"]["art_crop"] from card ' + name)
        return

    Updater.request_scryfall_data(uri,
                                  'data/scryfall/card-art/' + name + extension,
                                  verbose=False)
    return "data/scryfall/card-art/" + name + extension
Example #14
def run_repetition(ruleset, config, graph, logDir, repetition):
    updater = Updater.Updater(ruleset, config)
    updater.setGraph(
        graph,
        get_graph_logger('GraphLogger_' + logDir + 'graph_' + str(repetition),
                         logDir + 'graph_' + str(repetition) + '.log'))

    for iteration in range(config["sim_iterations"]):
        updater.update()

    updater.close()

    return {
        'log': logDir + 'graph_' + str(repetition) + '.log',
        'analyzer': updater.getAnalyzer(),
    }
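
A hypothetical driver around run_repetition; config['sim_repetitions'] and the logs/ directory are placeholders, not keys taken from the example:

# Hypothetical experiment driver; ruleset, config, and graph come from elsewhere.
results = [
    run_repetition(ruleset, config, graph, 'logs/', repetition)
    for repetition in range(config['sim_repetitions'])  # assumed config key
]
for result in results:
    print(result['log'], result['analyzer'])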
Example #15
def main():
    """Opens the file, runs fileparser, download links, status print, etc"""
    print("Downloader.py....")
    with open(FILE_NAME, 'r', encoding="utf-8") as file_:
        dl_loc = Updater.folder_check_empty(FILE_DESINATION, 'Downloader',
                                            'pics')
        links = file_parser(file_)
        length = len(links)
        for (i, link) in enumerate(links):
            percent = Updater.percent_former(i + 1, length)
            if os.path.exists(os.path.join(dl_loc, (str(i) + '.png'))):
                msg = 'Skipping ' + link[-30:]
                Updater.status_print(msg, percent, dl_loc)
            else:
                msg = "Downloading " + link[-30:]
                Updater.status_print(msg, percent, dl_loc)
                Updater.html_download(link.strip(), True,
                                      str(i) + '.png', dl_loc, False)
        print('\nCompleted')
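
file_parser is referenced but not shown; a plausible minimal sketch, assuming the input file holds one link per line:

def file_parser(file_):
    # Assumed helper: collect non-empty lines from the open file as links.
    return [line.strip() for line in file_ if line.strip()]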
Example #16
def main():
    config.LoadConfig()
    email.Init()
    video.InitCams()

    try:
        web.Run()
    except Exception as exc:
        print("EXCEPTION!", exc)
    shutdownState = web.GetShutdownState()
    print(shutdownState)

    Log("Server Shutting Down...")
    video.Stop()
    cv2.destroyAllWindows()

    if shutdownState == "restart":
        RestartServer()
    elif shutdownState == "update":
        Log("Updating server...")
        updater.Update()
        RestartServer()
Example #17
# Set of tokens provided by the app
clientID = os.environ['CLIENT_ID']
clientSecret = os.environ['CLIENT_SECRET']
veritoken = os.environ['VERIFY_TOKEN']
commandSalt = os.environ['COMMAND_SALT']
agentSalt = os.environ['AGENT_SALT']

# Dictionary of SlackClients stored by TeamID
clientDictionary = {}

# Plugin objects
dante = DantesUpdater.Dantes_Updater()
user = UserManager.UserManager()
infra = InfraManager.InfraManager()
update = Updater.Updater()
status = StatusManager.StatusManager()
lab = LabManager.LabManager()

commandDict = {
        'dante':dante,
        'infra':infra,
        'user':user,
        'update':update,
        'agent':AgentManager,
        'status':status,
        'lab':lab
        }

# Encoder objects
commandHashids = Hashids(salt=commandSalt)
Example #18
UPDATE_MISTER_TUPLE = (
    'MSX_MiSTer',
    'NeoGeo_MiSTer',
    'NES_MiSTer',
    'SMS_MiSTer',
    'SNES_MiSTer',
    'TurboGrafx16_MiSTer',
    'MegaCD_MiSTer',
    'GBA_MiSTer',
)

# Matching: keywords of download-file
DOWNLOADING_MATCH_FILE_KEYWORDS = (
    'menu_',
    'MiSTer_',
    '.rbf',
)

# main-method
if __name__ == '__main__':
    # install: all dependency-libs
    install_all_dependencies()

    # create updater
    updater = Updater(DOWNLOADING_MATCH_FILE_KEYWORDS, UPDATE_MISTER_TUPLE)

    # setting updater repository-list from config-file
    updater.set_updater_repository_list()

    # setup and upgrade-download!
    updater.setup_and_upgrade_download(True)
Example #19
 def Check_For_Updates(self):
     um = updater.Update_Manager(self.version)
     um.Check_For_Updates()
     if um.connection:
         um.connection.close()
Example #20
def main():
    parser = argparse.ArgumentParser(description="Vanilla_AE")
    parser.add_argument("--batchsize", "-b", type=int, default=64)
    parser.add_argument("--epoch", "-e", type=int, default=100)
    parser.add_argument("--gpu", "-g", type=int, default=0)
    parser.add_argument("--snapshot", "-s", type=int, default=10)
    parser.add_argument("--n_dimz", "-z", type=int, default=16)
    parser.add_argument("--dataset", "-d", type=str, default='mnist')
    parser.add_argument("--network", "-n", type=str, default='conv')

    args = parser.parse_args()

    def transform(in_data):
        img = in_data
        img = resize(img, (32, 32))
        return img

    def transform2(in_data):
        img, label = in_data
        img = resize(img, (32, 32))
        return img, label

    #import program
    import Updater
    import Visualizer

    #print settings
    print("GPU:{}".format(args.gpu))
    print("epoch:{}".format(args.epoch))
    print("Minibatch_size:{}".format(args.batchsize))
    print('')
    out = os.path.join('result', args.network)
    batchsize = args.batchsize
    gpu_id = args.gpu
    max_epoch = args.epoch

    train_val, _ = mnist.get_mnist(withlabel=False, ndim=3)
    train_val = TransformDataset(train_val, transform)
    #for visualize
    _, test = mnist.get_mnist(withlabel=True, ndim=3)
    test = TransformDataset(test, transform2)
    label1 = 1
    label2 = 5
    test1 = [i[0] for i in test if (i[1] == label1)]
    test2 = [i[0] for i in test if (i[1] == label2)]
    test1 = test1[0:5]
    test2 = test2[5:10]

    if args.network == 'conv':
        import Network.mnist_conv as Network
    elif args.network == 'fl':
        import Network.mnist_fl as Network
    else:
        raise ValueError('unknown network: ' + args.network)

    AE = Network.AE(n_dimz=args.n_dimz, batchsize=args.batchsize)
    train, valid = split_dataset_random(train_val, 50000, seed=0)

    #set iterator
    train_iter = iterators.SerialIterator(train, batchsize)
    valid_iter = iterators.SerialIterator(valid,
                                          batchsize,
                                          repeat=False,
                                          shuffle=False)

    #optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
        return optimizer

    opt_AE = make_optimizer(AE)
    #trainer
    updater = Updater.AEUpdater(model=(AE),
                                iterator=train_iter,
                                optimizer={'AE': opt_AE},
                                device=args.gpu)

    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out=out)
    trainer.extend(extensions.LogReport(log_name='log'))
    snapshot_interval = (args.snapshot, 'epoch')
    display_interval = (1, 'epoch')
    trainer.extend(extensions.snapshot_object(
        AE, filename='AE_snapshot_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.PrintReport(['epoch', 'AE_loss']),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar())
    trainer.extend(Visualizer.out_generated_image(AE, test1, test2, out),
                   trigger=(1, 'epoch'))
    trainer.run()
    del trainer
Example #21
import Log  # assumed: the module providing log_start/log_segment used below
import Updater
import Database
from RSI import RSIResults

# TODO
# - clear out all dailydata for tickers starting w/ 'A'. Some of them have invalid data that I manually copied...oops
# - add manual data for all stocks w/ entries with a 0 volume, delete those entries, and rerun
# - create function to save all data into csvs
# - backup databases
# - remove dependency on csv files for stockdates (for like KO, IBM, GE)

Log.log_start("daily-update")

# Populate Stocks table
Log.log_segment("Updating Stocks table")
Updater.update_index_stocks()

# Populate MarketDates table
Log.log_segment("Updating MarketDates table")
Updater.update_stock_dates()

# Update watchlist, portfolio, and indices
Log.log_segment(
    "Updating IndexFunds table and updating index funds data in DailyData")
Updater.update_indexfund_prices()
Log.log_segment("Updating watchlist")
Updater.update_watchlist()
Log.log_segment("Updating portfolio")
Updater.update_portfolio()

# Populate DailyData table
Example #22
import Updater
import time

if __name__ == "__main__":
	upd = Updater()
	while(1):
		upd.Update()
		time.sleep(0.1)
Example #23
def main():
    parser = argparse.ArgumentParser(description="WGAN-gp")
    parser.add_argument("--batchsize", "-b", type=int, default=64)
    parser.add_argument("--epoch", type=int, default=500)
    parser.add_argument("--gpu", "-g", type=int, default=0)
    parser.add_argument("--snapshot_interval", "-s", type=int, default=50)
    parser.add_argument("--display_interval", "-d", type=int, default=1)
    parser.add_argument("--n_dimz", "-z", type=int, default=128)
    parser.add_argument("--dataset", "-ds", type=str, default="mnist")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--out", "-o", type=str, default="result")
    parser.add_argument("--resume", '-r', default='')
    args = parser.parse_args()

    #import .py
    import Updater
    import Visualize
    import Network.mnist_net as Network

    #print settings
    print("GPU:{}".format(args.gpu))
    print("max_epoch:{}".format(args.epoch))
    print("Minibatch_size:{}".format(args.batchsize))
    print("Dataset:{}".format(args.dataset))
    print('')
    out = os.path.join(args.out, args.dataset)
    #Set up NN
    gen = Network.DCGANGenerator()
    dis = Network.WGANDiscriminator()

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()

    #Make optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.0, beta2=0.9):
        optimizer = chainer.optimizers.Adam(alpha=alpha,
                                            beta1=beta1,
                                            beta2=beta2)
        optimizer.setup(model)
        return optimizer

    opt_gen = make_optimizer(gen)
    opt_dis = make_optimizer(dis)

    #Get dataset
    train, _ = mnist.get_mnist(withlabel=True, ndim=3, scale=1.)
    train = [i[0] for i in train if (i[1] == 1)]  # select only label 1

    #Setup iterator
    train_iter = iterators.SerialIterator(train, args.batchsize)
    #Setup updater
    updater = Updater.WGANUpdater(models=(gen, dis),
                                  iterator=train_iter,
                                  optimizer={
                                      'gen': opt_gen,
                                      'dis': opt_dis
                                  },
                                  n_dis=5,
                                  lam=10,
                                  device=args.gpu)

    #Setup trainer
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=out)
    snapshot_interval = (args.epoch, 'epoch')
    display_interval = (args.display_interval, 'epoch')
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=(args.epoch, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'gen/loss', 'dis/loss', 'loss_grad',
        'wasserstein_distance', 'elapsed_time'
    ]),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar())
    trainer.extend(Visualize.out_generated_image(gen, dis, 10, 10, args.seed,
                                                 args.out, args.dataset),
                   trigger=display_interval)

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
Example #24
def main():
    parser = argparse.ArgumentParser(description="DCGAN")
    parser.add_argument("--batchsize", "-b", type=int, default=1)
    parser.add_argument("--epoch", "-e", type=int, default=5)
    parser.add_argument("--gpu", "-g", type=int, default=0)
    parser.add_argument("--snapshot_interval", "-s", type=int, default=1)
    parser.add_argument("--display_interval", "-d", type=int, default=1)
    parser.add_argument("--n_dimz", "-z", type=int, default=100)
    parser.add_argument("--dataset", "-ds", type=str, default="mnist")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--out", "-o", type=str, default="result")
    parser.add_argument("--resume", '-r', default='')
    args = parser.parse_args()

    #import .py
    import Updater
    import Visualize
    import Network.mnist_net as Network
    #print settings
    print("GPU:{}".format(args.gpu))
    print("epoch:{}".format(args.epoch))
    print("Minibatch_size:{}".format(args.batchsize))
    print("Dataset:{}".format(args.dataset))
    print('')
    out = os.path.join(args.out, args.dataset)
    #Set up NN
    gen = Network.Generator(n_hidden=args.n_dimz)
    dis = Network.Discriminator()
    ser = Network.Searcher(n_hidden=args.n_dimz)

    load_path = 'DCGAN/result/mnist/gen_epoch_100.npz'
    chainer.serializers.load_npz(load_path, gen)
    load_path = 'DCGAN/result/mnist/dis_epoch_100.npz'
    chainer.serializers.load_npz(load_path, dis)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()
        ser.to_gpu()
    #Make optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = optimizers.Adam(alpha=alpha, beta1=beta1) #init_lr = alpha
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001), 'hook_dec')
        return optimizer
    opt_gen = make_optimizer(gen)
    opt_dis = make_optimizer(dis)
    opt_ser = make_optimizer(ser)

    #Get dataset
    train_valid, test = mnist.get_mnist(withlabel=True, ndim=3)
    train, valid = split_dataset_random(train_valid, 50000, seed=0)
    #valid = [i[0] for i in valid if(i[1]==9)] # select only label 9
    valid = [i[0] for i in test if(i[1]==8)]

    # search the latent space for the coordinates of a single image
    valid = valid[0:1]
    xp = gen.xp
    z_noise = Variable(xp.asarray(gen.make_hidden(args.batchsize)))

    #Setup iterator
    train_iter = iterators.SerialIterator(valid, args.batchsize)
    #Setup updater
    updater = Updater.ADGANUpdater(
        models=(gen, dis, ser),
        iterator=train_iter,
        optimizer={'gen':opt_gen, 'dis':opt_dis, 'ser':opt_ser},
        z_noise=z_noise,
        device=args.gpu)

    #Setup trainer
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=out)
    snapshot_interval = (args.snapshot_interval, 'epoch')
    display_interval = (args.display_interval, 'epoch')
    trainer.extend(
        extensions.snapshot(
        filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        ser, 'ser_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(
        trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'ser/loss', 'gen/loss', 'dis/loss', 'elapsed_time'
    ]), trigger=display_interval)
    trainer.extend(extensions.ProgressBar())
    trainer.extend(Visualize.out_generated_image(
        gen, dis, ser, valid, args.out, args.dataset, z_noise),
        trigger=snapshot_interval)

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
Example #25
    def __init__(self, interface):
        super().__init__(name='experiment')
        self.interface = interface

        # store the ledMatrix
        self.ledMatrix = self.interface.ledMatrix

        # at beginning of experiment, populate first Date-Time field in google sheet
        # with 'now' and propagate to end of the experiment field. Experimental Time should
        # be already filled
        # if you change the google sheets format, make sure to provide the
        # correct column-to-parameter mapping below in param_col_dict and the correct cell for
        # time resolution of experiment
        param_col_dict = {'imaging_mode': 'C', 'matrix_r': 'D', 'matrix_g': 'E',
                          'matrix_b': 'F', 'radius': 'G', 'opto_on': 'H'}
        data_col_dict = {'temperature': 'I', 'humidity': 'J'}
        if self.interface.image_processing_mode != 'None':
            data_col_dict['opto_on'] = 'H'
            data_col_dict['motion'] = 'K'
            if self.interface.image_processing_mode == 'neural net':
                if self.interface.nn_count_eggs:
                    data_col_dict['egg_count'] = 'L'
        time_res_cell = 'B2'
        led_dosage_cell = 'J2'
        # we'll make a shared resource that contains the current row of the spreadsheet so that the motion updater
        # will know where we are.
        self.sheet = SheetsTransferData.SheetsTransferData(interface.spreadsheet_id, interface.system_id,
                                                           interface.paired_system_id, param_col_dict, data_col_dict,
                                                           time_res_cell, led_dosage_cell, interface.exp_code)

        # clear any old data out of the data columns of the google spreadsheet
        for (k, v) in data_col_dict.items():
            self.sheet.clear_data(v)

        # if google sheet format is changed, make sure to provide the right cell
        # to init time to
        self.exp_start = self.sheet.init_time()
        self.exp_end = self.exp_start + datetime.timedelta(minutes=interface.explength)

        # instantiate the temperature/humidity sensor objects
        self.tempSensor = TempSensor(interface.teensy_config)

        # use the process_manager created in the interface class to store a few lists
        self.motion_list = self.interface.process_manager.list()
        self.motion_list_lock = self.interface.process_manager.Lock()
        self.egg_count_list = self.interface.process_manager.list()
        self.egg_count_list_lock = self.interface.process_manager.Lock()

        # instantiate the camera object
        self.piCam = CameraSupport(interface._camera._camera, interface.config_file, self.interface.imaging_params,
                                   self.interface.timelapse_option, self.ledMatrix,
                                   self.interface.stop_event, self.interface.stop_cam,
                                   self.interface.video_length, self.exp_end,
                                   self.motion_list, self.motion_list_lock,
                                   self.egg_count_list, self.egg_count_list_lock)

        # instantiate the updater process
        self.update_process = Updater.Updater(interface.config_file, self.ledMatrix, self.tempSensor,
                                              self.sheet, self.motion_list, self.motion_list_lock,
                                              self.egg_count_list, self.egg_count_list_lock,
                                              self.piCam.max_difference)

        # self.t_motion_queue_check = threading.Thread(name='motion_queue_check', target=self.check_queue)
        Logger.info('Experiment: start time is %s' % self.exp_start.strftime("%H:%M:%S %B %d, %Y"))
        Logger.info('Experiment: end time is %s' % self.exp_end.strftime("%H:%M:%S %B %d, %Y"))
        Logger.info('Experiment: Initialization complete')
Example #26
def main():
    parser = argparse.ArgumentParser(description='WGAN')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=500,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument("--snapshot_interval", "-s", type=int, default=50)
    parser.add_argument("--display_interval", "-d", type=int, default=1)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--dataset", "-ds", type=str, default="mnist")
    parser.add_argument("--n_dimz", "-z", type=int, default=128)
    args = parser.parse_args()

    out = os.path.join(args.out, args.dataset)
    # Networks
    import Network.mnist_net as Network

    gen = Network.DCGANGenerator(n_hidden=args.n_dimz)
    dis = Network.WGANDiscriminator()
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()

    # Optimizers
    opt_gen = chainer.optimizers.RMSprop(5e-5)
    opt_gen.setup(gen)
    opt_gen.add_hook(chainer.optimizer.GradientClipping(1))

    opt_dis = chainer.optimizers.RMSprop(5e-5)
    opt_dis.setup(dis)
    opt_dis.add_hook(chainer.optimizer.GradientClipping(1))
    opt_dis.add_hook(WeightClipping(0.01))

    #Get dataset
    train, _ = mnist.get_mnist(withlabel=True, ndim=3, scale=1.)
    train = [i[0] for i in train if (i[1] == 1)]  # select only label 1

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # Trainer
    import Updater
    updater = Updater.WGANUpdater(models=(gen, dis),
                                  iterator=train_iter,
                                  optimizer={
                                      'gen': opt_gen,
                                      'dis': opt_dis
                                  },
                                  n_dis=5,
                                  device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=out)

    snapshot_interval = (args.epoch, 'epoch')
    display_interval = (args.display_interval, 'epoch')

    # Extensions
    trainer.extend(extensions.dump_graph('wasserstein distance'))
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=(args.epoch, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PlotReport(['wasserstein distance'],
                              'epoch',
                              file_name='distance.png'))
    trainer.extend(
        extensions.PlotReport(['gen/loss'], 'epoch', file_name='loss.png'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'wasserstein distance', 'gen/loss', 'elapsed_time']),
                   trigger=display_interval)
    trainer.extend(extensions.ProgressBar())
    trainer.extend(Visualize.out_generated_image(gen, dis, 10, 10, args.seed,
                                                 args.out, args.dataset),
                   trigger=display_interval)
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    # Run
    trainer.run()
Example #27
def api_versions():
    versions = {
        "latest": updater.GetLatestVersion(),
        "current": config.GetValue("version")
    }
    return Response(json.dumps(versions))
Example #28
import Updater

print "default output"
upd = Updater.New("Ultrabenosaurus/EasyXdcc", "1.2")
print "SIMPLE:\n\t%s" % upd.Simple()
print "NEW-FULL:\n\t" + upd.Main()
print "NEW-SHORT:\n\t" + upd.Main("short")

upd = Updater.New("Ultrabenosaurus/EasyXdcc", "9.9")
print "NO-NEW:\n\t" + upd.Main()

print "\ncustom output"
upd = Updater.New(
    "Ultrabenosaurus/EasyXdcc",
    "1.2",
    new_short="A new version of $repo$ is available.",
    new_full=
    "Please visit https://github.com/$repo$/releases/tag/$latest$ for the latest version."
)
print "NEW-FULL:\n\t" + upd.Main()
print "NEW-SHORT:\n\t" + upd.Main("short")
upd = Updater.New("Ultrabenosaurus/EasyXdcc",
                  "9.9",
                  no_new="Your version $current$ is the newest available.")
print "NO-NEW:\n\t" + upd.Main()
print "SIMPLE:\n\t%s" % upd.Simple()
Example #29
import http.server
import socketserver
import os
import Updater

Updater.main()
PORT = 8200
Handler = http.server.SimpleHTTPRequestHandler


class HTTPRequestHandler(Handler):
    """Extend SimpleHTTPRequestHandler to handle PUT requests"""
    def do_PUT(self):
        """Save a file following a HTTP PUT request"""
        filename = os.path.basename(self.path)

        # Don't overwrite files
        if os.path.exists(filename):
            self.send_response(409, 'Conflict')
            self.end_headers()
            reply_body = '"%s" already exists\n' % filename
            self.wfile.write(reply_body.encode('utf-8'))
            return

        file_length = int(self.headers['Content-Length'])
        with open(filename, 'wb') as output_file:
            output_file.write(self.rfile.read(file_length))
        self.send_response(201, 'Created')
        self.end_headers()
        reply_body = 'Saved "%s"\n' % filename
        self.wfile.write(reply_body.encode('utf-8'))
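
The snippet is cut off before the server starts; a minimal sketch of the missing serving loop, using the PORT and handler class defined above:

# Assumed completion of the truncated snippet: start serving on PORT.
with socketserver.TCPServer(("", PORT), HTTPRequestHandler) as httpd:
    print("Serving on port", PORT)
    httpd.serve_forever()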
Example #30
	install_all_dependencies()

	# get device-name
	device_name = check_device_name()

	# new-os-instance
	cmd_os = Platform.gen_os_instance(device_name, UNZIPPED_DIR)

	# print tips
	cmd_os.gen_install_tips()

	# 2nd: confirm-input
	confirm_input(device_name)

	# create updater
	updater = Updater(DOWNLOADING_MATCH_FILE_KEYWORDS, UPDATE_MISTER_TUPLE)

	# setup and upgrade-download!
	updater.setup_and_upgrade_download(False)

	# query again! must be non-nil
	current_mister_files = check_current_mister_files()

	# create a temporary directory
	try_mk_unzipped_dir()

	# find release-core-file, and prepare unrar
	release_rar_file_name = get_release_rar_file_name(current_mister_files)

	# unrar-file!
	try_unrar_file(release_rar_file_name)
Example #31
import Config
import Validator
import Updater

if __name__ == '__main__':
    """
    AutoUpdater Client
    """
    config = Config.Config()
    validator = Validator.Validator(config.get_checksum_list())
    updater = Updater.Updater(config.get_channel(), validator,
                              config.get_current_version())
    print(updater.has_new_version())
Example #32
def main():
    parser = argparse.ArgumentParser(description="DCGAN")
    parser.add_argument("--batchsize", "-b", type=int, default=128)
    parser.add_argument("--epoch", "-e", type=int, default=100)
    parser.add_argument("--gpu", "-g", type=int, default=0)
    parser.add_argument("--snapshot_interval", "-s", type=int, default=10)
    parser.add_argument("--display_interval", "-d", type=int, default=1)
    parser.add_argument("--n_dimz", "-z", type=int, default=100)
    parser.add_argument("--dataset", "-ds", type=str, default="mnist")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--out", "-o", type=str, default="result")
    parser.add_argument("--resume", '-r', default='')
    args = parser.parse_args()

    #import .py
    import Updater
    import Visualize
    import Network.mnist_net as Network
    #print settings
    print("GPU:{}".format(args.gpu))
    print("epoch:{}".format(args.epoch))
    print("Minibatch_size:{}".format(args.batchsize))
    print("Dataset:{}".format(args.dataset))
    print('')
    out = os.path.join(args.out, args.dataset)
    #Set up NN
    gen = Network.Generator(n_hidden=args.n_dimz)
    dis = Network.Discriminator()
    enc = Network.Encoder(n_hidden=args.n_dimz)

    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()
        enc.to_gpu()  # enc is trained too, so it must also be moved to the GPU
    #Make optimizer
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = optimizers.Adam(alpha=alpha, beta1=beta1) #init_lr = alpha
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001), 'hook_dec')
        return optimizer
    opt_gen = make_optimizer(gen)
    opt_dis = make_optimizer(dis)
    opt_enc = make_optimizer(enc)

    #Get dataset
    train_valid, test = mnist.get_mnist(withlabel=True, ndim=3)
    train, valid = split_dataset_random(train_valid, 50000, seed=0)
    train = [i[0] for i in train if(i[1]==1)] # select only label 1
    #Setup iterator
    train_iter = iterators.SerialIterator(train, args.batchsize)
    #Setup updater
    updater = Updater.DCGANUpdater(
        models=(gen, dis, enc),
        iterator=train_iter,
        optimizer={'gen':opt_gen, 'dis':opt_dis, 'enc':opt_enc},
        device=args.gpu)

    #Setup trainer
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=out)
    snapshot_interval = (args.snapshot_interval, 'epoch')
    display_interval = (args.display_interval, 'epoch')

    trainer.extend(extensions.snapshot_object(
        gen, 'gen_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        enc, 'enc_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(
        trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'gen/loss', 'dis/loss', 'enc/loss', 'elapsed_time'
    ]), trigger=display_interval)
    trainer.extend(extensions.ProgressBar())
    trainer.extend(Visualize.out_generated_image(
        gen, dis, enc,
        10, 10, args.seed, args.out, args.dataset),
        trigger=snapshot_interval)

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()