コード例 #1
0
def main():
    """Entry point: run the resolver when CLI arguments were given.

    With no arguments beyond the program name, print the parser's help
    text to stderr and exit instead.
    """
    arg_parser, parsed = parseArgs()

    # Guard clause: nothing on the command line -> show usage and stop.
    if len(sys.argv) <= 1:
        arg_parser.print_help(sys.stderr)
        sys.exit()

    resolver(parsed.input, parsed.output, parsed.justificado, parsed.guion)
コード例 #2
0
def communism(bot, update):
    """Handle /communism: start a cost-sharing round for the sender.

    Usage: /communism <amount> [reason ...]

    Refuses to start a second round while the sender already has one in
    progress; otherwise registers a new Communism keyed by the sender's
    user id and posts its interactive message.
    """
    amount, reason = parseArgs(update.message, [ARG_AMOUNT, ARG_REST],
                               [None, ""],
                               "\nUsage: /communism <amount> [reason ...]")

    sender = getOrCreateUser(update.message.from_user)
    # Renamed from ``id`` to avoid shadowing the builtin.
    user_id = str(sender['id'])

    if user_id in communisms:
        update.message.reply_text("You already have a communism in progress")
        return

    # Renamed from ``communism`` so the local no longer shadows this
    # handler function itself.
    entry = Communism(sender, amount, reason)
    entry.message = update.message.reply_text(
        str(entry), reply_markup=entry.message_markup)
    communisms[user_id] = entry
コード例 #3
0
ファイル: pay.py プロジェクト: M4GNV5/MateBot
def pay(bot, update):
    """Handle /pay: start a payment approval round for the sender.

    Usage: /pay <amount> [reason ...]

    Refuses to start a second pay while the sender already has one in
    progress; otherwise registers a new Pay keyed by the sender's user
    id and posts its interactive message.
    """
    amount, reason = parseArgs(update.message, [ARG_AMOUNT, ARG_REST],
                               [None, ""],
                               "\nUsage: /pay <amount> [reason ...]")

    sender = getOrCreateUser(update.message.from_user)
    # Renamed from ``id`` to avoid shadowing the builtin.
    user_id = str(sender['id'])

    if user_id in pays:
        update.message.reply_text("You already have a pay in progress")
        return

    # Renamed from ``pay`` so the local no longer shadows this handler
    # function itself.
    entry = Pay(sender, amount, reason)
    entry.message = update.message.reply_text(str(entry),
                                              reply_markup=entry.message_markup)
    pays[user_id] = entry
コード例 #4
0
ファイル: send.py プロジェクト: dennisdenk/MateBot
def send(bot, update):
    """Handle /send: transfer money from the sender to another user.

    Usage: /send <amount> <user>

    Records two mirrored transactions (debit for the sender, credit for
    the receiver) and confirms the transfer with the amount in euros.
    """
    # Unpack the parsed arguments directly, consistent with the other
    # command handlers, instead of indexing args[0]/args[1] afterwards.
    amount, receiver = parseArgs(update.message, [ARG_AMOUNT, ARG_USER],
                                 [None, None],
                                 "\nUsage: /send <amount> <user>")

    sender = getOrCreateUser(update.message.from_user)

    if sender == receiver:
        update.message.reply_text("You cannot send money to yourself")
        return

    createTransaction(sender, -amount, "sent to {}".format(receiver['name']))
    createTransaction(receiver, amount,
                      "received from {}".format(sender['name']))
    # Amounts are stored in cents; divide by 100 for display.
    update.message.reply_text("OK, you sent {}€ to {}".format(
        amount / float(100), receiver['name']))
コード例 #5
0
ファイル: consume.py プロジェクト: CrsiX/MateBot_old
def getAmountHelper(msg, name) -> int:
    """Parse and validate the consumption amount for a /<name> command.

    Returns the amount (must be between 1 and 10 inclusive) on success,
    or 0 after replying to *msg* with an error message.
    """
    parsed = parseArgs(msg, [ARG_INT], [1], "/{} [amount]".format(name))
    amount = parsed[0]

    # Guard clauses replace the original nested if/elif ladder.
    if not isinstance(amount, int):
        msg.reply_text("Unknown parsing error")
        return 0
    if amount == 0:
        msg.reply_text("You can't consume zero {}s".format(name))
        return 0
    if amount < 0:
        msg.reply_text(
            "You can't consume a negative number of {}s".format(name))
        return 0
    if amount > 10:
        msg.reply_text(
            "You can't consume more than 10 {}s at once!".format(name))
        return 0
    return amount
コード例 #6
0
ファイル: history.py プロジェクト: M4GNV5/MateBot
def history(bot, update):
    """Handle /history: show the user's recent transactions.

    Usage: /history [offset = 0] [count = 10]

    Reads transactions.log (one JSON object per line), keeps entries
    belonging to the requesting user newest-first, and replies with a
    formatted slice selected by offset/count.
    """
    offset, count = parseArgs(update.message, [ARG_INT, ARG_INT], [0, 10],
                              "\nUsage: /history [offset = 0] [count = 10]")

    user = getOrCreateUser(update.message.from_user)

    # Append then reverse once: O(n) instead of the original repeated
    # insert(0, ...), which was O(n^2). Streaming the file also avoids
    # materializing it with readlines().
    entries = []
    with open("transactions.log", "r") as fd:
        for line in fd:
            entry = json.loads(line)
            if entry['user'] == user['id']:
                entries.append(entry)
    entries.reverse()

    texts = []
    for entry in entries[offset:offset + count]:
        # ``stamp`` instead of ``time`` to avoid shadowing the stdlib
        # module name.
        stamp = datetime.datetime.fromtimestamp(
            entry['timestamp']).strftime("%Y-%m-%d %H:%M")
        # ``diff`` is stored in cents; divide by 100 for euro display.
        texts.append("{} {:.2f}€ {}".format(stamp, entry['diff'] / float(100),
                                            entry['reason']))

    msg = "Transaction history for {}\n{}".format(user['name'],
                                                  "\n".join(texts))
    update.message.reply_text(msg, disable_notification=True)
コード例 #7
0
ファイル: length_filter.py プロジェクト: el-mat/ectools
from args import parseArgs, getHelpStr, CLArgument


# Usage text printed when too few command-line arguments are supplied.
description = (
    "Usage: length_filter.py [options] min_length(int) in1.{fa.fq} [in2.{fa,fq} ...]\n\n"
    "Filter reads by their lengths"
)

# Each spec: name, flag, type, default, help text.
argument_list = [["maxlen", "maxlen", int, -1, "Maximum length of reads"]]

arguments = [CLArgument._make(spec) for spec in argument_list]

# Need at least the minimum length plus one input file.
if len(sys.argv) <= 2:
    sys.exit(getHelpStr(description, arguments) + "\n")

(p_arg_map, args_remaining) = parseArgs(sys.argv[1:], arguments)


minlen = int(args_remaining[0])

files = args_remaining[1:]

if not all(os.path.exists(f) for f in files):
    sys.exit("Not all files exist")

file_readers = starmap(fileIterator,
                       zip(files, map(iteratorFromExtension, files)))

# Record filter predicate: keep reads strictly longer than minlen and,
# when a maximum was supplied, no longer than maxlen.
# NOTE(review): the guard compares against 1, not 0, so a maxlen of
# exactly 1 is treated as "no maximum" -- confirm this is intentional.
if p_arg_map["maxlen"] > 1:
    def filt_cond(record):
        return minlen < seqlen(record) <= p_arg_map["maxlen"]
else:
    def filt_cond(record):
        return seqlen(record) > minlen
コード例 #8
0
ファイル: graphCellStats.py プロジェクト: yqwu1983/ectools
],
                 [
                     "counts", "counts", argflag, False,
                     ("Graph counts instead of bases ie. number of reads")
                 ],
                 [
                     "out", "out", str, "cellstats.pdf",
                     ("Output filename. Default: 'cellstats.pdf'")
                 ]]

# Build CLArgument records from the raw argument_list spec above.
# NOTE(review): under Python 3 ``map`` returns a one-shot iterator and
# ``arguments`` is reused by getHelpStr and parseArgs below; this script
# presumably targets Python 2, where map returns a list -- confirm.
arguments = map(CLArgument._make, argument_list)

# Require at least one command-line argument, otherwise print usage.
if not len(sys.argv) > 1:
    sys.exit(getHelpStr(description, arguments) + "\n")

(p_arg_map, args_remaining) = parseArgs(sys.argv[1:], arguments)

# After option parsing we still need the positional title argument.
if not len(args_remaining) >= 1:
    sys.exit(getHelpStr(description, arguments) + "\n")

# First positional: plot title; the rest are input stat files.
title = args_remaining[0]
infiles = args_remaining[1:]

# Cell name per file: first two "_"-separated tokens of the filename
# with its extension stripped.
cellnames = map(lambda f: "_".join(f.split(".")[0].split("_")[:2]), infiles)

# Lazily iterate each input file item-by-item.
fit_gen = lambda filename: fileIterator(filename, lineItemIterator)
file_iterators = map(fit_gen, infiles)


def getBasesFromLineArr(arr):
    if not bool(arr):
コード例 #9
0
ファイル: AutoAnim.py プロジェクト: SamuelEllertson/AutoAnim
def getArgs():
    """Build, parse, and post-process AutoAnim's command-line arguments.

    Declares the full CLI (script/psd/output paths, image cache handling,
    video settings, texture options), parses it via parseArgs, then:

    * converts ``texture_dimensions`` from "WxH[,WxH...]" into a list of
      (width, height) int tuples;
    * validates that ``speed_multiplier`` is positive;
    * defaults ``output_path`` to ./output/out.png (texture mode) or
      ./output/out.avi (video mode) when not supplied.

    Returns:
        The parsed argument namespace.

    Raises:
        ArgumentTypeError: on malformed texture dimensions or a
            non-positive speed multiplier.
    """
    parserSetup = {
        "description": "Create animations programatically from a psd file.",
        "args": [
            {
                "flags": ["-s", "--script-path"],
                "options": {
                    "type": Path,
                    "required": True,
                    "metavar": "Path",
                    "dest": "script_path",
                    "help": "Path to animation script file",
                },
            },
            {
                "flags": ["-o", "--output-path"],
                "options": {
                    "default": None,
                    "type": Path,
                    "metavar": "Path",
                    "dest": "output_path",
                    "help": "Animation output path (default: output.avi for video, output.png for textures)",
                },
            },
            {
                "flags": ["-p", "--psd-path"],
                "options": {
                    "default": None,
                    "type": Path,
                    "metavar": "Path",
                    "dest": "psd_path",
                    "help": "Path to psd file that will be used to generate images",
                },
            },
            {
                "flags": ["-i", "--image-cache"],
                "options": {
                    "default": Path("./image_cache"),
                    "type": Path,
                    "metavar": "Path",
                    "dest": "directory",
                    "help": "Path to the image cache (defaults to ./image_cache)",
                },
            },
            {
                "flags": ["--clear-cache"],
                "options": {
                    "dest": "clear_cache",
                    "action": "store_true",
                    "help": "Clears the image cache",
                },
            },
            {
                "flags": ["--fps"],
                "options": {
                    "dest": "fps",
                    "type": int,
                    "default": 24,
                    "metavar": "int",
                    "help": "Sets the output frames per second (default: 24)",
                },
            },
            {
                "flags": ["--speed"],
                "options": {
                    "dest": "speed_multiplier",
                    "type": float,
                    "default": 1.0,
                    "metavar": "float",
                    "help": "Set the speed multiplier for the output (default: 1.0)",
                },
            },
            {
                "flags": ["--codec"],
                "options": {
                    "dest": "codec",
                    "type": str,
                    "default": "mp4v",
                    "metavar": "codec",
                    "help": "Sets the codec used when generating output video (default: mp4v)",
                },
            },
            {
                "flags": ["--dont-store"],
                "options": {
                    "dest": "store_new",
                    "action": "store_false",
                    "help": "If set newly generated images will not be stored to the image directory",
                },
            },
            {
                "flags": ["-v", "--verbose"],
                "options": {
                    "dest": "verbose",
                    "action": "store_true",
                    "help": "Enable verbose output",
                },
            },
            {
                "flags": ["--print-states"],
                "options": {
                    "dest": "print_states",
                    "action": "store_true",
                    "help": "Print the full list of parsed states",
                },
            },
            {
                "flags": ["--no-output"],
                "options": {
                    "dest": "no_output",
                    "action": "store_true",
                    "help": "Script will be parsed, but no output will be created",
                },
            },
            {
                "flags": ["-t", "--create-texture"],
                "options": {
                    "dest": "create_texture",
                    "action": "store_true",
                    "help": "Instead of a video file, output animation as a tiled texture image",
                },
            },
            {
                "flags": ["--texture-layout"],
                "options": {
                    "dest": "texture_layout",
                    "default": "square",
                    "choices": ("square", "horizontal", "vertical"),
                    "help": "Sets texture image layout (default: square)",
                },
            },
            {
                "flags": ["--texture-dimensions"],
                "options": {
                    "dest": "texture_dimensions",
                    "metavar": "dimension list",
                    "default": None,
                    "help": "Takes a list of one or more widthXheight pairs for the output dimensions of the texture. Defaults to full scale. Example: --texture-dimensions 1280x800,1080x720",
                },
            },
            {
                "flags": ["--force-vector"],
                "options": {
                    "dest": "force_vector",
                    "action": "store_true",
                    "help": "enables force vector mode in psd-tools compositing. Can solve some rending problems, but is very slow.",
                },
            },
        ],
    }

    args = parseArgs(parserSetup)

    # Texture dimension validation and extraction: "1280x800,1080x720"
    # becomes [(1280, 800), (1080, 720)].
    if args.texture_dimensions is not None:
        try:
            dims = []
            for pair in args.texture_dimensions.split(","):
                width, _, height = pair.strip().lower().partition("x")
                dims.append((int(width), int(height)))
            args.texture_dimensions = dims
        except Exception as exc:
            # BUG FIX: the exception was previously constructed but never
            # raised, so invalid dimension strings were silently accepted.
            raise ArgumentTypeError("Invalid texture dimensions") from exc

    # Speed multiplier validation.
    if args.speed_multiplier <= 0:
        raise ArgumentTypeError("speed-multiplier must be a positive value")

    # Default output path depends on whether we emit a texture or a video.
    if args.output_path is None:
        args.output_path = (Path("./output/out.png") if args.create_texture
                            else Path("./output/out.avi"))

    return args
コード例 #10
0
        # UPDATE LEARNER
        if 'observeTransition' in dir(agent):
            agent.observeTransition(environment,state, action, reward)


        returns += reward * totalDiscount
        gamma = agent.getGamma(agent,state)
        totalDiscount *= gamma
        environment.state = nextState


######################################################
#### This runs q-learning with specified arguments####
######################################################

# Parse CLI parameters: 'm' selects the dataset ('old' or 'young');
# 'e', 'a', 'd' are agent hyperparameters (presumably epsilon, alpha,
# and discount -- confirm against args.parseArgs).
parameters = args.parseArgs(sys.argv[1:])
# NOTE(review): if 'm' is neither 'old' nor 'young', ``data`` is never
# bound and the animalMDP(data) call below raises NameError.
if parameters['m'] == 'old':
    data = oldData()
if parameters['m'] == 'young':
    data = youngData()
environment = animalMDP.animalMDP(data)
rat = agent.ratAgent(environment,parameters['e'],parameters['a'],parameters['d'])
# Initial MDP state; the positional field meanings are not visible in
# this file -- see MDP.State for their definitions.
startState= MDP.State('f2',1,0,0,None,0)
iterations = 100



with open('decisions.txt','w') as fi:
    fi.write('0,')
    if iterations > 0:
        print
コード例 #11
0
ファイル: rallycli.py プロジェクト: duffj/rallycli.py
#!/usr/bin/env python
"""
Rally client.
"""

import sys
import os
from config import RallyCliConfig
from client import RallyClient
import args

# Flag-style options given on the command line ("--..."), collected at
# import time.
options = [opt for opt in sys.argv[1:] if opt.startswith('--')]

if __name__ == "__main__":
    # Keep the parse result under its own name so the ``args`` module is
    # not shadowed by its own return value (the original rebound it).
    cli_args = args.parseArgs()
    config = RallyCliConfig(cli_args)
    client = RallyClient(config)

    # Dispatch to the client method named by the sub-command; a plain
    # call replaces the redundant explicit ``.__call__``.
    getattr(client, cli_args.command.lower())(cli_args)
コード例 #12
0
ファイル: lengthhist.py プロジェクト: yqwu1983/ectools
from nucio import lineRecordIterator, M4Record, M4RecordTypes, lineItemIterator
from args import arglist, parseArgs, getHelpStr, CLArgument, argflag

# Usage text printed via getHelpStr when too few positionals are given.
description = ("lengthhist.py [options] title alginments.m4 [alignments2.m4...]\n"
               "Plot Make histograms from alignment (nucmer show-coords) "
               "files")

# Each spec: name, flag, type, default, help text.
argument_list = [["labels","labels",arglist,["Ectools","PacbioToCA"], "Labels for each dataset"],
                 ["ymax", "ymax", int, -1, "Maximum Y-value"],
                 ["lenfiles", "lenfiles", int, -1, ("Input files are just a column of lengths, "
                                                    "argument specifies the column")]]

# NOTE(review): under Python 3 ``map`` returns a one-shot iterator that
# parseArgs would consume before the getHelpStr call below; this script
# presumably targets Python 2, where map returns a list -- confirm.
args = map(CLArgument._make, argument_list)

(parg_map, args_remaining) = parseArgs(sys.argv[1:],args)

# NOTE(review): requires >= 3 positionals (title + two files) although
# the usage text suggests a single alignment file suffices -- confirm.
if not len(args_remaining) >= 3:
    sys.exit(getHelpStr(description,args) + "\n")

# First positional is the plot title; remaining ones are input files.
title = args_remaining[0]
# Extracts the query sequence length from each M4 alignment record.
igetter = attrgetter("qseqlength")

flens = []
for fname in args_remaining[1:]:
    with open(fname) as fh:
        if parg_map["lenfiles"] < 0:
            records = lineRecordIterator(fh, M4Record, M4RecordTypes)
            flens.append( map(igetter, records) )
        else:
            getter = itemgetter(parg_map["lenfiles"])
コード例 #13
0
      # preprocessing
      cropped_image = tf.random_crop(raw_image, [args.img_crop_height, args.img_crop_width, args.img_depth], name='cropped_image')
      image = tf.image.resize_images(cropped_image, [args.output_height, args.output_width])

      # generate batches
      image_batch = tf.train.shuffle_batch([image], args.batch_size, capacity=4 * args.batch_size, 
                                           min_after_dequeue=args.batch_size, num_threads=args.n_threads, 
                                           name='image_batch')
      return tf.cast(image_batch, tf.float32)


if __name__ == '__main__':
  import traceback
  import sys
  from args import parseArgs
  args = parseArgs()
  image_batch = None
  label_batch = None

  if args.dataset == 'mnist':
    image_batch, label_batch = loadData(args)
  else:
    image_batch = loadData(args)

  with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      print 'Trying to load 100 data batches'
      for i in xrange(100):