def run():
	parser = argparse.ArgumentParser()
	# Model configuration
	parser.add_argument("--input_dim", type=int, default=512)
	parser.add_argument("--hidden_dim", type=int, default=128)
	parser.add_argument("--out_dim", type=int, default=128)
	parser.add_argument("--layer_hidden", type=int, default=2)
	parser.add_argument("--layer_output", type=int, default=3)
	parser.add_argument("--skip_connection_type", type=str, default="gsc")
	parser.add_argument("--norm_type", type=str, default="no")
	
	# Training configuration
	parser.add_argument("--batch_train", type=int, default=128)
	parser.add_argument("--batch_test", type=int, default=128)
	parser.add_argument("--lr", type=float, default=1e-4)
	parser.add_argument("--lr_decay", type=float, default=0.99)
	parser.add_argument("--decay_interval", type=int, default=10)
	parser.add_argument("--iteration", type=int, default=100)
	parser.add_argument("--loss_type", type=str, default="FocalLoss")
	parser.add_argument("--load_model_path", help='Write the address if you want to load the model.', type=str, default="")

	# Miscellaneous
	parser.add_argument("--mode", type=str, help='Choose either train or test', default="train")
	parser.add_argument("--dataset", type=str, help='Dataset to use', default="bionsight")
	parser.add_argument("--model_type", type=str, help='Used to create a folder name', default="GNN")
	parser.add_argument("--use_augmentation", help='Augmentation using Smiles Enumeration', type=bool, default=True)
	parser.add_argument("--radius", type=int, help='The number of times each node ID is updated', default=1)
	args = parser.parse_args()

	environment.setup(args)
	# Solver for training and testing MolGNN
	solver = Solver()
	solver.run()
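
# Hypothetical invocation (the script name and flag values are assumptions; the flags come from the parser above):
#   python train_gnn.py --mode train --dataset bionsight --lr 5e-5 --iteration 200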
Example #2
def main(arguments):

    cells = arguments.cells
    speed = get_speed(arguments.speed)
    num_fail = 0

    miner = Miner(row=0, col=0, num_rotate=0, num_move=0)
    grid, pit_xy, beacon_xy, gold_row, gold_col = setup(miner, cells=cells)

    while True:

        grid[miner.row][miner.col] = "-"
        stuck = miner.forward(cells=cells, grid=grid, level=2)

        #        time.sleep(speed)

        display(grid, miner, num_fail)

        if (miner.row == gold_row) and (miner.col == gold_col):
            break
        elif [miner.row, miner.col] in pit_xy or stuck:
            num_fail += 1
            miner = Miner(row=0, col=0, num_rotate=0, num_move=0)
            grid, pit_xy, beacon_xy, gold_row, gold_col = setup(miner,
                                                                cells=cells)
            display(grid, miner, num_fail)
Example #3
def parseArgs():
    args = createParser().parse_args()

    environment.setup(args.lang, args.platform, args.game == 'origins')

    print 'Language-specific content path: ' + environment.contentPath
    print 'Platform specified: ' + environment.platform
    if args.game == 'origins':
        print 'Silent Hill Origins mode specified'

    return args
Example #4
File: utils.py  Project: krast/vitess
options = None
devnull = open('/dev/null', 'w')
hostname = socket.gethostname()

# binlog_player_protocol_flags defines the flags to use for the binlog players.
# A test can overwrite these flags before calling utils.main().
binlog_player_protocol_flags = ['-binlog_player_protocol', 'gorpc']
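# A hedged sketch (not part of the original file): a test module could override the
# protocol before calling utils.main(), for example:
#
#   import utils
#   utils.binlog_player_protocol_flags = ['-binlog_player_protocol', 'grpc']
#   utils.main()
#
# The alternative protocol name ('grpc') is an assumption for illustration only.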

class TestError(Exception):
  pass

class Break(Exception):
  pass

environment.setup()

class LoggingStream(object):
  def __init__(self):
    self.line = ""

  def write(self, value):
    if value == "\n":
      # we already printed it
      self.line = ""
      return
    self.line += value
    logging.info("===== " + self.line)
    if value.endswith("\n"):
      self.line = ""
Example #5
options = None
devnull = open('/dev/null', 'w')
hostname = socket.getaddrinfo(socket.getfqdn(), None, 0, 0, 0,
                              socket.AI_CANONNAME)[0][3]


class TestError(Exception):
    pass


class Break(Exception):
    pass


environment.setup()


class LoggingStream(object):
    def __init__(self):
        self.line = ''

    def write(self, value):
        if value == '\n':
            # we already printed it
            self.line = ''
            return
        self.line += value
        logging.info('===== ' + self.line)
        if value.endswith('\n'):
            self.line = ''
Example #6
  def setUp(self):
    if self.vttop is None:
      raise EnvironmentError("VTTOP not defined")
    if self.vtroot is None:
      raise EnvironmentError("VTROOT not defined")
    try:
      os.makedirs(environment.tmproot)
    except OSError:
      pass

    environment.setup()

    framework.execute('go install', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/vtocc')
    framework.execute('go install', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/mysqlctl')

    # start mysql
    res = subprocess.call([
        self.vtroot+"/bin/mysqlctl",
        "-tablet-uid",  self.tabletuid,
        "-port", str(self.vtoccport),
        "-mysql-port", str(self.mysqlport),
        "init"
        ])
    if res != 0:
      raise EnvironmentError("Cannot start mysql")
    res = subprocess.call([
        "mysql",
        "-S",  self.mysqldir+"/mysql.sock",
        "-u", "vt_dba",
        "-e", "create database vt_test_keyspace ; set global read_only = off"])
    if res != 0:
      raise Exception("Cannot create vt_test_keyspace database")

    self.mysql_conn = self.mysql_connect()
    mcu = self.mysql_conn.cursor()
    self.clean_sqls = []
    self.init_sqls = []
    clean_mode = False
    with open(os.path.join(self.vttop, "test", "test_data", "test_schema.sql")) as f:
      for line in f:
        line = line.rstrip()
        if line == "# clean":
          clean_mode = True
        if line == '' or line.startswith("#"):
          continue
        if clean_mode:
          self.clean_sqls.append(line)
        else:
          self.init_sqls.append(line)
    try:
      for line in self.init_sqls:
        mcu.execute(line, {})
    finally:
      mcu.close()

    customrules = '/tmp/customrules.json'
    self.create_customrules(customrules)
    schema_override = '/tmp/schema_override.json'
    self.create_schema_override(schema_override)

    occ_args = [
      self.vtroot+"/bin/vtocc",
      "-port", "9461",
      "-customrules", customrules,
      "-schema-override", schema_override,
      "-db-config-app-charset", "utf8",
      "-db-config-app-dbname", "vt_test_keyspace",
      "-db-config-app-host", "localhost",
      "-db-config-app-unixsocket", self.mysqldir+"/mysql.sock",
      "-db-config-app-uname", 'vt_dba',   # use vt_dba as some tests depend on 'drop'
      "-db-config-app-keyspace", "test_keyspace",
      "-db-config-app-shard", "0"
    ]
    if self.memcache:
      memcache = self.mysqldir+"/memcache.sock"
      occ_args.extend(["-rowcache-bin", "memcached"])
      occ_args.extend(["-rowcache-socket", memcache])
      occ_args.extend(["-enable-rowcache"])

    if self.sensitive_mode:
      occ_args.extend(['-queryserver-config-sensitive-mode'])

    self.vtstderr = open("/tmp/vtocc_stderr.log", "a+")
    self.vtstdout = open("/tmp/vtocc_stdout.log", "a+")
    self.vtocc = subprocess.Popen(occ_args, stdout=self.vtstdout, stderr=self.vtstderr)
    for i in range(30):
      try:
        self.conn = self.connect()
        self.txlogger = subprocess.Popen(['curl', '-s', '-N', 'http://localhost:9461/debug/txlog'], stdout=open('/tmp/vtocc_txlog.log', 'w'))
        self.txlog = framework.Tailer(open('/tmp/vtocc_txlog.log', 'r'))

        def flush():
          utils.run(['curl', '-s', '-N', 'http://localhost:9461/debug/flushlogs'], trap_output=True)

        self.log = framework.Tailer(open('/tmp/vtocc.INFO'), flush=flush)
        utils.run_bg(['curl', '-s', '-N', 'http://localhost:9461/debug/querylog?full=true'], stdout=open('/tmp/vtocc_streamlog_9461.log', 'w'))
        self.querylog = framework.Tailer(open('/tmp/vtocc_streamlog_9461.log'), sleep=0.1)
        return
      except dbexceptions.OperationalError:
        if i == 29:
          raise
        time.sleep(1)
Example #7
  def setUp(self):
    utils.zk_setup()
    environment.setup()
    if self.vttop is None:
      raise EnvironmentError("VTTOP not defined")
    if self.vtroot is None:
      raise EnvironmentError("VTROOT not defined")

    framework.execute('go install', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/mysqlctl')

    utils.wait_procs([self.tablet.init_mysql()])
    self.tablet.mquery("", ["create database vt_test_keyspace", "set global read_only = off"])

    self.mysql_conn, mcu = self.tablet.connect('vt_test_keyspace')
    self.clean_sqls = []
    self.init_sqls = []
    clean_mode = False
    with open(os.path.join(self.vttop, "test", "test_data", "test_schema.sql")) as f:
      for line in f:
        line = line.rstrip()
        if line == "# clean":
          clean_mode = True
        if line == '' or line.startswith("#"):
          continue
        if clean_mode:
          self.clean_sqls.append(line)
        else:
          self.init_sqls.append(line)
    try:
      for line in self.init_sqls:
        mcu.execute(line, {})
    finally:
      mcu.close()

    utils.run_vtctl('CreateKeyspace -force /zk/global/vt/keyspaces/test_keyspace')
    self.tablet.init_tablet('master', 'test_keyspace', '0')

    customrules = '/tmp/customrules.json'
    self.create_customrules(customrules)
    schema_override = '/tmp/schema_override.json'
    self.create_schema_override(schema_override)
    if self.memcache:
      self.tablet.start_vttablet(memcache=True, customrules=customrules, schema_override=schema_override, sensitive_mode=self.sensitive_mode)
    else:
      self.tablet.start_vttablet(customrules=customrules, schema_override=schema_override, sensitive_mode=self.sensitive_mode)

    # FIXME(szopa): This is necessary here only because of a bug that
    # makes the qs reload its config only after an action.
    utils.run_vtctl('Ping ' + self.tablet.zk_tablet_path)

    for i in range(30):
      try:
        self.conn = self.connect()
        self.txlogger = subprocess.Popen(['curl', '-s', '-N', 'http://localhost:9461/debug/txlog'], stdout=open('/tmp/vtocc_txlog.log', 'w'))
        self.txlog = framework.Tailer(open('/tmp/vtocc_txlog.log'), flush=self.tablet.flush)
        self.log = framework.Tailer(open(os.path.join(environment.vtlogroot, 'vttablet.INFO')), flush=self.tablet.flush)
        querylog_file = '/tmp/vtocc_streamlog_%s.log' % self.tablet.port
        utils.run_bg(['curl', '-s', '-N', 'http://localhost:9461/debug/querylog?full=true'], stdout=open(querylog_file, 'w'))
        time.sleep(1)
        self.querylog = framework.Tailer(open(querylog_file), sleep=0.1)

        return
      except dbexceptions.OperationalError:
        if i == 29:
          raise
        time.sleep(1)
Example #8
def main():
    global epsilon, time_start, total_steps, learning_rate

    # Initialize lists
    costs = []
    rewards = []
    values = []

    memory = []

    # Parse arguments
    parser = build_parser()
    options = parser.parse_args()
    load_parser(options)
    # epsilon and learning rate decrement
    epsilon_decr = (epsilon - epsilon_) / epsilon_steps
    lr_decr = (learning_rate - lr_final) / lr_steps

    tf.set_random_seed(1)
    seed = 0

    # Initialize TF variables and graph
    X, Y = create_placeholders(state_dim, action_dim)

    param_online, param_target = initialize_parameters()

    onlineNN, targetNN = forward_propagation(X, param_online, param_target)

    cost = compute_cost(onlineNN, Y)

    # Target network updater operation
    target_update_op = update_target_weights(param_online, param_target)

    # Backpropagation
    # optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
    # With clipping gradients: https://stackoverflow.com/a/43486487/5731130
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    gradients, variables = zip(*optimizer.compute_gradients(cost))
    gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
    optimize = optimizer.apply_gradients(zip(gradients, variables))
    # visualize tf.global_norm(gradients) to see the usual range, then choose the clipping threshold (initial guess 2.0)
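    # Sketch (not in the original): tf.clip_by_global_norm already returns the pre-clip global
    # norm as its second value, so it could be kept and fetched during training to pick the
    # threshold, e.g.
    #   gradients, grad_norm = tf.clip_by_global_norm(gradients, 1.0)
    #   _, batch_cost, norm_val = sess.run([optimize, cost, grad_norm], feed_dict={X: batch_X, Y: batch_Y})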

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    ### MAIN PROGRAM
    config = tf.ConfigProto(device_count={'GPU': 0})  # seems to be faster on CPU
    with tf.Session(config=config) as sess:

        # Run the initialization
        if load_model:
            print('Restoring model ...')
            ckpt = tf.train.get_checkpoint_state('dqn/')
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            sess.run(init)

        # Initialize environment and clock
        env.setup()
        time_start = time.clock()

        # Perform the training loop
        for e in range(num_episodes):

            # Setup new episode (initialize environment)
            env.setup()
            # Initial state
            s = env.get_state()

            t = 0
            rewEp = 0
            valueEp = 0
            costEp = 0

            if e % 50 == 0:
                print(e, ' episode starting')

            # The environment re-initializes automatically when this method returns True.
            while not env.race_finished():
                #print(t)
                ### SEQUENCE / SIMULATION STEP --- START ###
                # Create current sequence
                seq = [s]

                # Simulate and obtain info from the environment
                Q = onlineNN.eval(feed_dict={X: [s]})

                # Pick action, epsilon-greedy
                if np.random.random() > epsilon:  # greedy policy
                    a = np.argmax(Q)
                elif on_policy:  # hard-coded policy
                    a = env.get_next_action()
                else:  # random policy
                    a = np.random.randint(action_dim)

                # Get reward and new state after performing action 'a', updating simulation
                r, s = env.action(a)

                # Save rest of sequence
                seq.extend([a, r, s])

                # Bounded replay memory: drop the oldest transition once full.
                if len(memory) >= memo_max:
                    memory.pop(0)
                memory.append(seq)
                ### SEQUENCE / SIMULATION STEP --- END ###
                """ LEARN ON NEURAL NETWORK WITH DDQN """
                # When there's enough memory and every 4 iterations
                # Learn only once enough transitions are stored (memo_min should be kept
                # proportional to batch_size) and only every 4th timestep.
                if len(memory) >= memo_min and t % 4 == 0:

                    # Creation of minibatch used for learning
                    replays = np.array(random.sample(memory, batch_size))

                    state = np.vstack(np.array(replays[:, 0]))  # shape (batch_size, state_dim)
                    action = np.vstack(np.array(replays[:, 1]))
                    reward = np.vstack(np.array(replays[:, 2]))
                    state_ = np.vstack(np.array(replays[:, 3]))
                    """ DOUBLE DQN ALGORITHM """
                    # calculate y_DoubleQ (y_Q)
                    a_max = np.argmax(onlineNN.eval(feed_dict={X: state_}),
                                      axis=1)

                    # y_Q = reward + gamma * Q(S',target_network)[a_max_Q(S',online_network)]
                    y_Q = reward.squeeze() + gamma * targetNN.eval(
                        feed_dict={X: state_})[np.arange(batch_size), a_max]

                    # This is the evaluation of current online neural network of the state (for all actions)
                    Q_eval = onlineNN.eval(feed_dict={X: state})
                    # Value to which the network is going to aim at its output for the action taken
                    Q_update = y_Q - Q_eval[np.arange(batch_size),
                                            action.squeeze()]
                    # The full output vector, with only the index of the action taken updated
                    Q_eval[np.arange(batch_size),
                           action.squeeze()] += alpha * Q_update

                    # Create the minibatch used for learning
                    batch_X = state
                    batch_Y = Q_eval
                    """ UPDATE ONLINE NEURAL NETWORK """
                    # Train on minibatch generated
                    _, batch_cost = sess.run([optimize, cost],
                                             feed_dict={
                                                 X: batch_X,
                                                 Y: batch_Y
                                             })

                    costEp += batch_cost / batch_size
                    """ UPDATE TARGET NEURAL NETWORK """
                    sess.run(target_update_op)

                # END OF TIMESTEP
                t += 1
                total_steps += 1
                rewEp += r
                valueEp += np.mean(Q)

                # Hyperparameters update
                if len(memory) >= memo_min and epsilon > epsilon_:
                    epsilon -= epsilon_decr
                if total_steps > lr_start and learning_rate > lr_final:
                    learning_rate -= lr_decr

                # Print progress
                if e % 50 == 0 and t % 10 == 0:
                    print('=', end='', flush=True)

            ### END OF EPISODE (the for loop moves on to the next e after this block)
            if e % 50 == 0:
                print('> ' + str(t) + ' steps')
                print('%i timesteps in episode %i' % (t, e))

            costs.append(costEp / t * 4)
            rewards.append(rewEp / t)
            values.append(valueEp / t)

            np.random.seed(seed)
            seed += 1

            # Show metrics every few episodes
            if print_metrics and e % 100 == 0:
                #print ("Cost after episode %i: %f" % (e, costs[e]))
                print("Reward after episode %i: %f" % (e, rewards[e]))
                print("Value after episode %i: %f" % (e, values[e]))
                #print ("-- memory length ", len(memory))
                # And plots
                plotter(costs, rewards, values)

            if animations and e % 1000 == 0:
                acts = np.array(memory)[len(memory) - t:len(memory) - 1, 1]
                env.save_animation(acts.tolist(), e)

            # Save model
            if save_model and e > 0 and e % 2000 == 0:
                print('Saving model ...')
                saver.save(sess, path_model + 'model' + str(e) + '.ckpt')

        # END OF PROGRAM
        # plot metrics, save model
        if save_model:
            print('Saving model ...')
            saver.save(sess, path_model + 'model' + str(e) + '.ckpt')
        if animations:
            acts = np.array(memory)[len(memory) - t:len(memory) - 1, 1]
            env.save_animation(acts.tolist())

        plotter(costs, rewards, values, final=True)
Example #9
  def setUp(self):
    if self.vttop is None:
      raise EnvironmentError("VTTOP not defined")
    if self.vtroot is None:
      raise EnvironmentError("VTROOT not defined")
    try:
      os.makedirs(environment.tmproot)
    except OSError:
      pass

    environment.setup()

    framework.execute('go install', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/vtocc')
    framework.execute('go install', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/mysqlctl')

    # start mysql
    res = subprocess.call([
        self.vtroot+"/bin/mysqlctl",
        "-tablet-uid",  self.tabletuid,
        "-port", str(self.vtoccport),
        "-mysql-port", str(self.mysqlport),
        "init"
        ])
    if res != 0:
      raise EnvironmentError("Cannot start mysql")
    res = subprocess.call([
        "mysql",
        "-S",  self.mysqldir+"/mysql.sock",
        "-u", "vt_dba",
        "-e", "create database vt_test_keyspace ; set global read_only = off"])
    if res != 0:
      raise Exception("Cannot create vt_test_keyspace database")

    self.mysql_conn = self.mysql_connect()
    mcu = self.mysql_conn.cursor()
    self.clean_sqls = []
    self.init_sqls = []
    clean_mode = False
    with open(os.path.join(self.vttop, "test", "test_data", "test_schema.sql")) as f:
      for line in f:
        line = line.rstrip()
        if line == "# clean":
          clean_mode = True
        if line == '' or line.startswith("#"):
          continue
        if clean_mode:
          self.clean_sqls.append(line)
        else:
          self.init_sqls.append(line)
    try:
      for line in self.init_sqls:
        mcu.execute(line, {})
    finally:
      mcu.close()

    customrules = '/tmp/customrules.json'
    self.create_customrules(customrules)
    schema_override = '/tmp/schema_override.json'
    self.create_schema_override(schema_override)

    occ_args = [
      self.vtroot+"/bin/vtocc",
      "-port", "9461",
      "-customrules", customrules,
      "-schema-override", schema_override,
      "-db-config-app-charset", "utf8",
      "-db-config-app-dbname", "vt_test_keyspace",
      "-db-config-app-host", "localhost",
      "-db-config-app-unixsocket", self.mysqldir+"/mysql.sock",
      "-db-config-app-uname", 'vt_dba',   # use vt_dba as some tests depend on 'drop'
      "-db-config-app-keyspace", "test_keyspace",
      "-db-config-app-shard", "0"
    ]
    if utils.options.memcache:
      memcache = self.mysqldir+"/memcache.sock"
      occ_args.extend(["-rowcache-bin", "memcached"])
      occ_args.extend(["-rowcache-socket", memcache])
      occ_args.extend(["-enable-rowcache"])

    self.vtstderr = open("/tmp/vtocc_stderr.log", "a+")
    self.vtstdout = open("/tmp/vtocc_stdout.log", "a+")
    self.vtocc = subprocess.Popen(occ_args, stdout=self.vtstdout, stderr=self.vtstderr)
    for i in range(30):
      try:
        self.conn = self.connect()
        self.txlogger = subprocess.Popen(['curl', '-s', '-N', 'http://localhost:9461/debug/txlog'], stdout=open('/tmp/vtocc_txlog.log', 'w'))
        self.txlog = framework.Tailer(open('/tmp/vtocc_txlog.log', 'r'))

        def flush():
          utils.run(['curl', '-s', '-N', 'http://localhost:9461/debug/flushlogs'], trap_output=True)

        self.log = framework.Tailer(open('/tmp/vtocc.INFO'), flush=flush)
        utils.run_bg(['curl', '-s', '-N', 'http://localhost:9461/debug/querylog?full=true'], stdout=open('/tmp/vtocc_streamlog_9461.log', 'w'))
        self.querylog = framework.Tailer(open('/tmp/vtocc_streamlog_9461.log'), sleep=0.1)
        return
      except dbexceptions.OperationalError:
        if i == 29:
          raise
        time.sleep(1)
Example #10
  def setUp(self):
    utils.zk_setup()
    environment.setup()
    if self.vttop is None:
      raise EnvironmentError("VTTOP not defined")
    if self.vtroot is None:
      raise EnvironmentError("VTROOT not defined")

    framework.execute('go install', verbose=utils.options.verbose, cwd=self.vttop+'/go/cmd/mysqlctl')

    utils.wait_procs([self.tablet.init_mysql()])
    self.tablet.mquery("", ["create database vt_test_keyspace", "set global read_only = off"])

    self.mysql_conn, mcu = self.tablet.connect('vt_test_keyspace')
    self.clean_sqls = []
    self.init_sqls = []
    clean_mode = False
    with open(os.path.join(self.vttop, "test", "test_data", "test_schema.sql")) as f:
      for line in f:
        line = line.rstrip()
        if line == "# clean":
          clean_mode = True
        if line == '' or line.startswith("#"):
          continue
        if clean_mode:
          self.clean_sqls.append(line)
        else:
          self.init_sqls.append(line)
    try:
      for line in self.init_sqls:
        mcu.execute(line, {})
    finally:
      mcu.close()

    utils.run_vtctl('CreateKeyspace -force /zk/global/vt/keyspaces/test_keyspace')
    self.tablet.init_tablet('master', 'test_keyspace', '0')

    customrules = '/tmp/customrules.json'
    self.create_customrules(customrules)
    schema_override = '/tmp/schema_override.json'
    self.create_schema_override(schema_override)
    if utils.options.memcache:
      self.tablet.start_vttablet(memcache=True, customrules=customrules, schema_override=schema_override)
    else:
      self.tablet.start_vttablet(customrules=customrules, schema_override=schema_override)

    # FIXME(szopa): This is necessary here only because of a bug that
    # makes the qs reload its config only after an action.
    utils.run_vtctl('Ping ' + self.tablet.zk_tablet_path)

    for i in range(30):
      try:
        self.conn = self.connect()
        self.txlogger = subprocess.Popen(['curl', '-s', '-N', 'http://localhost:9461/debug/txlog'], stdout=open('/tmp/vtocc_txlog.log', 'w'))
        self.txlog = framework.Tailer(open('/tmp/vtocc_txlog.log'), flush=self.tablet.flush)
        self.log = framework.Tailer(open(os.path.join(environment.vtlogroot, 'vttablet.INFO')), flush=self.tablet.flush)
        querylog_file = '/tmp/vtocc_streamlog_%s.log' % self.tablet.port
        utils.run_bg(['curl', '-s', '-N', 'http://localhost:9461/debug/querylog?full=true'], stdout=open(querylog_file, 'w'))
        time.sleep(1)
        self.querylog = framework.Tailer(open(querylog_file), sleep=0.1)

        return
      except dbexceptions.OperationalError:
        if i == 29:
          raise
        time.sleep(1)
Example #11
File: main.py  Project: razeayres/sleepy
import environment, variables, processing
from os import path
from tools import counter

workfolder = r'D:\My_files\Meus_artigos\Em_andamento\Artigo_de_solos_2019\Projetos\EMBRAPA'
DEM = path.join(workfolder, 'dem.tif')
soil_obs = path.join(workfolder, 'OBS_SOIL.shp')
basins = path.join(workfolder, 'watersheds.tif')

e = environment.setup(workfolder, DEM, soil_obs, basins)
i = variables.initialize(e)
c = counter.count(DEM)
d = 0
while True:
    q = raw_input("Which one do you prefer to use? (0=relative; 1=absolute; 2=cancel): ")
    if q == "0":
        px = int((float(raw_input("Enter the percentage of data to use (%): "))/100) * c.data)
    elif q == "1":
        px = int(raw_input("Enter the number of data pixels to use: "))
    elif q == "2":
        px = 0
        break
    if q in ["0", "1"]:
        print "Using %s pixels to process..." % (px)
        d = raw_input("Is that correct? (0=NO; 1=YES; 2=cancel): ")
        if d == "1":
            break
        elif d == "2":
            px = 0
            break
if px > 0: