Example #1
    def Export(self,
               filename,
               color=True,
               page=0,
               dpi=100,
               antialias=True,
               quality=85,
               backcolor='#ffffff00',
               pdfdpi=150,
               svgtextastext=False):
        """Export plot to filename.

        color is True if color output is requested, False for monochrome
        page is the page number to export
        dpi is the number of dots per inch for bitmap output files
        antialias antialiases the output if True
        quality is the quality parameter for jpeg output
        backcolor is the background color for bitmap files, given as a color
         name or a #RRGGBBAA value (red, green, blue, alpha)
        pdfdpi is the dpi to use when exporting eps or pdf files
        svgtextastext: write text in SVG as text, rather than curves
        """

        e = export.Export(self.document,
                          filename,
                          page,
                          color=color,
                          bitmapdpi=dpi,
                          antialias=antialias,
                          quality=quality,
                          backcolor=backcolor,
                          pdfdpi=pdfdpi,
                          svgtextastext=svgtextastext)
        e.export()
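
A usage sketch of this method; `cmd` below is a stand-in for whatever object defines Export(), which the excerpt does not show:

cmd.Export('figure.pdf', color=True, page=0, pdfdpi=300)
cmd.Export('figure.png', dpi=200, antialias=True, backcolor='#ffffff00')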
Example #2
def admin_export_state_handler(update: Update,
                               context: CallbackContext) -> int:
    message = helpers.get_message(update)
    user = message.from_user
    logger.info(f'@{user.username} exported survey results')

    export_dir_path = export.Export().export_to_html(
        user.id, should_remove_original_files=False)
    export_dir_path = os.path.normpath(export_dir_path)
    archive_file_name = os.path.split(export_dir_path)[-1]
    zip_file = shutil.make_archive(
        archive_file_name,
        'zip',
        root_dir=export_dir_path,
        base_dir='.',
    )

    reply_keyboard = [['Вернуться в главное меню админки']]  # "Back to the admin main menu"
    keyboard_markup = ReplyKeyboardMarkup(reply_keyboard,
                                          one_time_keyboard=True)
    with open(zip_file, 'rb') as f:
        message.reply_document(f,
                               reply_markup=keyboard_markup,
                               reply_to_message_id=message.message_id)

    os.remove(zip_file)
    return states.ADMIN_EXPORT_RESULT_STATE
Example #3
    def test_save_file(self, mocker):
        exportInstance = export.Export(self.prog)
        exportStub = stubExport()
        path = ('/home/cameron/test.csv', 'null')

        mocker.spy(exportInstance, 'to_csv')
        mocker.spy(exportInstance, 'to_json')
        mocker.spy(exportInstance, 'to_xml')

        exportInstance.save_file(path, exportStub.data, 'CSV')
        exportInstance.save_file(path, exportStub.data, 'JSON')
        exportInstance.save_file(path, exportStub.data, 'XML')

        assert exportInstance.to_csv.call_count == 1
        assert exportInstance.to_json.call_count == 1
        assert exportInstance.to_xml.call_count == 1
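
The test above relies on a `stubExport` fixture and a `self.prog` attribute that the excerpt does not define; a minimal sketch of the stub, with a purely illustrative payload:

class stubExport:
    def __init__(self):
        # any serializable payload works; save_file only dispatches it
        self.data = [{'id': 1, 'value': 'example'}]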
Example #4
import numpy as np
import tensorflow as tf
import export
import IPython

lr = 0.03
gamma = 0.0

sample_density = 20
group_num_particles = sample_density**2
goal_pos = np.array([1.4, 0.4])
goal_range = np.array([0.0, 0.00])
batch_size = 1
actuation_strength = 4

config = 'B'

exp = export.Export('walker_data')

# Robot B
num_groups = 7
group_offsets = [(0, 0), (0.5, 0), (0, 1), (1, 1), (2, 1), (2, 0), (2.5, 0)]
group_sizes = [(0.5, 1), (0.5, 1), (1, 1), (1, 1), (1, 1), (0.5, 1), (0.5, 1)]
actuations = [0, 1, 5, 6]
fixed_groups = []
head = 3
gravity = (0, -2)

num_particles = group_num_particles * num_groups


def particle_mask(start, end):
    r = tf.range(0, num_particles)
    return tf.cast(tf.logical_and(start <= r, r < end), tf.float32)[None, :]
Example #5
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument(
        '-c',
        '--config',
        help='Path to yaml config file with datastore connect details. '
             'See pgobserver_frontend.example.yaml for a sample file. '
             'Certain values can be overridden by ENV vars PGOBS_HOST, '
             'PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]')
    parser.add_argument(
        '--s3-config-path',
        help='Path style S3 URL to a key that holds the config file. '
             'Or PGOBS_CONFIG_S3_BUCKET env. var',
        metavar='https://s3-region.amazonaws.com/x/y/file.yaml',
        default=os.getenv('PGOBS_CONFIG_S3_BUCKET'))
    parser.add_argument(
        '-p',
        '--port',
        help='Web server port. Overrides value from config file',
        type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.s3_config_path:  # S3 has precedence if specified
        import aws_s3_configreader
        settings = aws_s3_configreader.get_config_as_dict_from_s3_file(
            args.s3_config_path)
    elif args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print('WARNING. Config file {} not found! exiting...'.format(
                args.config))
            return
        print('trying to read config file from {}'.format(args.config))
        with open(args.config, 'rb') as fd:
            settings = yaml.safe_load(fd)  # safe_load avoids arbitrary object construction

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = (os.getenv('PGOBS_HOST')
                                    or settings['database'].get('host'))
    settings['database']['port'] = (os.getenv('PGOBS_PORT')
                                    or settings['database'].get('port')
                                    or 5432)
    settings['database']['name'] = (os.getenv('PGOBS_DATABASE')
                                    or settings['database'].get('name'))
    settings['database']['frontend_user'] = (
        os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
    settings['database']['frontend_password'] = (
        os.getenv('PGOBS_PASSWORD')
        or settings['database'].get('frontend_password'))

    if not (settings['database'].get('host')
            and settings['database'].get('name')
            and settings['database'].get('frontend_user')):
        print('Mandatory datastore connect details missing!')
        print('Check --config input or environment variables: '
              'PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]')
        print('')
        parser.print_help()
        return

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))
    print('Setting connection string to ... ' + conn_string)
    # conn_string has been printed without the password; only now append it
    conn_string = conn_string + ' password=' + settings['database']['frontend_password']

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        'global': {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': args.port or settings.get('frontend', {}).get('port') or 8080
        },
        '/': {
            'tools.staticdir.root': current_dir,
            'request.dispatch': HostIdAndShortnameDispatcher()
        },
        '/healthcheck': {
            'tools.sessions.on': False
        },
        '/static': {
            'tools.staticdir.dir': 'static',
            'tools.staticdir.on': True,
            'tools.sessions.on': False
        },
        '/manifest.info': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': os.path.join(current_dir, '..', 'MANIFEST.MF'),
            'tools.auth_basic.on': False,
            'tools.sessions.on': False
        },
    }

    tplE.setup(settings)  # set up global variables and host data for use in views

    root = welcomefrontend.WelcomeFrontend()

    root.host = monitorfrontend.MonitorFrontend()
    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)  # JSON API exposure, enabling integration with other monitoring tools
    root.healthcheck = Healthcheck()

    if settings.get('oauth', {}).get('enable_oauth', False):
        print('switching on oauth ...')
        import oauth
        root.oauth = oauth.Oauth(settings['oauth'])
        cherrypy.config.update({
            'tools.oauthtool.on': True,
            'tools.sessions.on': True,
            'tools.sessions.timeout': settings['oauth'].get('session_timeout', 43200)
        })

    cherrypy.quickstart(root, config=conf)
Example #6
import numpy as np
import export

lr = 1
gamma = 0.0

sample_density = 30
group_num_particles = sample_density**3
goal_pos = np.array([1.4, 0.4, 0.5])
goal_range = np.array([0.0, 0.0, 0.0])
batch_size = 1

actuation_strength = 2


config = 'C'

exp = export.Export('walker3d')

# Robot B
if config == 'B':
  num_groups = 7
  group_offsets = [(0, 0, 0), (0.5, 0, 0), (0, 1, 0), (1, 1, 0), (2, 1, 0), (2, 0, 0), (2.5, 0, 0)]
  group_sizes = [(0.5, 1, 1), (0.5, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1), (0.5, 1, 1), (0.5, 1, 1)]
  actuations = [0, 1, 5, 6]
  fixed_groups = []
  head = 3
  gravity = (0, -2, 0)



#TODO: N-Ped
#Robot C
Example #7
import numpy as np
import tensorflow as tf
import export
import IPython

lr = 0.03
gamma = 0.0

sample_density = 20
group_num_particles = sample_density**2
goal_pos = np.array([1.4, 0.4])
goal_range = np.array([0.0, 0.00])
batch_size = 1
actuation_strength = 3

config = 'B'

exp = export.Export('walker_2d')

# Robot B
num_groups = 7
group_offsets = [(0, 0), (0.5, 0), (0, 1), (1, 1), (2, 1), (2, 0), (2.5, 0)]
group_sizes = [(0.5, 1), (0.5, 1), (1, 1), (1, 1), (1, 1), (0.5, 1), (0.5, 1)]
actuations = [0, 1, 5, 6]
fixed_groups = []
head = 3
gravity = (0, 0)

num_particles = group_num_particles * num_groups


def particle_mask(start, end):
  r = tf.range(0, num_particles)
  return tf.cast(tf.logical_and(start <= r, r < end), tf.float32)[None, :]
Example #8
def save(self):
    export_result = export.Export(self, self.dirty, self.lastDirectory, None)
    export_result.to_json()
Example #9
def export_to_word(self):
    export_result = export.Export(self, self.dirty, self.lastDirectory,
                                  self.final_building)
    export_result.to_word()
Example #10
import numpy as np
import tensorflow as tf
import export
import IPython

lr = 0.03
gamma = 0.0

sample_density = 40
group_num_particles = sample_density**2
goal_pos = np.array([0.5, 0.6])
goal_range = np.array([0.1, 0.1])
batch_size = 1
actuation_strength = 4
multi_target = True

config = 'B'

exp = export.Export('finger_data')

# Robot B
num_groups = 3
group_offsets = [(1, 0), (1.5, 0), (1, 2)]
group_sizes = [(0.5, 2), (0.5, 2), (1, 1)]
actuations = [0, 1]
head = 2
gravity = (0, 0)

num_particles = group_num_particles * num_groups


def particle_mask(start, end):
    r = tf.range(0, num_particles)
    return tf.cast(tf.logical_and(start <= r, r < end), tf.float32)[None, :]
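
particle_mask returns a 0/1 row mask over all particles; a sketch of how one group's particles would be selected with it (the group index is illustrative):

g = 1  # illustrative group index
mask = particle_mask(g * group_num_particles, (g + 1) * group_num_particles)
# shape [1, num_particles]: 1.0 for particles in group g, 0.0 elsewhere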
Example #11
from vector_math import *
import numpy as np
import tensorflow as tf
import export
import IPython

lr = 0.03
gamma = 0.0

sample_density = 20
group_num_particles = sample_density**2
goal_pos = np.array([1.4, 0.4])
goal_range = np.array([0.0, 0.00])
batch_size = 1

config = 'B'

exp = export.Export('walker_video')

# Robot B
num_groups = 1
group_offsets = [(1, 1)]
group_sizes = [(0.5, 1)]
actuations = [0]
fixed_groups = []
head = 0
gravity = (0, 0)

num_particles = group_num_particles * num_groups


def particle_mask(start, end):
    r = tf.range(0, num_particles)
    return tf.cast(tf.logical_and(start <= r, r < end), tf.float32)[None, :]
Example #12
import numpy as np
import export

evaluate = False
lr = 10
gamma = 0.0

sample_density = 15
group_num_particles = sample_density**3
goal_pos = np.array([1.4, 0.4, 0.5])
goal_range = np.array([0.0, 0.0, 0.0])
batch_size = 1

actuation_strength = 4


config = 'C'

exp = export.Export('crawler3d')

# Robot B
if config == 'B':
  num_groups = 7
  group_offsets = [(0, 0, 0), (0.5, 0, 0), (0, 1, 0), (1, 1, 0), (2, 1, 0), (2, 0, 0), (2.5, 0, 0)]
  group_sizes = [(0.5, 1, 1), (0.5, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1), (0.5, 1, 1), (0.5, 1, 1)]
  actuations = [0, 1, 5, 6]
  fixed_groups = []
  head = 3
  gravity = (0, -2, 0)



#TODO: N-Ped
#Robot C
Example #13
import tensorflow as tf
import tensorflow.contrib.layers as ly
import numpy as np
from vector_math import *
import IPython
import copy

import pygmo as pg
# import pygmo_plugins_nonfree as ppnf
import export


def flatten_vectors(vectors):
    return tf.concat([tf.squeeze(ly.flatten(vector)) for vector in vectors], 0)


lr = 1.0
exp = export.Export('arm3d')

goal_range = 0.0
batch_size = 1
actuation_strength = 8

use_pygmo = True

num_steps = 600

# Finger
num_links = 2
# TBH this is just to keep the number of variables tame
num_acts = int(num_steps // num_links // 10)
sample_density = int(10 // np.sqrt(num_links))
group_num_particles = sample_density**3
Example #14
import sys

sys.path.append('..')

import random
import time
from simulation import Simulation, get_bounding_box_bc
from time_integration import UpdatedSimulationState
import tensorflow as tf
import numpy as np
from IPython import embed
import export

exp = export.Export('rolling_acc')


def main(sess):
    batch_size = 1
    gravity = (0, -1)
    # gravity = (0, 0)
    N = 5
    dR = 0.2
    R = (N - 1) * dR
    dC = 1.6
    num_particles = int(((N - 1) * dC + 1)**2)
    steps = 1000
    dt = 5e-3
    goal_range = 0.15
    res = (45, 30)
    bc = get_bounding_box_bc(res)
Example #15
# Note: this excerpt omits its imports. As in the previous example, it needs
# time, numpy as np, tensorflow as tf, export, and IPython's embed, plus
# Simulation and get_new_bc; 'model' and 'difficulty' are module-level
# globals defined outside the excerpt.
def main(sess):
  batch_size = 1
  gravity = (0, -1.)
  # gravity = (0, 0)
  N = 10
  dR = 0.1
  R = (N - 1) * dR
  dC = 1.6
  num_particles = int(((N - 1) * dC + 1) ** 2)
  steps = 1000
  dt = 1e-2
  goal_range = 0.15
  res = (90, 60)
  dx = 1. / res[0]
  max_speed = 0.3
  exp = export.Export('rolling_acc_{}'.format(max_speed))

  lef = 2.36
  rig = 9.
  
  # Piecewise-linear track boundary (superseded by the sine boundary below).
  temp_f = lambda x: 2 * x - 1 if x > 0.5 else -2 * x + 1
  temp_f_ = lambda x: 2 if x > 0.5 else -2
  boundary = lambda x: ((temp_f(x / res[0]) - 0.5) * difficulty + 0.5) * res[1]
  boundary_ = lambda x: temp_f_(x / res[0]) / res[0] * res[1] * difficulty

  # Sine boundary and its derivative; 'difficulty' is defined outside this
  # excerpt. NumPy versions for setup, TF versions for the controller.
  boundary = lambda x: (np.sin((rig - lef) * x / res[0] + lef) * difficulty + 1) * res[1] / 2
  boundary_ = lambda x: (rig - lef) / 2 * difficulty * res[1] * np.cos((rig - lef) * x / res[0] + lef) / res[0]
  tf_boundary = lambda x: (tf.sin((rig - lef) * x + lef) * difficulty + 1) * res[1] / 2
  tf_boundary_ = lambda x: (rig - lef) / 2 * difficulty * res[1] * tf.cos((rig - lef) * x + lef) / res[0]
  
  bc = get_new_bc(res, boundary = boundary, boundary_ = boundary_)

  lr = 1
  
  goal = tf.placeholder(dtype=tf.float32, shape=[batch_size, 2], name='goal')

  def state_tensor(state):
    cm = state.center_of_mass()
    mv = tf.reduce_mean(state.velocity, axis = 2)
    time1 = tf.sin(tf.cast(state.step_count, dtype = tf.float32) / 100)
    time2 = tf.sin(tf.cast(state.step_count, dtype = tf.float32) / 31)
    time3 = tf.sin(tf.cast(state.step_count, dtype = tf.float32) / 57)
    time4 = tf.sin(tf.cast(state.step_count, dtype = tf.float32) / 7)
    lis = [mv, cm, time1, time2, time3, time4]
    tensor = tf.concat([tf.reshape(t, shape = [batch_size, -1])
                        for t in lis],
                       axis = 1)
    return tensor
  
  def norm_tensor(state):
    return (state_tensor(state) - model.get_bn_mean()) ** 2

  def F_controller(state):
    # F = state.position - state.center_of_mass()[:, :, None]
    # F = tf.stack([F[:, 1], -F[:, 0]], axis = 1)
    # F = tf.constant([1, 2 / res[0] * res[1]])[None, :, None]

    # accelerate
    cm = state.center_of_mass()
    k = tf_boundary_(cm[:, 0])
    L = (k ** 2 + 1) ** 0.5
    dx = tf.reciprocal(L)
    dy = k / L
    F = tf.stack([dx, dy], axis = 1) * max_speed

    # inputs
    inputs = state_tensor(state)
    
    # network
    direction = model(inputs)
    # Alternative controllers kept for reference:
    # direction = thb[state.step_count]
    #
    # best hand-coded solution:
    # vx = tf.reduce_mean(state.velocity, axis = 2)[:, 0]
    # direction = tf.cast(vx > 0, dtype = tf.float32) * 2 - 1


    return (F * direction)[:, :, None]

  sim = Simulation(
      dt=dt,
      num_particles=num_particles,
      grid_res=res,
      bc=bc,
      gravity=gravity,
      m_p=0.5,
      V_p=0.5,
      E = 1,
      nu = 0.3,
      dx = dx,
      sess=sess,
      use_visualize = True,
      F_controller = F_controller,
      part_size = 10)
  position = np.zeros(shape=(batch_size, num_particles, 2))

  # velocity_ph = tf.constant([0.2, 0.3])
  velocity_ph = tf.constant([0, 0], dtype = tf.float32)
  velocity = velocity_ph[None, :, None] + tf.zeros(
      shape=[batch_size, 2, num_particles], dtype=tf.float32)
  for b in range(batch_size):
    dx, dy = 15.81, 7.2  # circle-center offsets; note this shadows the grid dx above
    cnt = 0
    las = 0
    for i in range(N):
      l = int((dC * i + 1) ** 2)
      l, las = l - las, l
      print(l)
      dth = 2 * np.pi / l
      dr = R / (N - 1) * i
      theta = np.pi * 2 * np.random.random()
      for j in range(l):
        theta += dth
        x, y = np.cos(theta) * dr, np.sin(theta) * dr
        position[b, cnt] = ((dx + x) / 45, (dy + y) / 45)
        cnt += 1

  position = np.array(position).swapaxes(1, 2)


  initial_state = sim.get_initial_state(
      position=position, velocity=velocity)
  state_sum = tf.reduce_mean(sim.stepwise_sym(state_tensor), axis = 0)
  state_norm = tf.reduce_mean(sim.stepwise_sym(norm_tensor), axis = 0)

  final_position = sim.initial_state.center_of_mass()
  final_velocity = tf.reduce_mean(sim.initial_state.velocity, axis = 2)
  final_F = tf.reduce_mean(sim.updated_state.F, axis = 2)
  loss = tf.reduce_sum((final_position - goal) ** 2)
  sim.add_point_visualization(pos = final_position, color = (1, 0, 0), radius = 3)
  sim.add_point_visualization(pos = goal, color = (0, 1, 0), radius = 3)
  sim.add_vector_visualization(pos=final_position, vector=final_velocity, color=(0, 0, 1), scale=50)
  sim.add_vector_visualization(pos=final_position, vector=final_F, color=(1, 0, 1), scale=500)

  trainables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  sim.set_initial_state(initial_state = initial_state)

  sym = sim.gradients_sym(loss, variables = trainables)

  goal_input = np.array([[37.39 / 45, 25. / 45]], dtype=np.float32)
  grad_ph = [
      tf.placeholder(shape = v.shape, dtype = tf.float32) for v in trainables
  ]
  # momentum buffers (note: this reuses the name 'velocity' from above)
  velocity = [
      tf.Variable(np.zeros(shape=v.shape, dtype=np.float32), trainable=False)
      for v in trainables
  ]
  
  gradient_descent = [
      v.assign(v - lr * g) for v, g in zip(trainables, grad_ph)
  ]
  
  
  momentum_beta = 0.9
  momentum = [
      v.assign(v * momentum_beta + g * (1 - momentum_beta))
      for v, g in zip(velocity, grad_ph)
  ] + [
      v.assign(v - lr * g) for v, g in zip(trainables, velocity)
  ] 
  
  sess.run(tf.global_variables_initializer())

  flog = open('rolling_{}.log'.format(max_speed), 'w')
  for i in range(100000):
    t = time.time()
    print('Epoch {:5d}, learning rate {}'.format(i, lr))
  
    tt = time.time()
    memo = sim.run(
        initial_state = initial_state, 
        num_steps = steps,
        iteration_feed_dict = {goal: goal_input},
        loss = loss,
        stepwise_loss = [state_sum, state_norm])
    print('forward', time.time() - tt)


    if False:  # i % 10 == 0:
      tt = time.time()
      sim.visualize(memo, show = False, interval = 1, export = exp)
      print('visualize', time.time() - tt)
      if memo.loss < 1e-3:
        break

    tt = time.time()
    grad = sim.eval_gradients(sym=sym, memo=memo)
    print('eval_gradients', time.time() - tt)

    tt = time.time()
    grad_feed_dict = {}
    tot = 0.
    for gp, g in zip(grad_ph, grad):
      # randomly zero ~20% of each gradient's entries before the update
      grad_feed_dict[gp] = g * (np.random.random(g.shape) < 0.8).astype('float32')
      tot += (g ** 2).sum()
      # if i % 10 == 0:
      #   grad_feed_dict[gp] += (np.random.random(g.shape) - 0.5) * 0.01
    print('gradient norm', tot ** 0.5)
    sess.run(momentum, feed_dict = grad_feed_dict)
    print('gradient_descent', time.time() - tt)
    # log1 = tf.Print(W1, [W1], 'W1:')
    # log2 = tf.Print(b1, [b1], 'b1:')
    # sess.run(log1)
    # sess.run(log2)

    mean, norm = memo.stepwise_loss
    mean /= steps
    norm /= steps
    model.update_bn(mean, norm, sess)

    # if i % 10 == 0:
    if tot ** 0.5 > 100:  # i % 10 == 0:
      embed()
      tt = time.time()
      sim.visualize(memo, show = True, interval = 1, export = exp)
      sim.visualize(memo, show = False, interval = 1, export = exp)
      print('visualize', time.time() - tt)
      if memo.loss < 1e-3:
        break

    print('Iter {:5d} time {:.3f} loss {}'.format(
        i, time.time() - t, memo.loss))
    print(i, memo.loss, file = flog)
    flog.flush()