Example #1
def deploy():
  # Libraries usually have a versioned path (True)
  # help.deployer(True)
  # Sites or apps don't
  help.deployer(Yak.paths['build'], False)
  # If you want to deploy dependencies as well
  help.deployer('dependencies', False, 'dependencies')
Example #2
    def _populate_grid_at_position(self,
                                   grid_position,
                                   mapper,
                                   layout,
                                   coordinate_pair):
        # creates a button and positions it on the grid

        x, y = coordinate_pair[0], coordinate_pair[1]
        textual_coordinates = "{0}-{1}".format(x, y)
        button = QCustomPushButton(
            self,
            grid_position,
            Coordinates.parse_coordinates(textual_coordinates,
                                          self._grid_size)
        )

        Helpers.paint_grid_button(button, style.FIELD_BLUE)
        button.setObjectName("GridButton")
        button.setFixedSize(style.FIELD_ICON_SIZE + 10,
                            style.FIELD_ICON_SIZE + 10)
        button.setIconSize(QSize(style.FIELD_ICON_SIZE, style.FIELD_ICON_SIZE))

        # set the QSignalMapper's mapping to work with strings
        mapper.setMapping(button, textual_coordinates)
        # connect the button's clicked signal to the QSignalMapper's
        # mapped slot
        button.clicked.connect(mapper.map)
        # finally, add the button to the QGridLayout
        layout.addWidget(button, x, y)
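For context, a minimal sketch of the receiving side this wiring assumes: the QSignalMapper's string-typed mapped signal delivers the "x-y" text registered via setMapping(), and a slot parses it back into coordinates. The slot and handler names here are illustrative, not from the original.

# Sketch: wiring the string overload of QSignalMapper.mapped (PyQt5 names;
# Qt >= 5.15 exposes it as mappedString instead).
mapper = QSignalMapper(self)
mapper.mapped[str].connect(self._on_grid_button_clicked)

def _on_grid_button_clicked(self, textual_coordinates):
    # textual_coordinates is the "x-y" string registered with setMapping()
    x, y = (int(part) for part in textual_coordinates.split("-"))
    self._handle_cell_click(x, y)  # hypothetical handler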
Example #3
 def connect(cls, host='0.0.0.0', port=12100):
     try:
         cls.sock.connect((host, port))
         return True
     except Exception as e:
         Helpers.print_error(str(e))
     return False
Example #4
def build():
  # ============================
  # Very basic build
  # ============================

  sed = Sed()
  help.replacer(sed)
  deepcopy(FileList("src", exclude="*tests*"), Yak.paths['build'], replace=sed)
Example #5
 def recv(cls):
     recv_header = cls.sock.recv(1024)
     headers = json.loads(recv_header.decode('utf-8'))
     content_length = headers['content-length']
     recv_data = cls.sock.recv(content_length)
     data = recv_data.decode('utf-8')
     data_dict = json.loads(data)
     Helpers.check_data(data_dict)
     return data_dict
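recv above implies a simple length-prefixed protocol: a fixed 1024-byte JSON header carrying content-length, followed by the JSON payload, with both reads assumed to arrive in single recv calls. A minimal sketch of the matching send side under those assumptions (not taken from the original project):

import json

def send(sock, obj):
    # Serialize the payload first so its exact byte length is known.
    payload = json.dumps(obj).encode('utf-8')
    header = json.dumps({'content-length': len(payload)}).encode('utf-8')
    # Pad the header with spaces to the fixed 1024 bytes recv() expects;
    # json.loads on the other end ignores trailing whitespace.
    sock.sendall(header.ljust(1024))
    sock.sendall(payload)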
Example #6
def main(args):
    commander = Commander()
    helpers = Helpers()
    extractor = Extractor()

    commandandarguments = superCreatePushCommand(args, commander, helpers, extractor)

    dumpfilepath = helpers.getDumpFilePath(args)
    executeCommandAndArguments(commandandarguments, dumpfilepath)
Example #7
 def __init__(self, filename, spec=None):
     Helpers.__init__(self)
     self.filename = filename
     self.spec = spec
     self.is_build = False
     self.build_failed = False
     self._rpm_files = None
     self.rpmlint_output = []
     self.unpack()
Example #8
def tests():
  # ============================
  # Build tests
  # ============================
  sed = Sed()
  help.replacer(sed)

  files = FileList(Yak.paths['tests'], filter="*.js,*.html,*.css")
  deepcopy(files, Yak.paths['build'] + '/tests', replace=sed)
Example #9
 def assertCanResizeCluster(self, resource_group, target):
     self.client.clusters.update(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name,
                                 scale_settings=models.ScaleSettings(
                                     manual=models.ManualScaleSettings(target_node_count=target)))
     self.assertEqual(
         Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, target,
                                Helpers.NODE_STARTUP_TIMEOUT_SEC),
         target)
     Helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
                                                         self.cluster_name, target)
Example #10
    def test_setup_task_execution(self, resource_group, location, storage_account, storage_account_key):
        """Tests setup task execution.
        """
        cluster = Helpers.create_cluster(
            self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
            storage_account.name, storage_account_key,
            setup_task_cmd='echo $GREETING $SECRET_GREETING',
            setup_task_env={'GREETING': 'setup task'},
            setup_task_secrets={'SECRET_GREETING': 'has a secret'})  # type: models.Cluster

        # Verify that the cluster is reported in the list of clusters
        Helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])

        # Verify that one node is allocated and becomes available
        self.assertEqual(
            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
                                   Helpers.NODE_STARTUP_TIMEOUT_SEC), 1)

        # Check that the server doesn't return values for secrets
        self.assertEqual(len(cluster.node_setup.setup_task.secrets), 1)
        self.assertEqual(cluster.node_setup.setup_task.secrets[0].name, 'SECRET_GREETING')
        self.assertIsNone(cluster.node_setup.setup_task.secrets[0].value)
        # Verify that the setup task is completed by checking generated output. BatchAI reports a path which was auto-
        # generated for storing setup output logs.
        setup_task_output_path = cluster.node_setup.setup_task.std_out_err_path_suffix
        nodes = Helpers.get_node_ids(self.client, resource_group.name, self.cluster_name)
        self.assertEqual(len(nodes), 1)
        node_id = nodes[0]
        Helpers.assert_file_in_file_share(self, storage_account.name, storage_account_key,
                                          setup_task_output_path,
                                          'stdout-{0}.txt'.format(node_id),
                                          u'setup task has a secret\n')
        Helpers.assert_file_in_file_share(self, storage_account.name, storage_account_key,
                                          setup_task_output_path, 'stderr-{0}.txt'.format(node_id), u'')
        self.client.clusters.delete(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
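Helpers.wait_for_nodes used throughout these tests polls until the cluster reaches the target node count. A sketch of such a poller, assuming the azure-mgmt-batchai clusters.get(resource_group, workspace, cluster) call; the suite's real helper differs in signature and details:

import time

def wait_for_nodes(client, resource_group, workspace, cluster_name,
                   target, timeout_sec, poll_sec=10):
    # Poll until the observed node count matches the target or the
    # timeout expires; return the last observed count either way.
    deadline = time.time() + timeout_sec
    count = None
    while time.time() < deadline:
        cluster = client.clusters.get(resource_group, workspace, cluster_name)
        count = cluster.current_node_count
        if count == target:
            break
        time.sleep(poll_sec)
    return count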
Example #11
def build():
  # ============================
  # Very basic build
  # ============================

  sed = Sed()
  help.replacer(sed)
  # deepcopy(FileList("src", exclude = "*tests*"), Yak.paths['build'], replace = sed)

  libs = FileList('src', filter="*namespace.js", exclude="*shims*")
  libs.merge(FileList('src', filter="*.js", exclude="*namespace.js,*shims*"))
  combine(libs, Yak.paths['build'] + '/phoneapp.js')
Example #12
 def getWeather(self):
     if Helpers.isAligned(self.p1, self.p2, self.p3):
         if Helpers.isAligned(self.p1, self.p2, self.sun):
             # print 'drought ' + str(self.p1) + str(self.p2) + str(self.p3) + ' - ' + str(self.p1.day)
             return -2
         print 'good weather ' + str(self.p1) + str(self.p2) + str(self.p3)
         return -1
     if Helpers.inTriangle(self.sun, self.p1, self.p2, self.p3):
         # print 'rain ' + str(self.p1) + str(self.p2) + str(self.p3)
         return Helpers.perimeter(self.p1, self.p2, self.p3)
     # print 'invalid ' + str(self.p1) + str(self.p2) + str(self.p3) + ' - ' + str(self.p1.day)
     return -3
Example #13
 def __init__(self, base, plugin_path):
     Helpers.__init__(self)
     self.plugin_path = plugin_path
     self.version = None
     self.spec = base.spec
     self.srpm = base.srpm
     self.sources = base.sources
     self.encoder = ReviewJSONEncoder()
     self.decoder = JSONDecoder()
     self.results = []
     self.plug_in = None
     self.plug_out = None
     self.plug_err = None
Example #14
    def _declare_winner(self, winner):
        self.session.session_phase = SessionPhase.game_over
        self._is_new_game = False
        self._update_status_bar()

        player_type_textual = EnumConverters.player_type_to_string_converter(
            winner.player_type)

        Helpers.raise_info(
            self,
            self.tr("Game over - {0} wins!".format(
                    player_type_textual))
        )
Example #15
def build():

  sed = Sed()
  help.replacer(sed)

  sh('compass compile src')

  app = FileList('src/app', filter="*root.js")
  app.merge(FileList('src/app', filter="*.js", exclude="*root.js"))
  combine(app, FileSystem.join(Yak.paths['build'], 'js/app.js'))

  knownHelpers = [
    'action',
    'bind',
    'bindAttr',
    'collection',
    'each',
    'if',
    'log',
    'outlet',
    'unless',
    'view',
    'with'
  ]

  helperCmd = ''
  for helper in knownHelpers:
    helperCmd += '-k %s ' % helper

  stf = Std()
  sh('handlebars src/app/templates -r src/app/templates -f .build/js/templates.js %s' % helperCmd,
     header="build templates", std=stf)
  if stf.err:
    console.fail("Template build failed")

  deepcopy('src/index.html', Yak.paths['build'])

  deepcopy('src/assets/images', FileSystem.join(Yak.paths['build'], 'images'))

  fontcss = FileList('src/assets/fonts/', filter="*.css")
  combine(fontcss, FileSystem.join(Yak.paths['build'], 'fonts/pictos.css'))

  # pictos = FileList('src/assets/fonts/generated', exclude="*.css")
  # deepcopy(pictos, FileSystem.join(Yak.paths['build'], 'fonts'))

  fonts = FileList('src/assets/fonts/signika')
  deepcopy(fonts, FileSystem.join(Yak.paths['build'], 'fonts'))

  # Phonegap is still needed on desktop
  libs = FileList('src/lib/phonegap/ios', filter="*.js")
  libs.merge(FileList('src/lib/phonegap/plugins', filter="*.js"))
  combine(libs, FileSystem.join(Yak.paths['build'], 'js', 'phonegap.js'))
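The knownHelpers loop above builds the string '-k action -k bind ... '; an equivalent one-liner, if preferred (the loop version leaves a trailing space, which the shell ignores):

helperCmd = ' '.join('-k %s' % helper for helper in knownHelpers)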
Example #16
 def __init__(self, base):
     Helpers.__init__(self)
     self.base = base
     self.spec = base.spec
     self.srpm = base.srpm
     self.sources = base.sources
     self.url = None
     self.text = None
     self.description = None
     self.state = 'pending'
     self.type = 'MUST'
     self.result = None
     self.output_extra = None
     self.attachments = []
Example #17
    def test_creation_and_deletion(self, resource_group, location, storage_account, storage_account_key):
        """Tests basic use-case scenario.

        1. Create cluster
        2. Execute a task on the host
        3. Execute a task in a docker container
        4. Delete cluster
        """
        cluster = Helpers.create_cluster(
            self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
            storage_account.name, storage_account_key)

        self.assertEqual(cluster.name, self.cluster_name)
        self.assertIsNone(cluster.errors)
        self.assertEqual(cluster.vm_size, 'STANDARD_D1')

        # Verify that the cluster is reported in the list of clusters
        Helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])

        # Verify that one node is allocated and becomes available
        self.assertEqual(
            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
                                   Helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
        Helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
                                                            self.cluster_name, 1)

        # Verify that the cluster is able to run tasks.
        self.assertCanRunJobOnHost(resource_group, location, cluster.id)
        self.assertCanRunJobInContainer(resource_group, location, cluster.id)

        # Test cluster deletion
        self.client.clusters.delete(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
        Helpers.assert_existing_clusters_are(self, self.client, resource_group.name, [])
Example #18
    def sync_record(self, model):
        log.info("Adding switch %s to onos-fabric" % model.name)
        # Send device info to onos-fabric netcfg
        data = {
            "devices": {
                model.ofId: {
                    "basic": {
                        "name": model.name,
                        "driver": model.driver
                    },
                    "segmentrouting": {
                        "name": model.name,
                        "ipv4NodeSid": model.ipv4NodeSid,
                        "ipv4Loopback": model.ipv4Loopback,
                        "routerMac": model.routerMac,
                        "isEdgeRouter": model.isEdgeRouter,
                        "adjacencySids": []
                    }
                }
            }
        }

        onos = Helpers.get_onos_fabric_service(model_accessor=self.model_accessor)

        url = 'http://%s:%s/onos/v1/network/configuration/' % (onos.rest_hostname, onos.rest_port)
        r = requests.post(url, json=data, auth=HTTPBasicAuth(onos.rest_username, onos.rest_password))

        if r.status_code != 200:
            log.error(r.text)
            raise Exception("Failed to add device %s into ONOS" % model.name)
        else:
            try:
                log.info("result", json=r.json())
            except Exception:
                log.info("result", text=r.text)
Example #19
 def __init__(self):
     """Constructor"""
     self.oHelpers = Helpers()
     self.servicesChecksFunc = {
         'tcp': self.checkTcpService,
         'http': self.checkHttpService,
         'ftp': self.checkFtpService,
     }
Example #20
    def delete_record(self, o):

        if hasattr(o, 'service'):
            # this is a ServiceAttribute model
            if 'ONOSService' in o.service.leaf_model.class_names:
                print "sync ONOSService Attribute", o.service.leaf_model

                log.info("Deleting config %s" % o.name)
                # getting onos url and auth
                onos_service = o.service.leaf_model
                onos_url = "%s:%s" % (
                    Helpers.format_url(onos_service.rest_hostname), onos_service.rest_port)
                onos_basic_auth = HTTPBasicAuth(onos_service.rest_username,
                                                onos_service.rest_password)

                url = o.name
                if url[0] == "/":
                    # strip initial /
                    url = url[1:]

                url = '%s/%s' % (onos_url, url)
                request = requests.delete(url, auth=onos_basic_auth)

                if request.status_code != 204:
                    log.error("Request failed", response=request.text)
                    raise Exception("Failed to remove config %s from ONOS:  %s" % (url, request.text))
Example #21
    def sync_record(self, o):
        if hasattr(o, 'service'):
            # this is a ServiceAttribute model
            if 'ONOSService' in o.service.leaf_model.class_names:
                print "sync ONOSService Attribute", o.service.leaf_model
                return self.sync_record(o.service.leaf_model)
            return  # if it's not related to an ONOSService do nothing

        onos_url = "%s:%s" % (Helpers.format_url(o.rest_hostname), o.rest_port)
        onos_basic_auth = HTTPBasicAuth(o.rest_username, o.rest_password)

        configs = self.get_service_attribute(o)
        for url, value in configs.iteritems():

            if url[0] == "/":
                # strip initial /
                url = url[1:]

            url = '%s/%s' % (onos_url, url)
            value = json.loads(value)
            request = requests.post(url, json=value, auth=onos_basic_auth)

            if request.status_code != 200:
                log.error("Request failed", response=request.text)
                raise Exception("Failed to add config %s in ONOS" % url)
Example #22
 def do_search(self, query):
     """Search for songs, artists, albums, etc.

     Usage example:
         \033[92mfeeluown ☛  search 刘德华\033[0m
     """
     if query.startswith('"'):
         query = query[1:-1]
     func = 'search("%s")' % query
     Client.send(func)
     data = Client.recv()
     songs = data.get('result', None)
     if isinstance(songs, list):
         self.songs = songs[:10]
         Helpers.print_music_list(songs)
     else:
         Helpers.print_hint("Ugh, nothing found...")
Example #23
    def __init__(self, sources, tag, url):

        def my_logger(cache):
            if cache:
                path = urlparse(url).path
                self.log.info("Using cached data for (%s): %s" %
                              (tag, os.path.basename(path)))
            else:
                self.log.info("Downloading (%s): %s" % (tag, url))

        Helpers.__init__(self)
        self.sources = sources
        self.tag = tag
        self.downloaded = True
        is_url = urlparse(url)[0] != ''
        if is_url:  # This is a URL, Download it
            self.url = url
            self.local = False
            try:
                self.filename = self._get_file(url,
                                               ReviewDirs.upstream,
                                               my_logger)
            except:
                self.log.debug('Download error on ' + url,
                                exc_info=True)
                self.log.warning('Cannot download url: ' + url)
                self.downloaded = False
                # get the filename
                url = urlparse(url)[2].split('/')[-1]

        if not is_url or not self.downloaded:  # this is a local file in the SRPM
            local_src = os.path.join(ReviewDirs.startdir, url)
            if os.path.exists(local_src):
                self.log.info(
                    "Using local file " + url + " as " + tag)
                srcdir = ReviewDirs.startdir
                self.local_src = local_src
                self.local = False
            else:
                self.log.info("No upstream for (%s): %s" % (tag, url))
                srcdir = ReviewDirs.srpm_unpacked
                self.local = True
            self.filename = os.path.join(srcdir, url)
            self.url = 'file://' + self.filename
Example #24
    def delete_netcfg_item(self, partial_url):
        onos = Helpers.get_onos_fabric_service(self.model_accessor)
        url = 'http://%s:%s/onos/v1/network/configuration/ports/%s' % (onos.rest_hostname, onos.rest_port, partial_url)

        r = requests.delete(url, auth=HTTPBasicAuth(onos.rest_username, onos.rest_password))

        if r.status_code != 204:
            log.error(r.text)
            raise Exception("Failed to delete port %s from ONOS" % partial_url)
Example #25
    def __init__(self, parent):
        """

        :param parent: an instance of type QMainWindow

        """
        super(AboutDialog, self).__init__()
        self.ui = Ui_AboutDialog()
        self.ui.setupUi(self)

        self.setFixedSize(self.size())
        self.setWindowFlags(
                self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint
        )
        self.setModal(True)

        Helpers.center_dialog(self, parent)
        self._parent = parent
Example #26
    def test_cluster_resizing(self, resource_group, location, storage_account, storage_account_key):
        """Tests manual cluster resizing"""
        cluster = Helpers.create_cluster(
            self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
            storage_account.name, storage_account_key)

        # Verify that one node is allocated and becomes available
        self.assertEqual(
            Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
                                   Helpers.NODE_STARTUP_TIMEOUT_SEC), 1)
        Helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
                                                            self.cluster_name, 1)

        self.assertCanResizeCluster(resource_group, 0)
        self.assertCanResizeCluster(resource_group, 1)

        # Verify that the cluster is able to run tasks after resizing.
        self.assertCanRunJobOnHost(resource_group, location, cluster.id)
        self.client.clusters.delete(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name).result()
Example #27
 def __init__(self, path):
     self.helpers = Helpers()  # Image helpers
     self.image = self.loadImage(path)
     self.preprocess()
     #self.helpers.show(self.image, 'After Preprocessing')
     sudoku = self.cropSudoku()
     #self.helpers.show(sudoku, 'After Cropping out grid')
     sudoku = self.straighten(sudoku)
     #self.helpers.show(sudoku, 'Final Sudoku grid')
     self.cells = Cells(sudoku).cells
Example #28
class Pinger(object):
	def __init__(self,Key,Username_PL,Username_DC):
		self.key=Key
		self.uname_pl=Username_PL #planet lab username
		self.uname_dc=Username_DC #datacenter username
		self.plab=Helpers(self.key,self.uname_pl) #call helpers for plab
		self.dcs=Helpers(self.key,self.uname_dc) #call helpers for dc

	def install_libs(self,fname,toinstall):
		f=open(fname,'r')
		for node in f:
			print node
#			self.plab.do_ssh(node.split(',')[0].strip('\n\r'),'sudo apt-get install '+toinstall) #automate this, decide based on OS
			self.plab.do_ssh(node.split(',')[0].strip('\n\r'),'sudo yum update ; sudo yum  -y -t --nogpgcheck install '+toinstall)

	def get_files(self,direc,expnum,copyto,f1,f2):
		self.dcs.copy_from(direc,expnum,open(f1,'r'),copyto)
		self.plab.copy_from(direc,expnum,open(f2,'r'),copyto)

	def run_dc_ping(self,direc,duration,expnum,f_dc,f_servers): #ping servers and plabs from DC
		for node in f_dc:
			self.ping_servers(direc,duration,expnum,node.split(',')[0].strip('\r\n'),f_servers,'tcpping',self.dcs)

	def ping_servers(self,direc,duration,expnum,node,f_s,pingtype,node_obj): #ping servers
		print node,direc,expnum,f_s
		direc=direc+str(expnum)
		node_obj.do_ssh(node,'mkdir '+direc) # making dir at host
		node_obj.do_scp(node,f_s,direc) # copying files to host
		node_obj.do_scp(node,'batch_ping.py',direc)
		node_obj.do_ssh(node,'python '+direc+'/batch_ping.py '+ \
			node+' '+f_s+' '+str(expnum)+' '+direc+' '+str(duration)+' '+pingtype)

	def run_pl_ping(self,direc,duration,expnum,f_pl,f_servers,exptype):  #ping DC and servers from pl pair, and ping between pl nodes
		for node in f_pl:
			print node
			self.ping_servers(direc,duration,expnum,node.split(',')[0].strip('\r\n'),f_servers,'ping',self.plab)
			if exptype ==1:
				self.ping_servers(direc,duration,expnum,node.split(',')[2],f_servers,'ping',self.plab)
				new_dir=direc+str(expnum)+'/'+node.split(',')[0]+'_'+node.split(',')[2]
				self.plab.do_ssh(node.split(',')[0],'mkdir '+new_dir)
				self.plab.run_ping(new_dir,duration,node.split(',')[2],node.split(',')[0], \
				  node.split(',')[0],expnum,'ping')#ping other pl node 
Example #29
class Extractor(object):
    '''
        Stores and manipulates the input image to extract the Sudoku puzzle
        all the way to the cells
    '''

    def __init__(self, path):
        self.helpers = Helpers()  # Image helpers
        self.image = self.loadImage(path)
        self.preprocess()
        #self.helpers.show(self.image, 'After Preprocessing')
        sudoku = self.cropSudoku()
        self.helpers.show(sudoku, 'After Cropping out grid')
        sudoku = self.straighten(sudoku)
        #self.helpers.show(sudoku, 'Final Sudoku grid')
        self.cells = Cells(sudoku).cells

    def loadImage(self, path):
        color_img = cv2.imread(path)
        if color_img is None:
            raise IOError('Image not loaded')
        print 'Image loaded.'
        return color_img

    def preprocess(self):
        print 'Preprocessing...',
        self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        self.image = self.helpers.thresholdify(self.image)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
        self.image = cv2.morphologyEx(self.image, cv2.MORPH_CLOSE, kernel)
        print 'done.'

    def cropSudoku(self):
        print 'Cropping out Sudoku...',
        contour = self.helpers.largestContour(self.image.copy())
        sudoku = self.helpers.cut_out_sudoku_puzzle(self.image.copy(), contour)
        print 'done.'
        return sudoku

    def straighten(self, sudoku):
        print 'Straightening image...',
        largest = self.helpers.largest4SideContour(sudoku.copy())
        app = self.helpers.approx(largest)
        corners = self.helpers.get_rectangle_corners(app)
        sudoku = self.helpers.warp_perspective(corners, sudoku)
        print 'done.'
        return sudoku
Example #30
    def do_play(self, query):
        """Switch to the playing state, or play a specific song.

        Suppose a song's id is 1314; you can play that exact song with:
            \033[92mfeeluown ☛  play 1314\033[0m
        You can also give no argument, which switches the player from
        paused back to playing:
            \033[92mfeeluown ☛ play\033[0m
        """
        func = 'play()'
        if query != '':
            try:
                func = 'play(%d)' % int(query)
            except ValueError:
                Helpers.print_hint("The argument must be a song id")
                return

        Client.send(func)
        Client.recv()
Example #31
    def test_datapath_id_to_hex(self):
        hex = Helpers.datapath_id_to_hex(55334486016)
        self.assertEqual(hex, "0000000ce2314000")

        hex = Helpers.datapath_id_to_hex("55334486016")
        self.assertEqual(hex, "0000000ce2314000")
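The behaviour under test is a decimal datapath id, given as an int or string, zero-padded to 16 hex digits. A sketch of an implementation that satisfies both assertions (the project's actual helper may differ):

def datapath_id_to_hex(dpid):
    # Accept an int or its decimal-string form and render it as a
    # 16-character, zero-padded, lowercase hex string.
    return format(int(dpid), '016x')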
Example #32
# Arg parsing
parser = argparse.ArgumentParser(
    description=
    'Colorize images using conditional generative adversarial networks.')
parser.add_argument(
    'input',
    help=
    'path to input image you would like to colorize (i.e., "~/Desktop/input.jpg")'
)
parser.add_argument('model',
                    help='path to saved model (i.e., "lib/generator")')
args = parser.parse_args()
args.input = os.path.abspath(args.input)

with tf.Session() as sess:
    in_rgb, shape = Helpers.load_img(args.input)
    in_rgb = tf.convert_to_tensor(in_rgb, dtype=tf.float32)
    in_rgb = tf.expand_dims(in_rgb, axis=0)

    # Initialize new generative net and build its graph
    gen = generator.Generator()
    gen.build(in_rgb)
    sample = gen.output
    print("finished 1 ...")

    # Ops for transforming a sample into a properly formatted rgb image
    img = tf.image.rgb_to_hsv(in_rgb)
    v = tf.slice(img, [0, 0, 0, 2], [1, shape[1], shape[2], 1]) / 255.
    colored_sample = tf.image.hsv_to_rgb(
        tf.concat(axis=3, values=[sample, tf.multiply(v, 255.)])) / 255.
    print("finished 2 ...")
Example #33
 def __init__(self, sudoku):
     print('Extracting cells...', end=' ')
     self.helpers = Helpers()
     self.cells = self.extractCells(sudoku)
     print('done.')
Example #34
    def test_redundant_uboot_env(
        self,
        successful_image_update_mender,
        bitbake_variables,
        connection,
        http_server,
        board_type,
        use_s3,
        s3_address,
    ):
        """This tests a very specific production scenario: you are currently
        running an update on rootfs partition
        B. Then you attempt another update, which happens to be broken (but you
        don't know that yet). This will put the update in rootfs partition
        A. However, just as U-Boot is about to switch to rootfs partition A,
        using `upgrade_available=1` (and hence triggering bootlimit), the device
        loses power. This causes the stored U-Boot environment to become
        corrupt. On the next boot, U-Boot detects this and reverts to its
        built-in environment instead.

        But this is a problem: The default environment will boot from rootfs
        partition A, which contains a broken update. And since U-Boot at this
        point doesn't know that an update was in progress, it will not attempt
        to boot from anywhere else (`upgrade_available=0`). Hence the device is
        bricked.

        This is what a redundant U-Boot environment is supposed to protect
        against by always providing two copies of the stored environment, and
        guaranteeing that at least one of them is always valid.

        In a test we cannot pull the power from the device reliably, but it's
        quite easy to simulate the situation by setting up the above scenario,
        and then corrupting the environment manually with a file write.

        """

        (active,
         passive) = determine_active_passive_part(bitbake_variables,
                                                  connection)

        if active != bitbake_variables["MENDER_ROOTFS_PART_B"]:
            # We are not running the secondary partition. This is a requirement
            # for this test to test the correct scenario. Do a full update, so
            # that we end up on the right partition. Run the full update test to
            # correct this. If running all the tests in order with a fresh
            # build, the correct partition will usually be selected already.
            self.test_network_based_image_update(
                successful_image_update_mender,
                bitbake_variables,
                connection,
                http_server,
                board_type,
                use_s3,
                s3_address,
            )

            (active,
             passive) = determine_active_passive_part(bitbake_variables,
                                                      connection)
            assert active == bitbake_variables["MENDER_ROOTFS_PART_B"]

        file_flag = Helpers.get_file_flag(bitbake_variables)
        install_flag = Helpers.get_install_flag(connection)

        # Make a note of the checksums of each environment. We use this later to
        # determine which one changed.
        old_checksums = Helpers.get_env_checksums(bitbake_variables,
                                                  connection)

        orig_env = connection.run("fw_printenv").stdout

        image_type = bitbake_variables["MENDER_DEVICE_TYPE"]

        try:
            # Make a dummy/broken update
            subprocess.call(
                "dd if=/dev/zero of=image.dat bs=1M count=0 seek=8",
                shell=True)
            subprocess.call(
                "mender-artifact write rootfs-image -t %s -n test-update %s image.dat -o image.mender"
                % (image_type, file_flag),
                shell=True,
            )
            put_no_sftp("image.mender",
                        connection,
                        remote="/var/tmp/image.mender")
            connection.run("mender %s /var/tmp/image.mender" % install_flag)

            new_checksums = Helpers.get_env_checksums(bitbake_variables,
                                                      connection)

            # Exactly one checksum should be different.
            assert (old_checksums[0] == new_checksums[0]
                    or old_checksums[1] == new_checksums[1])
            assert (old_checksums[0] != new_checksums[0]
                    or old_checksums[1] != new_checksums[1])

            if old_checksums[0] != new_checksums[0]:
                to_corrupt = 0
            elif old_checksums[1] != new_checksums[1]:
                to_corrupt = 1

            offsets = Helpers.get_env_offsets(bitbake_variables)

            # Now manually corrupt the environment.
            # A few bytes should do it!
            connection.run("dd if=/dev/zero of=%s bs=1 count=64 seek=%d" %
                           (bitbake_variables["MENDER_STORAGE_DEVICE"],
                            offsets[to_corrupt]))
            connection.run("sync")

            # Check atomicity of Mender environment update: The contents of the
            # environment before the update should be identical to the
            # environment we get if we update, and then corrupt the new
            # environment. If it's not identical, it's an indication that there
            # were intermediary steps. This is important to avoid so that the
            # environment is not in a half updated state.
            new_env = connection.run("fw_printenv").stdout
            assert orig_env == new_env

            reboot(connection)

            # We should have recovered.
            run_after_connect("true", connection)

            # And we should be back at the second rootfs partition.
            (active,
             passive) = determine_active_passive_part(bitbake_variables,
                                                      connection)
            assert active == bitbake_variables["MENDER_ROOTFS_PART_B"]

        finally:
            # Cleanup.
            os.remove("image.mender")
            os.remove("image.dat")
Example #35
    def test_network_based_image_update(
        self,
        successful_image_update_mender,
        bitbake_variables,
        connection,
        http_server,
        board_type,
        use_s3,
        s3_address,
    ):

        (active_before, passive_before) = determine_active_passive_part(
            bitbake_variables, connection)

        Helpers.install_update(
            successful_image_update_mender,
            connection,
            http_server,
            board_type,
            use_s3,
            s3_address,
        )

        output = connection.run("fw_printenv bootcount").stdout
        assert output.rstrip("\n") == "bootcount=0"

        output = connection.run("fw_printenv upgrade_available").stdout
        assert output.rstrip("\n") == "upgrade_available=1"

        output = connection.run("fw_printenv mender_boot_part").stdout
        assert output.rstrip("\n") == "mender_boot_part=" + passive_before[-1:]

        # Delete kernel and associated files from currently running partition,
        # so that the boot will fail if U-Boot for any reason tries to grab the
        # kernel from the wrong place.
        connection.run("rm -f /boot/* || true")

        reboot(connection)

        run_after_connect("true", connection)
        (active_after, passive_after) = determine_active_passive_part(
            bitbake_variables, connection)

        # The OS should have moved to a new partition, since the image was fine.
        assert active_after == passive_before
        assert passive_after == active_before

        output = connection.run("fw_printenv bootcount").stdout
        assert output.rstrip("\n") == "bootcount=1"

        output = connection.run("fw_printenv upgrade_available").stdout
        assert output.rstrip("\n") == "upgrade_available=1"

        output = connection.run("fw_printenv mender_boot_part").stdout
        assert output.rstrip("\n") == "mender_boot_part=" + active_after[-1:]

        connection.run("mender -commit")

        output = connection.run("fw_printenv upgrade_available").stdout
        assert output.rstrip("\n") == "upgrade_available=0"

        output = connection.run("fw_printenv mender_boot_part").stdout
        assert output.rstrip("\n") == "mender_boot_part=" + active_after[-1:]

        active_before = active_after
        passive_before = passive_after

        reboot(connection)

        run_after_connect("true", connection)
        (active_after, passive_after) = determine_active_passive_part(
            bitbake_variables, connection)

        # The OS should have stayed on the same partition, since we committed.
        assert active_after == active_before
        assert passive_after == passive_before
Example #36
    async def dungeon(self, ctx):
        params = config()
        conn = psycopg2.connect(**params)
        x = datetime.datetime.now() + timedelta(seconds=1)
        y = x.strftime('%Y-%m-%d %H:%M:%S')
        print(y)
        cur = conn.cursor()
        cur.execute("select count(*) from Player where discord_id = %s;",
                    (ctx.message.author.id, ))
        row = cur.fetchone()
        print(row)
        if row[0] == 0:
            # Bail out into the message branch: the stats query below
            # would return no row for a missing player.
            await ctx.send("Sir you haven't !isekai yet")
        else:
            helpers = Helpers()
            cur.execute(
                "select plvl, pexp, strh, dex, intl from Player where discord_id = %s;",
                (ctx.message.author.id, ))
            curLvl, curExp, curSTR, curDEX, curINT = cur.fetchone()

            exp = 10
            curExp = curExp + exp
            await ctx.send("You have completed the dungeon! You gained 10 exp!"
                           )
            cur.execute("update Player set pexp = %s where discord_id = %s",
                        (curExp, ctx.message.author.id))
            if helpers.checkIfPlayerLeveledUp(ctx.message.author.id, curLvl,
                                              curExp):
                embed = discord.Embed(color=discord.Color.dark_teal())
                embed.add_field(
                    name="** **",
                    value=f"**You have leveled up!**  \nLevel: {curLvl} (+1)"
                          f"\nSTR: {curSTR} (+1)\nINT: {curINT} (+1)"
                          f"\nDEX: {curDEX} (+1)")
                cur.execute(
                    "update Player set plvl = %s, strh = %s, intl = %s, dex = %s where discord_id = %s;",
                    (curLvl + 1, curSTR + 1, curINT + 1, curDEX + 1,
                     ctx.message.author.id))
                await ctx.message.channel.send(embed=embed)
                print("You can level")
            else:
                print("not yet")
        # player_exist = True
        # for r in rows:
        #     if r[0] == 0:
        #         player_exist = False
        # if player_exist == True:
        #     cur.execute("select adv from Player where discord_id = %s;", (ctx.message.author.id,))
        #     rows = cur.fetchall()
        #     adv = 0
        #     for r in rows:
        #         print(r[0])
        #         if r[0] is None:
        #             cur.execute("update Player set adv = %s where adv is NULL and discord_id = %s", (y, ctx.message.author.id))
        #             adv = y
        #             break
        #         else:
        #             adv = r[0]
        #     z = datetime.datetime.now()
        #     now = z.strftime('%Y-%m-%d %H:%M:%S')
        #     print("Before strptime %s", adv)
        #     if type(adv) == str:
        #         adv = datetime.datetime.strptime(adv, '%Y-%m-%d %H:%M:%S')
        #     now = datetime.datetime.strptime(now, '%Y-%m-%d %H:%M:%S')
        #     if (now > adv):
        #         #Makes their adventure status as NULL
        #         cur.execute("update Player set adv = NULL where discord_id = %s", (ctx.message.author.id,))
        #         cur.execute("select p_exp, lvl from Player where discord_id = %s;", (ctx.message.author.id,))
        #         rows = cur.fetchall()
        #         for r in rows:
        #             exp = r[0]
        #             lvl = r[1]
        #         exp = exp + 4
        #         cur.execute("update Player set p_exp = %s where discord_id = %s", (exp, ctx.message.author.id))
        #         cur.execute("select ex from Lvl where lvl = %s;", (lvl,))
        #         rows = cur.fetchall()
        #         for r in rows:
        #             exp_req = r[0]
        #         if exp < exp_req:
        #             await ctx.send("You have completed the dungeon! You gained 30 exp!")
        #         else:
        #             cur.execute("update Player set lvl = %s where discord_id = %s", (lvl + 1, ctx.message.author.id))
        #             cur.execute("select str_per_lvl, intl_per_lvl, dex_per_lvl, vit_per_lvl, wis_per_lvl, eva_per_lvl from Classes where adv_class = %s;", ("Villager",))
        #             r = cur.fetchone()
        #             str_per_lvl = r[0]
        #             intl_per_lvl = r[1]
        #             dex_per_lvl = r[2]
        #             vit_per_lvl = r[3]
        #             wis_per_lvl = r[4]
        #             eva_per_lvl = r[5]
        #             cur.execute("select str, intl, dex, vit, wis, eva from Player_Stats where discord_id = %s;", (ctx.message.author.id,))
        #             row = cur.fetchone()
        #             sth = row[0]
        #             intl = row[1]
        #             dex = row[2]
        #             vit = row[3]
        #             wis = row[4]
        #             eva = row[5]
        #             embed = discord.Embed(color=discord.Color.dark_teal())
        #             embed.add_field(name="** **",value=f"**You have leveled up!**  \n\nSTR: {sth} (+{str_per_lvl})\nINT: {intl} (+{intl_per_lvl})\nDEX: {dex} \
        #                 (+{dex_per_lvl})\nVIT: {vit} (+{vit_per_lvl})\nWIS: {wis} (+{wis_per_lvl})\n" )
        #             cur.execute("update Player_Stats set str = %s, intl = %s, dex = %s, vit = %s, wis = %s where discord_id = %s;", (sth+str_per_lvl, intl+intl_per_lvl, dex+dex_per_lvl, vit+vit_per_lvl, wis+wis_per_lvl, ctx.message.author.id))
        #             await ctx.message.channel.send(embed=embed)
        #     else:
        #         duration = adv - now
        #         print(adv)
        #         print(now)
        #         print(duration)
        #         await ctx.send(f"You have {duration} remaining to complete the dungeon")
        # else:
        #     await ctx.send("Sir you haven't !isekai yet")
        cur.close()
        conn.commit()
        conn.close()
Example #37
"""
The following code analyses the sentiment values of tweets. Furthermore, it can
be used to compare sentiments before and after a specific event. For this, two
things are necessary: suitable tweet data, which can be obtained via the
procedure described in examples/example_search.py, and a cutoff date, which
marks the event before and after which you want to compare your tweets.

"""

import datetime
from scipy import stats

from helpers import Helpers

# initialize api
api_helpers = Helpers()

# load and preprocess generated data
api_helpers.settings()

df1 = api_helpers.data_handler(tweets=None,
                               geo=None,
                               user_metadata=True,
                               from_cursor=False,
                               filename="lockdown_022621.txt")
df2 = api_helpers.data_handler(tweets=None,
                               geo=None,
                               user_metadata=True,
                               from_cursor=False,
                               filename="lockdown_030721.txt")
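A minimal sketch of the comparison step described in the docstring, assuming each frame returned by data_handler carries a numeric 'sentiment' column and a datetime 'date' column (both column names are assumptions here):

cutoff = datetime.datetime(2021, 3, 1)  # the event date

before = df1.loc[df1['date'] < cutoff, 'sentiment']
after = df2.loc[df2['date'] >= cutoff, 'sentiment']

# Welch's two-sample t-test: do mean sentiments differ across the cutoff?
t_stat, p_value = stats.ttest_ind(before, after, equal_var=False)
print(t_stat, p_value)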
Example #38
    All_Dataset = All_Dataset()
    models = All_Models()
    for index, subDir in enumerate(sorted(os.listdir(var.root))):
        if 'train_' in subDir:
            newFolder = subDir
            os.chdir(var.root)
            test_folder = 'train_sanket_washands_S1'
            # test_folder = 'test_' + newFolder[6:]
            # _ = os.system('mv ' + newFolder + ' test_' + newFolder[6:])
            # if lastFolder is not None:
            #     print('Last folder changed')
            #     _ = os.system('mv test_' + lastFolder[6:] + ' ' + lastFolder)

            # print(newFolder, lastFolder)
            trim_frame_size = var.trim_frame_size
            utils = Helpers(test_folder)
            imu_training, imu_testing, training_target, testing_target = utils.load_datasets(
                args.reset_data, repeat=0)

            pipeline, model_checkpoint = models.get_model(
                args.model, test_folder)
            # pipeline.tensorboard_folder = args.tfolder
            optimizer = optim.Adam(pipeline.parameters(),
                                   lr=0.0001)  #, momentum=0.9)
            lambda1 = lambda epoch: 0.95**epoch
            scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                                    lr_lambda=lambda1)
            criterion = nn.KLDivLoss(reduction='batchmean')
            gt_act = nn.Softmax2d()
            best_test_loss = -np.inf
            if Path(pipeline.var.root + 'datasets/' + test_folder[5:] + '/' +
Example #39
def is_module(name):
    if os.path.isfile(name):
        return 'python' in H.filetype(name)
    return False
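H.filetype is not shown; the 'python' in ... check suggests it wraps something like the Unix file utility. A sketch under that assumption (not the module's actual implementation):

import subprocess

def filetype(path):
    # Rough equivalent of `file -b <path>`: a human-readable description
    # such as "Python script, ASCII text executable", lowercased so the
    # 'python' in ... membership test matches.
    return subprocess.check_output(['file', '-b', path]).decode('utf-8').lower()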
Example #40
import webrepl
from connect import Connect
from helpers import Helpers
import esp
esp.osdebug(None)
import gc
gc.collect()
webrepl.start()
try:
    print(Helpers.free_space())
    Connect.connect()
except:
    pass

Example #41
##########
#---------------------- ** REGIONS ** ----------------------#
#Australasia & Oceania, Central America & Caribbean, Central Asia, East Asia, Eastern Europe,
#Middle East & North Africa, North America, South America, South Asia, Southeast Asia, Sub-Saharan Africa,
#Western Europe
##########


#count number of occurrences for each city
def get_count(df):
    return df['city'].value_counts()


regionList = [
    Helpers('North America'),
    Helpers('South America'),
    Helpers('Central America & Caribbean'),
    Helpers('Sub-Saharan Africa'),
    Helpers('Middle East & North Africa'),
    Helpers('Western Europe'),
    Helpers('Central Asia'),
    Helpers('South Asia'),
    Helpers('East Asia'),
    Helpers('Southeast Asia'),
    Helpers('Australasia & Oceania'),
    Helpers('Eastern Europe')
]

name = regionList[0].name
DIR = 'D:\\Programming related\\Python Projects\\GTDdata\\schematics'
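get_count above is a thin wrapper over pandas value_counts; for example, assuming a frame with a 'city' column:

import pandas as pd

df = pd.DataFrame({'city': ['Cairo', 'Lima', 'Cairo']})
print(get_count(df))  # Cairo appears twice, Lima once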
Example #42
import cv2
from helpers import Helpers
helpers = Helpers()

# Open image
imagePath = '../sample-images/1.jpg'
imageCv = helpers.openImageCv(imagePath)

# Pre-processing
gray = helpers.cvToGrayScale(imageCv)
bilateral = helpers.cvApplyBilateralFilter(gray)
blur = helpers.cvApplyGaussianBlur(bilateral, 5)

# Detect edge contours, and find the plate contour
edged = helpers.cvToCannyEdge(blur)
contours = helpers.cvExtractContours(edged)
rectangleContours = helpers.cvFilterRectangleContours(contours)
plateContour = rectangleContours[0]
plateContour = helpers.cvResizeContour(plateContour, 1.0)

# Crop and blur the plate
plateImage = helpers.cvCropByContour(imageCv, plateContour)
plateImageBlur = helpers.cvApplyGaussianBlur(plateImage, 25)

# Find the plate's background color
plateBackgroundColor = helpers.cvFindMostOccurringColor(plateImageBlur)

# Draw over the plate
result = cv2.drawContours(imageCv.copy(), [plateContour], -1, plateBackgroundColor, -1)

# Show results
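The snippet ends before the display step; a minimal way to finish it with the standard OpenCV window calls (not part of the original):

cv2.imshow('Plate blended into background', result)
cv2.waitKey(0)
cv2.destroyAllWindows()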
Example #43
class ForecastDataProcessing:
    def __init__(self):
        self.scaler = MinMaxScaler(feature_range=(-1,1))
        self.helpers = Helpers()

    def aggregate_monthly(self, data):
        if 'date' in data:
            data['date'] = pd.to_datetime(data['date'], format="%Y-%d-%m")
            print(data['date'])
            data['date'] = data['date'].dt.year.astype('str') + '-' + data['date'].dt.month.astype('str') + '-01'
            data['date'] = pd.to_datetime(data['date'])
            data = data.groupby('date').sales.sum().reset_index()
            return data

    def get_diff(self, data):
        df_diff = data.copy()
        df_diff['prev_sales'] = df_diff['sales'].shift(1)
        df_diff = df_diff.dropna()
        df_diff['diff'] = (df_diff['sales'] - df_diff['prev_sales'])
        
        return df_diff

    def add_lag(self, data, lag_limit):
        if 'prev_sales' in data:
            df_supervised = data.drop(['prev_sales'],axis=1)
            for inc in range(1,lag_limit):
                field_name = 'lag_' + str(inc)
                df_supervised[field_name] = df_supervised['diff'].shift(inc)
            df_supervised = df_supervised.dropna().reset_index(drop=True)
            df_supervised = df_supervised.drop(['sales','date'],axis=1)
            return df_supervised[-6:].values

    def scale_and_reshape(self, data):
        self.scaler = self.scaler.fit(data)
        data = data.reshape(data.shape[0], data.shape[1])
        scaled = self.scaler.transform(data)
        X, y = scaled[:, 1:], scaled[:, 0:1]
        X = X.reshape(X.shape[0], 1, X.shape[1])

        return X

    def process_output(self, data, og_data, X):
        processed_prediction = self.parse_model_output(data, X)
        last_date = self.helpers.get_last_date(og_data)
        result = self.parse_predictions(processed_prediction, last_date)
        return result

    def parse_model_output(self, output, og_data):
        predicted = output.reshape(output.shape[0], 1, output.shape[1])
        pred_test_set = []
        for index in range(0,len(predicted)):
            pred_test_set.append(np.concatenate([predicted[index],og_data[index]],axis=1))
        pred_test_set = np.array(pred_test_set)
        pred_test_set = pred_test_set.reshape(pred_test_set.shape[0], pred_test_set.shape[2])
        pred_test_set_inverted = self.scaler.inverse_transform(pred_test_set)
        return pred_test_set_inverted

    def parse_predictions(self, pred_test_set_inverted, last_date):
        result_list = []
        for index in range(0,len(pred_test_set_inverted)):
            result_dict = {}
            result_dict['pred_value'] = int(pred_test_set_inverted[index][0])
            if(index == 0):
                date_n = self.helpers.add_months(last_date, 1)
            else:
                date_n = self.helpers.add_months(date_n, 1)
            result_dict['date'] = date_n
            result_list.append(result_dict)
        return pd.DataFrame(result_list)
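A sketch of how these steps appear intended to chain together, assuming a raw frame with 'date' and 'sales' columns and a trained model exposing predict (raw_df and model are hypothetical):

processor = ForecastDataProcessing()

monthly = processor.aggregate_monthly(raw_df)             # daily rows -> monthly totals
stationary = processor.get_diff(monthly)                  # difference out the trend
supervised = processor.add_lag(stationary, lag_limit=13)  # last 6 rows of lag features
X = processor.scale_and_reshape(supervised)               # scale to (-1, 1), reshape for the model
predictions = model.predict(X)                            # hypothetical trained model
result = processor.process_output(predictions, monthly, X)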
Example #44
"""
Toy example showing of data handler as well as basic setup with tweepy (and without our methods).

"""

import tweepy as tw

from helpers import Helpers

# initialize api
api_helpers = Helpers()
api = api_helpers._multi_init_api()

# Define search terms
search_words = '#lockdown' + ' -filter:retweets'
data_since = '2021-02-11'
lang = 'de'
items = 10

# Collect tweets with first API
api_used = 0
tweets = tw.Cursor(api[api_used].search,
                   q=search_words,
                   lang=lang,
                   since=data_since).items(items)
df = api_helpers.data_handler(tweets, geo=None, user_metadata=True)
print(df)
Example #45
from jnpr.jsnapy import SnapAdmin
from pprint import pprint
from jnpr.junos import Device
import difflib
import re
import xml.etree.ElementTree
from helpers import Helpers
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager

js = SnapAdmin()

helpers = Helpers()

config_file = """
hosts:
  - device: 192.168.122.9
    username: ubuntu
tests:
  - test_diff.yml
"""

#js.snap(config_file, "pre")
js.snap(config_file, "post")
chk = js.check(config_file, "pre", "post")

file_name = "/home/ubuntu/Evolved_Campus_Core_Automation/Inventory"
with open(file_name) as f:
    content = f.read()
    campuses = re.findall(r'\[(.*?)\]', content)
Example #46
	def create_wallet(self, customer_id):
		return CustomerWalletRepository().store({
			'id' : Helpers.generate_unique_code(),
			'customer_id' : customer_id,
			'current_balance' : 0
		})
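Helpers.generate_unique_code is not shown; a plausible minimal implementation, assuming all it needs to produce is a collision-resistant id:

import uuid

class Helpers:
    @staticmethod
    def generate_unique_code():
        # Random 32-character hex identifier; collisions are negligible.
        return uuid.uuid4().hex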
Example #47
    def test_image_download_retry_1(self,
                                    install_image=conftest.get_valid_image()):
        """
            Install an update, and block storage connection when we detect it's
            being copied over to the inactive partition.

            The test should result in a successful download retry.
        """
        if not env.host_string:
            execute(self.test_image_download_retry_1,
                    hosts=get_mender_clients(),
                    install_image=install_image)
            return

        # make tcp timeout quicker; non-persistent changes
        run("echo 2 > /proc/sys/net/ipv4/tcp_keepalive_time")
        run("echo 2 > /proc/sys/net/ipv4/tcp_keepalive_intvl")
        run("echo 3 > /proc/sys/net/ipv4/tcp_syn_retries")

        # to speed up timeouting client connection
        run("echo 1 > /proc/sys/net/ipv4/tcp_keepalive_probes")

        inactive_part = Helpers.get_passive_partition()
        deployment_id, new_yocto_id = common_update_procedure(install_image)

        # use iptables to block traffic to storage when installing starts
        for _ in range(60):
            time.sleep(0.5)
            with quiet():
                # make sure we are writing to the inactive partition
                output = run("fuser -mv %s" % (inactive_part))
            if output.return_code == 0:
                Helpers.gateway_connectivity(False,
                                             hosts=["s3.docker.mender.io"
                                                    ])  # disable connectivity
                break

        # re-enable connectivity after 2 retries
        self.wait_for_download_retry_attempts()
        Helpers.gateway_connectivity(True, hosts=["s3.docker.mender.io"
                                                  ])  # re-enable connectivity

        Helpers.verify_reboot_performed()
        assert Helpers.get_active_partition() == inactive_part
        assert Helpers.yocto_id_installed_on_machine() == new_yocto_id
        Helpers.verify_reboot_not_performed()
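Helpers.gateway_connectivity above toggles traffic to the storage host mid-download. One way such a helper can work with Fabric is by inserting and removing an iptables DROP rule; a sketch only, under that assumption, not the suite's actual implementation:

from fabric.api import run

def gateway_connectivity(enabled, hosts):
    # Remove (-D) or insert (-I) a DROP rule for each listed host.
    action = "-D" if enabled else "-I"
    for host in hosts:
        run("iptables %s OUTPUT -d %s -j DROP" % (action, host))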
Example #48
class Cells(object):
    '''
    Extracts each cell from the sudoku grid obtained
    from the Extractor
    '''
    def __init__(self, sudoku):
        print('Extracting cells...', end=' ')
        self.helpers = Helpers()
        self.cells = self.extractCells(sudoku)
        print('done.')

    def extractCells(self, sudoku):
        cells = []
        W, H = sudoku.shape

        cell_size = W // 9
        i, j = 0, 0
        for r in range(0, W, cell_size):

            j = 0
            for c in range(0, W, cell_size):
                cell = sudoku[r:r + cell_size, c:c + cell_size]
                cell = self.helpers.make_it_square(cell, 28)
                #self.helpers.show(cell, 'Before clean')
                cell = self.clean(cell)
                digit = Digit(cell).digit
                #self.helpers.show(digit, 'After clean')
                digit = self.centerDigit(digit)
                cells.append(digit // 255)
                j += 1
            i += 1
        return cells

    def clean(self, cell):
        contour = self.helpers.largestContour(cell.copy())
        x, y, w, h = cv2.boundingRect(contour)
        cell = self.helpers.make_it_square(cell[y:y + h, x:x + w], 28)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
        cell = cv2.morphologyEx(cell, cv2.MORPH_CLOSE, kernel)
        cell = 255 * (cell // 130)
        return cell

    def centerDigit(self, digit):
        digit = self.centerX(digit)
        digit = self.centerY(digit)
        return digit

    def centerX(self, digit):
        topLine = self.helpers.getTopLine(digit)
        bottomLine = self.helpers.getBottomLine(digit)
        if topLine is None or bottomLine is None:
            return digit
        centerLine = (topLine + bottomLine) >> 1
        imageCenter = digit.shape[0] >> 1
        digit = self.helpers.rowShift(digit,
                                      start=topLine,
                                      end=bottomLine,
                                      length=imageCenter - centerLine)
        return digit

    def centerY(self, digit):
        leftLine = self.helpers.getLeftLine(digit)
        rightLine = self.helpers.getRightLine(digit)
        if leftLine is None or rightLine is None:
            return digit
        centerLine = (leftLine + rightLine) >> 1
        imageCenter = digit.shape[1] >> 1
        digit = self.helpers.colShift(digit,
                                      start=leftLine,
                                      end=rightLine,
                                      length=imageCenter - centerLine)
        return digit
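The centering logic is plain integer arithmetic: (topLine + bottomLine) >> 1 halves the sum. For instance, a digit spanning rows 4 to 20 of a 28-pixel cell has centerLine 12 while imageCenter is 14, so rowShift moves the glyph down by 2 rows to align the two midlines; centerY does the same along the columns.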
Example #49
    def test_signed_updates(self, sig_case, bitbake_path, bitbake_variables,
                            connection):
        """Test various combinations of signed and unsigned, present and non-
        present verification keys."""

        file_flag = Helpers.get_file_flag(bitbake_variables)
        install_flag = Helpers.get_install_flag(connection)

        # mmc mount points are named: /dev/mmcblk0p1
        # ubi volumes are named: ubi0_1
        (active,
         passive) = determine_active_passive_part(bitbake_variables,
                                                  connection)
        if passive.startswith("ubi"):
            passive = "/dev/" + passive

        # Generate "update" appropriate for this test case.
        # Cheat a little. Instead of spending a lot of time on a lot of reboots,
        # just verify that the contents of the update are correct.
        new_content = sig_case.label
        with open("image.dat", "w") as fd:
            fd.write(new_content)
            # Write some extra data just to make sure the update is big enough
            # to be written even if the checksum is wrong. If it's too small it
            # may fail before it has a chance to be written.
            fd.write("\x00" * (1048576 * 8))

        artifact_args = ""

        # Generate artifact with or without signature.
        if sig_case.signature:
            artifact_args += " -k %s" % signing_key(sig_case.key_type).private

        # Generate artifact with specific version. None means default.
        if sig_case.artifact_version is not None:
            artifact_args += " -v %d" % sig_case.artifact_version

        if sig_case.key_type:
            sig_key = signing_key(sig_case.key_type)
        else:
            sig_key = None

        image_type = bitbake_variables["MENDER_DEVICE_TYPE"]

        subprocess.check_call(
            "mender-artifact write rootfs-image %s -t %s -n test-update %s image.dat -o image.mender"
            % (artifact_args, image_type, file_flag),
            shell=True,
        )

        # If instructed to, corrupt the signature and/or checksum.
        if ((sig_case.signature and not sig_case.signature_ok)
                or not sig_case.checksum_ok
                or not sig_case.header_checksum_ok):
            tar = subprocess.check_output(["tar", "tf", "image.mender"])
            tar_list = tar.split()
            tmpdir = tempfile.mkdtemp()
            try:
                shutil.copy("image.mender",
                            os.path.join(tmpdir, "image.mender"))
                cwd = os.open(".", os.O_RDONLY)
                os.chdir(tmpdir)
                try:
                    tar = subprocess.check_output(
                        ["tar", "xf", "image.mender"])
                    if not sig_case.signature_ok:
                        # Corrupt signature.
                        with open("manifest.sig", "r+") as fd:
                            Helpers.corrupt_middle_byte(fd)
                    if not sig_case.checksum_ok:
                        os.chdir("data")
                        try:
                            data_list = subprocess.check_output(
                                ["tar", "tzf", "0000.tar.gz"])
                            data_list = data_list.split()
                            subprocess.check_call(
                                ["tar", "xzf", "0000.tar.gz"])
                            # Corrupt checksum by changing file slightly.
                            with open("image.dat", "r+") as fd:
                                Helpers.corrupt_middle_byte(fd)
                            # Pack it up again in same order.
                            os.remove("0000.tar.gz")
                            subprocess.check_call(
                                ["tar", "czf", "0000.tar.gz"] + data_list)
                            for data_file in data_list:
                                os.remove(data_file)
                        finally:
                            os.chdir("..")

                    if not sig_case.header_checksum_ok:
                        data_list = subprocess.check_output(
                            ["tar", "tzf", "header.tar.gz"])
                        data_list = data_list.split()
                        subprocess.check_call(["tar", "xzf", "header.tar.gz"])
                        # Corrupt checksum by changing file slightly.
                        with open("headers/0000/files", "a") as fd:
                            # Some extra data to corrupt the header checksum,
                            # but still valid JSON.
                            fd.write(" ")
                        # Pack it up again in same order.
                        os.remove("header.tar.gz")
                        subprocess.check_call(["tar", "czf", "header.tar.gz"] +
                                              data_list)
                        for data_file in data_list:
                            os.remove(data_file)

                    # Make sure we put it back in the same order.
                    os.remove("image.mender")
                    subprocess.check_call(["tar", "cf", "image.mender"] +
                                          tar_list)
                finally:
                    os.fchdir(cwd)
                    os.close(cwd)

                shutil.move(os.path.join(tmpdir, "image.mender"),
                            "image.mender")

            finally:
                shutil.rmtree(tmpdir, ignore_errors=True)

        put_no_sftp("image.mender", connection, remote="/data/image.mender")
        try:
            # Update key configuration on device.
            connection.run(
                "cp /etc/mender/mender.conf /data/etc/mender/mender.conf.bak")
            get_no_sftp("/etc/mender/mender.conf", connection)
            with open("mender.conf") as fd:
                config = json.load(fd)
            if sig_case.key:
                config["ArtifactVerifyKey"] = (
                    "/data/etc/mender/%s" % os.path.basename(sig_key.public))
                put_no_sftp(
                    sig_key.public,
                    connection,
                    remote="/data/etc/mender/%s" %
                    os.path.basename(sig_key.public),
                )
            else:
                if config.get("ArtifactVerifyKey"):
                    del config["ArtifactVerifyKey"]
            with open("mender.conf", "w") as fd:
                json.dump(config, fd)
            put_no_sftp("mender.conf",
                        connection,
                        remote="/etc/mender/mender.conf")
            os.remove("mender.conf")

            # Start by writing known "old" content in the partition.
            old_content = "Preexisting partition content"
            if "ubi" in passive:
                # ubi volumes cannot be directly written to, we have to use
                # ubiupdatevol
                connection.run('echo "%s" | dd of=/tmp/update.tmp && '
                               "ubiupdatevol %s /tmp/update.tmp; "
                               "rm -f /tmp/update.tmp" %
                               (old_content, passive))
            else:
                connection.run('echo "%s" | dd of=%s' % (old_content, passive))

            result = connection.run("mender %s /data/image.mender" %
                                    install_flag,
                                    warn=True)

            if sig_case.success:
                if result.return_code != 0:
                    pytest.fail(
                        "Update failed when it should have succeeded: %s, Output: %s"
                        % (sig_case.label, result))
            else:
                if result.return_code == 0:
                    pytest.fail(
                        "Update succeeded when it should not have: %s, Output: %s"
                        % (sig_case.label, result))

            if sig_case.update_written:
                expected_content = new_content
            else:
                expected_content = old_content

            try:
                content = connection.run(
                    "dd if=%s bs=%d count=1" %
                    (passive, len(expected_content))).stdout
                assert content == expected_content, "Case: %s" % sig_case.label

            # In Fabric context, SystemExit means CalledProcessError. We should
            # not catch all exceptions, because we want to leave assertions
            # alone.
            # In Fabric2 there might be different exception thrown in that case
            # which is UnexpectedExit.
            except (SystemExit, UnexpectedExit):
                if "mender-ubi" in bitbake_variables["DISTRO_FEATURES"].split(
                ):
                    # For UBI volumes specifically: The UBI_IOCVOLUP call which
                    # Mender uses prior to writing the data, takes a size
                    # argument, and if you don't write that amount of bytes, the
                    # volume is marked corrupted as a security measure. This
                    # sometimes triggers in our checksum mismatch tests, so
                    # accept the volume being unreadable in that case.
                    pass
                else:
                    raise

        finally:
            # Reset environment to what it was.
            connection.run("fw_setenv mender_boot_part %s" % active[-1:])
            connection.run("fw_setenv mender_boot_part_hex %x" %
                           int(active[-1:]))
            connection.run("fw_setenv upgrade_available 0")
            connection.run(
                "cp -L /data/etc/mender/mender.conf.bak $(realpath /etc/mender/mender.conf)"
            )
            if sig_key:
                connection.run("rm -f /etc/mender/%s" %
                               os.path.basename(sig_key.public))
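# The sig_case fixture consumed above bundles one signing scenario per run.
# A hypothetical sketch of its shape, inferred from the attributes the test
# reads (not the suite's real fixture definition):
from collections import namedtuple

SignatureCase = namedtuple("SignatureCase", [
    "label",                # scenario name, also written as the update payload
    "signature",            # sign the artifact at all?
    "signature_ok",         # keep the signature intact, or corrupt it?
    "checksum_ok",          # keep the payload checksum intact, or corrupt it?
    "header_checksum_ok",   # keep the header checksum intact, or corrupt it?
    "key",                  # install a verification key on the device?
    "key_type",             # selects the signing key, e.g. "RSA" or "EC"
    "artifact_version",     # artifact format version, None for the default
    "success",              # is the install expected to succeed?
    "update_written",       # should the passive partition hold the new content?
])

# Example: a correctly signed update verified with an installed RSA key.
signed_and_verified = SignatureCase(
    "signed-ok", True, True, True, True, True, "RSA", None, True, True)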
Example #50
    def setUp(self):
        super(WorkspaceTestCase, self).setUp()
        self.client = Helpers.create_batchai_client(
            self)  # type: BatchAIManagementClient
Example #51
def index():
    if request.method == "GET":
        return 'OK'
    elif request.method == "POST":
        # Store the IP address of the requester
        request_ip = ipaddress.ip_address(u'{0}'.format(request.remote_addr))

        # Check the POST source against the IP whitelist
        if WHITELIST_IP is not None:
            for block in [WHITELIST_IP]:
                if request_ip in ipaddress.ip_network(block):
                    break  # the remote_addr is within a whitelisted network range
            else:
                abort(403)

        payload = json.loads(request.data.decode('utf-8'))

        # common to both supported event types
        repo = None
        repo_meta = {}
        if payload['object_kind'] in ['push', 'issue']:
            repo_meta = {
                'homepage': payload['repository']['homepage'],
            }
            repo = repos.get(repo_meta['homepage'], None)

        if not repo:
            return json.dumps({'error': "nothing to do for " + str(repo_meta)})

        private_token = repo.get('private_token', None)

        if payload['object_kind'] == "push":
            match = re.match(r"refs/heads/(?P<branch>.*)", payload['ref'])
            if match:
                repo_meta['branch'] = match.groupdict()['branch']
            else:
                return json.dumps(
                    {'error': "unable to determine pushed branch"})

            push = repo.get("push", None)
            if push:
                branch = push.get(repo_meta['branch'], None)
                if not branch:
                    branch = repo['push'].get("other", None)
                if branch:
                    branch_actions = branch.get("actions", None)

                    if branch_actions:
                        for action in branch_actions:
                            try:
                                subp = subprocess.Popen(action,
                                                        cwd=branch.get(
                                                            "path", "."),
                                                        shell=True)
                                subp.wait()
                            except Exception as e:
                                print(e)
            return 'OK'

        if payload['object_kind'] == "issue":
            issue = repo.get("issue", None)
            if issue:
                # notification for new issue
                if (issue.get("user_notify")
                        and payload['object_attributes']['action'] == "open"):
                    if not private_token:
                        abort(403)
                    gl = GitlabApi(repo_meta['homepage'], private_token)
                    notify = issue['user_notify']
                    description = payload['object_attributes']['description']
                    usernames = []
                    for n in notify:
                        username_match = re.match("^@[a-zA-Z0-9_.+-]+$", n)
                        if username_match:
                            # simple username
                            usernames.append(n)
                        else:
                            # try to pull the email from the issue body
                            # and derive the username from that
                            body_match = re.match(n, description)
                            if body_match and private_token:
                                email = body_match.group(1)
                                username = gl.lookup_username(email)
                                if username:
                                    usernames.append("@" + username)
                    # narrow down to unique names
                    usernames = list(set(usernames))
                    if len(usernames) > 0:
                        project_id = payload['object_attributes']['project_id']
                        issue_id = payload['object_attributes']['id']
                        gl.comment_on_issue(
                            project_id, issue_id, "Automatic mention for %s" %
                            (" and ".join(usernames)))

                # parse commit message and manage labels on issues
                if issue.get("labels"):
                    if not private_token:
                        abort(403)
                    gl = GitlabApi(repo_meta['homepage'], private_token)
                    helpers = Helpers()
                    project_id = payload['object_attributes']['project_id']
                    labels = helpers.get_label_names(gl.get_labels(project_id))
                    list_labels = helpers.get_list_labels(
                        gl.get_boards(project_id))
                    for commit in payload['commits']:
                        parse_commit = helpers.parse_commit_labels(
                            commit['message'])
                        for issue_iid in parse_commit['issues']:
                            issue_labels = gl.get_issue(project_id, issue_iid)
                            updated_labels = helpers.simplify_labels(
                                issue_labels, parse_commit['label_ops'])
                            gl.set_issue_labels(project_id, issue_iid,
                                                updated_labels)
            return 'OK'

        # unknown event type
        return json.dumps({'error': "wrong event type"})
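# The handler above walks a module-level `repos` mapping keyed by repository
# homepage. A hypothetical configuration illustrating the shape the code
# expects (the real mapping is loaded elsewhere and may differ):
repos = {
    "https://gitlab.example.com/group/project": {
        "private_token": "gitlab-api-token",
        "push": {
            # Per-branch actions; "other" is the fallback for any branch
            # without an entry of its own.
            "master": {
                "path": "/srv/project",
                "actions": ["git pull", "make deploy"],
            },
            "other": {
                "path": "/srv/project",
                "actions": ["git fetch"],
            },
        },
        "issue": {
            # Plain @usernames, or regexes whose first group captures an
            # email to resolve into a username, mentioned on new issues.
            "user_notify": ["@maintainer", r"Reported by: (\S+@\S+)"],
            "labels": True,
        },
    },
}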
Example #52
import time
import logging
from actions import Actions
from helpers import Helpers
from usb_helpers import USBTools

usb = USBTools()
helpers = Helpers()

logger = logging.getLogger(__name__)


class USBEventManager(object):
    def __init__(self, no_actions):

        self._event = False
        self._no_actions = no_actions
        self._app_config = helpers.config
        self._config_path = helpers.config_path

        # Configuration details
        self._default_actions: dict = self._app_config["default_actions"]
        self._check_interval: int = int(self._app_config["check_interval"])
        self._allow_unknown_removal: bool = self._app_config["allow_unknown_removal"]
        self._devices_with_specific_actions: dict = self._app_config["device_specific"]
        self._allow_unknown_devices_at_start: bool = self._app_config[
            "allow_unknown_devices_at_start"
        ]

        # Load the white/blacklists from the configuration file.
        self._get_lists()
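# USBEventManager pulls its settings from helpers.config. A hypothetical
# configuration showing the keys the constructor reads (the concrete values
# and action names here are made up; the real ones live in the project's
# config file):
example_config = {
    "default_actions": {"add": "log", "remove": "lock_screen"},
    "check_interval": 2,                    # seconds between USB polls
    "allow_unknown_removal": False,
    "device_specific": {
        "0781:5567": {"add": "ignore"},     # per-device action overrides
    },
    "allow_unknown_devices_at_start": True,
}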
Example #53
    def setUp(self):
        self.sut = Helpers()
Example #54
class ConsentFormHandler(Resource):
    def __init__(self):
        super(ConsentFormHandler, self).__init__()
        self.am_url = current_app.config["ACCOUNT_MANAGEMENT_URL"]
        self.am_user = current_app.config["ACCOUNT_MANAGEMENT_USER"]
        self.am_password = current_app.config["ACCOUNT_MANAGEMENT_PASSWORD"]
        self.timeout = current_app.config["TIMEOUT"]
        try:
            self.AM = AccountManagerHandler(self.am_url, self.am_user,
                                            self.am_password, self.timeout)
        except Exception as e:
            debug_log.warn(
                "Initialization of AccountManager failed. We will crash later but note it here.\n{}"
                .format(repr(e)))

        self.Helpers = Helpers(current_app.config)

    @error_handler
    def get(self, account_id):
        '''get
        :return: Returns Consent form to UI for user input.
        '''
        _consent_form = Consent_form_Out
        service_ids = request.args

        sq.task("Fetch services")
        sink = getService(service_ids["sink"])
        _consent_form["sink"]["service_id"] = sink["name"]
        source = getService(service_ids["source"])
        _consent_form["source"]["service_id"] = source["name"]

        sq.task("Generate RS_ID")
        sq.task("Store RS_ID")

        rs_id = self.Helpers.gen_rs_id(source["name"])
        _consent_form["source"]["rs_id"] = rs_id

        sq.reply_to("UI", msg="Consent Form+RS_ID")
        return _consent_form

    @error_handler
    def post(self, account_id):
        '''post
        :return: Returns 201 when consent has been created
        '''
        debug_log.info(dumps(request.json, indent=2))

        _consent_form = request.json
        sink_srv_id = _consent_form["sink"]["service_id"]
        source_srv_id = _consent_form["source"]["service_id"]

        sq.task("Validate RS_ID")
        # Validate RS_ID (it must exist and not have been used before).
        if self.Helpers.validate_rs_id(_consent_form["source"]["rs_id"]):
            self.Helpers.store_consent_form(_consent_form)  # Store Consent Form
        else:
            raise DetailedHTTPException(title="RS_ID Validation error.",
                                        detail="RS_ID could not be validated.",
                                        status=403)

        sq.send_to("Account Mgmt", "GET surrogate_id & slr_id")
        try:
            sink_sur = self.AM.getSUR_ID(sink_srv_id, account_id)
            source_sur = self.AM.getSUR_ID(source_srv_id, account_id)
        except AttributeError as e:
            raise DetailedHTTPException(
                status=502,
                title="It would seem initiating Account Manager Handler has failed.",
                detail="Account Manager might be down or unresponsive.",
                trace=traceback.format_exc(limit=100).splitlines())
        debug_log.info("sink_sur = {}".format(sink_sur))
        debug_log.info("source_sur = {}".format(source_sur))

        # Get the service link record id and surrogate id, one pair for the
        # sink and one for the source.
        sink_attrs = sink_sur["data"]["surrogate_id"]["attributes"]
        slr_id_sink = sink_attrs["servicelinkrecord_id"]
        surrogate_id_sink = sink_attrs["surrogate_id"]

        source_attrs = source_sur["data"]["surrogate_id"]["attributes"]
        slr_id_source = source_attrs["servicelinkrecord_id"]
        surrogate_id_source = source_attrs["surrogate_id"]

        # Generate common_cr for both sink and source.
        sq.task("Generate common CR")
        common_cr_source = self.Helpers.gen_cr_common(
            surrogate_id_source, _consent_form["source"]["rs_id"],
            slr_id_source)
        common_cr_sink = self.Helpers.gen_cr_common(
            surrogate_id_sink, _consent_form["source"]["rs_id"], slr_id_sink)

        sq.task("Generate ki_cr")
        ki_cr = self.Helpers.Gen_ki_cr(self)

        sq.task("Generate CR for sink")
        sink_cr = self.Helpers.gen_cr_sink(common_cr_sink, _consent_form)

        sq.task("Generate CR for source")
        source_cr = self.Helpers.gen_cr_source(common_cr_source, _consent_form,
                                               Operator_public_key)
        debug_log.info(sink_cr)
        debug_log.info(source_cr)
        sq.task("Generate CSR's")
        sink_csr = self.Helpers.gen_csr(surrogate_id_sink,
                                        sink_cr["cr"]["common_part"]["cr_id"],
                                        "Active", "null")
        source_csr = self.Helpers.gen_csr(
            surrogate_id_source, source_cr["cr"]["common_part"]["cr_id"],
            "Active", "null")

        sq.send_to("Account Mgmt", "Send CR/CSR to sign and store")
        result = self.AM.signAndstore(sink_cr, sink_csr, source_cr, source_csr,
                                      account_id)
        debug_log.info(dumps(result, indent=3))
        sink_cr = result["data"]["sink"]["consentRecord"]["attributes"]["cr"]
        sink_csr = result["data"]["sink"]["consentStatusRecord"]["attributes"][
            "csr"]

        source_cr = result["data"]["source"]["consentRecord"]["attributes"][
            "cr"]
        source_csr = result["data"]["source"]["consentStatusRecord"][
            "attributes"]["csr"]

        crs_csrs_payload = {
            "sink": {
                "cr": sink_cr,
                "csr": sink_csr
            },
            "source": {
                "cr": source_cr,
                "csr": source_csr
            }
        }
        #logger.info("Going to Celery task")
        sq.send_to("Sink", "Post CR-Sink, CSR-Sink")
        sq.send_to("Source", "Post CR-Source, CSR-Source")

        debug_log.info(dumps(crs_csrs_payload, indent=2))
        CR_installer.delay(crs_csrs_payload, SH.getService_url(sink_srv_id),
                           SH.getService_url(source_srv_id))
        return {"status": 201, "msg": "CREATED"}, 201
Example #55
    def __init__(self, df_small, df_med, df_large):
        self.helpers = Helpers()
        self.desirable_conditions(df_small, df_med, df_large)
        self.t_vs_p(df_small, "10")
Example #56
    def sync_record(self, model):

        if model.leaf_model_name == "PortInterface":
            log.info("Receivent update for PortInterface",
                     port=model.port.portId,
                     interface=model)
            return self.sync_record(model.port)

        if model.leaf_model_name == "FabricIpAddress":
            log.info("Receivent update for FabricIpAddress",
                     port=model.interface.port.portId,
                     interface=model.interface.name,
                     ip=model.ip)
            return self.sync_record(model.interface.port)

        log.info("Adding port %s/%s to onos-fabric" %
                 (model.switch.ofId, model.portId))
        interfaces = []
        for intf in model.interfaces.all():
            i = {"name": intf.name, "ips": [i.ip for i in intf.ips.all()]}
            if intf.vlanUntagged:
                i["vlan-untagged"] = intf.vlanUntagged
            interfaces.append(i)

        # Send port config to onos-fabric netcfg
        data = {
            "ports": {
                "%s/%s" % (model.switch.ofId, model.portId): {
                    "interfaces": interfaces,
                    "hostLearning": {
                        "enabled": model.host_learning
                    }
                }
            }
        }

        log.debug("Port %s/%s data" % (model.switch.ofId, model.portId),
                  data=data)

        onos = Helpers.get_onos_fabric_service()

        url = 'http://%s:%s/onos/v1/network/configuration/' % (
            onos.rest_hostname, onos.rest_port)

        r = requests.post(url,
                          json=data,
                          auth=HTTPBasicAuth(onos.rest_username,
                                             onos.rest_password))

        if r.status_code != 200:
            log.error(r.text)
            raise Exception("Failed to add port  %s/%s into ONOS" %
                            (model.switch.ofId, model.portId))
        else:
            try:
                log.info("Port %s/%s response" %
                         (model.switch.ofId, model.portId),
                         json=r.json())
            except Exception:
                log.info("Port %s/%s response" %
                         (model.switch.ofId, model.portId),
                         text=r.text)
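# For a single port with one untagged interface, the netcfg payload POSTed
# above comes out roughly like this (illustrative identifiers only):
example_netcfg = {
    "ports": {
        "of:0000000000000001/1": {
            "interfaces": [
                {"name": "eth0", "ips": ["10.0.0.1/24"], "vlan-untagged": 100}
            ],
            "hostLearning": {"enabled": True},
        }
    }
}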
Example #57
    def test_format_url(self):
        url = Helpers.format_url("onf.com")
        self.assertEqual(url, "http://onf.com")
        url = Helpers.format_url("http://onf.com")
        self.assertEqual(url, "http://onf.com")
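# One minimal implementation that satisfies both assertions above is to
# prepend a scheme only when the URL lacks one. A sketch, not the project's
# actual Helpers.format_url:
class HelpersSketch(object):
    @staticmethod
    def format_url(url):
        # Leave URLs that already carry a scheme untouched.
        if url.startswith(("http://", "https://")):
            return url
        return "http://" + url

assert HelpersSketch.format_url("onf.com") == "http://onf.com"
assert HelpersSketch.format_url("http://onf.com") == "http://onf.com"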
Example #58
    def test_uboot_mender_saveenv_canary(self, bitbake_variables, connection):
        """Tests that the mender_saveenv_canary works correctly, which tests
        that Mender will not proceed unless the U-Boot boot loader has saved the
        environment."""

        file_flag = Helpers.get_file_flag(bitbake_variables)
        install_flag = Helpers.get_install_flag(connection)
        image_type = bitbake_variables["MACHINE"]

        try:
            # Make a dummy/broken update
            subprocess.call(
                "dd if=/dev/zero of=image.dat bs=1M count=0 seek=16",
                shell=True)
            subprocess.call(
                "mender-artifact write rootfs-image -t %s -n test-update %s image.dat -o image.mender"
                % (image_type, file_flag),
                shell=True,
            )
            put_no_sftp("image.mender",
                        connection,
                        remote="/var/tmp/image.mender")

            env_conf = connection.run("cat /etc/fw_env.config").stdout
            env_conf_lines = env_conf.rstrip("\n\r").split("\n")
            assert len(env_conf_lines) == 2
            for i in [0, 1]:
                entry = env_conf_lines[i].split()
                connection.run(
                    "dd if=%s skip=%d bs=%d count=1 iflag=skip_bytes > /data/old_env%d"
                    % (entry[0], int(entry[1], 0), int(entry[2], 0), i))

            try:
                # Try to manually remove the canary first.
                connection.run("fw_setenv mender_saveenv_canary")
                result = connection.run("mender %s /var/tmp/image.mender" %
                                        install_flag,
                                        warn=True)
                assert (result.return_code !=
                        0), "Update succeeded when canary was not present!"
                output = connection.run(
                    "fw_printenv upgrade_available").stdout.rstrip("\n")
                # Upgrade should not have been triggered.
                assert output == "upgrade_available=0"

                # Then zero the environment, causing the libubootenv to fail
                # completely.
                for i in [0, 1]:
                    entry = env_conf_lines[i].split()
                    connection.run(
                        "dd if=/dev/zero of=%s seek=%d bs=%d count=1 oflag=seek_bytes"
                        % (entry[0], int(entry[1], 0), int(entry[2], 0)))
                result = connection.run("mender %s /var/tmp/image.mender" %
                                        install_flag,
                                        warn=True)
                assert (result.return_code !=
                        0), "Update succeeded when canary was not present!"
                # This should just fail, since we don't provide a default
                # environment in libubootenv (we used to for u-boot-fw-utils).
                result = connection.run("fw_printenv upgrade_available",
                                        warn=True)
                assert result.return_code != 0

            finally:
                # Restore environment to what it was.
                for i in [0, 1]:
                    entry = env_conf_lines[i].split()
                    connection.run(
                        "dd of=%s seek=%d bs=%d count=1 oflag=seek_bytes < /data/old_env%d"
                        % (entry[0], int(entry[1], 0), int(entry[2], 0), i))
                    connection.run("rm -f /data/old_env%d" % i)

        finally:
            # Cleanup.
            os.remove("image.mender")
            os.remove("image.dat")
Example #59
    def __init__(self):
        self.scaler = MinMaxScaler(feature_range=(-1, 1))
        self.helpers = Helpers()
Example #60
    def __init__(self):
        Helpers.__init__(self)
        self.spec_url = None
        self.srpm_url = None
        self.spec_file = None
        self.srpm_file = None