Example #1
File: convert.py Project: Byron/bit
    def execute(self, args, remaining_args):
        if sys.stdin.isatty():
            raise AssertionError("This command reads all values from standard input - please redirect the output of a zfs command")
        # end must not have tty

        ParserType, ZType = self.input_command_map[args.from_cmd]
        parser = ParserType()

        if args.format == self.FORMAT_CSV:
            sys.stdout.write('host' + csv_sep)
            if parser.schema is None:
                parser.parse_schema(sys.stdin.readline())
            # end assure schema is dynamically parsed as needed
            sys.stdout.write(csv_sep.join(n for n,c in parser.schema) + '\n')
            # end header

            for sample in parser.parse_stream(sys.stdin):
                csv_convert(args.host, sample)
            # end for each sample
        else:
            samples = list(parser.parse_stream(sys.stdin))
            if args.format in (self.FORMAT_SQL, self.FORMAT_SQL_GRAPHITE):
                session = ZSession.new()
                session.sync(args.host, samples, ZType).commit()
            # end handle sql/graphite
            if args.format in (self.FORMAT_GRAPHITE, self.FORMAT_SQL_GRAPHITE):
                conv = GraphiteConverter()
                conv.send(time(), args.host, samples, ZType)
            # end handle sql/graphite
        # handle any other format than csv

        return self.SUCCESS
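The CSV branch above follows a streaming pattern: derive the schema from the first input line when it is not known up front, write a header row, then convert samples one at a time instead of buffering all of stdin. The sketch below is a minimal, self-contained illustration of that pattern; DummyParser, convert_to_csv, and the whitespace-separated input format are hypothetical stand-ins, not part of the bit project's API.

import sys

csv_sep = ','  # hypothetical separator, mirroring the csv_sep used above


class DummyParser(object):
    """Hypothetical parser with a lazily parsed, whitespace-separated schema."""
    def __init__(self):
        self.schema = None

    def parse_schema(self, line):
        # e.g. a header line such as 'NAME USED AVAIL'
        self.schema = [(name, str) for name in line.split()]

    def parse_stream(self, stream):
        for line in stream:
            yield line.split()


def convert_to_csv(host, stream, out=sys.stdout.write):
    parser = DummyParser()
    if parser.schema is None:
        parser.parse_schema(stream.readline())
    # header: host column first, then the parsed schema columns
    out('host' + csv_sep)
    out(csv_sep.join(n for n, c in parser.schema) + '\n')
    # stream samples one row at a time - the whole input is never buffered
    for sample in parser.parse_stream(stream):
        out(host + csv_sep + csv_sep.join(sample) + '\n')


if __name__ == '__main__':
    import io
    convert_to_csv('nas01', io.StringIO('NAME USED AVAIL\ntank 10G 5G\n'))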
Example #2
File: filesystem.py Project: Byron/bit
    def execute(self, args, remaining_args):
        def handle_report(report):
            if not report.records:
                self.log().info('No result')
            else:
                report.serialize(report.SERIALIZE_TTY, sys.stdout.write)
            # end handle empty reports
        # end utility

        if args.mode == self.MODE_SYNC:
            if len(remaining_args) == 1:
                remaining_args.append(SnapshotSender.DEST_MODE_AUTO)
            # end auto-setup

            if len(remaining_args) != 2:
                raise ParserError("Please specify source and destination file-system zfs url, i.e. sync zfs://host/fs zfs://host/dest-fs, destination can be left out for auto-mode.\nCan also be auto, property, configured")
            # end verify arguments

            surl, durl = remaining_args
            surl = ZFSURL(surl)
            session = ZSession.new()

            try:
                if args.report:
                    handle_report(SnapshotSender.report_candidates(session.instance_by_url(surl)))
                    return self.SUCCESS
                # end handle complete report

                sfs = session.instance_by_url(surl, force_dataset=True)
                if durl == self.MULTI_SYNC_MODE:
                    senders = SnapshotSender.new_from_properties(sfs)
                else:
                    senders = [SnapshotSender.new(sfs, durl)]
            except ValueError as err:
                self.log().error(str(err))
                return self.ERROR
            # end handle invalid source

            if not senders:
                self.log().info('no filesystem configured a destination url using the zfs:receive-url property')
                return self.SUCCESS
            # end handle no result

            if args.script:
                for ss in senders:
                    ss.stream_script(sys.stdout.write)
                # end for each sender to write a script for
            else:
                # by default we generate a report
                rep = senders[0].report()
                for ss in senders[1:]:
                    ss.report(rep)
                # end for each report to generate
                if len(rep.records) > 1:
                    agr = rep.aggregate_record()
                    rep.records.append(agr)
                # end aggregation makes sense only if there are multiple records
                handle_report(rep)
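The report branch at the end uses an accumulation pattern: the first sender creates the report, the remaining senders append their records to it, and an aggregate row is added only when more than one record exists. Here is a minimal sketch of that shape, assuming hypothetical Report and Sender classes that merely mimic the calls seen above; the real bit classes differ.

class Report(object):
    """Hypothetical container mimicking the .records / aggregate_record() calls above."""
    def __init__(self):
        self.records = []

    def aggregate_record(self):
        # naive aggregate: column-wise sum of the numeric records
        return [sum(col) for col in zip(*self.records)]


class Sender(object):
    """Hypothetical sender whose report() appends one record to a shared Report."""
    def __init__(self, record):
        self._record = record

    def report(self, rep=None):
        rep = rep if rep is not None else Report()
        rep.records.append(self._record)
        return rep


senders = [Sender([1, 2]), Sender([3, 4])]
rep = senders[0].report()
for ss in senders[1:]:
    ss.report(rep)
if len(rep.records) > 1:
    rep.records.append(rep.aggregate_record())
print(rep.records)  # [[1, 2], [3, 4], [4, 6]]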
Example #3
File: list.py Project: Byron/bit
    def execute(self, args, remaining_args):
        config = self.settings_value()
        session = ZSession.new()
        zcls = ZPool if args.type == ZReportGenerator.TYPE_POOL else ZDataset
        query = session.query(zcls)
        table = zcls.__table__
        columns = table.columns.keys()
        hosts_attribute = zcls.host
        name_attribute = zcls.name
        columns_important = getattr(config.columns, args.type)

        if args.type == ZReportGenerator.TYPE_SNAPSHOT:
            query = query.filter(ZDataset.avail == None)
            columns_important = config.columns.snapshot
        elif args.type == ZReportGenerator.TYPE_FILESYSTEM:
            query = query.filter(ZDataset.avail != None).filter(ZDataset.type == ZReportGenerator.TYPE_FILESYSTEM)
            columns_important = config.columns.filesystem

        # COLUMNS FILTER
        #################
        if args.columns:
            has_user_columns = True
            if len(args.columns) == 1 and args.columns[0] in (self.COLUMN_ALL, self.COLUMN_IMPORTANT):
                if args.columns[0] == self.COLUMN_IMPORTANT:
                    args.columns = columns_important
                else:
                    has_user_columns = False
                # end handle 'all'
            # end handle presets

            if has_user_columns:
                columns = self.verify_columns(table.columns, args.columns)
                if not columns:
                    return self.ERROR
                # end early abort
            # end handle special case: all
        # end check provided columns

        # Always use the updated_at column
        columns.insert(0, 'updated_at')

        # HOSTS FILTER
        ##############
        if args.host:
            query = query.filter(hosts_attribute == args.host)
        # end

        # Name filter
        ##############
        if args.name:
            name = '%%%s%%' % args.name
            query = query.filter(name_attribute.like(name))
        # end handle name filter

        # ORDER
        #########
        # NOTE: if there is no order, order by creation ascending !
        if not args.order_by_asc and not args.order_by_desc:
            args.order_by_asc = ['host', 'creation']
        # end auto-order

        for attr, order in (('order_by_asc', 'asc'), ('order_by_desc', 'desc')):
            order_cols = getattr(args, attr)
            if not order_cols:
                continue
            # end handle order_cols
            order_cols = self.columns_by_names(table.columns, order_cols)
            if order_cols:
                query = query.order_by(*(getattr(col, order)() for col in order_cols))
        # end for each attr, order
        
        rep = Report()
        rep.columns = self.table_schema_from_colums(table.columns, columns)
        now = datetime.now()

        # FILL RECORDS
        ##############
        col_to_attr = zcls.__mapper__.get_property_by_column
        name_to_col = table.columns.__getitem__
        for inst in query:
            rec = list()

            if isinstance(inst, ZDataset) and args.leaf and not inst.is_snapshot() and list(inst.children()):
                continue
            # end skip non-leaf datasets

            for cid, name in enumerate(columns):
                if name == self.COLUMN_URL:
                    val = str(ZFSURL.new_from_dataset(inst.host, inst.name))
                else:
                    val = getattr(inst, col_to_attr(name_to_col(name)).key)
                    if isinstance(val, datetime):
                        val = now - val
                    # end handle conversions
                # end handle special case
                rec.append(val)
            # end for each column
            rep.records.append(rec)
        # end for each row

        # AGGREGATION
        ##################
        if len(rep.records) > 1:
            agr = rep.aggregate_record()
            agr[0] = now - now
            rep.records.append(agr)

            if args.aggregate_only:
                rep.records = rep.records[-1:]
            # end remove all records but aggregate
        # end aggregate only if there is something

        # Finally, make sure updated_at becomes seen - we now have the values and no one cares about the schema
        # names anymore
        for col in rep.columns:
            if col[0] == 'updated_at':
                col[0] = 'seen'
        # end rename updated_at

        rep.serialize(Report.SERIALIZE_TTY, sys.stdout.write)
        return self.SUCCESS
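The query construction above builds filters and ordering incrementally, which is the usual SQLAlchemy style: each filter()/order_by() call returns a new query object, so clauses are attached only when the corresponding argument was given. Below is a small standalone sketch of the same pattern against a hypothetical Dataset model; the model, build_query, and the 'nas01' host are illustrative assumptions, not bit's actual ZDataset or ZSession.

from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Dataset(Base):
    """Hypothetical model standing in for ZDataset."""
    __tablename__ = 'dataset'
    id = Column(Integer, primary_key=True)
    host = Column(String)
    name = Column(String)
    creation = Column(DateTime)


def build_query(session, host=None, name=None, order_by_asc=(), order_by_desc=()):
    query = session.query(Dataset)
    if host:
        query = query.filter(Dataset.host == host)
    if name:
        query = query.filter(Dataset.name.like('%%%s%%' % name))
    # attach ordering dynamically, mirroring the asc/desc loop above
    for cols, order in ((order_by_asc, 'asc'), (order_by_desc, 'desc')):
        columns = [Dataset.__table__.columns[c] for c in cols
                   if c in Dataset.__table__.columns]
        if columns:
            query = query.order_by(*(getattr(col, order)() for col in columns))
    return query


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(engine)
print(build_query(session, host='nas01', order_by_asc=['host', 'creation']))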