Example #1
    AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
    OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.

"""
import sys
from lib.legacy_deps import DependancyCrawler

src_root = '/usr/local/'
crawler = DependancyCrawler(src_root)
crawler.crawl()
if len(sys.argv) < 2:
    print('inspect legacy function usage of module, choose one of:')
    for module_name in crawler.get_files():
        print(module_name)
    sys.exit()

chk_source = sys.argv[1]

usage = crawler.where_used(chk_source)
for src_filename in usage:
    module_name = crawler.get_dependency_by_src(src_filename)
    trace_data = crawler.trace(module_name)
    if chk_source not in trace_data:
        print('inspect : %s (%s)' %
              (src_filename, ','.join(usage[src_filename])))
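
The loop above only relies on where_used() returning a mapping from source file names to the legacy symbols found in them; the actual return type of lib.legacy_deps is not shown here, so the snippet below is just a minimal, hypothetical illustration of that assumed shape.

# Hypothetical data only: illustrates the dict-like shape the loop above
# expects from crawler.where_used(); the file name and symbols are made up.
usage_example = {'scripts/example_caller.py': ['legacy_config_get', 'legacy_log']}
for src_filename, symbols in usage_example.items():
    print('inspect : %s (%s)' % (src_filename, ','.join(symbols)))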
Example #2
import os
import time

from lib.legacy_deps import DependancyCrawler

src_root = '/usr/local/'
# output directory is assumed here; the original snippet uses target_directory
# without defining it
target_directory = '/tmp/dependancies/'

# create target directory if not existing
if not os.path.exists(target_directory):
    os.mkdir(target_directory)

# start crawling
crawler = DependancyCrawler(src_root)
print('[%.2f] started ' % time.time())
crawler.crawl()
print('[%.2f] collected %d dependencies in %d files' % (
    time.time(), crawler.get_total_dependencies(), crawler.get_total_files()))

# generate graphs
generated_files = list()
for filename in crawler.get_files():
    file_stats = crawler.file_info(filename)
    if file_stats['levels'] > 1:
        print('[%.2f] ... writing %s' % (time.time(), filename))
        dot_filename = ('%s/%s.dot' % (target_directory, filename)).replace(
            '//', '/')
        target_filename = dot_filename.replace('.dot', '.png')
        open(dot_filename, 'w').write(crawler.generate_dot(filename))
        os.system('/usr/local/bin/dot -Tpng %s -o %s ' %
                  (dot_filename, target_filename))
        generated_files.append(os.path.basename(target_filename))
    else:
        # not interested, item has no children.
        print('[%.2f] ... skip %s' % (time.time(), filename))

# write a simple index page for our generated files
open(('%s/index.html' % target_directory).replace('//', '/'), 'w').write(
    crawler.generate_index_html(generated_files))
print('[%.2f] done (all results in %s)' % (time.time(), target_directory))
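
A side note on the rendering step above: the example shells out to Graphviz with os.system(). A sketch of the same call using subprocess instead (an alternative, not part of the original script) could look like this, assuming the dot binary lives at /usr/local/bin/dot:

import subprocess

def render_dot(dot_filename, target_filename):
    # Same Graphviz rendering step as the os.system() call above, expressed
    # with subprocess; raises if the dot binary is missing or the call fails.
    subprocess.run(['/usr/local/bin/dot', '-Tpng', dot_filename, '-o', target_filename],
                   check=True)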
Example #3
    AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
    AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
    OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.

"""
import sys
from lib.legacy_deps import DependancyCrawler

src_root = '/usr/local/'
crawler = DependancyCrawler(src_root)
crawler.crawl()
if len(sys.argv) < 2:
    print('inspect legacy function usage of module, choose one of:')
    for module_name in crawler.get_files():
        print(module_name)
    sys.exit()

chk_source = sys.argv[1]

usage = crawler.where_used(chk_source)
for src_filename in usage:
    module_name = crawler.get_dependency_by_src(src_filename)
    trace_data = crawler.trace(module_name)
    if chk_source not in trace_data:
        print('inspect : %s (%s)' %
              (src_filename, ','.join(usage[src_filename])))
Example #4
import os
import time

from lib.legacy_deps import DependancyCrawler

src_root = '/usr/local/'
# output directory is assumed here; the original snippet uses target_directory
# without defining it
target_directory = '/tmp/dependancies/'

# create target directory if not existing
if not os.path.exists(target_directory):
    os.mkdir(target_directory)

# start crawling
crawler = DependancyCrawler(src_root)
print('[%.2f] started ' % time.time())
crawler.crawl()
print('[%.2f] collected %d dependencies in %d files' % (
    time.time(), crawler.get_total_dependencies(), crawler.get_total_files()))

# generate graphs
generated_files = list()
for filename in crawler.get_files():
    file_stats = crawler.file_info(filename)
    if file_stats['levels'] > 1:
        print('[%.2f] ... writing %s' % (time.time(), filename))
        dot_filename = ('%s/%s.dot' % (target_directory, filename)).replace('//', '/')
        target_filename = dot_filename.replace('.dot', '.png')
        open(dot_filename, 'w').write(crawler.generate_dot(filename))
        os.system('/usr/local/bin/dot -Tpng %s -o %s ' % (dot_filename, target_filename))
        generated_files.append(os.path.basename(target_filename))
    else:
        # not interested, item has no children.
        print('[%.2f] ... skip %s' % (time.time(), filename))

# write a simple index page for our generated files
open(('%s/index.html' % target_directory).replace('//', '/'), 'w').write(crawler.generate_index_html(generated_files))
print('[%.2f] done (all results in %s)' % (time.time(), target_directory))
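
All four examples import DependancyCrawler from lib.legacy_deps, so the interface they rely on can be read off the calls they make. The stub below summarizes that assumed surface; the method names come from the examples themselves, while the bodies are placeholders rather than the real implementation:

class DependancyCrawlerInterface(object):
    """Methods the examples above call on lib.legacy_deps.DependancyCrawler."""

    def __init__(self, src_root): ...
    def crawl(self): ...                       # scan src_root for dependencies
    def get_files(self): ...                   # iterable of analysed file/module names
    def get_total_files(self): ...             # int, used in the progress message
    def get_total_dependencies(self): ...      # int, used in the progress message
    def file_info(self, filename): ...         # dict, with at least a 'levels' key
    def where_used(self, module): ...          # mapping: source file -> matched symbols
    def get_dependency_by_src(self, src): ...  # module name for a source file
    def trace(self, module): ...               # dependency trace for a module
    def generate_dot(self, filename): ...      # Graphviz dot source as a string
    def generate_index_html(self, files): ...  # simple HTML index page as a string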