Move buildroot to bsp directory.

This commit is contained in:
2016-11-16 22:05:33 +01:00
parent 317c040ea8
commit 807ab03547
7408 changed files with 0 additions and 0 deletions

View File

@@ -0,0 +1,153 @@
#!/usr/bin/env bash
# A little script I whipped up to make it easy to
# patch source trees and have sane error handling
# -Erik
#
# (c) 2002 Erik Andersen <andersen@codepoet.org>
#
# Parameters:
# - "-s", optional. Silent operation, don't print anything if there
# isn't any error.
# - the build directory, optional, default value is '.'. The place where are
# the package sources.
# - the patch directory, optional, default '../kernel-patches'. The place
# where are the scripts you want to apply.
# - other parameters are the patch name patterns, optional, default value is
# '*'. Pattern(s) describing the patch names you want to apply.
#
# The script will look recursively for patches from the patch directory. If a
# file named 'series' exists then the patches mentioned in it will be applied
# as plain patches, regardless of their file name. If no 'series' file exists,
# the script will look for file names matching pattern(s). If the name
# ends with '.tar.*', '.tbz2' or '.tgz', the file is considered as an archive
# and will be uncompressed into a directory named
# '.patches-name_of_the_archive-unpacked'. That directory is then itself
# scanned, using '*' as the pattern. Remember that scanning is recursive. Other
# files than series file and archives are considered as a patch.
#
# Once a patch is found, the script will try to apply it. If its name doesn't
# end with '.gz', '.bz', '.bz2', '.xz', '.zip', '.Z', '.diff*' or '.patch*',
# it will be skipped. If necessary, the patch will be uncompressed before being
# applied. The list of the patches applied is stored in '.applied_patches_list'
# file in the build directory.
# Optional '-s' as first argument: run silently (and forward '-s' to
# the patch tool as well).
silent=
if [ "$1" = "-s" ] ; then
    # add option to be used by the patch tool
    silent=-s
    shift
fi

# Set directories from arguments, or use defaults.
builddir=${1-.}
patchdir=${2-../kernel-patches}
shift 2
# Any remaining arguments are the patch-name pattern(s); default '*'.
patchpattern=${@-*}

# use a well defined sorting order
export LC_COLLATE=C

if [ ! -d "${builddir}" ] ; then
    echo "Aborting. '${builddir}' is not a directory."
    exit 1
fi
if [ ! -d "${patchdir}" ] ; then
    echo "Aborting. '${patchdir}' is not a directory."
    exit 1
fi

# Remove any rejects present BEFORE patching - Because if there are
# any, even if patches are well applied, at the end it will complain
# about rejects in builddir.
find ${builddir}/ '(' -name '*.rej' -o -name '.*.rej' ')' -print0 | \
    xargs -0 -r rm -f
# apply_patch <dir> <patch-name> [series]
#
# Apply one patch from <dir> onto ${builddir}.  The decompressor is
# selected from the file-name suffix, unless the third argument is
# non-empty (patch listed in a 'series' file), in which case the file
# is fed to patch as-is.  Unsupported suffixes are skipped (return 0);
# a missing patch file or a failed application aborts the script.
#
# Globals read: builddir, silent.
# Side effect:  appends the patch name to ${builddir}/.applied_patches_list.
apply_patch() {
    path=$1
    patch=$2
    if [ "$3" ]; then
        type="series"; uncomp="cat"
    else
        case "$patch" in
            *.gz)
                type="gzip"; uncomp="gunzip -dc"; ;;
            *.bz)
                type="bzip"; uncomp="bunzip -dc"; ;;
            *.bz2)
                type="bzip2"; uncomp="bunzip2 -dc"; ;;
            *.xz)
                type="xz"; uncomp="unxz -dc"; ;;
            *.zip)
                # NOTE(review): 'unzip -d' expects a directory argument;
                # piping its output into patch looks dubious — confirm.
                type="zip"; uncomp="unzip -d"; ;;
            *.Z)
                type="compress"; uncomp="uncompress -c"; ;;
            *.diff*)
                type="diff"; uncomp="cat"; ;;
            *.patch*)
                type="patch"; uncomp="cat"; ;;
            *)
                echo "Unsupported file type for ${path}/${patch}, skipping";
                return 0
                ;;
        esac
    fi
    if [ -z "$silent" ] ; then
        echo ""
        echo "Applying $patch using ${type}: "
    fi
    if [ ! -e "${path}/$patch" ] ; then
        echo "Error: missing patch file ${path}/$patch"
        exit 1
    fi
    # Record the patch before applying; quoted so unusual patch names
    # cannot word-split or glob.
    echo "$patch" >> "${builddir}/.applied_patches_list"
    # ${uncomp} is intentionally unquoted: it expands to a command plus
    # its options.  $silent is either empty or '-s'.
    ${uncomp} "${path}/$patch" | patch -g0 -p1 -E -d "${builddir}" -t -N $silent
    if [ $? != 0 ] ; then
        echo "Patch failed! Please fix ${patch}!"
        exit 1
    fi
}
# scan_patchdir <dir> [patterns...]
#
# Recursively scan <dir> and apply every patch found.  A 'series' file,
# if present, takes precedence over the name patterns and the ls sort
# order.  Archives (*.tar.*, *.tbz2, *.tgz) are unpacked into a
# '.patches-<name>-unpacked' directory inside ${builddir}, which is
# then scanned in turn with the default '*' pattern.
function scan_patchdir {
    local path=$1
    shift 1
    patches=${@-*}

    # If there is a series file, use it instead of using ls sort order
    # to apply patches. Skip lines starting with a '#'.
    if [ -e "${path}/series" ] ; then
        # The format of a series file accepts a second field that is
        # used to specify the number of directory components to strip
        # when applying the patch, in the form -pN (N an integer >= 0)
        # We assume this field to always be -p1 whether it is present
        # or missing.
        series_patches="`grep -Ev "^#" ${path}/series | cut -d ' ' -f1 2> /dev/null`"
        for i in $series_patches; do
            apply_patch "$path" "$i" series
        done
    else
        # 'cd + ls -d' expands the pattern(s) relative to ${path};
        # LC_COLLATE=C (exported above) gives a stable application order.
        for i in `cd $path; ls -d $patches 2> /dev/null` ; do
            if [ -d "${path}/$i" ] ; then
                scan_patchdir "${path}/$i"
            elif echo "$i" | grep -q -E "\.tar(\..*)?$|\.tbz2?$|\.tgz$" ; then
                unpackedarchivedir="$builddir/.patches-$(basename $i)-unpacked"
                rm -rf "$unpackedarchivedir" 2> /dev/null
                mkdir "$unpackedarchivedir"
                # NOTE(review): '-a' (auto-detect compression) needs GNU tar — confirm
                tar -C "$unpackedarchivedir" -xaf "${path}/$i"
                scan_patchdir "$unpackedarchivedir"
            else
                apply_patch "$path" "$i"
            fi
        done
    fi
}
# Apply everything, then refuse to succeed if any reject file remains.
scan_patchdir "$patchdir" "$patchpattern"

# Check for rejects...
if [ "`find $builddir/ '(' -name '*.rej' -o -name '.*.rej' ')' -print`" ] ; then
    echo "Aborting.  Reject files found."
    exit 1
fi

# Remove backup files
find $builddir/ '(' -name '*.orig' -o -name '.*.orig' ')' -exec rm -f {} \;

View File

@@ -0,0 +1,71 @@
#!/usr/bin/env bash
# This script scans $(HOST_DIR)/{bin,sbin} for all ELF files, and checks
# they have an RPATH to $(HOST_DIR)/usr/lib if they need libraries from
# there.
# Override the user's locale so we are sure we can parse the output of
# readelf(1) and file(1)
export LC_ALL=C
# main <pkg-name> <host-dir>
#
# Scan ${hostdir}/usr/{bin,sbin} for ELF executables and print those
# that need a library from ${hostdir}/usr/lib but carry no matching
# RPATH/RUNPATH entry.  Returns non-zero if any offender was found.
main() {
    local pkg="${1}"
    local hostdir="${2}"
    local file ret

    # Remove duplicate and trailing '/' for proper match
    hostdir="$( sed -r -e 's:/+:/:g; s:/$::;' <<<"${hostdir}" )"

    ret=0
    # The find|file|sed pipeline emits one path per line for each ELF
    # executable; process substitution keeps 'ret' in the current shell.
    while read file; do
        elf_needs_rpath "${file}" "${hostdir}" || continue
        check_elf_has_rpath "${file}" "${hostdir}" && continue
        if [ ${ret} -eq 0 ]; then
            # Print the banner only once, before the first offender.
            ret=1
            printf "***\n"
            printf "*** ERROR: package %s installs executables without proper RPATH:\n" "${pkg}"
        fi
        printf "*** %s\n" "${file}"
    done < <( find "${hostdir}"/usr/{bin,sbin} -type f -exec file {} + 2>/dev/null \
              |sed -r -e '/^([^:]+):.*\<ELF\>.*\<executable\>.*/!d' \
                      -e 's//\1/' \
            )

    return ${ret}
}
# elf_needs_rpath <elf-file> <host-dir>
#
# Succeed (return 0) when at least one NEEDED library of the given ELF
# file is provided in ${hostdir}/usr/lib, i.e. the file does require an
# RPATH entry pointing there.
elf_needs_rpath() {
    local elf="${1}"
    local hostdir="${2}"
    local needed

    while read needed; do
        if [ -e "${hostdir}/usr/lib/${needed}" ]; then
            return 0
        fi
    done < <( readelf -d "${elf}" \
              |sed -r -e '/^.* \(NEEDED\) .*Shared library: \[(.+)\]$/!d;' \
                      -e 's//\1/;' \
            )

    return 1
}
# check_elf_has_rpath <elf-file> <host-dir>
#
# Succeed (return 0) when the ELF file carries an RPATH/RUNPATH entry
# that resolves to exactly ${hostdir}/usr/lib.
check_elf_has_rpath() {
    local elf="${1}"
    local hostdir="${2}"
    local rpath entry

    while read rpath; do
        for entry in ${rpath//:/ }; do
            # Remove duplicate and trailing '/' for proper match
            entry="$( sed -r -e 's:/+:/:g; s:/$::;' <<<"${entry}" )"
            if [ "${entry}" = "${hostdir}/usr/lib" ]; then
                return 0
            fi
        done
    done < <( readelf -d "${elf}" \
              |sed -r -e '/.* \(R(UN)?PATH\) +Library r(un)?path: \[(.+)\]$/!d' \
                      -e 's//\3/;' \
            )

    return 1
}
main "${@}"

View File

@@ -0,0 +1,41 @@
#!/bin/sh

# Check that the kernel headers installed in the sysroot match the
# headers version selected in the configuration.
#
#   $1: sysroot directory
#   $2: expected headers version, as 'major.minor[.anything]'
#
# Globals read: HOSTCC — the host compiler used to build the test
# program below.  Exits with the test program's status.

SYSROOT="${1}"
# Make sure we have enough version components
HDR_VER="${2}.0.0"
HDR_M="${HDR_VER%%.*}"
HDR_V="${HDR_VER#*.}"
HDR_m="${HDR_V%%.*}"

EXEC="$(mktemp -t check-headers.XXXXXX)"

# We do not want to account for the patch-level, since headers are
# not supposed to change for different patchlevels, so we mask it out.
# This only applies to kernels >= 3.0, but those are the only one
# we actually care about; we treat all 2.6.x kernels equally.
${HOSTCC} -imacros "${SYSROOT}/usr/include/linux/version.h" \
    -x c -o "${EXEC}" - <<_EOF_
#include <stdio.h>
#include <stdlib.h>
int main(int argc __attribute__((unused)),
char** argv __attribute__((unused)))
{
if((LINUX_VERSION_CODE & ~0xFF)
!= KERNEL_VERSION(${HDR_M},${HDR_m},0))
{
printf("Incorrect selection of kernel headers: ");
printf("expected %d.%d.x, got %d.%d.x\n", ${HDR_M}, ${HDR_m},
((LINUX_VERSION_CODE>>16) & 0xFF),
((LINUX_VERSION_CODE>>8) & 0xFF));
return 1;
}
return 0;
}
_EOF_

# Run the freshly-built test program, keep its verdict, clean up.
"${EXEC}"
ret=${?}

rm -f "${EXEC}"
exit ${ret}

View File

@@ -0,0 +1,76 @@
#!/bin/sh

# This script registers the toolchain of a Buildroot project into the
# Eclipse plugin. To do so, it adds a new line for the Buildroot
# toolchain into the $HOME/.buildroot-eclipse.toolchains file, which
# the Eclipse Buildroot plugin reads to discover automatically the
# available Buildroot toolchains on the system.
#
# This script should typically not be called manually. Instead, one
# should enable the BR2_ECLIPSE_REGISTER configuration option, which
# will lead Buildroot to automatically call this script with the
# appropriate arguments.
#
# Usage:
#  eclipse-register-toolchain project-directory toolchain-prefix architecture
#
#  project-directory is the absolute path to the Buildroot project
#  output directory (which contains the host/, target/, build/,
#  images/, etc. subdirectories). It should be an absolute and
#  canonical path.
#
#  toolchain-prefix is the prefix of the cross-compilation tools, i.e
#  'arm-linux-' if the cross-compiler executable is 'arm-linux-gcc'.
#
#  architecture is the lower-cased name of the architecture targeted
#  by the Buildroot project.

if test $# -ne 3; then
    echo "Invalid number of arguments."
    echo "Usage: $0 project-directory toolchain-prefix architecture"
    exit 1
fi

project_directory=$1
toolchain_prefix=$2
architecture=$3

# Sanity checks: the output directory must exist and must contain a
# usable cross-compiler.
if test ! -d ${project_directory} ; then
    echo "Non-existing project directory ${project_directory}"
    exit 1
fi

if test ! -d ${project_directory}/host ; then
    echo "Your project directory does not look like a Buildroot output"
    exit 1
fi

if test ! -e ${project_directory}/host/usr/bin/${toolchain_prefix}gcc ; then
    echo "Cannot find the cross-compiler in the project directory"
    exit 1
fi

TOOLCHAIN_ECLIPSE_FILE=${HOME}/.buildroot-eclipse.toolchains

# First, we remove all lines from the ${TOOLCHAIN_ECLIPSE_FILE} that
# correspond to toolchains that no longer exist.
if test -f ${TOOLCHAIN_ECLIPSE_FILE} ; then
    mv ${TOOLCHAIN_ECLIPSE_FILE} ${TOOLCHAIN_ECLIPSE_FILE}.tmp
    # Each line has the form 'path:toolchain-prefix:architecture'; copy
    # back only the entries worth keeping.
    cat ${TOOLCHAIN_ECLIPSE_FILE}.tmp | while read toolchain ; do
        path=$(echo ${toolchain} | cut -f1 -d ':')
        # Filter lines corresponding to still existing projects
        echo "Testing ${path} ..."
        if ! test -d ${path} ; then
            continue
        fi
        # .. and the current project
        if test ${path} = ${project_directory} ; then
            continue
        fi
        echo ${toolchain} >> ${TOOLCHAIN_ECLIPSE_FILE}
    done
    rm ${TOOLCHAIN_ECLIPSE_FILE}.tmp
fi

# Add the toolchain
echo "${project_directory}:${toolchain_prefix}:${architecture}" >> ${TOOLCHAIN_ECLIPSE_FILE}

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash

# This script is used to generate a gconv-modules file that takes into
# account only the gconv modules installed by Buildroot. It receives
# on its standard input the original complete gconv-modules file from
# the toolchain, and as arguments the list of gconv modules that were
# actually installed, and writes on its standard output the new
# gconv-modules file.

# The format of gconv-modules is precisely documented in the
# file itself. It consists of two different directives:
#   module  FROMSET  TOSET  FILENAME  COST
#   alias   ALIAS  REALNAME
# and that's what this script parses and generates.
#
# There are two kinds of 'module' directives:
#  - the first defines conversion of a charset to/from INTERNAL representation
#  - the second defines conversion of a charset to/from another charset
# we handle each with slightly different code, since the second never has
# associated aliases.

# The awk program below builds three maps while reading stdin
# (alias -> names, module file -> charsets, module -> cost), then in the
# END block re-emits directives only for the module files listed in the
# 'files' variable (i.e. those actually installed).
gawk -v files="${1}" '
$1 == "alias" {
    aliases[$3] = aliases[$3] " " $2;
}
$1 == "module" && $2 != "INTERNAL" && $3 == "INTERNAL" {
    file2internals[$4] = file2internals[$4] " " $2;
    mod2cost[$2] = $5;
}
$1 == "module" && $2 != "INTERNAL" && $3 != "INTERNAL" {
    file2cset[$4] = file2cset[$4] " " $2 ":" $3;
    mod2cost[$2] = $5;
}
END {
    nb_files = split(files, all_files);
    for(f = 1; f <= nb_files; f++) {
        file = all_files[f];
        printf("# Modules and aliases for: %s\n", file);
        nb_mods = split(file2internals[file], mods);
        for(i = 1; i <= nb_mods; i++) {
            nb_aliases = split(aliases[mods[i]], mod_aliases);
            for(j = 1; j <= nb_aliases; j++) {
                printf("alias\t%s\t%s\n", mod_aliases[j], mods[i]);
            }
            printf("module\t%s\t%s\t%s\t%d\n", mods[i], "INTERNAL", file, mod2cost[mods[i]]);
            printf("module\t%s\t%s\t%s\t%d\n", "INTERNAL", mods[i], file, mod2cost[mods[i]]);
            printf("\n" );
        }
        printf("%s", nb_mods != 0 ? "\n" : "");
        nb_csets = split(file2cset[file], csets);
        for(i = 1; i <= nb_csets; i++) {
            split(csets[i], cs, ":");
            printf("module\t%s\t%s\t%s\t%d\n", cs[1], cs[2], file, mod2cost[cs[1]]);
        }
        printf("%s", nb_csets != 0 ? "\n\n" : "");
    }
}
'

View File

@@ -0,0 +1,513 @@
## gen-manual-lists.py
##
## This script generates the following Buildroot manual appendices:
## - the package tables (one for the target, the other for host tools);
## - the deprecated items.
##
## Author(s):
## - Samuel Martin <s.martin49@gmail.com>
##
## Copyright (C) 2013 Samuel Martin
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import sys
import datetime
from argparse import ArgumentParser
try:
import kconfiglib
except ImportError:
message = """
Could not find the module 'kconfiglib' in the PYTHONPATH:
"""
message += "\n".join([" {0}".format(path) for path in sys.path])
message += """
Make sure the Kconfiglib directory is in the PYTHONPATH, then relaunch the
script.
You can get kconfiglib from:
https://github.com/ulfalizer/Kconfiglib
"""
sys.stderr.write(message)
raise
def get_symbol_subset(root, filter_func):
    """ Return a generator over the kconfig symbols below ``root``.

    Menus and choices are descended into recursively; only symbols
    accepted by the filter are yielded.

    :param root:        Root item of the generated subset of items
    :param filter_func: Predicate deciding whether a symbol is yielded
    """
    # Menus expose get_items(); the top-level Config object exposes
    # get_top_level_items() instead.
    for accessor in ("get_items", "get_top_level_items"):
        if hasattr(root, accessor):
            children = getattr(root, accessor)()
            break
    else:
        raise Exception("The symbol does not contain any subset of symbols")

    for child in children:
        if child.is_symbol():
            if filter_func(child):
                yield child
        elif child.is_menu() or child.is_choice():
            for nested in get_symbol_subset(child, filter_func):
                yield nested
def get_symbol_parents(item, root=None, enable_choice=False):
    """ Return the list of the item's parents, furthest parent first and
    closest parent last.

    :param item:          Item from which the parent list is generated
    :param root:          Root item stopping the search (not included in the
                          parent list)
    :param enable_choice: Flag enabling choices to appear in the parent list
    """
    chain = []
    node = item.get_parent()
    while node and node != root:
        if node.is_menu():
            chain.append(node.get_title())
        elif enable_choice and node.is_choice():
            chain.append(node.get_prompts()[0])
        node = node.get_parent()

    root_is_menu = isinstance(root, kconfiglib.Menu)
    root_is_choice = enable_choice and isinstance(root, kconfiglib.Choice)
    if root_is_menu or root_is_choice:
        chain.append("")  # Dummy empty parent to get a leading arrow ->

    return list(reversed(chain))
def format_asciidoc_table(root, get_label_func, filter_func=lambda x: True,
                          format_func=lambda x: x,
                          enable_choice=False, sorted=True,
                          item_label=None):
    """ Return the asciidoc formatted table of the items and their location.

    :param root:           Root item of the item subset
    :param get_label_func: Item's label getter function
    :param filter_func:    Filter function to apply on the item subset
    :param format_func:    Function to format a symbol and the table header
    :param enable_choice:  Enable choices to appear as part of the item's
                           location
    :param sorted:         Flag to alphabetically sort the table (shadows
                           the builtin; kept for caller compatibility)
    :param item_label:     Header label for the item column
    """
    lines = []
    for item in get_symbol_subset(root, filter_func):
        lines.append(format_func(what="symbol", symbol=item, root=root,
                                 get_label_func=get_label_func,
                                 enable_choice=enable_choice))
    if sorted:
        lines.sort(key=lambda x: x.lower())

    # Note: a dead ':halign:' assignment that was immediately overwritten
    # has been removed here; the table starts with the width/cols header.
    width, columns = format_func(what="layout")
    table = "[width=\"{0}\",cols=\"{1}\",options=\"header\"]\n".format(width, columns)
    table += "|===================================================\n"
    table += format_func(what="header", header=item_label, root=root)
    table += "\n" + "".join(lines) + "\n"
    table += "|===================================================\n"
    return table
class Buildroot:
    """ Buildroot configuration object.
    """
    root_config = "Config.in"
    package_dirname = "package"
    package_prefixes = ["BR2_PACKAGE_", "BR2_PACKAGE_HOST_"]
    re_pkg_prefix = re.compile(r"^(" + "|".join(package_prefixes) + ").*")
    deprecated_symbol = "BR2_DEPRECATED"
    list_in = """\
//
// Automatically generated list for Buildroot manual.
//
{table}
"""
    # Description of each generated list: output file name, root menu to
    # scan (None means the whole config), filter/format methods (looked
    # up by name via getattr), and whether the table is sorted.
    list_info = {
        'target-packages': {
            'filename': "package-list",
            'root_menu': "Target packages",
            'filter': "_is_real_package",
            'format': "_format_symbol_prompt_location",
            'sorted': True,
        },
        'host-packages': {
            'filename': "host-package-list",
            'root_menu': "Host utilities",
            'filter': "_is_real_package",
            'format': "_format_symbol_prompt",
            'sorted': True,
        },
        'virtual-packages': {
            'filename': "virtual-package-list",
            'root_menu': "Target packages",
            'filter': "_is_virtual_package",
            'format': "_format_symbol_virtual",
            'sorted': True,
        },
        'deprecated': {
            'filename': "deprecated-list",
            'root_menu': None,
            'filter': "_is_deprecated_feature",
            'format': "_format_symbol_prompt_location",
            'sorted': False,
        },
    }

    def __init__(self):
        # TOPDIR / O are expected to be set by the Buildroot make
        # infrastructure when this script is invoked.
        self.base_dir = os.environ.get("TOPDIR")
        self.output_dir = os.environ.get("O")
        self.package_dir = os.path.join(self.base_dir, self.package_dirname)
        self.config = kconfiglib.Config(os.path.join(self.base_dir,
                                                     self.root_config),
                                        self.base_dir)
        self._deprecated = self.config.get_symbol(self.deprecated_symbol)
        self.gen_date = datetime.datetime.utcnow()
        self.br_version_full = os.environ.get("BR2_VERSION_FULL")
        # Strip a trailing '-git' marker from development versions.
        if self.br_version_full and self.br_version_full.endswith("-git"):
            self.br_version_full = self.br_version_full[:-4]
        if not self.br_version_full:
            self.br_version_full = "undefined"

    def _get_package_symbols(self, package_name):
        """ Return a tuple containing the target and host package symbol.
        """
        symbols = re.sub("[-+.]", "_", package_name)
        symbols = symbols.upper()
        symbols = tuple([prefix + symbols for prefix in self.package_prefixes])
        return symbols

    def _is_deprecated(self, symbol):
        """ Return True if the symbol is marked as deprecated, otherwise False.
        """
        # This also catches BR2_DEPRECATED_SINCE_xxxx_xx
        return bool([ symbol for x in symbol.get_referenced_symbols()
                      if x.get_name().startswith(self._deprecated.get_name()) ])

    def _is_package(self, symbol, type='real'):
        """ Return True if the symbol is a package or a host package, otherwise
        False.

        :param symbol: The symbol to check
        :param type:   Limit to 'real' or 'virtual' types of packages,
                       with 'real' being the default.
                       Note: only 'real' is (implictly) handled for now
        """
        if not symbol.is_symbol():
            return False
        # Real packages have a prompt; virtual ones do not.
        if type == 'real' and not symbol.get_prompts():
            return False
        if type == 'virtual' and symbol.get_prompts():
            return False
        if not self.re_pkg_prefix.match(symbol.get_name()):
            return False
        pkg_name = self._get_pkg_name(symbol)
        pattern = "^(HOST_)?" + pkg_name + "$"
        pattern = re.sub("_", ".", pattern)
        pattern = re.compile(pattern, re.IGNORECASE)
        # Here, we cannot just check for the location of the Config.in because
        # of the "virtual" package.
        #
        # So, to check that a symbol is a package (not a package option or
        # anything else), we check for the existence of the package *.mk file.
        #
        # By the way, to actually check for a package, we should grep all *.mk
        # files for the following regex:
        # "\$\(eval \$\((host-)?(generic|autotools|cmake)-package\)\)"
        #
        # Implementation details:
        #
        # * The package list is generated from the *.mk file existence, the
        #   first time this function is called. Despite the memory consumption,
        #   this list is stored because the execution time of this script is
        #   noticeably shorter than rescanning the package sub-tree for each
        #   symbol.
        if not hasattr(self, "_package_list"):
            pkg_list = []
            for _, _, files in os.walk(self.package_dir):
                for file_ in (f for f in files if f.endswith(".mk")):
                    pkg_list.append(re.sub(r"(.*?)\.mk", r"\1", file_))
            setattr(self, "_package_list", pkg_list)
        for pkg in getattr(self, "_package_list"):
            if type == 'real':
                if pattern.match(pkg) and not self._exists_virt_symbol(pkg):
                    return True
            if type == 'virtual':
                if pattern.match('has_' + pkg):
                    return True
        return False

    def _is_real_package(self, symbol):
        return self._is_package(symbol, 'real')

    def _is_virtual_package(self, symbol):
        return self._is_package(symbol, 'virtual')

    def _is_deprecated_feature(self, symbol):
        # A deprecated feature is any visible symbol referencing
        # BR2_DEPRECATED*.
        return symbol.get_prompts() and self._is_deprecated(symbol)

    def _exists_virt_symbol(self, pkg_name):
        """ Return True if a symbol exists that defines the package as
        a virtual package, False otherwise

        :param pkg_name: The name of the package, for which to check if
                         a symbol exists defining it as a virtual package
        """
        virt_pattern = "BR2_PACKAGE_HAS_" + pkg_name + "$"
        virt_pattern = re.sub("_", ".", virt_pattern)
        virt_pattern = re.compile(virt_pattern, re.IGNORECASE)
        for sym in self.config:
            if virt_pattern.match(sym.get_name()):
                return True
        return False

    def _get_pkg_name(self, symbol):
        """ Return the package name of the specified symbol.

        :param symbol: The symbol to get the package name of
        """
        return re.sub("BR2_PACKAGE_(HOST_)?(.*)", r"\2", symbol.get_name())

    def _get_symbol_label(self, symbol, mark_deprecated=True):
        """ Return the label (a.k.a. prompt text) of the symbol.

        :param symbol:          The symbol
        :param mark_deprecated: Append a 'deprecated' to the label
        """
        label = symbol.get_prompts()[0]
        if self._is_deprecated(symbol) and mark_deprecated:
            label += " *(deprecated)*"
        return label

    def _format_symbol_prompt(self, what=None, symbol=None, root=None,
                              enable_choice=False, header=None,
                              get_label_func=lambda x: x):
        # Single-column table: just the symbol's prompt.
        if what == "layout":
            return ( "30%", "^1" )
        if what == "header":
            return "| {0:<40}\n".format(header)
        if what == "symbol":
            return "| {0:<40}\n".format(get_label_func(symbol))
        message = "Invalid argument 'what': '%s'\n" % str(what)
        message += "Allowed values are: 'layout', 'header' and 'symbol'"
        raise Exception(message)

    def _format_symbol_prompt_location(self, what=None, symbol=None, root=None,
                                       enable_choice=False, header=None,
                                       get_label_func=lambda x: x):
        # Two-column table: the symbol's prompt and its menu location.
        if what == "layout":
            return ( "100%", "^1,4" )
        if what == "header":
            if hasattr(root, "get_title"):
                loc_label = get_symbol_parents(root, None, enable_choice=enable_choice)
                loc_label += [root.get_title(), "..."]
            else:
                loc_label = ["Location"]
            return "| {0:<40} <| {1}\n".format(header, " -> ".join(loc_label))
        if what == "symbol":
            parents = get_symbol_parents(symbol, root, enable_choice)
            return "| {0:<40} <| {1}\n".format(get_label_func(symbol),
                                               " -> ".join(parents))
        message = "Invalid argument 'what': '%s'\n" % str(what)
        message += "Allowed values are: 'layout', 'header' and 'symbol'"
        raise Exception(message)

    def _format_symbol_virtual(self, what=None, symbol=None, root=None,
                               enable_choice=False, header=None,
                               get_label_func=lambda x: "?"):
        def _symbol_is_legacy(symbol):
            # A symbol selecting BR2_LEGACY is a removed-option stub.
            selects = [ s.get_name() for s in symbol.get_selected_symbols() ]
            return ("BR2_LEGACY" in selects)

        def _get_parent_package(sym):
            if self._is_real_package(sym):
                return None
            # Trim the symbol name from its last component (separated with
            # underscores), until we either find a symbol which is a real
            # package, or until we have no component (i.e. just 'BR2')
            name = sym.get_name()
            while name != "BR2":
                name = name.rsplit("_", 1)[0]
                s = self.config.get_symbol(name)
                if s is None:
                    continue
                if self._is_real_package(s):
                    return s
            return None

        def _get_providers(symbol):
            # Collect the labels of all non-legacy symbols that select
            # this virtual-package symbol, recursing through promptless
            # intermediate symbols.
            providers = list()
            for sym in self.config:
                if not sym.is_symbol():
                    continue
                if _symbol_is_legacy(sym):
                    continue
                selects = sym.get_selected_symbols()
                if not selects:
                    continue
                for s in selects:
                    if s == symbol:
                        if sym.get_prompts():
                            l = self._get_symbol_label(sym,False)
                            parent_pkg = _get_parent_package(sym)
                            if parent_pkg is not None:
                                l = self._get_symbol_label(parent_pkg, False) \
                                  + " (w/ " + l + ")"
                            providers.append(l)
                        else:
                            providers.extend(_get_providers(sym))
            return providers

        if what == "layout":
            return ( "100%", "^1,4,4" )
        if what == "header":
            return "| {0:<20} <| {1:<32} <| Providers\n".format("Virtual packages", "Symbols")
        if what == "symbol":
            pkg = re.sub(r"^BR2_PACKAGE_HAS_(.+)$", r"\1", symbol.get_name())
            providers = _get_providers(symbol)
            return "| {0:<20} <| {1:<32} <| {2}\n".format(pkg.lower(),
                                                          '+' + symbol.get_name() + '+',
                                                          ", ".join(providers))
        message = "Invalid argument 'what': '%s'\n" % str(what)
        message += "Allowed values are: 'layout', 'header' and 'symbol'"
        raise Exception(message)

    def print_list(self, list_type, enable_choice=True, enable_deprecated=True,
                   dry_run=False, output=None):
        """ Print the requested list. If not dry run, then the list is
        automatically written in its own file.

        :param list_type:         The list type to be generated
        :param enable_choice:     Flag enabling choices to appear in the list
        :param enable_deprecated: Flag enabling deprecated items to appear in
                                  the package lists
        :param dry_run:           Dry run (print the list in stdout instead of
                                  writing the list file
        """
        def _get_menu(title):
            """ Return the first symbol menu matching the given title.
            """
            menus = self.config.get_menus()
            menu = [m for m in menus if m.get_title().lower() == title.lower()]
            if not menu:
                message = "No such menu: '{0}'".format(title)
                raise Exception(message)
            return menu[0]

        list_config = self.list_info[list_type]
        root_title = list_config.get('root_menu')
        if root_title:
            root_item = _get_menu(root_title)
        else:
            root_item = self.config
        filter_ = getattr(self, list_config.get('filter'))
        filter_func = lambda x: filter_(x)
        format_func = getattr(self, list_config.get('format'))
        # Hide deprecated items from the package lists when requested;
        # the deprecated list itself always shows them.
        if not enable_deprecated and list_type != "deprecated":
            filter_func = lambda x: filter_(x) and not self._is_deprecated(x)
        mark_depr = list_type != "deprecated"
        get_label = lambda x: self._get_symbol_label(x, mark_depr)
        item_label = "Features" if list_type == "deprecated" else "Packages"
        table = format_asciidoc_table(root_item, get_label,
                                      filter_func=filter_func,
                                      format_func=format_func,
                                      enable_choice=enable_choice,
                                      sorted=list_config.get('sorted'),
                                      item_label=item_label)
        content = self.list_in.format(table=table)
        if dry_run:
            print(content)
            return
        if not output:
            output_dir = self.output_dir
            if not output_dir:
                print("Warning: Undefined output directory.")
                print("\tUse source directory as output location.")
                output_dir = self.base_dir
            output = os.path.join(output_dir,
                                  list_config.get('filename') + ".txt")
        if not os.path.exists(os.path.dirname(output)):
            os.makedirs(os.path.dirname(output))
        print("Writing the {0} list in:\n\t{1}".format(list_type, output))
        with open(output, 'w') as fout:
            fout.write(content)
if __name__ == '__main__':
    list_types = ['target-packages', 'host-packages', 'virtual-packages', 'deprecated']
    parser = ArgumentParser()
    parser.add_argument("list_type", nargs="?", choices=list_types,
                        help="""\
Generate the given list (generate all lists if unspecified)""")
    parser.add_argument("-n", "--dry-run", dest="dry_run", action='store_true',
                        help="Output the generated list to stdout")
    parser.add_argument("--output-target", dest="output_target",
                        help="Output target package file")
    parser.add_argument("--output-host", dest="output_host",
                        help="Output host package file")
    parser.add_argument("--output-virtual", dest="output_virtual",
                        help="Output virtual package file")
    parser.add_argument("--output-deprecated", dest="output_deprecated",
                        help="Output deprecated file")
    args = parser.parse_args()
    lists = [args.list_type] if args.list_type else list_types
    buildroot = Buildroot()
    for list_name in lists:
        # Map e.g. 'target-packages' to the --output-target value.
        output = getattr(args, "output_" + list_name.split("-", 1)[0])
        buildroot.print_list(list_name, dry_run=args.dry_run, output=output)

View File

@@ -0,0 +1,306 @@
#!/usr/bin/env python
# Copyright (C) 2011 by Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
# Copyright (C) 2013 by Yann E. MORIN <yann.morin.1998@free.fr>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# This script generates graphs of packages build time, from the timing
# data generated by Buildroot in the $(O)/build-time.log file.
#
# Example usage:
#
# cat $(O)/build-time.log | ./support/scripts/graph-build-time --type=histogram --output=foobar.pdf
#
# Three graph types are available :
#
# * histogram, which creates an histogram of the build time for each
# package, decomposed by each step (extract, patch, configure,
# etc.). The order in which the packages are shown is
# configurable: by package name, by build order, or by duration
# order. See the --order option.
#
# * pie-packages, which creates a pie chart of the build time of
# each package (without decomposition in steps). Packages that
# contributed to less than 1% of the overall build time are all
# grouped together in an "Other" entry.
#
# * pie-steps, which creates a pie chart of the time spent globally
# on each step (extract, patch, configure, etc...)
#
# The default is to generate an histogram ordered by package name.
#
# Requirements:
#
# * matplotlib (python-matplotlib on Debian/Ubuntu systems)
# * numpy (python-numpy on Debian/Ubuntu systems)
# * argparse (by default in Python 2.7, requires python-argparse if
# Python 2.6 is used)
import sys
try:
import matplotlib as mpl
import numpy
except ImportError:
sys.stderr.write("You need python-matplotlib and python-numpy to generate build graphs\n")
exit(1)
# Use the Agg backend (which produces a PNG output, see
# http://matplotlib.org/faq/usage_faq.html#what-is-a-backend),
# otherwise an incorrect backend may be used on some host machines.
# Note: matplotlib.use() must be called *before* matplotlib.pyplot.
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import csv
import argparse
# Build steps recorded in build-time.log, in the order they are stacked
# in the histogram and listed in the legends.
steps = [ 'extract', 'patch', 'configure', 'build',
          'install-target', 'install-staging', 'install-images',
          'install-host']

# Two alternative per-step color palettes; presumably selected into a
# global 'colors' later in this script — not visible in this chunk.
default_colors = ['#e60004', '#009836', '#2e1d86', '#ffed00',
                  '#0068b5', '#f28e00', '#940084', '#97c000']
alternate_colors = ['#00e0e0', '#3f7f7f', '#ff0000', '#00c000',
                    '#0080ff', '#c000ff', '#00eeee', '#e0e000']
class Package:
    """Timing record for a single package build.

    Collects per-step start/end timestamps and exposes the resulting
    durations, either per step or summed over all completed steps.
    """

    def __init__(self, name):
        self.name = name
        # step -> duration, filled only once both a "start" and an "end"
        # event have been seen for that step
        self.steps_duration = {}
        self.steps_start = {}
        self.steps_end = {}

    def add_step(self, step, state, time):
        """Record one timing event; any state other than "start" is an end."""
        target = self.steps_start if state == "start" else self.steps_end
        target[step] = time
        if step in self.steps_start and step in self.steps_end:
            self.steps_duration[step] = self.steps_end[step] - self.steps_start[step]

    def get_duration(self, step=None):
        """Duration of one step (0 if incomplete/unknown), or the total
        over all completed steps when step is None."""
        if step is None:
            return sum(self.steps_duration.values())
        return self.steps_duration.get(step, 0)
# Generate an histogram of the time spent in each step of each
# package.
def pkg_histogram(data, output, order="build"):
    """Draw a stacked-bar histogram into 'output': one bar per package,
    one coloured segment per build step.  'order' selects the bar order:
    "build" (log order, unsorted), "name" (alphabetical) or "duration"
    (longest build first)."""
    n_pkgs = len(data)
    ind = numpy.arange(n_pkgs)
    if order == "duration":
        data = sorted(data, key=lambda p: p.get_duration(), reverse=True)
    elif order == "name":
        data = sorted(data, key=lambda p: p.name, reverse=False)
    # Prepare the vals array, containing one entry for each step
    vals = []
    for step in steps:
        val = []
        for p in data:
            val.append(p.get_duration(step))
        vals.append(val)
    bottom = [0] * n_pkgs
    legenditems = []
    plt.figure()
    # Draw the bars, step by step; 'bottom' stacks each step on top of
    # the previous ones.
    for i in range(0, len(vals)):
        b = plt.bar(ind+0.1, vals[i], width=0.8, color=colors[i], bottom=bottom, linewidth=0.25)
        legenditems.append(b[0])
        bottom = [ bottom[j] + vals[i][j] for j in range(0, len(vals[i])) ]
    # Draw the package names
    plt.xticks(ind + .6, [ p.name for p in data ], rotation=-60, rotation_mode="anchor", fontsize=8, ha='left')
    # Adjust size of graph depending on the number of packages
    # Ensure a minimal size twice as the default
    # Magic Numbers do Magic Layout!
    ratio = max(((n_pkgs + 10) / 48, 2))
    borders = 0.1 / ratio
    sz = plt.gcf().get_figwidth()
    plt.gcf().set_figwidth(sz * ratio)
    # Adjust space at borders, add more space for the
    # package names at the bottom
    plt.gcf().subplots_adjust(bottom=0.2, left=borders, right=1-borders)
    # Remove ticks in the graph for each package
    axes = plt.gcf().gca()
    for line in axes.get_xticklines():
        line.set_markersize(0)
    axes.set_ylabel('Time (seconds)')
    # Reduce size of legend text
    leg_prop = fm.FontProperties(size=6)
    # Draw legend
    plt.legend(legenditems, steps, prop=leg_prop)
    if order == "name":
        plt.title('Build time of packages\n')
    elif order == "build":
        plt.title('Build time of packages, by build order\n')
    elif order == "duration":
        plt.title('Build time of packages, by duration order\n')
    # Save graph
    plt.savefig(output)
# Generate a pie chart with the time spent building each package.
def pkg_pie_time_per_package(data, output):
    """Draw a pie chart of per-package build time into 'output'; packages
    accounting for less than 1% of the total are merged into a single
    'Other' slice."""
    # Compute total build duration
    total = 0
    for p in data:
        total += p.get_duration()
    # Build the list of labels and values, and filter the packages
    # that account for less than 1% of the build time.
    labels = []
    values = []
    other_value = 0
    for p in data:
        if p.get_duration() < (total * 0.01):
            other_value += p.get_duration()
        else:
            labels.append(p.name)
            values.append(p.get_duration())
    labels.append('Other')
    values.append(other_value)
    plt.figure()
    # Draw pie graph
    patches, texts, autotexts = plt.pie(values, labels=labels,
                                        autopct='%1.1f%%', shadow=True,
                                        colors=colors)
    # Reduce text size
    proptease = fm.FontProperties()
    proptease.set_size('xx-small')
    plt.setp(autotexts, fontproperties=proptease)
    plt.setp(texts, fontproperties=proptease)
    plt.title('Build time per package')
    plt.savefig(output)
# Generate a pie chart with a portion for the overall time spent in
# each step for all packages.
def pkg_pie_time_per_step(data, output):
    """Draw a pie chart into 'output' of the total time spent in each
    build step, summed over all packages."""
    steps_values = []
    for step in steps:
        val = 0
        for p in data:
            val += p.get_duration(step)
        steps_values.append(val)
    plt.figure()
    # Draw pie graph
    patches, texts, autotexts = plt.pie(steps_values, labels=steps,
                                        autopct='%1.1f%%', shadow=True,
                                        colors=colors)
    # Reduce text size
    proptease = fm.FontProperties()
    proptease.set_size('xx-small')
    plt.setp(autotexts, fontproperties=proptease)
    plt.setp(texts, fontproperties=proptease)
    plt.title('Build time per step')
    plt.savefig(output)
# Parses the csv file passed on standard input and returns a list of
# Package objects, filed with the duration of each step and the total
# duration of the package.
def read_data(input_file):
    """Read 'time:state:step:package' records from 'input_file' (a file
    name, or standard input when None) and return the Package objects in
    order of first appearance.

    Fixes over the original: the opened file is closed when done, and
    packages are looked up through a dict instead of a linear scan per
    row (the scan made parsing O(rows * packages))."""
    if input_file is None:
        close_input = False
        input_file = sys.stdin
    else:
        close_input = True
        input_file = open(input_file)
    try:
        reader = csv.reader(input_file, delimiter=':')
        pkgs = []
        # Name -> Package index for O(1) lookup; 'pkgs' keeps the
        # first-seen ordering for the caller.
        pkgs_by_name = {}
        for row in reader:
            time = int(row[0].strip())
            state = row[1].strip()
            step = row[2].strip()
            pkg = row[3].strip()
            p = pkgs_by_name.get(pkg)
            if p is None:
                p = Package(pkg)
                pkgs.append(p)
                pkgs_by_name[pkg] = p
            p.add_step(step, state, time)
        return pkgs
    finally:
        # Only close files we opened ourselves, never stdin.
        if close_input:
            input_file.close()
# Command-line interface: parse the options, load the data, and dispatch
# to the requested graph generator.
parser = argparse.ArgumentParser(description='Draw build time graphs')
parser.add_argument("--type", '-t', metavar="GRAPH_TYPE",
                    help="Type of graph (histogram, pie-packages, pie-steps)")
parser.add_argument("--order", '-O', metavar="GRAPH_ORDER",
                    help="Ordering of packages: build or duration (for histogram only)")
parser.add_argument("--alternate-colors", '-c', action="store_true",
                    help="Use alternate colour-scheme")
# Fix: the metavar used to read "OUTPUT", a copy-paste slip from the
# --output option that made the --help text misleading.
parser.add_argument("--input", '-i', metavar="INPUT",
                    help="Input file (usually $(O)/build/build-time.log)")
parser.add_argument("--output", '-o', metavar="OUTPUT", required=True,
                    help="Output file (.pdf or .png extension)")
args = parser.parse_args()

d = read_data(args.input)

# Select the colour scheme once, globally, for all graph types.
if args.alternate_colors:
    colors = alternate_colors
else:
    colors = default_colors

# Dispatch on the graph type; the default (no --type) is the histogram,
# and the default ordering is by package name.
if args.type == "histogram" or args.type is None:
    if args.order in ("build", "duration", "name"):
        pkg_histogram(d, args.output, args.order)
    elif args.order is None:
        pkg_histogram(d, args.output, "name")
    else:
        sys.stderr.write("Unknown ordering: %s\n" % args.order)
        exit(1)
elif args.type == "pie-packages":
    pkg_pie_time_per_package(d, args.output)
elif args.type == "pie-steps":
    pkg_pie_time_per_step(d, args.output)
else:
    sys.stderr.write("Unknown type: %s\n" % args.type)
    exit(1)

View File

@@ -0,0 +1,430 @@
#!/usr/bin/python
# Usage (the graphviz package must be installed in your distribution)
# ./support/scripts/graph-depends [-p package-name] > test.dot
# dot -Tpdf test.dot -o test.pdf
#
# With no arguments, graph-depends will draw a complete graph of
# dependencies for the current configuration.
# If '-p <package-name>' is specified, graph-depends will draw a graph
# of dependencies for the given package name.
# If '-d <depth>' is specified, graph-depends will limit the depth of
# the dependency graph to 'depth' levels.
#
# Limitations
#
# * Some packages have dependencies that depend on the Buildroot
# configuration. For example, many packages have a dependency on
# openssl if openssl has been enabled. This tool will graph the
# dependencies as they are with the current Buildroot
# configuration.
#
# Copyright (C) 2010-2013 Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
import sys
import subprocess
import argparse
from fnmatch import fnmatch
# Modes of operation:
MODE_FULL = 1 # draw full dependency graph for all selected packages
MODE_PKG = 2 # draw dependency graph for a given package
mode = 0
# Limit drawing the dependency graph to this depth. 0 means 'no limit'.
max_depth = 0
# Whether to draw the transitive dependencies
transitive = True
# Command-line options; see the individual 'help' strings for details.
parser = argparse.ArgumentParser(description="Graph packages dependencies")
parser.add_argument("--check-only", "-C", dest="check_only", action="store_true", default=False,
                    help="Only do the dependency checks (circular deps...)")
parser.add_argument("--outfile", "-o", metavar="OUT_FILE", dest="outfile",
                    help="File in which to generate the dot representation")
parser.add_argument("--package", '-p', metavar="PACKAGE",
                    help="Graph the dependencies of PACKAGE")
parser.add_argument("--depth", '-d', metavar="DEPTH", dest="depth", type=int, default=0,
                    help="Limit the dependency graph to DEPTH levels; 0 means no limit.")
parser.add_argument("--stop-on", "-s", metavar="PACKAGE", dest="stop_list", action="append",
                    help="Do not graph past this package (can be given multiple times)." \
                       + " Can be a package name or a glob, " \
                       + " 'virtual' to stop on virtual packages, or " \
                       + "'host' to stop on host packages.")
parser.add_argument("--exclude", "-x", metavar="PACKAGE", dest="exclude_list", action="append",
                    help="Like --stop-on, but do not add PACKAGE to the graph.")
parser.add_argument("--colours", "-c", metavar="COLOR_LIST", dest="colours",
                    default="lightblue,grey,gainsboro",
                    help="Comma-separated list of the three colours to use" \
                       + " to draw the top-level package, the target" \
                       + " packages, and the host packages, in this order." \
                       + " Defaults to: 'lightblue,grey,gainsboro'")
# --transitive / --no-transitive form an on/off pair sharing one dest.
parser.add_argument("--transitive", dest="transitive", action='store_true',
                    default=False)
parser.add_argument("--no-transitive", dest="transitive", action='store_false',
                    help="Draw (do not draw) transitive dependencies")
args = parser.parse_args()

check_only = args.check_only

# Open the output stream: stdout by default.
# Fix: open in text mode ("w") rather than binary ("wb") — everything
# written to 'outfile' below is str, and writing str to a binary-mode
# file raises TypeError under Python 3.
if args.outfile is None:
    outfile = sys.stdout
else:
    if check_only:
        sys.stderr.write("don't specify outfile and check-only at the same time\n")
        sys.exit(1)
    outfile = open(args.outfile, "w")

# Without -p, graph the whole configuration starting from 'all'.
if args.package is None:
    mode = MODE_FULL
else:
    mode = MODE_PKG
    rootpkg = args.package

max_depth = args.depth

if args.stop_list is None:
    stop_list = []
else:
    stop_list = args.stop_list

if args.exclude_list is None:
    exclude_list = []
else:
    exclude_list = args.exclude_list

transitive = args.transitive

# Get the colours: we need exactly three colours,
# so no need not split more than 4
# We'll let 'dot' validate the colours...
colours = args.colours.split(',', 4)
if len(colours) != 3:
    sys.stderr.write("Error: incorrect colour list '%s'\n" % args.colours)
    sys.exit(1)
root_colour = colours[0]
target_colour = colours[1]
host_colour = colours[2]

# All package names ever seen, filled in by get_all_depends().
allpkgs = []
# Execute the "make <pkg>-show-version" command to get the version of a given
# list of packages, and return the version formatted as a Python dictionary.
def get_version(pkgs):
sys.stderr.write("Getting version for %s\n" % pkgs)
cmd = ["make", "-s", "--no-print-directory" ]
for pkg in pkgs:
cmd.append("%s-show-version" % pkg)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
output = p.communicate()[0]
if p.returncode != 0:
sys.stderr.write("Error getting version %s\n" % pkgs)
sys.exit(1)
output = output.split("\n")
if len(output) != len(pkgs) + 1:
sys.stderr.write("Error getting version\n")
sys.exit(1)
version = {}
for i in range(0, len(pkgs)):
pkg = pkgs[i]
version[pkg] = output[i]
return version
# Execute the "make show-targets" command to get the list of the main
# Buildroot PACKAGES and return it formatted as a Python list. This
# list is used as the starting point for full dependency graphs
def get_targets():
sys.stderr.write("Getting targets\n")
cmd = ["make", "-s", "--no-print-directory", "show-targets"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
output = p.communicate()[0].strip()
if p.returncode != 0:
return None
if output == '':
return []
return output.split(' ')
# Execute the "make <pkg>-show-depends" command to get the list of
# dependencies of a given list of packages, and return the list of
# dependencies formatted as a Python dictionary.
def get_depends(pkgs):
sys.stderr.write("Getting dependencies for %s\n" % pkgs)
cmd = ["make", "-s", "--no-print-directory" ]
for pkg in pkgs:
cmd.append("%s-show-depends" % pkg)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
output = p.communicate()[0]
if p.returncode != 0:
sys.stderr.write("Error getting dependencies %s\n" % pkgs)
sys.exit(1)
output = output.split("\n")
if len(output) != len(pkgs) + 1:
sys.stderr.write("Error getting dependencies\n")
sys.exit(1)
deps = {}
for i in range(0, len(pkgs)):
pkg = pkgs[i]
pkg_deps = output[i].split(" ")
if pkg_deps == ['']:
deps[pkg] = []
else:
deps[pkg] = pkg_deps
return deps
# Recursive function that builds the tree of dependencies for a given
# list of packages. The dependencies are built in a list called
# 'dependencies', which contains tuples of the form (pkg1 ->
# pkg2_on_which_pkg1_depends, pkg3 -> pkg4_on_which_pkg3_depends) and
# the function finally returns this list.
def get_all_depends(pkgs):
dependencies = []
# Filter the packages for which we already have the dependencies
filtered_pkgs = []
for pkg in pkgs:
if pkg in allpkgs:
continue
filtered_pkgs.append(pkg)
allpkgs.append(pkg)
if len(filtered_pkgs) == 0:
return []
depends = get_depends(filtered_pkgs)
deps = set()
for pkg in filtered_pkgs:
pkg_deps = depends[pkg]
# This package has no dependency.
if pkg_deps == []:
continue
# Add dependencies to the list of dependencies
for dep in pkg_deps:
dependencies.append((pkg, dep))
deps.add(dep)
if len(deps) != 0:
newdeps = get_all_depends(deps)
if newdeps is not None:
dependencies += newdeps
return dependencies
# The Graphviz "dot" utility doesn't like dashes in node names. So for
# node names, we strip all dashes.
def pkg_node_name(pkg):
    """Return 'pkg' with every dash removed, for use as a dot node name."""
    return "".join(ch for ch in pkg if ch != "-")
# Pseudo-targets that are not real packages; skipped in full mode.
TARGET_EXCEPTIONS = [
    "target-finalize",
    "target-post-image",
]
# In full mode, start with the result of get_targets() to get the main
# targets and then use get_all_depends() for all targets
if mode == MODE_FULL:
    targets = get_targets()
    dependencies = []
    allpkgs.append('all')
    filtered_targets = []
    for tg in targets:
        # Skip uninteresting targets
        if tg in TARGET_EXCEPTIONS:
            continue
        dependencies.append(('all', tg))
        filtered_targets.append(tg)
    deps = get_all_depends(filtered_targets)
    if deps is not None:
        dependencies += deps
    rootpkg = 'all'
# In pkg mode, start directly with get_all_depends() on the requested
# package
elif mode == MODE_PKG:
    dependencies = get_all_depends([rootpkg])
# Make the dependencies a dictionnary { 'pkg':[dep1, dep2, ...] }
dict_deps = {}
for dep in dependencies:
    if dep[0] not in dict_deps:
        dict_deps[dep[0]] = []
    dict_deps[dep[0]].append(dep[1])
# Basic cache for the results of the is_dep() function, in order to
# optimize the execution time. The cache is a dict of dict of boolean
# values. The key to the primary dict is "pkg", and the key of the
# sub-dicts is "pkg2".
is_dep_cache = {}
def is_dep_cache_insert(pkg, pkg2, val):
    """Record in the cache whether pkg2 is a transitive dep of pkg."""
    try:
        is_dep_cache[pkg].update({pkg2: val})
    except KeyError:
        # First entry for this pkg: create its sub-dict.
        is_dep_cache[pkg] = {pkg2: val}
# Retrieves from the cache whether pkg2 is a transitive dependency
# of pkg.
# Note: raises a KeyError exception if the dependency is not known.
def is_dep_cache_lookup(pkg, pkg2):
    return is_dep_cache[pkg][pkg2]
# This function return True if pkg is a dependency (direct or
# transitive) of pkg2, dependencies being listed in the deps
# dictionary. Returns False otherwise.
# This is the un-cached version.
def is_dep_uncached(pkg,pkg2,deps):
    try:
        for p in deps[pkg2]:
            if pkg == p:
                return True
            # Recurse through the cached front-end, not this function,
            # so intermediate results get memoized too.
            if is_dep(pkg,p,deps):
                return True
    except KeyError:
        # pkg2 has no dependency entry at all.
        pass
    return False
# See is_dep_uncached() above; this is the cached version.
def is_dep(pkg,pkg2,deps):
    try:
        return is_dep_cache_lookup(pkg, pkg2)
    except KeyError:
        # Cache miss: compute the answer, then memoize it.
        val = is_dep_uncached(pkg, pkg2, deps)
        is_dep_cache_insert(pkg, pkg2, val)
        return val
# This function eliminates transitive dependencies; for example, given
# these dependency chain: A->{B,C} and B->{C}, the A->{C} dependency is
# already covered by B->{C}, so C is a transitive dependency of A, via B.
# The functions does:
# - for each dependency d[i] of the package pkg
# - if d[i] is a dependency of any of the other dependencies d[j]
# - do not keep d[i]
# - otherwise keep d[i]
def remove_transitive_deps(pkg,deps):
    """Return pkg's dependency list with transitively-covered entries
    removed."""
    direct = deps[pkg]
    kept = []
    for i, candidate in enumerate(direct):
        # Drop the candidate when any *other* direct dependency already
        # pulls it in, directly or transitively.
        covered = any(is_dep(candidate, other, deps)
                      for j, other in enumerate(direct) if j != i)
        if not covered:
            kept.append(candidate)
    return kept
# This function removes the dependency on some 'mandatory' package, like the
# 'toolchain' package, or the 'skeleton' package
def remove_mandatory_deps(pkg,deps):
    """Return pkg's dependencies without the always-present packages."""
    mandatory = ('toolchain', 'skeleton')
    return [dep for dep in deps[pkg] if dep not in mandatory]
# This function will check that there is no loop in the dependency chain
# As a side effect, it builds up the dependency cache.
def check_circular_deps(deps):
    """Exit with an error (printing the offending chain on stderr) if the
    dependency graph in 'deps' contains a cycle."""
    def recurse(pkg):
        if not pkg in list(deps.keys()):
            return
        # Already proven cycle-free on a previous walk.
        if pkg in not_loop:
            return
        not_loop.append(pkg)
        chain.append(pkg)
        for p in deps[pkg]:
            if p in chain:
                sys.stderr.write("\nRecursion detected for : %s\n" % (p))
                # Unwind the chain to show the full cycle, then abort.
                while True:
                    _p = chain.pop()
                    sys.stderr.write("which is a dependency of: %s\n" % (_p))
                    if p == _p:
                        sys.exit(1)
            recurse(p)
        chain.pop()
    not_loop = []
    chain = []
    for pkg in list(deps.keys()):
        recurse(pkg)
# This functions trims down the dependency list of all packages.
# It applies in sequence all the dependency-elimination methods.
def remove_extra_deps(deps):
    """Strip mandatory deps everywhere, and transitive deps unless
    --transitive was given (the 'all' root keeps only direct edges)."""
    for pkg in list(deps.keys()):
        if not pkg == 'all':
            deps[pkg] = remove_mandatory_deps(pkg,deps)
    for pkg in list(deps.keys()):
        if not transitive or pkg == 'all':
            deps[pkg] = remove_transitive_deps(pkg,deps)
    return deps
check_circular_deps(dict_deps)
if check_only:
    sys.exit(0)
dict_deps = remove_extra_deps(dict_deps)
# Versions are used to identify virtual packages when rendering.
dict_version = get_version([pkg for pkg in allpkgs
                            if pkg != "all" and not pkg.startswith("root")])
# Print the attributes of a node: label and fill-color
def print_attrs(pkg):
    """Write the Graphviz node attributes (label and colour) for pkg to
    the global 'outfile'."""
    name = pkg_node_name(pkg)
    if pkg == 'all':
        label = 'ALL'
    else:
        label = pkg
    # The graph root gets its own colour; host/toolchain/rootfs packages
    # share the host colour; everything else is a target package.
    if pkg == 'all' or (mode == MODE_PKG and pkg == rootpkg):
        color = root_colour
    else:
        if pkg.startswith('host') \
           or pkg.startswith('toolchain') \
           or pkg.startswith('rootfs'):
            color = host_colour
        else:
            color = target_colour
    version = dict_version.get(pkg)
    # Virtual packages get an italic (HTML-like) label.
    if version == "virtual":
        outfile.write("%s [label = <<I>%s</I>>]\n" % (name, label))
    else:
        outfile.write("%s [label = \"%s\"]\n" % (name, label))
    outfile.write("%s [color=%s,style=filled]\n" % (name, color))
# Print the dependency graph of a package
def print_pkg_deps(depth, pkg):
    """Recursively emit pkg's node and outgoing edges, honouring the
    global stop/exclude lists and the maximum depth."""
    if pkg in done_deps:
        return
    done_deps.append(pkg)
    print_attrs(pkg)
    if pkg not in dict_deps:
        return
    # The node itself is drawn, but we stop descending here.
    for p in stop_list:
        if fnmatch(pkg, p):
            return
    if dict_version.get(pkg) == "virtual" and "virtual" in stop_list:
        return
    if pkg.startswith("host-") and "host" in stop_list:
        return
    if max_depth == 0 or depth < max_depth:
        for d in dict_deps[pkg]:
            # Excluded dependencies are not drawn at all.
            if dict_version.get(d) == "virtual" \
               and "virtual" in exclude_list:
                continue
            if d.startswith("host-") \
               and "host" in exclude_list:
                continue
            add = True
            for p in exclude_list:
                if fnmatch(d,p):
                    add = False
                    break
            if add:
                outfile.write("%s -> %s\n" % (pkg_node_name(pkg), pkg_node_name(d)))
                print_pkg_deps(depth+1, d)
# Start printing the graph data
outfile.write("digraph G {\n")
done_deps = []
print_pkg_deps(0, rootpkg)
outfile.write("}\n")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,45 @@
#!/bin/sh
# Generates a small Makefile used in the root of the output
# directory, to allow make to be started from there.
# The Makefile also allow for more convenient build of external modules
# Usage
# $1 - Kernel src directory
# $2 - Output directory

# Do not touch a Makefile we cannot claim: if it exists and is readable
# but not owned by us, silently bail out.
# (Fix: quote "$2" so paths with spaces work; split the deprecated,
# ambiguous 'test ... -o ...' form into two tests.)
if test -r "$2/Makefile" && test ! -O "$2/Makefile"; then
	exit 0
fi

# Only overwrite automatically generated Makefiles
# (so we do not overwrite buildroot Makefile)
if test -e "$2/Makefile" && ! grep -q Automatically "$2/Makefile"
then
	exit 0
fi
echo " GEN $2/Makefile"

# Note: the '\$' escapes keep make syntax literal in the generated file,
# while $0, $1 and $2 are expanded by this shell.  The recipe lines below
# must be tab-indented, as required by make.
cat << EOF > "$2/Makefile"
# Automatically generated by $0: don't edit
lastword = \$(word \$(words \$(1)),\$(1))
makedir := \$(dir \$(call lastword,\$(MAKEFILE_LIST)))
MAKEARGS := -C $1
MAKEARGS += O=\$(if \$(patsubst /%,,\$(makedir)),\$(CURDIR)/)\$(patsubst %/,%,\$(makedir))
MAKEFLAGS += --no-print-directory
.PHONY: _all \$(MAKECMDGOALS)
all := \$(filter-out Makefile,\$(MAKECMDGOALS))
_all:
	umask 0022 && \$(MAKE) \$(MAKEARGS) \$(all)
Makefile:;
\$(all): _all
	@:
%/: _all
	@:
EOF

View File

@@ -0,0 +1,433 @@
#!/usr/bin/env bash
set -e
myname="${0##*/}"
#----------------------------------------------------------------------------
# Configurable items
MIN_UID=1000
MAX_UID=1999
MIN_GID=1000
MAX_GID=1999
# No more is configurable below this point
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
error() {
    # Print "progname: <formatted message>" on stderr; the first argument
    # is a printf format string, the rest are its operands.
    local fmt="${1}"
    shift
    {
        printf "%s: " "${myname}"
        # shellcheck disable=SC2059 -- fmt is an intentional format string
        printf "${fmt}" "${@}"
    } >&2
}
fail() {
    # Report the error, then terminate the script immediately.
    error "${@}"
    exit 1
}
#----------------------------------------------------------------------------
# Require exactly two arguments: the users table and the target rootfs.
if [ ${#} -ne 2 ]; then
    fail "usage: %s USERS_TABLE TARGET_DIR\n"
fi
USERS_TABLE="${1}"
TARGET_DIR="${2}"
shift 2
# The account databases we maintain inside the target rootfs.
PASSWD="${TARGET_DIR}/etc/passwd"
SHADOW="${TARGET_DIR}/etc/shadow"
GROUP="${TARGET_DIR}/etc/group"
# /etc/gshadow is not part of the standard skeleton, so not everybody
# will have it, but some may have it, and its content must be in sync
# with /etc/group, so any use of gshadow must be conditional.
GSHADOW="${TARGET_DIR}/etc/gshadow"

# We can't simply source ${BR2_CONFIG} as it may contains constructs
# such as:
#   BR2_DEFCONFIG="$(CONFIG_DIR)/defconfig"
# which when sourced from a shell script will eventually try to execute
# a command name 'CONFIG_DIR', which is plain wrong for virtually every
# systems out there.
# So, we have to scan that file instead. Sigh... :-(
PASSWD_METHOD="$( sed -r -e '/^BR2_TARGET_GENERIC_PASSWD_METHOD="(.*)"$/!d;' \
                  -e 's//\1/;' \
                  "${BR2_CONFIG}" \
              )"
#----------------------------------------------------------------------------
get_uid() {
    # Print the UID of the given user from the target's passwd database;
    # prints nothing when the user does not exist.
    local user="${1}"
    awk -F: -v u="${user}" \
        '$1 == u { printf( "%d\n", $3 ); }' "${PASSWD}"
}
#----------------------------------------------------------------------------
# Print the primary GID of the given user (4th passwd field), if any.
get_ugid() {
    local username="${1}"
    awk -F: -v username="${username}" \
        '$1 == username { printf( "%d\n", $4 ); }' "${PASSWD}"
}
#----------------------------------------------------------------------------
# Print the GID of the given group, if any.
get_gid() {
    local group="${1}"
    awk -F: -v group="${group}" \
        '$1 == group { printf( "%d\n", $3 ); }' "${GROUP}"
}
#----------------------------------------------------------------------------
# Print the name of the user with the given UID, if any.
get_username() {
    local uid="${1}"
    awk -F: -v uid="${uid}" \
        '$3 == uid { printf( "%s\n", $1 ); }' "${PASSWD}"
}
#----------------------------------------------------------------------------
# Print the name of the group with the given GID, if any.
get_group() {
    local gid="${1}"
    awk -F: -v gid="${gid}" \
        '$3 == gid { printf( "%s\n", $1 ); }' "${GROUP}"
}
#----------------------------------------------------------------------------
# Print the name of the primary group of the given user, if any.
get_ugroup() {
    local username="${1}"
    local ugid
    ugid="$( get_ugid "${username}" )"
    if [ -n "${ugid}" ]; then
        get_group "${ugid}"
    fi
}
#----------------------------------------------------------------------------
# Sanity-check the new user/group:
# - check the gid is not already used for another group
# - check the group does not already exist with another gid
# - check the user does not already exist with another gid
# - check the uid is not already used for another user
# - check the user does not already exist with another uid
# - check the user does not already exist in another group
# A uid/gid of -1 means "allocate automatically" and skips the
# corresponding range checks.  Calls fail() (i.e. exits) on any problem.
check_user_validity() {
    local username="${1}"
    local uid="${2}"
    local group="${3}"
    local gid="${4}"
    local _uid _ugid _gid _username _group _ugroup
    # Current database state, for cross-checking against the request.
    _group="$( get_group "${gid}" )"
    _gid="$( get_gid "${group}" )"
    _ugid="$( get_ugid "${username}" )"
    _username="$( get_username "${uid}" )"
    _uid="$( get_uid "${username}" )"
    _ugroup="$( get_ugroup "${username}" )"
    # root is always pre-existing and must not be redefined here.
    # NOTE(review): the \n sits inside the quotes in this message
    # ("invalid username 'foo\n'"); probably meant "...'%s'\n" — verify.
    if [ "${username}" = "root" ]; then
        fail "invalid username '%s\n'" "${username}"
    fi
    if [ ${gid} -lt -1 -o ${gid} -eq 0 ]; then
        fail "invalid gid '%d' for '%s'\n" ${gid} "${username}"
    elif [ ${gid} -ne -1 ]; then
        # check the gid is not already used for another group
        if [ -n "${_group}" -a "${_group}" != "${group}" ]; then
            fail "gid '%d' for '%s' is already used by group '%s'\n" \
                ${gid} "${username}" "${_group}"
        fi
        # check the group does not already exist with another gid
        # Need to split the check in two, otherwise '[' complains it
        # is missing arguments when _gid is empty
        if [ -n "${_gid}" ] && [ ${_gid} -ne ${gid} ]; then
            fail "group '%s' for '%s' already exists with gid '%d' (wants '%d')\n" \
                "${group}" "${username}" ${_gid} ${gid}
        fi
        # check the user does not already exist with another gid
        # Need to split the check in two, otherwise '[' complains it
        # is missing arguments when _ugid is empty
        if [ -n "${_ugid}" ] && [ ${_ugid} -ne ${gid} ]; then
            fail "user '%s' already exists with gid '%d' (wants '%d')\n" \
                "${username}" ${_ugid} ${gid}
        fi
    fi
    if [ ${uid} -lt -1 -o ${uid} -eq 0 ]; then
        fail "invalid uid '%d' for '%s'\n" ${uid} "${username}"
    elif [ ${uid} -ne -1 ]; then
        # check the uid is not already used for another user
        if [ -n "${_username}" -a "${_username}" != "${username}" ]; then
            fail "uid '%d' for '%s' already used by user '%s'\n" \
                ${uid} "${username}" "${_username}"
        fi
        # check the user does not already exist with another uid
        # Need to split the check in two, otherwise '[' complains it
        # is missing arguments when _uid is empty
        if [ -n "${_uid}" ] && [ ${_uid} -ne ${uid} ]; then
            fail "user '%s' already exists with uid '%d' (wants '%d')\n" \
                "${username}" ${_uid} ${uid}
        fi
    fi
    # check the user does not already exist in another group
    if [ -n "${_ugroup}" -a "${_ugroup}" != "${group}" ]; then
        fail "user '%s' already exists with group '%s' (wants '%s')\n" \
            "${username}" "${_ugroup}" "${group}"
    fi
    return 0
}
#----------------------------------------------------------------------------
# Generate a unique GID for given group. If the group already exists,
# then simply report its current GID. Otherwise, generate the lowest GID
# that is:
# - not 0
# - comprised in [MIN_GID..MAX_GID]
# - not already used by a group
generate_gid() {
    # Report the GID of 'group' when it already exists; otherwise print
    # the lowest free GID in [MIN_GID..MAX_GID], or abort when the range
    # is exhausted.
    local group="${1}"
    local gid
    gid="$( get_gid "${group}" )"
    if [ -n "${gid}" ]; then
        printf "%d\n" "${gid}"
        return 0
    fi
    gid=${MIN_GID}
    while [ ${gid} -le ${MAX_GID} ] && [ -n "$( get_group "${gid}" )" ]; do
        gid=$((gid + 1))
    done
    if [ ${gid} -gt ${MAX_GID} ]; then
        fail "can not allocate a GID for group '%s'\n" "${group}"
    fi
    printf "%d\n" "${gid}"
}
#----------------------------------------------------------------------------
# Add a group; if it does already exist, remove it first
add_one_group() {
    local group="${1}"
    local gid="${2}"
    local _f
    # Generate a new GID if needed
    if [ ${gid} -eq -1 ]; then
        gid="$( generate_gid "${group}" )"
    fi
    # Remove any previous instance of this group, and re-add the new one
    sed -i -e '/^'"${group}"':.*/d;' "${GROUP}"
    printf "%s:x:%d:\n" "${group}" "${gid}" >>"${GROUP}"
    # Ditto for /etc/gshadow if it exists
    if [ -f "${GSHADOW}" ]; then
        sed -i -e '/^'"${group}"':.*/d;' "${GSHADOW}"
        printf "%s:*::\n" "${group}" >>"${GSHADOW}"
    fi
}
#----------------------------------------------------------------------------
# Generate a unique UID for given username. If the username already exists,
# then simply report its current UID. Otherwise, generate the lowest UID
# that is:
# - not 0
# - comprised in [MIN_UID..MAX_UID]
# - not already used by a user
generate_uid() {
    local username="${1}"
    local uid
    uid="$( get_uid "${username}" )"
    if [ -z "${uid}" ]; then
        # Scan for the first free UID in the allowed range.
        for(( uid=MIN_UID; uid<=MAX_UID; uid++ )); do
            if [ -z "$( get_username "${uid}" )" ]; then
                break
            fi
        done
        # Loop ran past MAX_UID without a break: range exhausted.
        if [ ${uid} -gt ${MAX_UID} ]; then
            fail "can not allocate a UID for user '%s'\n" "${username}"
        fi
    fi
    printf "%d\n" "${uid}"
}
#----------------------------------------------------------------------------
# Add given user to given group, if not already the case
add_user_to_group() {
    local username="${1}"
    local group="${2}"
    local _f
    # Edit group and gshadow (when present) in lock-step: remove the user
    # from the group's member list, re-append it, then clean up stray
    # commas introduced by the removal.
    # NOTE(review): '[^:]+*' in the first pattern looks odd (probably
    # meant '[^:]*'); verify the intended member-list match.
    for _f in "${GROUP}" "${GSHADOW}"; do
        [ -f "${_f}" ] || continue
        sed -r -i -e 's/^('"${group}"':.*:)(([^:]+,)?)'"${username}"'(,[^:]+*)?$/\1\2\4/;' \
            -e 's/^('"${group}"':.*)$/\1,'"${username}"'/;' \
            -e 's/,+/,/' \
            -e 's/:,/:/' \
            "${_f}"
    done
}
#----------------------------------------------------------------------------
# Encode a password
encode_password() {
    # Hash the clear-text password with the method selected in the
    # Buildroot configuration (BR2_TARGET_GENERIC_PASSWD_METHOD).
    mkpasswd -m "${PASSWD_METHOD}" "${1}"
}
#----------------------------------------------------------------------------
# Add a user; if it does already exist, remove it first
add_one_user() {
    local username="${1}"
    local uid="${2}"
    local group="${3}"
    local gid="${4}"
    local passwd="${5}"
    local home="${6}"
    local shell="${7}"
    local groups="${8}"
    local comment="${9}"
    local _f _group _home _shell _gid _passwd
    # First, sanity-check the user
    check_user_validity "${username}" "${uid}" "${group}" "${gid}"
    # Generate a new UID if needed
    if [ ${uid} -eq -1 ]; then
        uid="$( generate_uid "${username}" )"
    fi
    # Remove any previous instance of this user
    for _f in "${PASSWD}" "${SHADOW}"; do
        sed -r -i -e '/^'"${username}"':.*/d;' "${_f}"
    done
    # The main group is expected to exist already at this point.
    _gid="$( get_gid "${group}" )"
    _shell="${shell}"
    # '-' means "no shell": use /bin/false.
    if [ "${shell}" = "-" ]; then
        _shell="/bin/false"
    fi
    case "${home}" in
        -) _home="/";;
        /) fail "home can not explicitly be '/'\n";;
        /*) _home="${home}";;
        *) fail "home must be an absolute path\n";;
    esac
    # Password field encodings in the users table:
    #   -    no password
    #   !=X  encode X, prefixed with '!'
    #        (NOTE(review): leading '!' conventionally disables the
    #        account password — confirm intent)
    #   =X   encode X
    #   X    use X verbatim (already encoded)
    case "${passwd}" in
        -)
            _passwd=""
            ;;
        !=*)
            _passwd='!'"$( encode_password "${passwd#!=}" )"
            ;;
        =*)
            _passwd="$( encode_password "${passwd#=}" )"
            ;;
        *)
            _passwd="${passwd}"
            ;;
    esac
    printf "%s:x:%d:%d:%s:%s:%s\n" \
        "${username}" "${uid}" "${_gid}" \
        "${comment}" "${_home}" "${_shell}" \
        >>"${PASSWD}"
    printf "%s:%s:::::::\n" \
        "${username}" "${_passwd}" \
        >>"${SHADOW}"
    # Add the user to its additional groups
    if [ "${groups}" != "-" ]; then
        for _group in ${groups//,/ }; do
            add_user_to_group "${username}" "${_group}"
        done
    fi
    # If the user has a home, chown it
    # (Note: stdout goes to the fakeroot-script)
    if [ "${home}" != "-" ]; then
        mkdir -p "${TARGET_DIR}/${home}"
        printf "chown -h -R %d:%d '%s'\n" "${uid}" "${_gid}" "${TARGET_DIR}/${home}"
    fi
}
#----------------------------------------------------------------------------
# Read the users table and create all groups and users, fixed IDs first
# so that automatic allocation can never steal an explicitly-requested ID.
main() {
    local username uid group gid passwd home shell groups comment
    local line
    local -a LINES
    # Some sanity checks
    if [ ${MIN_UID} -le 0 ]; then
        fail "MIN_UID must be >0 (currently %d)\n" ${MIN_UID}
    fi
    if [ ${MIN_GID} -le 0 ]; then
        fail "MIN_GID must be >0 (currently %d)\n" ${MIN_GID}
    fi
    # Read in all the file in memory, exclude empty lines and comments
    while read line; do
        LINES+=( "${line}" )
    done < <( sed -r -e 's/#.*//; /^[[:space:]]*$/d;' "${USERS_TABLE}" )
    # We first create groups whose gid is not -1, and then we create groups
    # whose gid is -1 (automatic), so that, if a group is defined both with
    # a specified gid and an automatic gid, we ensure the specified gid is
    # used, rather than a different automatic gid is computed.
    # First, create all the main groups which gid is *not* automatic
    for line in "${LINES[@]}"; do
        read username uid group gid passwd home shell groups comment <<<"${line}"
        [ ${gid} -ge 0 ] || continue    # Automatic gid
        add_one_group "${group}" "${gid}"
    done
    # Then, create all the main groups which gid *is* automatic
    for line in "${LINES[@]}"; do
        read username uid group gid passwd home shell groups comment <<<"${line}"
        [ ${gid} -eq -1 ] || continue   # Non-automatic gid
        add_one_group "${group}" "${gid}"
    done
    # Then, create all the additional groups
    # If any additional group is already a main group, we should use
    # the gid of that main group; otherwise, we can use any gid
    for line in "${LINES[@]}"; do
        read username uid group gid passwd home shell groups comment <<<"${line}"
        if [ "${groups}" != "-" ]; then
            for g in ${groups//,/ }; do
                add_one_group "${g}" -1
            done
        fi
    done
    # When adding users, we do as for groups, in case two packages create
    # the same user, one with an automatic uid, the other with a specified
    # uid, to ensure the specified uid is used, rather than an incompatible
    # uid be generated.
    # Now, add users whose uid is *not* automatic
    for line in "${LINES[@]}"; do
        read username uid group gid passwd home shell groups comment <<<"${line}"
        [ "${username}" != "-" ] || continue    # Magic string to skip user creation
        [ ${uid} -ge 0 ] || continue            # Automatic uid
        add_one_user "${username}" "${uid}" "${group}" "${gid}" "${passwd}" \
            "${home}" "${shell}" "${groups}" "${comment}"
    done
    # Finally, add users whose uid *is* automatic
    for line in "${LINES[@]}"; do
        read username uid group gid passwd home shell groups comment <<<"${line}"
        [ "${username}" != "-" ] || continue    # Magic string to skip user creation
        [ ${uid} -eq -1 ] || continue           # Non-automatic uid
        add_one_user "${username}" "${uid}" "${group}" "${gid}" "${passwd}" \
            "${home}" "${shell}" "${groups}" "${comment}"
    done
}
#----------------------------------------------------------------------------
main "${@}"

View File

@@ -0,0 +1,410 @@
#!/usr/bin/env bash
# Copyright (C) 2009 by Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# This script generates an HTML file that contains a report about all
# Buildroot packages, their usage of the different package
# infrastructure and possible cleanup actions
#
# Run the script from the Buildroot toplevel directory:
#
# ./support/scripts/pkg-stats > /tmp/pkg.html
#
# Emit the static HTML prologue: CSS for the report table (green = good,
# amber = borderline, red = needs attention) and the document title.
# NOTE(review): no opening <html>/<body> tags are emitted even though the
# matching closing tags are printed at the end of the script — browsers
# tolerate this, but it is not well-formed HTML.
echo "<head>
<style type=\"text/css\">
table {
width: 100%;
}
td {
border: 1px solid black;
}
td.centered {
text-align: center;
}
td.wrong {
background: #ff9a69;
}
td.correct {
background: #d2ffc4;
}
td.nopatches {
background: #d2ffc4;
}
td.somepatches {
background: #ffd870;
}
td.lotsofpatches {
background: #ff9a69;
}
</style>
<title>Statistics of Buildroot packages</title>
</head>
<a href=\"#results\">Results</a><br/>
<table>
<tr>
<td>Id</td>
<td>Package</td>
<td class=\"centered\">Patch count</td>
<td class=\"centered\">Infrastructure</td>
<td class=\"centered\">License</td>
<td class=\"centered\">License files</td>
<td class=\"centered\">Hash file</td>
</tr>
"
# Per-infrastructure package counters, incremented by the scan loop below.
autotools_packages=0
cmake_packages=0
kconfig_packages=0
# Fix: was 'luarocks_package' (singular), which never matched the
# 'luarocks_packages' counter incremented and printed later in the script.
luarocks_packages=0
perl_packages=0
python_packages=0
rebar_packages=0
virtual_packages=0
generic_packages=0
manual_packages=0
# License/hash bookkeeping counters.  Fix: these were initialized with the
# British spelling 'licence' but the scan loop increments (and the summary
# prints) the 'license' spelling, so the init never took effect.
packages_with_license=0
packages_without_license=0
packages_with_license_files=0
packages_without_license_files=0
packages_with_hash_file=0
packages_without_hash_file=0
total_patch_count=0
# Running package id / total count.
cnt=0
# Walk every *.mk under boot/, linux/ and package/; each one that is not an
# infrastructure or "umbrella" makefile corresponds to one package, and gets
# one row in the HTML table plus counter updates.
for i in $(find boot/ linux/ package/ -name '*.mk' | sort) ; do
# Skip pkg-infra makefiles and umbrella makefiles that do not describe an
# actual package.
if test \
$i = "boot/common.mk" -o \
$i = "linux/linux-ext-fbtft.mk" -o \
$i = "linux/linux-ext-xenomai.mk" -o \
$i = "linux/linux-ext-rtai.mk" -o \
$i = "package/efl/efl.mk" -o \
$i = "package/freescale-imx/freescale-imx.mk" -o \
$i = "package/gcc/gcc.mk" -o \
$i = "package/gstreamer/gstreamer.mk" -o \
$i = "package/gstreamer1/gstreamer1.mk" -o \
$i = "package/gtk2-themes/gtk2-themes.mk" -o \
$i = "package/matchbox/matchbox.mk" -o \
$i = "package/opengl/opengl.mk" -o \
$i = "package/qt5/qt5.mk" -o \
$i = "package/x11r7/x11r7.mk" -o \
$i = "package/doc-asciidoc.mk" -o \
$i = "package/pkg-autotools.mk" -o \
$i = "package/pkg-cmake.mk" -o \
$i = "package/pkg-kconfig.mk" -o \
$i = "package/pkg-luarocks.mk" -o \
$i = "package/pkg-perl.mk" -o \
$i = "package/pkg-python.mk" -o \
$i = "package/pkg-rebar.mk" -o \
$i = "package/pkg-virtual.mk" -o \
$i = "package/pkg-download.mk" -o \
$i = "package/pkg-generic.mk" -o \
$i = "package/pkg-utils.mk" ; then
echo "skipping $i" 1>&2
continue
fi
cnt=$((cnt+1))
hashost=0
hastarget=0
infratype=""
# Determine package infrastructure
# Each grep pair records whether the .mk evaluates the host and/or target
# variant of a given infrastructure.  The "(name-package)" pattern does not
# match "(host-name-package)" because of the leading parenthesis.
if grep -E "\(host-autotools-package\)" $i > /dev/null ; then
infratype="autotools"
hashost=1
fi
if grep -E "\(autotools-package\)" $i > /dev/null ; then
infratype="autotools"
hastarget=1
fi
if grep -E "\(kconfig-package\)" $i > /dev/null ; then
infratype="kconfig"
hastarget=1
fi
if grep -E "\(host-luarocks-package\)" $i > /dev/null ; then
infratype="luarocks"
hashost=1
fi
if grep -E "\(luarocks-package\)" $i > /dev/null ; then
infratype="luarocks"
hastarget=1
fi
if grep -E "\(host-perl-package\)" $i > /dev/null ; then
infratype="perl"
hashost=1
fi
if grep -E "\(perl-package\)" $i > /dev/null ; then
infratype="perl"
hastarget=1
fi
if grep -E "\(host-python-package\)" $i > /dev/null ; then
infratype="python"
hashost=1
fi
if grep -E "\(python-package\)" $i > /dev/null ; then
infratype="python"
hastarget=1
fi
if grep -E "\(host-rebar-package\)" $i > /dev/null ; then
infratype="rebar"
hashost=1
fi
if grep -E "\(rebar-package\)" $i > /dev/null ; then
infratype="rebar"
hastarget=1
fi
if grep -E "\(host-virtual-package\)" $i > /dev/null ; then
infratype="virtual"
hashost=1
fi
if grep -E "\(virtual-package\)" $i > /dev/null ; then
infratype="virtual"
hastarget=1
fi
if grep -E "\(host-generic-package\)" $i > /dev/null ; then
infratype="generic"
hashost=1
fi
if grep -E "\(generic-package\)" $i > /dev/null ; then
infratype="generic"
hastarget=1
fi
if grep -E "\(host-cmake-package\)" $i > /dev/null ; then
infratype="cmake"
hashost=1
fi
if grep -E "\(cmake-package\)" $i > /dev/null ; then
infratype="cmake"
hastarget=1
fi
# Derive the package name and its Buildroot variable prefix
# (e.g. foo-bar.mk -> FOO_BAR).
pkg=$(basename $i)
dir=$(dirname $i)
pkg=${pkg%.mk}
pkgvariable=$(echo ${pkg} | tr "a-z-" "A-Z_")
# Count packages per infrastructure
if [ -z ${infratype} ] ; then
infratype="manual"
manual_packages=$(($manual_packages+1))
elif [ ${infratype} = "autotools" ]; then
autotools_packages=$(($autotools_packages+1))
elif [ ${infratype} = "cmake" ]; then
cmake_packages=$(($cmake_packages+1))
elif [ ${infratype} = "kconfig" ]; then
kconfig_packages=$(($kconfig_packages+1))
elif [ ${infratype} = "luarocks" ]; then
luarocks_packages=$(($luarocks_packages+1))
elif [ ${infratype} = "perl" ]; then
perl_packages=$(($perl_packages+1))
elif [ ${infratype} = "python" ]; then
python_packages=$(($python_packages+1))
elif [ ${infratype} = "rebar" ]; then
rebar_packages=$(($rebar_packages+1))
elif [ ${infratype} = "virtual" ]; then
virtual_packages=$(($virtual_packages+1))
elif [ ${infratype} = "generic" ]; then
generic_packages=$(($generic_packages+1))
fi
# Record whether the package declares a license, license files, and ships
# a .hash file next to its .mk.
if grep -qE "^${pkgvariable}_LICENSE[ ]*=" $i ; then
packages_with_license=$(($packages_with_license+1))
license=1
else
packages_without_license=$(($packages_without_license+1))
license=0
fi
if grep -qE "^${pkgvariable}_LICENSE_FILES[ ]*=" $i ; then
packages_with_license_files=$(($packages_with_license_files+1))
license_files=1
else
packages_without_license_files=$(($packages_without_license_files+1))
license_files=0
fi
if test -f ${dir}/${pkg}.hash; then
packages_with_hash_file=$(($packages_with_hash_file+1))
hash_file=1
else
packages_without_hash_file=$(($packages_without_hash_file+1))
hash_file=0
fi
# Emit one HTML table row for this package.
echo "<tr>"
echo "<td>$cnt</td>"
echo "<td>$i</td>"
# Patch count cell: 0 patches is green, 1-4 amber, 5+ red.
package_dir=$(dirname $i)
patch_count=$(find ${package_dir} -name '*.patch' | wc -l)
total_patch_count=$(($total_patch_count+$patch_count))
if test $patch_count -lt 1 ; then
patch_count_class="nopatches"
elif test $patch_count -lt 5 ; then
patch_count_class="somepatches"
else
patch_count_class="lotsofpatches"
fi
echo "<td class=\"centered ${patch_count_class}\">"
echo "<b>$patch_count</b>"
echo "</td>"
# Infrastructure cell: "manual" (no infra) is flagged red.
if [ ${infratype} = "manual" ] ; then
echo "<td class=\"centered wrong\"><b>manual</b></td>"
else
echo "<td class=\"centered correct\">"
echo "<b>${infratype}</b><br/>"
if [ ${hashost} -eq 1 -a ${hastarget} -eq 1 ]; then
echo "target + host"
elif [ ${hashost} -eq 1 ]; then
echo "host"
else
echo "target"
fi
echo "</td>"
fi
# License / license-files / hash-file yes-no cells.
if [ ${license} -eq 0 ] ; then
echo "<td class=\"centered wrong\">No</td>"
else
echo "<td class=\"centered correct\">Yes</td>"
fi
if [ ${license_files} -eq 0 ] ; then
echo "<td class=\"centered wrong\">No</td>"
else
echo "<td class=\"centered correct\">Yes</td>"
fi
if [ ${hash_file} -eq 0 ] ; then
echo "<td class=\"centered wrong\">No</td>"
else
echo "<td class=\"centered correct\">Yes</td>"
fi
echo "</tr>"
done
# print_summary_row LABEL VALUE
# Emit one two-cell row of the summary table (same markup as the original
# hand-unrolled echo sequences).
print_summary_row() {
    echo "<tr>"
    echo "<td>$1</td>"
    echo "<td>$2</td>"
    echo "</tr>"
}
# Close the per-package table and emit the summary/statistics table.
echo "</table>"
# Fix: the attribute quotes were not escaped ("<a id="results">"), so the
# emitted HTML was <a id=results></a>.
echo "<a id=\"results\"></a>"
echo "<table>"
print_summary_row "Packages using the <i>generic</i> infrastructure" "$generic_packages"
print_summary_row "Packages using the <i>cmake</i> infrastructure" "$cmake_packages"
print_summary_row "Packages using the <i>autotools</i> infrastructure" "$autotools_packages"
print_summary_row "Packages using the <i>luarocks</i> infrastructure" "$luarocks_packages"
print_summary_row "Packages using the <i>kconfig</i> infrastructure" "$kconfig_packages"
print_summary_row "Packages using the <i>perl</i> infrastructure" "$perl_packages"
print_summary_row "Packages using the <i>python</i> infrastructure" "$python_packages"
print_summary_row "Packages using the <i>rebar</i> infrastructure" "$rebar_packages"
print_summary_row "Packages using the <i>virtual</i> infrastructure" "$virtual_packages"
print_summary_row "Packages not using any infrastructure" "$manual_packages"
print_summary_row "Packages having license information" "$packages_with_license"
print_summary_row "Packages not having licence information" "$packages_without_license"
print_summary_row "Packages having license files information" "$packages_with_license_files"
print_summary_row "Packages not having licence files information" "$packages_without_license_files"
print_summary_row "Packages having hash file" "$packages_with_hash_file"
print_summary_row "Packages not having hash file" "$packages_without_hash_file"
print_summary_row "Number of patches in all packages" "$total_patch_count"
print_summary_row "TOTAL" "$cnt"
echo "</table>"
echo "<hr/>"
# Report generation timestamp and the git revision the stats were taken from.
echo "<i>Updated on $(LANG=C date), Git commit $(git log master -n 1 --pretty=format:%H)</i>"
echo "</body>"
echo "</html>"

View File

@@ -0,0 +1,30 @@
Readme
======
Kconfiglib
----------
This Python module, developed by Ulf Magnusson and released under the ISC
license, is fetched from:
https://github.com/ulfalizer/Kconfiglib
commit: a95f477eafc0b6708c3ce671fce7302ecec4f789
Kconfiglib license
~~~~~~~~~~~~~~~~~~
License (ISC)
Copyright (c) 2011-2013, Ulf Magnusson <ulfalizer@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

View File

@@ -0,0 +1,833 @@
#!/usr/bin/env perl
# This chunk of stuff was generated by App::FatPacker. To find the original
# file's code, look for the end of this BEGIN block or the string 'FATPACK'
BEGIN {
my %fatpacked;
$fatpacked{"MetaCPAN/API/Tiny.pm"} = <<'METACPAN_API_TINY';
package MetaCPAN::API::Tiny;
{
$MetaCPAN::API::Tiny::VERSION = '1.131730';
}
use strict;
use warnings;
# ABSTRACT: A Tiny API client for MetaCPAN
use Carp;
use JSON::PP 'encode_json', 'decode_json';
use HTTP::Tiny;
sub new {
my ($class, @args) = @_;
$#_ % 2 == 0
or croak 'Arguments must be provided as name/value pairs';
my %params = @args;
die 'ua_args must be an array reference'
if $params{ua_args} && ref($params{ua_args}) ne 'ARRAY';
my $self = +{
base_url => $params{base_url} || 'http://api.metacpan.org/v0',
ua => $params{ua} || HTTP::Tiny->new(
$params{ua_args}
? @{$params{ua_args}}
: (agent => 'MetaCPAN::API::Tiny/'
. ($MetaCPAN::API::VERSION || 'xx'))),
};
return bless($self, $class);
}
sub _build_extra_params {
my $self = shift;
@_ % 2 == 0
or croak 'Incorrect number of params, must be key/value';
my %extra = @_;
my $ua = $self->{ua};
foreach my $key (keys %extra)
{
# The implementation in HTTP::Tiny uses + instead of %20, fix that
$extra{$key} = $ua->_uri_escape($extra{$key});
$extra{$key} =~ s/\+/%20/g;
}
my $params = join '&', map { "$_=" . $extra{$_} } sort keys %extra;
return $params;
}
# /source/{author}/{release}/{path}
sub source {
my $self = shift;
my %opts = @_ ? @_ : ();
my $url = '';
my $error = "Provide 'author' and 'release' and 'path'";
%opts or croak $error;
if (
defined ( my $author = $opts{'author'} ) &&
defined ( my $release = $opts{'release'} ) &&
defined ( my $path = $opts{'path'} )
) {
$url = "source/$author/$release/$path";
} else {
croak $error;
}
$url = $self->{base_url} . "/$url";
my $result = $self->{ua}->get($url);
$result->{'success'}
or croak "Failed to fetch '$url': " . $result->{'reason'};
return $result->{'content'};
}
# /release/{distribution}
# /release/{author}/{release}
sub release {
my $self = shift;
my %opts = @_ ? @_ : ();
my $url = '';
my $error = "Either provide 'distribution', or 'author' and 'release', " .
"or 'search'";
%opts or croak $error;
my %extra_opts = ();
if ( defined ( my $dist = $opts{'distribution'} ) ) {
$url = "release/$dist";
} elsif (
defined ( my $author = $opts{'author'} ) &&
defined ( my $release = $opts{'release'} )
) {
$url = "release/$author/$release";
} elsif ( defined ( my $search_opts = $opts{'search'} ) ) {
ref $search_opts && ref $search_opts eq 'HASH'
or croak $error;
%extra_opts = %{$search_opts};
$url = 'release/_search';
} else {
croak $error;
}
return $self->fetch( $url, %extra_opts );
}
# /pod/{module}
# /pod/{author}/{release}/{path}
sub pod {
my $self = shift;
my %opts = @_ ? @_ : ();
my $url = '';
my $error = "Either provide 'module' or 'author and 'release' and 'path'";
%opts or croak $error;
if ( defined ( my $module = $opts{'module'} ) ) {
$url = "pod/$module";
} elsif (
defined ( my $author = $opts{'author'} ) &&
defined ( my $release = $opts{'release'} ) &&
defined ( my $path = $opts{'path'} )
) {
$url = "pod/$author/$release/$path";
} else {
croak $error;
}
# check content-type
my %extra = ();
if ( defined ( my $type = $opts{'content-type'} ) ) {
$type =~ m{^ text/ (?: html|plain|x-pod|x-markdown ) $}x
or croak 'Incorrect content-type provided';
$extra{headers}{'content-type'} = $type;
}
$url = $self->{base_url}. "/$url";
my $result = $self->{ua}->get( $url, \%extra );
$result->{'success'}
or croak "Failed to fetch '$url': " . $result->{'reason'};
return $result->{'content'};
}
# /module/{module}
sub module {
my $self = shift;
my $name = shift;
$name or croak 'Please provide a module name';
return $self->fetch("module/$name");
}
# file() is a synonym of module
sub file { goto &module }
# /author/{author}
sub author {
my $self = shift;
my ( $pause_id, $url, %extra_opts );
if ( @_ == 1 ) {
$url = 'author/' . shift;
} elsif ( @_ == 2 ) {
my %opts = @_;
if ( defined $opts{'pauseid'} ) {
$url = "author/" . $opts{'pauseid'};
} elsif ( defined $opts{'search'} ) {
my $search_opts = $opts{'search'};
ref $search_opts && ref $search_opts eq 'HASH'
or croak "'search' key must be hashref";
%extra_opts = %{$search_opts};
$url = 'author/_search';
} else {
croak 'Unknown option given';
}
} else {
croak 'Please provide an author PAUSEID or a "search"';
}
return $self->fetch( $url, %extra_opts );
}
sub fetch {
my $self = shift;
my $url = shift;
my $extra = $self->_build_extra_params(@_);
my $base = $self->{base_url};
my $req_url = $extra ? "$base/$url?$extra" : "$base/$url";
my $result = $self->{ua}->get($req_url);
return $self->_decode_result( $result, $req_url );
}
sub post {
my $self = shift;
my $url = shift;
my $query = shift;
my $base = $self->{base_url};
defined $url
or croak 'First argument of URL must be provided';
ref $query and ref $query eq 'HASH'
or croak 'Second argument of query hashref must be provided';
my $query_json = encode_json( $query );
my $result = $self->{ua}->request(
'POST',
"$base/$url",
{
headers => { 'Content-Type' => 'application/json' },
content => $query_json,
}
);
return $self->_decode_result( $result, $url, $query_json );
}
sub _decode_result {
my $self = shift;
my ( $result, $url, $original ) = @_;
my $decoded_result;
ref $result and ref $result eq 'HASH'
or croak 'First argument must be hashref';
defined $url
or croak 'Second argument of a URL must be provided';
if ( defined ( my $success = $result->{'success'} ) ) {
my $reason = $result->{'reason'} || '';
$reason .= ( defined $original ? " (request: $original)" : '' );
$success or croak "Failed to fetch '$url': $reason";
} else {
croak 'Missing success in return value';
}
defined ( my $content = $result->{'content'} )
or croak 'Missing content in return value';
eval { $decoded_result = decode_json $content; 1 }
or do { croak "Couldn't decode '$content': $@" };
return $decoded_result;
}
1;
__END__
=pod
=head1 NAME
MetaCPAN::API::Tiny - A Tiny API client for MetaCPAN
=head1 VERSION
version 1.131730
=head1 DESCRIPTION
This is the Tiny version of L<MetaCPAN::API>. It implements a compatible API
with a few notable exceptions:
=over 4
=item Attributes are direct hash access
The attributes defined using Mo(o|u)se are now accessed via the blessed hash
directly. There are no accessors defined to access this elements.
=item Exception handling
Instead of using Try::Tiny, raw evals are used. This could potentially cause
issues, so just be aware.
=item Testing
Test::Fatal was replaced with an eval implementation of exception().
Test::TinyMocker usage is retained, but may be absorbed since it is pure perl
=back
=head1 CLASS_METHODS
=head2 new
new is the constructor for MetaCPAN::API::Tiny. In the non-tiny version of this
module, this is provided via Any::Moose built from the attributes defined. In
the tiny version, we define our own constructor. It takes the same arguments
and provides similar checks to MetaCPAN::API with regards to arguments passed.
=head1 PUBLIC_METHODS
=head2 source
my $source = $mcpan->source(
author => 'DOY',
release => 'Moose-2.0201',
path => 'lib/Moose.pm',
);
Searches MetaCPAN for a module or a specific release and returns the plain source.
=head2 release
my $result = $mcpan->release( distribution => 'Moose' );
# or
my $result = $mcpan->release( author => 'DOY', release => 'Moose-2.0001' );
Searches MetaCPAN for a dist.
You can do complex searches using 'search' parameter:
# example lifted from MetaCPAN docs
my $result = $mcpan->release(
search => {
author => "OALDERS AND ",
filter => "status:latest",
fields => "name",
size => 1,
},
);
=head2 pod
my $result = $mcpan->pod( module => 'Moose' );
# or
my $result = $mcpan->pod(
author => 'DOY',
release => 'Moose-2.0201',
path => 'lib/Moose.pm',
);
Searches MetaCPAN for a module or a specific release and returns the POD.
=head2 module
my $result = $mcpan->module('MetaCPAN::API');
Searches MetaCPAN and returns a module's ".pm" file.
=head2 file
A synonym of L</module>
=head2 author
my $result1 = $mcpan->author('XSAWYERX');
my $result2 = $mcpan->author( pauseid => 'XSAWYERX' );
Searches MetaCPAN for a specific author.
You can do complex searches using 'search' parameter:
# example lifted from MetaCPAN docs
my $result = $mcpan->author(
search => {
q => 'profile.name:twitter',
size => 1,
},
);
=head2 fetch
my $result = $mcpan->fetch('/release/distribution/Moose');
# with parameters
my $more = $mcpan->fetch(
'/release/distribution/Moose',
param => 'value',
);
This is a helper method for API implementations. It fetches a path from MetaCPAN, decodes the JSON from the content variable and returns it.
You don't really need to use it, but you can in case you want to write your own extension implementation to MetaCPAN::API.
It accepts an additional hash as "GET" parameters.
=head2 post
# /release&content={"query":{"match_all":{}},"filter":{"prefix":{"archive":"Cache-Cache-1.06"}}}
my $result = $mcpan->post(
'release',
{
query => { match_all => {} },
filter => { prefix => { archive => 'Cache-Cache-1.06' } },
},
);
The POST equivalent of the "fetch()" method. It gets the path and JSON request.
=head1 THANKS
Overall the tests and code were ripped directly from MetaCPAN::API and
tiny-fied. A big thanks to Sawyer X for writing the original module.
=head1 AUTHOR
Nicholas R. Perez <nperez@cpan.org>
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2013 by Nicholas R. Perez <nperez@cpan.org>.
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut
METACPAN_API_TINY
s/^ //mg for values %fatpacked;
unshift @INC, sub {
if (my $fat = $fatpacked{$_[1]}) {
if ($] < 5.008) {
return sub {
return 0 unless length $fat;
$fat =~ s/^([^\n]*\n?)//;
$_ = $1;
return 1;
};
}
open my $fh, '<', \$fat
or die "FatPacker error loading $_[1] (could be a perl installation issue?)";
return $fh;
}
return
};
} # END OF FATPACK CODE
use 5.022; # same major version as target perl
use strict;
use warnings;
use Fatal qw(open close);
use Getopt::Long;
use Pod::Usage;
use File::Basename;
use Module::CoreList;
use HTTP::Tiny;
use Safe;
use MetaCPAN::API::Tiny;
# Command-line options; see the POD at the bottom of this file for details.
my ($help, $man, $quiet, $force, $recommend, $test, $host);
my $target = 1;
GetOptions( 'help|?' => \$help,
'man' => \$man,
'quiet|q' => \$quiet,
'force|f' => \$force,
'host!' => \$host,
'target!' => \$target,
'recommend' => \$recommend,
'test' => \$test
) or pod2usage(-exitval => 1);
pod2usage(-exitval => 0) if $help;
pod2usage(-exitval => 0, -verbose => 2) if $man;
pod2usage(-exitval => 1) if scalar @ARGV == 0;
# Global state shared by fetch() and the generation loops below.
my %dist; # name -> metacpan data
my %need_target; # name -> 1 if target package is needed
my %need_host; # name -> 1 if host package is needed
my %need_dlopen; # name -> 1 if requires dynamic library
my %deps_build; # name -> list of host dependencies
my %deps_runtime; # name -> list of target dependencies
my %license_files; # name -> list of license files
my %checksum; # CPAN directory -> content of its remote CHECKSUMS file (cache)
my $mcpan = MetaCPAN::API::Tiny->new();
my $ua = HTTP::Tiny->new();
# Fetch (and memoize per CPAN directory) the CHECKSUMS file covering $url,
# then return the checksum record for the tarball plus its basename.
sub get_checksum {
    my ($url) = @_;

    # Keep only the path component of the URL (scheme/authority stripped).
    my ($path) = $url =~ m|^[^:/?#]+://[^/?#]*([^?#]*)|;
    my ($basename, $dirname) = fileparse($path);

    # One CHECKSUMS download per directory is enough: cache its content.
    if (!$checksum{$dirname}) {
        my $response = $ua->get(qq{http://cpan.metacpan.org${dirname}CHECKSUMS});
        $checksum{$dirname} = $response->{content};
    }

    # CHECKSUMS is Perl source; evaluate it inside a Safe compartment.
    my $parsed = Safe->new->reval($checksum{$dirname});
    return $parsed->{$basename}, $basename;
}
# Download the MANIFEST of a given CPAN release and return it as one string.
sub get_manifest {
    my ($author, $distname, $version) = @_;
    my $manifest_url = sprintf 'http://api.metacpan.org/source/%s/%s-%s/MANIFEST',
        $author, $distname, $version;
    return $ua->get($manifest_url)->{content};
}
# Heuristic: a distribution is a native (XS) extension when its MANIFEST
# lists any C/XS/SWIG source files, i.e. a .swg/.xs/.c/.h/.i entry.
# Inspired by http://deps.cpantesters.org/static/purity.html
sub is_xs {
    my ($manifest) = @_;
    return $manifest =~ m/\.(swg|xs|c|h|i)[\n\s]/;
}
# Scan a MANIFEST for top-level license-ish file names (ARTISTIC, COPYING,
# COPYRIGHT or LICENSE, case-insensitive; entries containing '/' are not
# top-level and are skipped).  When none is found, fall back to README.
# Returns an array reference of candidate file names.
sub find_license_files {
    my ($manifest) = @_;
    my @candidates =
        grep { !m|/| && m/(ARTISTIC|COPYING|COPYRIGHT|LICENSE)/i }
        split /\n/, $manifest;
    if (scalar @candidates == 0 && $manifest =~ m/(README)[\n\s]/i) {
        @candidates = ($1);
    }
    return \@candidates;
}
# Recursively fetch MetaCPAN metadata for distribution $name and record
# whether it is needed as a target and/or host package.  Populates the
# global %dist, %need_target, %need_host, %need_dlopen, %deps_build,
# %deps_runtime and %license_files hashes; %dist doubles as the memoization
# cache, so each distribution is fetched at most once.
# Note: the $need_target/$need_host scalars deliberately shadow the names
# of the global hashes they update.
sub fetch {
my ($name, $need_target, $need_host) = @_;
$need_target{$name} = $need_target if $need_target;
$need_host{$name} = $need_host if $need_host;
unless ($dist{$name}) {
say qq{fetch ${name}} unless $quiet;
my $result = $mcpan->release( distribution => $name );
$dist{$name} = $result;
my $manifest = get_manifest( $result->{author}, $name, $result->{version} );
$need_dlopen{$name} = is_xs( $manifest );
$license_files{$name} = find_license_files( $manifest );
my %build = ();
my %runtime = ();
# Classify each dependency, skipping ones that cannot or need not be
# packaged (perl itself, Alien/Win32 modules, core modules, develop
# phase, and test/recommended deps unless explicitly requested).
foreach my $dep (@{$result->{dependency}}) {
my $modname = ${$dep}{module};
next if $modname eq q{perl};
next if $modname =~ m|^Alien|;
next if $modname =~ m|^Win32|;
next if !$test && $modname =~ m|^Test|;
next if Module::CoreList::is_core( $modname, undef, $] );
# we could use the host Module::CoreList data, because host perl and
# target perl have the same major version
next if ${$dep}{phase} eq q{develop};
next if !$test && ${$dep}{phase} eq q{test};
next if !$recommend && ${$dep}{relationship} ne q{requires};
my $distname = $mcpan->module( $modname )->{distribution};
if (${$dep}{phase} eq q{runtime}) {
$runtime{$distname} = 1;
}
else { # configure, build
$build{$distname} = 1;
}
}
$deps_build{$name} = [keys %build];
$deps_runtime{$name} = [keys %runtime];
# Build-time dependencies are only ever needed as host packages.
foreach my $distname (@{$deps_build{$name}}) {
fetch( $distname, 0, 1 );
}
# Runtime dependencies inherit this package's target/host needs; a
# distribution whose runtime dep is XS itself needs dynamic loading.
foreach my $distname (@{$deps_runtime{$name}}) {
fetch( $distname, $need_target, $need_host );
$need_dlopen{$name} ||= $need_dlopen{$distname};
}
}
return;
}
# Seed the recursion with the distributions named on the command line;
# !! normalizes the option values to plain booleans.
foreach my $distname (@ARGV) {
# Command-line's distributions
fetch( $distname, !!$target, !!$host );
}
say scalar keys %dist, q{ packages fetched.} unless $quiet;
# Map a CPAN distribution name to its Buildroot package name:
# lowercase, underscores turned into dashes, prefixed with "perl-".
sub fsname {
    my ($dist) = @_;
    (my $pkg = lc $dist) =~ tr/_/-/;
    return q{perl-} . $pkg;
}
# Map a Buildroot package name to its variable-name prefix:
# uppercase, dashes turned into underscores.
sub brname {
    my ($pkg) = @_;
    (my $var = uc $pkg) =~ tr/-/_/;
    return $var;
}
# For every fetched distribution, generate the Buildroot package skeleton:
# package/perl-*/Config.in (target packages only) and package/perl-*/*.mk.
# Existing files are kept unless --force was given.
while (my ($distname, $dist) = each %dist) {
my $fsname = fsname( $distname );
my $dirname = q{package/} . $fsname;
my $cfgname = $dirname . q{/Config.in};
my $mkname = $dirname . q{/} . $fsname . q{.mk};
my $hashname = $dirname . q{/} . $fsname . q{.hash};
my $brname = brname( $fsname );
mkdir $dirname unless -d $dirname;
# Config.in: option, XS restriction, runtime-dep selects, help text.
if ($need_target{$distname} && ($force || !-f $cfgname)) {
my $abstract = $dist->{abstract};
my $homepage = $dist->{resources}->{homepage} || qq{https://metacpan.org/release/${distname}};
say qq{write ${cfgname}} unless $quiet;
open my $fh, q{>}, $cfgname;
say {$fh} qq{config BR2_PACKAGE_${brname}};
say {$fh} qq{\tbool "${fsname}"};
say {$fh} qq{\tdepends on !BR2_STATIC_LIBS} if $need_dlopen{$distname};
foreach my $dep (sort @{$deps_runtime{$distname}}) {
my $brdep = brname( fsname( $dep ) );
say {$fh} qq{\tselect BR2_PACKAGE_${brdep}};
}
say {$fh} qq{\thelp};
say {$fh} qq{\t ${abstract}\n} if $abstract;
say {$fh} qq{\t ${homepage}};
if ($need_dlopen{$distname}) {
say {$fh} qq{\ncomment "${fsname} needs a toolchain w/ dynamic library"};
say {$fh} qq{\tdepends on BR2_STATIC_LIBS};
}
close $fh;
}
# .mk file: version/site/dependencies/license variables plus the
# perl-package / host-perl-package infrastructure evals.
if ($force || !-f $mkname) {
my $version = $dist->{version};
my($path) = $dist->{download_url} =~ m|^[^:/?#]+://[^/?#]*([^?#]*)|;
# this URL contains only the scheme, auth and path parts (but no query and fragment parts)
# the scheme is not used, because the job is done by the BR download infrastructure
# the auth part is not used, because we use $(BR2_CPAN_MIRROR)
my($filename, $directories, $suffix) = fileparse( $path, q{tar.gz}, q{tgz} );
$directories =~ s|/$||;
my $dependencies = join q{ }, map( { q{host-} . fsname( $_ ); } sort @{$deps_build{$distname}} ),
map( { fsname( $_ ); } sort @{$deps_runtime{$distname}} );
my $host_dependencies = join q{ }, map { q{host-} . fsname( $_ ); } sort( @{$deps_build{$distname}},
@{$deps_runtime{$distname}} );
my $license = ref $dist->{license} eq 'ARRAY'
? join q{ or }, @{$dist->{license}}
: $dist->{license};
# BR requires license name as in http://spdx.org/licenses/
$license =~ s|apache_2_0|Apache-2.0|;
$license =~ s|artistic_2|Artistic-2.0|;
$license =~ s|mit|MIT|;
$license =~ s|openssl|OpenSSL|;
$license =~ s|perl_5|Artistic or GPLv1+|;
my $license_files = join q{ }, @{$license_files{$distname}};
say qq{write ${mkname}} unless $quiet;
open my $fh, q{>}, $mkname;
say {$fh} qq{################################################################################};
say {$fh} qq{#};
say {$fh} qq{# ${fsname}};
say {$fh} qq{#};
say {$fh} qq{################################################################################};
say {$fh} qq{};
say {$fh} qq{${brname}_VERSION = ${version}};
say {$fh} qq{${brname}_SOURCE = ${distname}-\$(${brname}_VERSION).${suffix}};
say {$fh} qq{${brname}_SITE = \$(BR2_CPAN_MIRROR)${directories}};
say {$fh} qq{${brname}_DEPENDENCIES = ${dependencies}} if $need_target{$distname} && $dependencies;
say {$fh} qq{HOST_${brname}_DEPENDENCIES = ${host_dependencies}} if $need_host{$distname} && $host_dependencies;
say {$fh} qq{${brname}_LICENSE = ${license}} if $license && $license ne q{unknown};
say {$fh} qq{${brname}_LICENSE_FILES = ${license_files}} if $license_files;
say {$fh} qq{};
say {$fh} qq{\$(eval \$(perl-package))} if $need_target{$distname};
say {$fh} qq{\$(eval \$(host-perl-package))} if $need_host{$distname};
close $fh;
}
# .hash file: md5/sha256 entries taken from the CPAN CHECKSUMS file,
# in Buildroot's "<type> <hash> <filename>" format.
if ($force || !-f $hashname) {
    my($checksum, $filename) = get_checksum($dist->{download_url});
    my $md5 = $checksum->{md5};
    my $sha256 = $checksum->{sha256};
    say qq{write ${hashname}} unless $quiet;
    open my $fh, q{>}, $hashname;
    say {$fh} qq{# retrieved by scancpan from http://cpan.metacpan.org/};
    # Fix: interpolate the tarball filename returned by get_checksum();
    # the previous qq{... $(unknown)} text made Perl interpolate $( (the
    # real-gid variable), producing garbage in the generated hash file.
    say {$fh} qq{md5 ${md5} ${filename}};
    say {$fh} qq{sha256 ${sha256} ${filename}};
    close $fh;
}
}
# Finally, print the 'source' lines that package/Config.in must contain:
# the perl-* entries already present in that file plus one entry per newly
# generated target package, sorted and de-duplicated via the %pkg hash.
my %pkg;
my $cfgname = q{package/Config.in};
if (-f $cfgname) {
open my $fh, q{<}, $cfgname;
while (<$fh>) {
chomp;
$pkg{$_} = 1 if m|package/perl-|;
}
close $fh;
}
foreach my $distname (keys %need_target) {
my $fsname = fsname( $distname );
$pkg{qq{\tsource "package/${fsname}/Config.in"}} = 1;
}
say qq{${cfgname} must contain the following lines:};
say join qq{\n}, sort keys %pkg;
__END__
=head1 NAME
support/scripts/scancpan Try-Tiny Moo
=head1 SYNOPSIS
curl -kL http://install.perlbrew.pl | bash
perlbrew install perl-5.18.2
support/scripts/scancpan [options] [distname ...]
Options:
-help
-man
-quiet
-force
-target/-notarget
-host/-nohost
-recommend
-test
=head1 OPTIONS
=over 8
=item B<-help>
Prints a brief help message and exits.
=item B<-man>
Prints the manual page and exits.
=item B<-quiet>
Executes without output
=item B<-force>
Forces the overwriting of existing files.
=item B<-target/-notarget>
Switches package generation for the target variant (the default is C<-target>).
=item B<-host/-nohost>
Switches package generation for the host variant (the default is C<-nohost>).
=item B<-recommend>
Adds I<recommended> dependencies.
=item B<-test>
Adds dependencies for test.
=back
=head1 DESCRIPTION
This script creates templates of the Buildroot package files for all the
Perl/CPAN distributions required by the specified distnames. The
dependencies and metadata are fetched from https://metacpan.org/.
After running this script, it is necessary to check the generated files.
You have to manually add the license files (PERL_FOO_LICENSE_FILES variable).
For distributions that link against a target library, you have to add the
buildroot package name for that library to the DEPENDENCIES variable.
See the Buildroot documentation for details on the usage of the Perl
infrastructure.
The major version of the host perl must be aligned on the target one,
in order to work with the right CoreList data.
=head1 LICENSE
Copyright (C) 2013-2014 by Francois Perrad <francois.perrad@gadz.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
This script is a part of Buildroot.
This script requires the module C<MetaCPAN::API::Tiny> (version 1.131730)
which was included at the beginning of this file by the tool C<fatpack>.
See L<http://search.cpan.org/~nperez/MetaCPAN-API-Tiny-1.131730/>.
See L<http://search.cpan.org/search?query=App-FatPacker&mode=dist>.
Both of these libraries are free software and may be distributed under the same
terms as perl itself.
And perl may be distributed under the terms of Artistic v1 or GPL v1 license.
=cut

View File

@@ -0,0 +1,82 @@
#!/bin/sh
#
# This scripts adds local version information from the version
# control systems git, mercurial (hg) and subversion (svn).
#
# If something goes wrong, send a mail to the kernel build mailing list
# (see MAINTAINERS) and CC Nico Schottelius
# <nico-linuxsetlocalversion -at- schottelius.org>.
#
#
# Print a one-line usage summary on stderr and abort with status 1.
usage() {
	printf 'Usage: %s [srctree]\n' "$0" >&2
	exit 1
}
# Operate on the requested source tree (defaults to the current
# directory); abort with usage on an invalid path.
cd "${1:-.}" || usage

# Check for git and a git repo.
if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
	# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore it,
	# because this version is defined in the top level Makefile.
	if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
		# If we are past a tagged commit (like "v2.6.30-rc5-302-g72357d5"),
		# we pretty print it.
		if atag="`git describe 2>/dev/null`"; then
			echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
		# If we don't have a tag at all we print -g{commitish}.
		else
			printf '%s%s' -g $head
		fi
	fi
	# Is this git on svn?
	if git config --get svn-remote.svn.url >/dev/null; then
		printf -- '-svn%s' "`git svn find-rev $head`"
	fi
	# Update index only on r/w media
	[ -w . ] && git update-index --refresh --unmerged > /dev/null
	# Check for uncommitted changes.
	# NOTE(review): 'read dummy' runs in a pipeline subshell here; it is
	# only used as a "did the pipeline produce any output?" test.
	if git diff-index --name-only HEAD | grep -v "^scripts/package" \
		| read dummy; then
		printf '%s' -dirty
	fi
	# All done with git
	exit
fi

# Check for mercurial and a mercurial repo.
if hgid=`hg id 2>/dev/null`; then
	# 'hg id' prints "<changeset-id> <tag>"; keep the tag field.
	tag=`printf '%s' "$hgid" | cut -d' ' -f2`
	# Do we have an untagged version?
	if [ -z "$tag" -o "$tag" = tip ]; then
		# Strip the '+' dirty marker and anything after a space,
		# leaving just the bare changeset id.
		id=`printf '%s' "$hgid" | sed 's/[+ ].*//'`
		printf '%s%s' -hg "$id"
	fi
	# Are there uncommitted changes?
	# These are represented by + after the changeset id.
	case "$hgid" in
	*+|*+\ *) printf '%s' -dirty ;;
	esac
	# All done with mercurial
	exit
fi

# Check for svn and a svn repo.
# LC_ALL=C forces English 'svn info' output so the grep pattern matches
# regardless of the user's locale.
if rev=`LC_ALL=C svn info 2>/dev/null | grep '^Last Changed Rev'`; then
	# Keep only the revision number (last whitespace-separated field).
	rev=`echo $rev | awk '{print $NF}'`
	printf -- '-svn%s' "$rev"
	# All done with svn
	exit
fi

View File

@@ -0,0 +1,217 @@
#!/usr/bin/env python
# Copyright (C) 2014 by Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
import os.path
import argparse
import csv
import collections
# matplotlib is only needed for graph generation; fail early with a
# clear message when it is missing. The 'Agg' backend renders straight
# to files, so no display is required.
try:
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.font_manager as fm
    import matplotlib.pyplot as plt
except ImportError:
    sys.stderr.write("You need python-matplotlib to generate the size graph\n")
    exit(1)

# Color palette used for the pie-chart slices.
colors = ['#e60004', '#009836', '#2e1d86', '#ffed00',
          '#0068b5', '#f28e00', '#940084', '#97c000']
#
# This function adds a new file to 'filesdict', after checking its
# size. The 'filesdict' contain the relative path of the file as the
# key, and as the value a tuple containing the name of the package to
# which the file belongs and the size of the file.
#
# filesdict: the dict to which the file is added
# relpath: relative path of the file
# abspath: absolute path to the file
# pkg: package to which the file belongs
#
def add_file(filesdict, relpath, abspath, pkg):
    # Ignore entries that no longer exist on disk, and symlinks, whose
    # on-disk size is not meaningful for the rootfs accounting.
    if not os.path.exists(abspath):
        return
    if os.path.islink(abspath):
        return
    # os.path.getsize() is used for consistency with build_package_size().
    filesdict[relpath] = (pkg, os.path.getsize(abspath))
#
# Build a dict mapping each file of the root filesystem to a tuple of
# (owning package name, file size). The mapping is read from the
# 'packages-file-list.txt' file generated during the Buildroot build.
#
# builddir: path to the Buildroot output directory
#
def build_package_dict(builddir):
    filesdict = {}
    listpath = os.path.join(builddir, "build", "packages-file-list.txt")
    with open(listpath) as filelistf:
        for line in filelistf:
            pkg, fpath = line.split(",")
            # remove the initial './' in each file path
            fpath = fpath.strip()[2:]
            add_file(filesdict, fpath,
                     os.path.join(builddir, "target", fpath), pkg)
    return filesdict
#
# This function builds a dictionary that contains the name of a
# package as key, and the size of the files installed by this package
# as the value.
#
# filesdict: dictionary with the name of the files as key, and as
# value a tuple containing the name of the package to which the files
# belongs, and the size of the file. As returned by
# build_package_dict.
#
# builddir: path to the Buildroot output directory
#
def build_package_size(filesdict, builddir):
    pkgsize = collections.defaultdict(int)
    # Hoisted out of the loop: the target directory never changes.
    targetdir = os.path.join(builddir, "target")
    for root, _, files in os.walk(targetdir):
        for f in files:
            fpath = os.path.join(root, f)
            # Symlinks do not contribute to the rootfs size accounting.
            if os.path.islink(fpath):
                continue
            frelpath = os.path.relpath(fpath, targetdir)
            if frelpath not in filesdict:
                # File present in target/ but absent from the package
                # file list: attribute it to a catch-all pseudo-package.
                print("WARNING: %s is not part of any package" % frelpath)
                pkg = "unknown"
            else:
                pkg = filesdict[frelpath][0]
            pkgsize[pkg] += os.path.getsize(fpath)
    return pkgsize
#
# Given a dict returned by build_package_size(), this function
# generates a pie chart of the size installed by each package.
#
# pkgsize: dictionary with the name of the package as a key, and the
# size as the value, as returned by build_package_size.
#
# outputf: output file for the graph
#
def draw_graph(pkgsize, outputf):
    total = sum(pkgsize.values())
    # Packages weighing less than 1% of the total are merged into a
    # single "Other" slice so the chart stays readable.
    slice_labels = []
    slice_values = []
    other_total = 0
    for pkg, size in pkgsize.items():
        if size < (total * 0.01):
            other_total += size
        else:
            slice_labels.append("%s (%d kB)" % (pkg, size / 1000.))
            slice_values.append(size)
    slice_labels.append("Other (%d kB)" % (other_total / 1000.))
    slice_values.append(other_total)

    plt.figure()
    patches, texts, autotexts = plt.pie(slice_values, labels=slice_labels,
                                        autopct='%1.1f%%', shadow=True,
                                        colors=colors)
    # Shrink the label and percentage fonts so they do not overlap.
    small_font = fm.FontProperties()
    small_font.set_size('xx-small')
    plt.setp(autotexts, fontproperties=small_font)
    plt.setp(texts, fontproperties=small_font)
    plt.suptitle("Filesystem size per package", fontsize=18, y=.97)
    plt.title("Total filesystem size: %d kB" % (total / 1000.), fontsize=10, y=.96)
    plt.savefig(outputf)
#
# Generate a CSV file with statistics about the size of each file, its
# size contribution to the package and to the overall system.
#
# filesdict: dictionary with the name of the files as key, and as
# value a tuple containing the name of the package to which the files
# belongs, and the size of the file. As returned by
# build_package_dict.
#
# pkgsizes: dictionary with the name of the package as a key, and the
# size as the value, as returned by build_package_size.
#
# outputf: output CSV file
#
def gen_files_csv(filesdict, pkgsizes, outputf):
    # Total rootfs size; computed the same way as in gen_packages_csv.
    total = sum(pkgsizes.values())
    with open(outputf, 'w') as csvfile:
        wr = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        wr.writerow(["File name",
                     "Package name",
                     "File size",
                     "Package size",
                     "File size in package (%)",
                     "File size in system (%)"])
        for f, (pkgname, filesize) in filesdict.items():
            pkgsize = pkgsizes[pkgname]
            wr.writerow([f, pkgname, filesize, pkgsize,
                         "%.1f" % (float(filesize) / pkgsize * 100),
                         "%.1f" % (float(filesize) / total * 100)])
#
# Generate a CSV file with statistics about the size of each package,
# and their size contribution to the overall system.
#
# pkgsizes: dictionary with the name of the package as a key, and the
# size as the value, as returned by build_package_size.
#
# outputf: output CSV file
#
def gen_packages_csv(pkgsizes, outputf):
    grand_total = sum(pkgsizes.values())
    with open(outputf, 'w') as out:
        writer = csv.writer(out, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(["Package name", "Package size", "Package size in system (%)"])
        for name, size in pkgsizes.items():
            share = "%.1f" % (float(size) / grand_total * 100)
            writer.writerow([name, size, share])
# Command-line interface: only --builddir is mandatory; each output
# (graph, per-file CSV, per-package CSV) is generated on demand.
parser = argparse.ArgumentParser(description='Draw size statistics graphs')
parser.add_argument("--builddir", '-i', metavar="BUILDDIR", required=True,
                    help="Buildroot output directory")
parser.add_argument("--graph", '-g', metavar="GRAPH",
                    help="Graph output file (.pdf or .png extension)")
parser.add_argument("--file-size-csv", '-f', metavar="FILE_SIZE_CSV",
                    help="CSV output file with file size statistics")
parser.add_argument("--package-size-csv", '-p', metavar="PKG_SIZE_CSV",
                    help="CSV output file with package size statistics")
args = parser.parse_args()

# Find out which package installed what files
pkgdict = build_package_dict(args.builddir)

# Collect the size installed by each package
pkgsize = build_package_size(pkgdict, args.builddir)

# Emit only the outputs requested on the command line.
if args.graph:
    draw_graph(pkgsize, args.graph)
if args.file_size_csv:
    gen_files_csv(pkgdict, pkgsize, args.file_size_csv)
if args.package_size_csv:
    gen_packages_csv(pkgsize, args.package_size_csv)

View File

@@ -0,0 +1,127 @@
#!/usr/bin/env python
# Copyright (C) 2016 Thomas De Schampheleire <thomas.de.schampheleire@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# TODO (improvements)
# - support K,M,G size suffixes for threshold
# - output CSV file in addition to stdout reporting
import csv
import argparse
import sys
def read_file_size_csv(inputf, detail=None):
    """Extract package or file sizes from CSV file into size dictionary"""
    reader = csv.reader(inputf)
    header = next(reader)
    if (header[0] != 'File name' or header[1] != 'Package name' or
            header[2] != 'File size' or header[3] != 'Package size'):
        print(("Input file %s does not contain the expected header. Are you "
               "sure this file corresponds to the file-size-stats.csv "
               "file created by 'make graph-size'?") % inputf.name)
        sys.exit(1)
    # Column selection: per-file stats (file name, file size) when
    # 'detail' is set, per-package stats (package name, package size)
    # otherwise.
    name_col, size_col = (0, 2) if detail else (1, 3)
    return {row[name_col]: int(row[size_col]) for row in reader}
def compare_sizes(old, new):
    """Return delta/added/removed dictionaries based on two input size
    dictionaries"""
    before = set(old)
    after = set(new)
    delta = {}
    # Entries present on both sides: plain size difference.
    for name in after & before:
        delta[name] = ('', new[name] - old[name])
    # Entries that only exist in the new build.
    for name in after - before:
        delta[name] = ('added', new[name])
    # Entries that disappeared; their size counts as a reduction.
    for name in before - after:
        delta[name] = ('removed', -old[name])
    return delta
def print_results(result, threshold):
    """Print the given result dictionary sorted by size, ignoring any entries
    below or equal to threshold"""
    # result maps name -> (flag, size difference). Sorting on the size
    # difference puts the biggest reductions first and the biggest
    # growths last. The previous function-local 'six.iteritems' import
    # added a third-party dependency for something list(result.items())
    # does identically on both Python 2 and 3.
    for name, (flag, diff) in sorted(result.items(), key=lambda e: e[1][1]):
        if threshold is not None and abs(diff) <= threshold:
            continue
        print('%12s %7s %s' % (diff, flag, name))
# main #########################################################################

description = """
Compare rootfs size between Buildroot compilations, for example after changing
configuration options or after switching to another Buildroot release.
This script compares the file-size-stats.csv file generated by 'make graph-size'
with the corresponding file from another Buildroot compilation.
The size differences can be reported per package or per file.
Size differences smaller or equal than a given threshold can be ignored.
"""

parser = argparse.ArgumentParser(description=description,
                                 formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', '--detail', action='store_true',
                    help='''report differences for individual files rather than
packages''')
parser.add_argument('-t', '--threshold', type=int,
                    help='''ignore size differences smaller or equal than this
value (bytes)''')
parser.add_argument('old_file_size_csv', type=argparse.FileType('r'),
                    metavar='old-file-size-stats.csv',
                    help="""old CSV file with file and package size statistics,
generated by 'make graph-size'""")
parser.add_argument('new_file_size_csv', type=argparse.FileType('r'),
                    metavar='new-file-size-stats.csv',
                    help='new CSV file with file and package size statistics')
args = parser.parse_args()

# Report per file with -d/--detail, per package otherwise.
if args.detail:
    keyword = 'file'
else:
    keyword = 'package'

old_sizes = read_file_size_csv(args.old_file_size_csv, args.detail)
new_sizes = read_file_size_csv(args.new_file_size_csv, args.detail)
delta = compare_sizes(old_sizes, new_sizes)

print('Size difference per %s (bytes), threshold = %s' % (keyword, args.threshold))
print(80*'-')
print_results(delta, args.threshold)
print(80*'-')
# Grand-total line: overall rootfs growth/shrink, never thresholded.
print_results({'TOTAL': ('', sum(new_sizes.values()) - sum(old_sizes.values()))},
              threshold=None)

View File

@@ -0,0 +1,180 @@
#!/usr/bin/python
# This script generates a report on the packaging status of X.org
# releases in Buildroot. It does so by downloading the list of
# tarballs that are part of a given X.org release, and compare that
# with the packages that are available in Buildroot.
import BeautifulSoup
import re
import os
import urllib
from distutils.version import LooseVersion
# This can be customized: the X.org "katamari" release to audit against.
XORG_VERSION = "X11R7.7"

# Key names in dictionaries built by this script.
XORG_VERSION_KEY = "xorg-version"
BR_VERSION_KEY = "br-version"
BR_NAME_KEY = "br-name"

# Packages part of X.org releases that we do not want to package in
# Buildroot (old drivers for hardware unlikely to be used in embedded
# contexts).
XORG_EXCEPTIONS = [
    'xf86-video-suncg6',
    'xf86-video-sunffb',
]
# Get the list of tarballs of a X.org release, parse it, and return a
# dictionary of dictionaries, of the form:
#
# { <name_of_package> : { XORG_VERSION_KEY: <version_of_package> },
# <name_of_package2> : { XORG_VERSION_KEY: <version_of_package2> }}
#
# NOTE(review): this uses Python 2 APIs (urllib.URLopener) and the
# BeautifulSoup 3 feed()/findAll() interface; it will not run under
# Python 3 / BeautifulSoup 4 without porting.
def get_xorg_release_pkgs():
    u = urllib.URLopener().open("http://www.x.org/releases/%s/src/everything/" % XORG_VERSION)
    b = BeautifulSoup.BeautifulSoup()
    b.feed(u.read())
    links = b.findAll("a")
    packages = {}
    # Tarballs are named <name>-<version>.tar.bz2
    r = re.compile("(.*)-([0-9\.]*).tar.bz2")
    # We now have a list of all links.
    for link in links:
        href = link.get("href")
        # Skip everything but tarballs
        if not href.endswith(".tar.bz2"):
            continue
        # Separate the name and the version
        groups = r.match(href)
        if not groups:
            continue
        name = groups.group(1)
        version = groups.group(2)
        # Skip packages we don't want to hear about
        if name in XORG_EXCEPTIONS:
            continue
        packages[name] = { XORG_VERSION_KEY : version }
    return packages
# Files and directories in package/x11r7/ that should be ignored in
# our processing (either not packages at all, or packages that do not
# correspond to an X.org tarball).
BUILDROOT_EXCEPTIONS = [
    "mcookie", # Code is directly in package directory
    "x11r7.mk",
    "Config.in",
    "xdriver_xf86-input-tslib", # From Pengutronix, not part of X.org releases
]
# Prefixes of directories in package/x11r7/ that must be stripped
# before trying to match Buildroot package names with X.org tarball
# names.
BUILDROOT_PREFIXES = [
    "xapp",
    "xdriver",
    "xfont",
    "xlib",
    "xserver",
    "xutil",
    "xproto",
]

# Strip a known Buildroot category prefix ("xapp_", "xlib_", ...) from
# a package directory name. For example, "xapp_xlsfonts" becomes
# "xlsfonts". Names without a known prefix are returned unchanged.
def buildroot_strip_prefix(dirname):
    for category in BUILDROOT_PREFIXES:
        marker = category + "_"
        if dirname.startswith(marker):
            return dirname[len(marker):]
    return dirname
# From a Buildroot package name, parse its .mk file to find the
# Buildroot version of the package by looking at the <foo>_VERSION
# line.
#
# dirname: name of the package directory under package/x11r7/
# Returns the version string, or None when no version line is found.
def buildroot_get_version(dirname):
    mkfile = os.path.join("package", "x11r7", dirname, dirname + ".mk")
    # Raw string avoids the invalid-escape deprecation for '\.'.
    r = re.compile(r"^([A-Z0-9_]*)_VERSION = ([0-9\.]*)$")
    # Context manager closes the file even on the early return; the
    # previous version leaked the open file handle.
    with open(mkfile) as f:
        for l in f:
            m = r.match(l)
            if m:
                return m.group(2)
    return None
# Augment the information of the X.org list of packages (given as
# argument) by details about their packaging in Buildroot. Those
# details are found by looking at the contents of package/x11r7/.
def get_buildroot_pkgs(packages):
    dirs = os.listdir(os.path.join(os.getcwd(), "package", "x11r7"))
    for d in dirs:
        # Skip exceptions
        if d in BUILDROOT_EXCEPTIONS:
            continue
        pkgname = buildroot_strip_prefix(d)
        version = buildroot_get_version(d)
        # dict.has_key() was removed in Python 3; the 'in' operator is
        # equivalent and also works on Python 2.
        if pkgname in packages:
            # There is a X.org package of the same name, so we just
            # add information to the existing dict entry. Use the
            # declared key constants for consistency with the rest of
            # the script (they hold the same string values as the
            # previous literals).
            packages[pkgname][BR_VERSION_KEY] = version
            packages[pkgname][BR_NAME_KEY] = d
        else:
            # There is no X.org package with this name, so we add a
            # new dict entry.
            packages[pkgname] = { BR_VERSION_KEY: version,
                                  BR_NAME_KEY : d }
    return packages
# Print a formatted table, one row per package, showing the Buildroot
# version, the X.org version and the action needed (add / remove /
# upgrade / nothing), followed by per-action counters.
# NOTE(review): Python 2 only ('print' statements, dict.has_key()).
def show_summary(packages):
    FORMAT_STRING = "%40s | %15s | %15s | %-30s"
    print FORMAT_STRING % ("Package name", "Vers in BR", "Vers in X.org", "Action")
    print FORMAT_STRING % ("-" * 40, "-" * 15, "-" * 15, "-" * 30)
    # Sort rows alphabetically by package name.
    pkgs = packages.keys()
    pkgs.sort()
    total_pkgs = 0
    upgrade_pkgs = 0
    add_pkgs = 0
    remove_pkgs = 0
    nothing_todo_pkgs = 0
    for pkgname in pkgs:
        pkg = packages[pkgname]
        total_pkgs += 1
        # In X.org but not in Buildroot: candidate for addition.
        if pkg.has_key(XORG_VERSION_KEY) and not pkg.has_key(BR_VERSION_KEY):
            xorg_version = pkg[XORG_VERSION_KEY]
            br_version = "N/A"
            action = "Add to Buildroot"
            add_pkgs += 1
        # In Buildroot but not in this X.org release: candidate for removal.
        elif not pkg.has_key(XORG_VERSION_KEY) and pkg.has_key(BR_VERSION_KEY):
            br_version = pkg[BR_VERSION_KEY]
            xorg_version = "N/A"
            action = "Remove from Buildroot"
            remove_pkgs += 1
        # Present on both sides: compare versions (LooseVersion handles
        # dotted numeric version strings).
        elif LooseVersion(pkg[XORG_VERSION_KEY]) > LooseVersion(pkg[BR_VERSION_KEY]):
            br_version = pkg[BR_VERSION_KEY]
            xorg_version = pkg[XORG_VERSION_KEY]
            action = "Upgrade"
            upgrade_pkgs += 1
        elif LooseVersion(pkg[XORG_VERSION_KEY]) < LooseVersion(pkg[BR_VERSION_KEY]):
            br_version = pkg[BR_VERSION_KEY]
            xorg_version = pkg[XORG_VERSION_KEY]
            action = "More recent"
            nothing_todo_pkgs += 1
        # Same version on both sides: nothing to do.
        else:
            br_version = pkg[BR_VERSION_KEY]
            xorg_version = pkg[XORG_VERSION_KEY]
            action = ""
            nothing_todo_pkgs += 1
        print FORMAT_STRING % (pkgname, br_version.center(15), xorg_version.center(15), action)
    print FORMAT_STRING % ("-" * 40, "-" * 15, "-" * 15, "-" * 30)
    # Summary counters.
    STAT_FORMAT_STRING = "%40s : %3d"
    print STAT_FORMAT_STRING % ("Total number of packages", total_pkgs)
    print STAT_FORMAT_STRING % ("Packages to upgrade", upgrade_pkgs)
    print STAT_FORMAT_STRING % ("Packages to add", add_pkgs)
    print STAT_FORMAT_STRING % ("Packages to remove", remove_pkgs)
    print STAT_FORMAT_STRING % ("Packages with nothing to do", nothing_todo_pkgs)
# Build the report: fetch the X.org tarball list, overlay the Buildroot
# packaging state, then print the summary table.
packages = get_xorg_release_pkgs()
packages = get_buildroot_pkgs(packages)
# print packages
show_summary(packages)