update buildroot to 2017.02.11

jbnadal
2018-05-22 15:35:47 +02:00
parent 4bf1f5e091
commit a3c10bd762
9257 changed files with 433426 additions and 1701 deletions

View File

@@ -0,0 +1,168 @@
#!/usr/bin/env bash
# A little script I whipped up to make it easy to
# patch source trees and have sane error handling
# -Erik
#
# (c) 2002 Erik Andersen <andersen@codepoet.org>
#
# Parameters:
# - "-s", optional. Silent operation, don't print anything if there
# isn't any error.
# - the build directory, optional, default value is '.'. The place where
# the package sources are.
# - the patch directory, optional, default '../kernel-patches'. The place
# where the patches you want to apply are.
# - other parameters are the patch name patterns, optional, default value is
# '*'. Pattern(s) describing the patch names you want to apply.
#
# The script will look recursively for patches from the patch directory. If a
# file named 'series' exists then the patches mentioned in it will be applied
# as plain patches, regardless of their file name. If no 'series' file exists,
# the script will look for file names matching pattern(s). If the name
# ends with '.tar.*', '.tbz2' or '.tgz', the file is considered as an archive
# and will be uncompressed into a directory named
# '.patches-name_of_the_archive-unpacked'. This directory is then itself
# scanned, with '*' as the pattern. Remember that scanning is recursive.
# Files other than the series file and archives are considered patches.
#
# Once a patch is found, the script will try to apply it. If its name doesn't
# end with '.gz', '.bz', '.bz2', '.xz', '.zip', '.Z', '.diff*' or '.patch*',
# it will be skipped. If necessary, the patch will be uncompressed before being
# applied. The list of the patches applied is stored in '.applied_patches_list'
# file in the build directory.
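#
# Example invocation (paths and pattern hypothetical):
#   support/scripts/apply-patches.sh -s output/build/foo-1.0 package/foo '*.patch'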
set -e
silent=
if [ "$1" = "-s" ] ; then
# add option to be used by the patch tool
silent=-s
shift
fi
# Set directories from arguments, or use defaults.
builddir=${1-.}
patchdir=${2-../kernel-patches}
shift 2
patchpattern=${@-*}
# use a well defined sorting order
export LC_COLLATE=C
if [ ! -d "${builddir}" ] ; then
echo "Aborting. '${builddir}' is not a directory."
exit 1
fi
if [ ! -d "${patchdir}" ] ; then
echo "Aborting. '${patchdir}' is not a directory."
exit 1
fi
# Remove any rejects present BEFORE patching, because if there are
# any, the script will complain at the end about rejects in builddir,
# even if all patches applied cleanly.
find ${builddir}/ '(' -name '*.rej' -o -name '.*.rej' ')' -print0 | \
xargs -0 -r rm -f
function apply_patch {
path="${1%%/}"
patch="${2}"
case "${path}" in
/*) ;;
*) path="$PWD/${path}";;
esac
if [ "$3" ]; then
type="series"; uncomp="cat"
else
case "$patch" in
*.gz)
type="gzip"; uncomp="gunzip -dc"; ;;
*.bz)
type="bzip"; uncomp="bunzip -dc"; ;;
*.bz2)
type="bzip2"; uncomp="bunzip2 -dc"; ;;
*.xz)
type="xz"; uncomp="unxz -dc"; ;;
*.zip)
type="zip"; uncomp="unzip -d"; ;;
*.Z)
type="compress"; uncomp="uncompress -c"; ;;
*.diff*)
type="diff"; uncomp="cat"; ;;
*.patch*)
type="patch"; uncomp="cat"; ;;
*)
echo "Unsupported file type for ${path}/${patch}, skipping";
return 0
;;
esac
fi
if [ -z "$silent" ] ; then
echo ""
echo "Applying $patch using ${type}: "
fi
if [ ! -e "${path}/$patch" ] ; then
echo "Error: missing patch file ${path}/$patch"
exit 1
fi
existing="$(grep -E "/${patch}\$" ${builddir}/.applied_patches_list || true)"
if [ -n "${existing}" ]; then
echo "Error: duplicate filename '${patch}'"
echo "Conflicting files are:"
echo " already applied: ${existing}"
echo " to be applied : ${path}/${patch}"
exit 1
fi
echo "${path}/${patch}" >> ${builddir}/.applied_patches_list
${uncomp} "${path}/$patch" | patch -g0 -p1 -E -d "${builddir}" -t -N $silent
if [ $? != 0 ] ; then
echo "Patch failed! Please fix ${patch}!"
exit 1
fi
}
function scan_patchdir {
local path=$1
shift 1
patches=${@-*}
# If there is a series file, use it instead of using ls sort order
# to apply patches. Skip lines starting with a '#'.
if [ -e "${path}/series" ] ; then
# The format of a series file accepts a second field that is
# used to specify the number of directory components to strip
# when applying the patch, in the form -pN (N an integer >= 0).
# We assume this field to always be -p1, whether it is present
# or missing.
series_patches="`grep -Ev "^#" ${path}/series | cut -d ' ' -f1 2> /dev/null`"
for i in $series_patches; do
apply_patch "$path" "$i" series
done
else
for i in `cd $path; ls -d $patches 2> /dev/null` ; do
if [ -d "${path}/$i" ] ; then
scan_patchdir "${path}/$i"
elif echo "$i" | grep -q -E "\.tar(\..*)?$|\.tbz2?$|\.tgz$" ; then
unpackedarchivedir="$builddir/.patches-$(basename $i)-unpacked"
rm -rf "$unpackedarchivedir" 2> /dev/null
mkdir "$unpackedarchivedir"
tar -C "$unpackedarchivedir" -xaf "${path}/$i"
scan_patchdir "$unpackedarchivedir"
else
apply_patch "$path" "$i"
fi
done
fi
}
touch ${builddir}/.applied_patches_list
scan_patchdir "$patchdir" "$patchpattern"
# Check for rejects...
if [ "`find $builddir/ '(' -name '*.rej' -o -name '.*.rej' ')' -print`" ] ; then
echo "Aborting. Reject files found."
exit 1
fi
# Remove backup files
find $builddir/ '(' -name '*.orig' -o -name '.*.orig' ')' -exec rm -f {} \;

View File

@@ -0,0 +1,224 @@
#!/bin/bash
set -e
# This script must be able to run with bash-3.1, so it can't use
# associative arrays. Instead, it emulates them using 'eval'. It
# can however use indexed arrays, supported since at least bash-3.0.
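# For illustration, storing and retrieving a path for a tree named FOO
# (name hypothetical) is done with:
#   eval BR2_EXT_PATHS_FOO="/some/path"     # store
#   eval d="\${BR2_EXT_PATHS_FOO}"          # retrieve
# which emulates what BR2_EXT_PATHS[FOO] would be with associative arrays.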
# The names of the br2-external trees, once validated.
declare -a BR2_EXT_NAMES
# URL to manual for help in converting old br2-external trees.
# Escape '#' so that make does not consider it a comment.
MANUAL_URL='https://buildroot.org/manual.html\#br2-external-converting'
main() {
local OPT OPTARG
local br2_ext ofile ofmt
while getopts :hkmo: OPT; do
case "${OPT}" in
h) help; exit 0;;
o) ofile="${OPTARG}";;
k) ofmt="kconfig";;
m) ofmt="mk";;
:) error "option '%s' expects a mandatory argument\n" "${OPTARG}";;
\?) error "unknown option '%s'\n" "${OPTARG}";;
esac
done
# Forget options; keep only positional args
shift $((OPTIND-1))
case "${ofmt}" in
mk|kconfig)
;;
*) error "no output format specified (-m/-k)\n";;
esac
if [ -z "${ofile}" ]; then
error "no output file specified (-o)\n"
fi
exec >"${ofile}"
# Trap any unexpected error to generate a meaningful error message
trap "error 'unexpected error while generating ${ofile}\n'" ERR
do_validate ${@//:/ }
do_${ofmt}
}
# Validates the br2-external trees passed as arguments. Makes each of
# them canonical, and stores them in the global array BR2_EXT_NAMES and
# in the emulated associative arrays BR2_EXT_PATHS_* and BR2_EXT_DESCS_*.
#
# Note: since this script is always first called from Makefile context
# to generate the Makefile fragment before it is called to generate the
# Kconfig snippet, we're sure that any error in do_validate will be
# interpreted in Makefile context. Going up to generating the Kconfig
# snippet means that there was no error.
#
do_validate() {
local br2_ext
if [ ${#} -eq 0 ]; then
# No br2-external tree is valid
return
fi
for br2_ext in "${@}"; do
do_validate_one "${br2_ext}"
done
}
do_validate_one() {
local br2_ext="${1}"
local br2_name br2_desc n d
if [ ! -d "${br2_ext}" ]; then
error "'%s': no such file or directory\n" "${br2_ext}"
fi
if [ ! -r "${br2_ext}" -o ! -x "${br2_ext}" ]; then
error "'%s': permission denied\n" "${br2_ext}"
fi
if [ ! -f "${br2_ext}/external.desc" ]; then
error "'%s': does not have a name (in 'external.desc'). See %s\n" \
"${br2_ext}" "${MANUAL_URL}"
fi
br2_name="$(sed -r -e '/^name: +(.*)$/!d; s//\1/' "${br2_ext}/external.desc")"
if [ -z "${br2_name}" ]; then
error "'%s/external.desc': does not define the name\n" "${br2_ext}"
fi
# Only ASCII chars in [A-Za-z0-9_] are permitted
n="$(sed -r -e 's/[A-Za-z0-9_]//g' <<<"${br2_name}" )"
if [ -n "${n}" ]; then
# Escape '$' so that it gets printed
error "'%s': name '%s' contains invalid chars: '%s'\n" \
"${br2_ext}" "${br2_name//\$/\$\$}" "${n//\$/\$\$}"
fi
eval d="\"\${BR2_EXT_PATHS_${br2_name}}\""
if [ -n "${d}" ]; then
error "'%s': name '%s' is already used in '%s'\n" \
"${br2_ext}" "${br2_name}" "${d}"
fi
br2_desc="$(sed -r -e '/^desc: +(.*)$/!d; s//\1/' "${br2_ext}/external.desc")"
if [ ! -f "${br2_ext}/external.mk" ]; then
error "'%s/external.mk': no such file or directory\n" "${br2_ext}"
fi
if [ ! -f "${br2_ext}/Config.in" ]; then
error "'%s/Config.in': no such file or directory\n" "${br2_ext}"
fi
# Register this br2-external tree, use an absolute canonical path
br2_ext="$( cd "${br2_ext}"; pwd )"
BR2_EXT_NAMES+=( "${br2_name}" )
eval BR2_EXT_PATHS_${br2_name}="\"\${br2_ext}\""
eval BR2_EXT_DESCS_${br2_name}="\"\${br2_desc:-\${br2_name}}\""
}
# Generate the .mk snippet that defines makefile variables
# for the br2-external tree
do_mk() {
local br2_name br2_ext
printf '#\n# Automatically generated file; DO NOT EDIT.\n#\n'
printf '\n'
printf 'BR2_EXTERNAL ?='
for br2_name in "${BR2_EXT_NAMES[@]}"; do
eval br2_ext="\"\${BR2_EXT_PATHS_${br2_name}}\""
printf ' %s' "${br2_ext}"
done
printf '\n'
printf 'BR2_EXTERNAL_NAMES = \n'
printf 'BR2_EXTERNAL_DIRS = \n'
printf 'BR2_EXTERNAL_MKS = \n'
if [ ${#BR2_EXT_NAMES[@]} -eq 0 ]; then
printf '\n'
printf '# No br2-external tree defined.\n'
return
fi
for br2_name in "${BR2_EXT_NAMES[@]}"; do
eval br2_desc="\"\${BR2_EXT_DESCS_${br2_name}}\""
eval br2_ext="\"\${BR2_EXT_PATHS_${br2_name}}\""
printf '\n'
printf 'BR2_EXTERNAL_NAMES += %s\n' "${br2_name}"
printf 'BR2_EXTERNAL_DIRS += %s\n' "${br2_ext}"
printf 'BR2_EXTERNAL_MKS += %s/external.mk\n' "${br2_ext}"
printf 'export BR2_EXTERNAL_%s_PATH = %s\n' "${br2_name}" "${br2_ext}"
printf 'export BR2_EXTERNAL_%s_DESC = %s\n' "${br2_name}" "${br2_desc}"
done
}
# Generate the kconfig snippet for the br2-external tree.
do_kconfig() {
local br2_name br2_ext
printf '#\n# Automatically generated file; DO NOT EDIT.\n#\n'
printf '\n'
if [ ${#BR2_EXT_NAMES[@]} -eq 0 ]; then
printf '# No br2-external tree defined.\n'
return
fi
printf 'menu "External options"\n'
printf '\n'
for br2_name in "${BR2_EXT_NAMES[@]}"; do
eval br2_desc="\"\${BR2_EXT_DESCS_${br2_name}}\""
eval br2_ext="\"\${BR2_EXT_PATHS_${br2_name}}\""
if [ ${#BR2_EXT_NAMES[@]} -gt 1 ]; then
printf 'menu "%s"\n' "${br2_desc}"
fi
printf 'comment "%s (in %s)"\n' "${br2_desc}" "${br2_ext}"
printf 'config BR2_EXTERNAL_%s_PATH\n' "${br2_name}"
printf '\tstring\n'
printf '\tdefault "%s"\n' "${br2_ext}"
printf 'source "%s/Config.in"\n' "${br2_ext}"
if [ ${#BR2_EXT_NAMES[@]} -gt 1 ]; then
printf 'endmenu # %s\n' "${br2_name}"
fi
printf '\n'
done
printf "endmenu # User-provided options\n"
}
help() {
cat <<-_EOF_
Usage:
${my_name} <-m|-k> -o FILE PATH
With -m, ${my_name} generates the makefile fragment that defines
variables related to the br2-external trees passed as positional
arguments.
With -k, ${my_name} generates the kconfig snippet to include the
configuration options specified in the br2-external trees passed
as positional arguments.
Options -m and -k are mutually exclusive; the last one given wins.
Options:
-m Generate the makefile fragment.
-k Generate the kconfig snippet.
-o FILE
FILE in which to generate the kconfig snippet or makefile
fragment.
Returns:
0 If no error
!0 If any error
_EOF_
}
error() { local fmt="${1}"; shift; printf "BR2_EXTERNAL_ERROR = ${fmt}" "${@}"; exit 1; }
my_name="${0##*/}"
main "${@}"

View File

@@ -0,0 +1,62 @@
# Copyright (C) 2010-2013 Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
import sys
import subprocess
# Execute the "make <pkg>-show-version" command to get the version of a given
# list of packages, and return the version formatted as a Python dictionary.
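# For illustration (package names real, versions hypothetical):
#   get_version(["busybox", "zlib"]) -> {"busybox": "1.25.1", "zlib": "1.2.8"}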
def get_version(pkgs):
sys.stderr.write("Getting version for %s\n" % pkgs)
cmd = ["make", "-s", "--no-print-directory" ]
for pkg in pkgs:
cmd.append("%s-show-version" % pkg)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
output = p.communicate()[0]
if p.returncode != 0:
sys.stderr.write("Error getting version %s\n" % pkgs)
sys.exit(1)
output = output.split("\n")
if len(output) != len(pkgs) + 1:
sys.stderr.write("Error getting version\n")
sys.exit(1)
version = {}
for i in range(0, len(pkgs)):
pkg = pkgs[i]
version[pkg] = output[i]
return version
def _get_depends(pkgs, rule):
sys.stderr.write("Getting dependencies for %s\n" % pkgs)
cmd = ["make", "-s", "--no-print-directory" ]
for pkg in pkgs:
cmd.append("%s-%s" % (pkg, rule))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
output = p.communicate()[0]
if p.returncode != 0:
sys.stderr.write("Error getting dependencies %s\n" % pkgs)
sys.exit(1)
output = output.split("\n")
if len(output) != len(pkgs) + 1:
sys.stderr.write("Error getting dependencies\n")
sys.exit(1)
deps = {}
for i in range(0, len(pkgs)):
pkg = pkgs[i]
pkg_deps = output[i].split(" ")
if pkg_deps == ['']:
deps[pkg] = []
else:
deps[pkg] = pkg_deps
return deps
# Execute the "make <pkg>-show-depends" command to get the list of
# dependencies of a given list of packages, and return the list of
# dependencies formatted as a Python dictionary.
def get_depends(pkgs):
return _get_depends(pkgs, 'show-depends')
# Execute the "make <pkg>-show-rdepends" command to get the list of
# reverse dependencies of a given list of packages, and return the
# list of dependencies formatted as a Python dictionary.
def get_rdepends(pkgs):
return _get_depends(pkgs, 'show-rdepends')

View File

@@ -0,0 +1,71 @@
#!/usr/bin/env bash
# This script scans $(HOST_DIR)/{bin,sbin} for all ELF files, and checks
# that they have an RPATH to $(HOST_DIR)/usr/lib if they need libraries from
# there.
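# For illustration, the 'readelf -d' lines matched below look like
# (library name and runpath hypothetical):
#   0x0000000000000001 (NEEDED)   Shared library: [libfoo.so.1]
#   0x000000000000001d (RUNPATH)  Library runpath: [/path/to/host/usr/lib]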
# Override the user's locale so we are sure we can parse the output of
# readelf(1) and file(1)
export LC_ALL=C
main() {
local pkg="${1}"
local hostdir="${2}"
local file ret
# Remove duplicate and trailing '/' for proper match
hostdir="$( sed -r -e 's:/+:/:g; s:/$::;' <<<"${hostdir}" )"
ret=0
while read file; do
elf_needs_rpath "${file}" "${hostdir}" || continue
check_elf_has_rpath "${file}" "${hostdir}" && continue
if [ ${ret} -eq 0 ]; then
ret=1
printf "***\n"
printf "*** ERROR: package %s installs executables without proper RPATH:\n" "${pkg}"
fi
printf "*** %s\n" "${file}"
done < <( find "${hostdir}"/{,usr/}{bin,sbin} -type f -exec file {} + 2>/dev/null \
|sed -r -e '/^([^:]+):.*\<ELF\>.*\<executable\>.*/!d' \
-e 's//\1/' \
)
return ${ret}
}
elf_needs_rpath() {
local file="${1}"
local hostdir="${2}"
local lib
while read lib; do
[ -e "${hostdir}/usr/lib/${lib}" ] && return 0
done < <( readelf -d "${file}" \
|sed -r -e '/^.* \(NEEDED\) .*Shared library: \[(.+)\]$/!d;' \
-e 's//\1/;' \
)
return 1
}
check_elf_has_rpath() {
local file="${1}"
local hostdir="${2}"
local rpath dir
while read rpath; do
for dir in ${rpath//:/ }; do
# Remove duplicate and trailing '/' for proper match
dir="$( sed -r -e 's:/+:/:g; s:/$::;' <<<"${dir}" )"
[ "${dir}" = "${hostdir}/usr/lib" ] && return 0
done
done < <( readelf -d "${file}" \
|sed -r -e '/.* \(R(UN)?PATH\) +Library r(un)?path: \[(.+)\]$/!d' \
-e 's//\3/;' \
)
return 1
}
main "${@}"

View File

@@ -0,0 +1,41 @@
#!/bin/sh
SYSROOT="${1}"
# Make sure we have enough version components
HDR_VER="${2}.0.0"
HDR_M="${HDR_VER%%.*}"
HDR_V="${HDR_VER#*.}"
HDR_m="${HDR_V%%.*}"
EXEC="$(mktemp -t check-headers.XXXXXX)"
# We do not want to account for the patch-level, since headers are
# not supposed to change for different patchlevels, so we mask it out.
# This only applies to kernels >= 3.0, but those are the only ones
# we actually care about; we treat all 2.6.x kernels equally.
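# For illustration (version hypothetical): for 4.9.6, LINUX_VERSION_CODE
# is 0x040906, i.e. (4<<16) + (9<<8) + 6; masking with ~0xFF yields
# 0x040900, which is then compared against KERNEL_VERSION(4,9,0).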
${HOSTCC} -imacros "${SYSROOT}/usr/include/linux/version.h" \
-x c -o "${EXEC}" - <<_EOF_
#include <stdio.h>
#include <stdlib.h>
int main(int argc __attribute__((unused)),
char** argv __attribute__((unused)))
{
if((LINUX_VERSION_CODE & ~0xFF)
!= KERNEL_VERSION(${HDR_M},${HDR_m},0))
{
printf("Incorrect selection of kernel headers: ");
printf("expected %d.%d.x, got %d.%d.x\n", ${HDR_M}, ${HDR_m},
((LINUX_VERSION_CODE>>16) & 0xFF),
((LINUX_VERSION_CODE>>8) & 0xFF));
return 1;
}
return 0;
}
_EOF_
"${EXEC}"
ret=${?}
rm -f "${EXEC}"
exit ${ret}

View File

@@ -0,0 +1,76 @@
#!/bin/sh
# This script registers the toolchain of a Buildroot project into the
# Eclipse plugin. To do so, it adds a new line for the Buildroot
# toolchain into the $HOME/.buildroot-eclipse.toolchains file, which
# the Eclipse Buildroot plugin reads to automatically discover the
# Buildroot toolchains available on the system.
#
# This script should typically not be called manually. Instead, one
# should enable the BR2_ECLIPSE_REGISTER configuration option, which
# will lead Buildroot to automatically call this script with the
# appropriate arguments.
#
# Usage:
# eclipse-register-toolchain project-directory toolchain-prefix architecture
#
# project-directory is the absolute path to the Buildroot project
# output directory (which contains the host/, target/, build/,
# images/, etc. subdirectories). It should be an absolute and
# canonical path.
#
# toolchain-prefix is the prefix of the cross-compilation tools, i.e
# 'arm-linux-' if the cross-compiler executable is 'arm-linux-gcc'.
#
# architecture is the lower-cased name of the architecture targeted
# by the Buildroot project.
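#
# Each line of ${HOME}/.buildroot-eclipse.toolchains has the form
# (values hypothetical):
#   /home/user/buildroot/output:arm-linux-:arm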
if test $# -ne 3; then
echo "Invalid number of arguments."
echo "Usage: $0 project-directory toolchain-prefix architecture"
exit 1
fi
project_directory=$1
toolchain_prefix=$2
architecture=$3
if test ! -d ${project_directory} ; then
echo "Non-existing project directory ${project_directory}"
exit 1
fi
if test ! -d ${project_directory}/host ; then
echo "Your project directory does not look like a Buildroot output"
exit 1
fi
if test ! -e ${project_directory}/host/usr/bin/${toolchain_prefix}gcc ; then
echo "Cannot find the cross-compiler in the project directory"
exit 1
fi
TOOLCHAIN_ECLIPSE_FILE=${HOME}/.buildroot-eclipse.toolchains
# First, we remove all lines from the ${TOOLCHAIN_ECLIPSE_FILE} that
# correspond to toolchains that no longer exist.
if test -f ${TOOLCHAIN_ECLIPSE_FILE} ; then
mv ${TOOLCHAIN_ECLIPSE_FILE} ${TOOLCHAIN_ECLIPSE_FILE}.tmp
cat ${TOOLCHAIN_ECLIPSE_FILE}.tmp | while read toolchain ; do
path=$(echo ${toolchain} | cut -f1 -d ':')
# Keep only lines corresponding to still-existing projects...
echo "Testing ${path} ..."
if ! test -d ${path} ; then
continue
fi
# ... and drop the line for the current project (it is re-added below)
if test ${path} = ${project_directory} ; then
continue
fi
echo ${toolchain} >> ${TOOLCHAIN_ECLIPSE_FILE}
done
rm ${TOOLCHAIN_ECLIPSE_FILE}.tmp
fi
# Add the toolchain
echo "${project_directory}:${toolchain_prefix}:${architecture}" >> ${TOOLCHAIN_ECLIPSE_FILE}

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# This script is used to generate a gconv-modules file that takes into
# account only the gconv modules installed by Buildroot. It receives
# on its standard input the original complete gconv-modules file from
# the toolchain, and as arguments the list of gconv modules that were
# actually installed, and writes on its standard output the new
# gconv-modules file.
# The format of gconv-modules is precisely documented in the
# file itself. It consists of two different directives:
# module FROMSET TOSET FILENAME COST
# alias ALIAS REALNAME
# and that's what this script parses and generates.
#
# There are two kinds of 'module' directives:
# - the first defines conversion of a charset to/from INTERNAL representation
# - the second defines conversion of a charset to/from another charset
# we handle each with slightly different code, since the second never has
# associated aliases.
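#
# For illustration, typical input directives look like (charset names
# as found in glibc, cost value illustrative):
#   alias   LATIN1//        ISO-8859-1//
#   module  ISO-8859-1//    INTERNAL        ISO8859-1       1
#   module  INTERNAL        ISO-8859-1//    ISO8859-1       1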
gawk -v files="${1}" '
$1 == "alias" {
aliases[$3] = aliases[$3] " " $2;
}
$1 == "module" && $2 != "INTERNAL" && $3 == "INTERNAL" {
file2internals[$4] = file2internals[$4] " " $2;
mod2cost[$2] = $5;
}
$1 == "module" && $2 != "INTERNAL" && $3 != "INTERNAL" {
file2cset[$4] = file2cset[$4] " " $2 ":" $3;
mod2cost[$2] = $5;
}
END {
nb_files = split(files, all_files);
for(f = 1; f <= nb_files; f++) {
file = all_files[f];
printf("# Modules and aliases for: %s\n", file);
nb_mods = split(file2internals[file], mods);
for(i = 1; i <= nb_mods; i++) {
nb_aliases = split(aliases[mods[i]], mod_aliases);
for(j = 1; j <= nb_aliases; j++) {
printf("alias\t%s\t%s\n", mod_aliases[j], mods[i]);
}
printf("module\t%s\t%s\t%s\t%d\n", mods[i], "INTERNAL", file, mod2cost[mods[i]]);
printf("module\t%s\t%s\t%s\t%d\n", "INTERNAL", mods[i], file, mod2cost[mods[i]]);
printf("\n" );
}
printf("%s", nb_mods != 0 ? "\n" : "");
nb_csets = split(file2cset[file], csets);
for(i = 1; i <= nb_csets; i++) {
split(csets[i], cs, ":");
printf("module\t%s\t%s\t%s\t%d\n", cs[1], cs[2], file, mod2cost[cs[1]]);
}
printf("%s", nb_csets != 0 ? "\n\n" : "");
}
}
'

View File

@@ -0,0 +1,47 @@
#!/bin/bash
# This is a script to find, and correct, a problem with old versions of
# configure that affect powerpc64 and powerpc64le.
# The issue causes configure to incorrectly determine that shared library
# support is not present in the linker. This causes the package to build a
# static library rather than a dynamic one and although the build will succeed,
# it may cause packages that link with the static library to fail due to
# undefined symbols.
# This script searches for files named 'configure' that appear to have this
# issue (by searching for a known bad pattern) and patches them.
set -e
if [ $# -ne 1 ]; then
echo "Usage: $0 <package build directory>"
exit 2
fi
srcdir="$1"
files=$(cd "$srcdir" && find . -name configure \
-exec grep -qF 'Generated by GNU Autoconf' {} \; \
-exec grep -qF 'ppc*-*linux*|powerpc*-*linux*)' {} \; -print)
# --ignore-whitespace is needed because some packages have included
# copies of configure scripts where tabs have been replaced with spaces.
for c in $files; do
patch --ignore-whitespace "$srcdir"/"$c" <<'EOF'
--- a/configure 2016-11-16 15:31:46.097447271 +1100
+++ b/configure 2008-07-21 12:17:23.000000000 +1000
@@ -4433,7 +4433,10 @@
x86_64-*linux*)
LD="${LD-ld} -m elf_x86_64"
;;
- ppc*-*linux*|powerpc*-*linux*)
+ powerpcle-*linux*)
+ LD="${LD-ld} -m elf64lppc"
+ ;;
+ powerpc-*linux*)
LD="${LD-ld} -m elf64ppc"
;;
s390*-*linux*)
EOF
done

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env python
import sys
import argparse
import getdeveloperlib
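# Illustrative invocations (file names hypothetical):
#   ./support/scripts/get-developers -p linux         # developers for the 'linux' package
#   ./support/scripts/get-developers 0001-foo.patch   # print a 'git send-email' command line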
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('patches', metavar='P', type=argparse.FileType('r'), nargs='*',
help='list of patches (use - to read patches from stdin)')
parser.add_argument('-a', dest='architecture', action='store',
help='find developers in charge of this architecture')
parser.add_argument('-p', dest='package', action='store',
help='find developers in charge of this package')
parser.add_argument('-c', dest='check', action='store_const',
const=True, help='list files not handled by any developer')
return parser.parse_args()
def __main__():
devs = getdeveloperlib.parse_developers()
if devs is None:
sys.exit(1)
args = parse_args()
# Check that only one action is given
action = 0
if args.architecture is not None:
action += 1
if args.package is not None:
action += 1
if args.check:
action += 1
if len(args.patches) != 0:
action += 1
if action > 1:
print("Cannot do more than one action")
return
if action == 0:
print("No action specified")
return
# Handle the check action
if args.check:
files = getdeveloperlib.check_developers(devs)
for f in files:
print(f)
# Handle the architecture action
if args.architecture is not None:
for dev in devs:
if args.architecture in dev.architectures:
print(dev.name)
return
# Handle the package action
if args.package is not None:
for dev in devs:
if args.package in dev.packages:
print(dev.name)
return
# Handle the patches action
if len(args.patches) != 0:
(files, infras) = getdeveloperlib.analyze_patches(args.patches)
matching_devs = set()
for dev in devs:
# See if we have developers matching by package name
for f in files:
if dev.hasfile(f):
matching_devs.add(dev.name)
# See if we have developers matching by package infra
for i in infras:
if i in dev.infras:
matching_devs.add(dev.name)
result = "--to buildroot@buildroot.org"
for dev in matching_devs:
result += " --cc \"%s\"" % dev
if result != "":
print("git send-email %s" % result)
__main__()

View File

@@ -0,0 +1,200 @@
import sys
import os
import re
import argparse
import glob
import subprocess
#
# Patch parsing functions
#
FIND_INFRA_IN_PATCH = re.compile("^\+\$\(eval \$\((host-)?([^-]*)-package\)\)$")
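# Matches added lines such as "+$(eval $(generic-package))" or
# "+$(eval $(host-cmake-package))"; group 2 captures the infra name
# ("generic", "cmake", ...).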
def analyze_patch(patch):
"""Parse one patch and return the list of files modified, added or
removed by the patch."""
files = set()
infras = set()
for line in patch:
# If the patch is adding a package, find which infra it is
m = FIND_INFRA_IN_PATCH.match(line)
if m:
infras.add(m.group(2))
if not line.startswith("+++ "):
continue
line = line.strip()
fname = line[line.find("/") + 1 : ].strip()
if fname == "dev/null":
continue
files.add(fname)
return (files, infras)
FIND_INFRA_IN_MK = re.compile("^\$\(eval \$\((host-)?([^-]*)-package\)\)$")
def fname_get_package_infra(fname):
"""Checks whether the file name passed as argument is a Buildroot .mk
file describing a package, and find the infrastructure it's using."""
if not fname.endswith(".mk"):
return None
if not os.path.exists(fname):
return None
with open(fname, "r") as f:
for l in f:
l = l.strip()
m = FIND_INFRA_IN_MK.match(l)
if m:
return m.group(2)
return None
def get_infras(files):
"""Search in the list of files for .mk files, and collect the package
infrastructures used by those .mk files."""
infras = set()
for fname in files:
infra = fname_get_package_infra(fname)
if infra:
infras.add(infra)
return infras
def analyze_patches(patches):
"""Parse a list of patches and returns the list of files modified,
added or removed by the patches, as well as the list of package
infrastructures used by those patches (if any)"""
allfiles = set()
allinfras = set()
for patch in patches:
(files, infras) = analyze_patch(patch)
allfiles = allfiles | files
allinfras = allinfras | infras
allinfras = allinfras | get_infras(allfiles)
return (allfiles, allinfras)
#
# DEVELOPERS file parsing functions
#
class Developer:
def __init__(self, name, files):
self.name = name
self.files = files
self.packages = parse_developer_packages(files)
self.architectures = parse_developer_architectures(files)
self.infras = parse_developer_infras(files)
def hasfile(self, f):
f = os.path.abspath(f)
for fs in self.files:
if f.startswith(fs):
return True
return False
def parse_developer_packages(fnames):
"""Given a list of file patterns, travel through the Buildroot source
tree to find which packages are implemented by those file
patterns, and return a list of those packages."""
packages = set()
for fname in fnames:
for root, dirs, files in os.walk(fname):
for f in files:
path = os.path.join(root, f)
if fname_get_package_infra(path):
pkg = os.path.splitext(f)[0]
packages.add(pkg)
return packages
def parse_arches_from_config_in(fname):
"""Given a path to an arch/Config.in.* file, parse it to get the list
of BR2_ARCH values for this architecture."""
arches = set()
with open(fname, "r") as f:
parsing_arches = False
for l in f:
l = l.strip()
if l == "config BR2_ARCH":
parsing_arches = True
continue
if parsing_arches:
m = re.match("^\s*default \"([^\"]*)\".*", l)
if m:
arches.add(m.group(1))
else:
parsing_arches = False
return arches
def parse_developer_architectures(fnames):
"""Given a list of file names, find the ones starting by
'arch/Config.in.', and use that to determine the architecture a
developer is working on."""
arches = set()
for fname in fnames:
if not re.match("^.*/arch/Config\.in\..*$", fname):
continue
arches = arches | parse_arches_from_config_in(fname)
return arches
def parse_developer_infras(fnames):
infras = set()
for fname in fnames:
m = re.match("^package/pkg-([^.]*).mk$", fname)
if m:
infras.add(m.group(1))
return infras
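# The DEVELOPERS file is a sequence of entries separated by blank lines;
# each entry is one "N:" line (the developer's name and e-mail) followed
# by "F:" lines (file patterns), and lines starting with '#' are
# comments. For illustration (entry hypothetical):
#   N: John Doe <john.doe@example.com>
#   F: package/foo/
#   F: arch/Config.in.arm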
def parse_developers(basepath=None):
"""Parse the DEVELOPERS file and return a list of Developer objects."""
developers = []
linen = 0
if basepath == None:
basepath = os.getcwd()
with open(os.path.join(basepath, "DEVELOPERS"), "r") as f:
files = []
name = None
for l in f:
l = l.strip()
if l.startswith("#"):
continue
elif l.startswith("N:"):
if name is not None or len(files) != 0:
print("Syntax error in DEVELOPERS file, line %d" % linen)
name = l[2:].strip()
elif l.startswith("F:"):
fname = l[2:].strip()
dev_files = glob.glob(os.path.join(basepath, fname))
if len(dev_files) == 0:
print("WARNING: '%s' doesn't match any file" % fname)
files += dev_files
elif l == "":
if not name:
continue
developers.append(Developer(name, files))
files = []
name = None
else:
print("Syntax error in DEVELOPERS file, line %d: '%s'" % (linen, l))
return None
linen += 1
# handle last developer
if name is not None:
developers.append(Developer(name, files))
return developers
def check_developers(developers, basepath=None):
"""Look at the list of files versioned in Buildroot, and returns the
list of files that are not handled by any developer"""
if basepath == None:
basepath = os.getcwd()
cmd = ["git", "--git-dir", os.path.join(basepath, ".git"), "ls-files"]
files = subprocess.check_output(cmd).strip().split("\n")
unhandled_files = []
for f in files:
handled = False
for d in developers:
if d.hasfile(os.path.join(basepath, f)):
handled = True
break
if not handled:
unhandled_files.append(f)
return unhandled_files

View File

@@ -0,0 +1,306 @@
#!/usr/bin/env python
# Copyright (C) 2011 by Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
# Copyright (C) 2013 by Yann E. MORIN <yann.morin.1998@free.fr>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# This script generates graphs of package build times, from the timing
# data generated by Buildroot in the $(O)/build-time.log file.
#
# Example usage:
#
# cat $(O)/build-time.log | ./support/scripts/graph-build-time --type=histogram --output=foobar.pdf
#
# Three graph types are available:
#
# * histogram, which creates a histogram of the build time for each
# package, decomposed by each step (extract, patch, configure,
# etc.). The order in which the packages are shown is
# configurable: by package name, by build order, or by duration
# order. See the --order option.
#
# * pie-packages, which creates a pie chart of the build time of
# each package (without decomposition in steps). Packages that
# contributed to less than 1% of the overall build time are all
# grouped together in an "Other" entry.
#
# * pie-steps, which creates a pie chart of the time spent globally
# on each step (extract, patch, configure, etc.)
#
# The default is to generate a histogram ordered by package name.
#
# Requirements:
#
# * matplotlib (python-matplotlib on Debian/Ubuntu systems)
# * numpy (python-numpy on Debian/Ubuntu systems)
# * argparse (by default in Python 2.7, requires python-argparse if
# Python 2.6 is used)
import sys
try:
import matplotlib as mpl
import numpy
except ImportError:
sys.stderr.write("You need python-matplotlib and python-numpy to generate build graphs\n")
exit(1)
# Use the Agg backend (which produces a PNG output, see
# http://matplotlib.org/faq/usage_faq.html#what-is-a-backend),
# otherwise an incorrect backend is used on some host machines.
# Note: matplotlib.use() must be called *before* matplotlib.pyplot.
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import csv
import argparse
steps = [ 'extract', 'patch', 'configure', 'build',
'install-target', 'install-staging', 'install-images',
'install-host']
default_colors = ['#e60004', '#009836', '#2e1d86', '#ffed00',
'#0068b5', '#f28e00', '#940084', '#97c000']
alternate_colors = ['#00e0e0', '#3f7f7f', '#ff0000', '#00c000',
'#0080ff', '#c000ff', '#00eeee', '#e0e000']
class Package:
def __init__(self, name):
self.name = name
self.steps_duration = {}
self.steps_start = {}
self.steps_end = {}
def add_step(self, step, state, time):
if state == "start":
self.steps_start[step] = time
else:
self.steps_end[step] = time
if step in self.steps_start and step in self.steps_end:
self.steps_duration[step] = self.steps_end[step] - self.steps_start[step]
def get_duration(self, step=None):
if step is None:
duration = 0
for step in list(self.steps_duration.keys()):
duration += self.steps_duration[step]
return duration
if step in self.steps_duration:
return self.steps_duration[step]
return 0
# Generate a histogram of the time spent in each step of each
# package.
def pkg_histogram(data, output, order="build"):
n_pkgs = len(data)
ind = numpy.arange(n_pkgs)
if order == "duration":
data = sorted(data, key=lambda p: p.get_duration(), reverse=True)
elif order == "name":
data = sorted(data, key=lambda p: p.name, reverse=False)
# Prepare the vals array, containing one entry for each step
vals = []
for step in steps:
val = []
for p in data:
val.append(p.get_duration(step))
vals.append(val)
bottom = [0] * n_pkgs
legenditems = []
plt.figure()
# Draw the bars, step by step
for i in range(0, len(vals)):
b = plt.bar(ind+0.1, vals[i], width=0.8, color=colors[i], bottom=bottom, linewidth=0.25)
legenditems.append(b[0])
bottom = [ bottom[j] + vals[i][j] for j in range(0, len(vals[i])) ]
# Draw the package names
plt.xticks(ind + .6, [ p.name for p in data ], rotation=-60, rotation_mode="anchor", fontsize=8, ha='left')
# Adjust size of graph depending on the number of packages
# Ensure a minimal size of twice the default
# Magic Numbers do Magic Layout!
ratio = max(((n_pkgs + 10) / 48, 2))
borders = 0.1 / ratio
sz = plt.gcf().get_figwidth()
plt.gcf().set_figwidth(sz * ratio)
# Adjust space at borders, add more space for the
# package names at the bottom
plt.gcf().subplots_adjust(bottom=0.2, left=borders, right=1-borders)
# Remove ticks in the graph for each package
axes = plt.gcf().gca()
for line in axes.get_xticklines():
line.set_markersize(0)
axes.set_ylabel('Time (seconds)')
# Reduce size of legend text
leg_prop = fm.FontProperties(size=6)
# Draw legend
plt.legend(legenditems, steps, prop=leg_prop)
if order == "name":
plt.title('Build time of packages\n')
elif order == "build":
plt.title('Build time of packages, by build order\n')
elif order == "duration":
plt.title('Build time of packages, by duration order\n')
# Save graph
plt.savefig(output)
# Generate a pie chart with the time spent building each package.
def pkg_pie_time_per_package(data, output):
# Compute total build duration
total = 0
for p in data:
total += p.get_duration()
# Build the list of labels and values, and filter the packages
# that account for less than 1% of the build time.
labels = []
values = []
other_value = 0
for p in data:
if p.get_duration() < (total * 0.01):
other_value += p.get_duration()
else:
labels.append(p.name)
values.append(p.get_duration())
labels.append('Other')
values.append(other_value)
plt.figure()
# Draw pie graph
patches, texts, autotexts = plt.pie(values, labels=labels,
autopct='%1.1f%%', shadow=True,
colors=colors)
# Reduce text size
proptease = fm.FontProperties()
proptease.set_size('xx-small')
plt.setp(autotexts, fontproperties=proptease)
plt.setp(texts, fontproperties=proptease)
plt.title('Build time per package')
plt.savefig(output)
# Generate a pie chart with a portion for the overall time spent in
# each step for all packages.
def pkg_pie_time_per_step(data, output):
steps_values = []
for step in steps:
val = 0
for p in data:
val += p.get_duration(step)
steps_values.append(val)
plt.figure()
# Draw pie graph
patches, texts, autotexts = plt.pie(steps_values, labels=steps,
autopct='%1.1f%%', shadow=True,
colors=colors)
# Reduce text size
proptease = fm.FontProperties()
proptease.set_size('xx-small')
plt.setp(autotexts, fontproperties=proptease)
plt.setp(texts, fontproperties=proptease)
plt.title('Build time per step')
plt.savefig(output)
# Parses the csv file passed on standard input and returns a list of
# Package objects, filled with the duration of each step and the total
# duration of the package.
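# Each input line is colon-separated, "<timestamp>:<state>:<step>:<package>",
# where state is "start" or "end". For illustration (values hypothetical):
#   1499345263:start:configure:busybox
#   1499345275:end:configure:busybox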
def read_data(input_file):
if input_file is None:
input_file = sys.stdin
else:
input_file = open(input_file)
reader = csv.reader(input_file, delimiter=':')
pkgs = []
# Auxiliary function to find a package by name in the list.
def getpkg(name):
for p in pkgs:
if p.name == name:
return p
return None
for row in reader:
time = int(row[0].strip())
state = row[1].strip()
step = row[2].strip()
pkg = row[3].strip()
p = getpkg(pkg)
if p is None:
p = Package(pkg)
pkgs.append(p)
p.add_step(step, state, time)
return pkgs
parser = argparse.ArgumentParser(description='Draw build time graphs')
parser.add_argument("--type", '-t', metavar="GRAPH_TYPE",
help="Type of graph (histogram, pie-packages, pie-steps)")
parser.add_argument("--order", '-O', metavar="GRAPH_ORDER",
help="Ordering of packages: build or duration (for histogram only)")
parser.add_argument("--alternate-colors", '-c', action="store_true",
help="Use alternate colour-scheme")
parser.add_argument("--input", '-i', metavar="INPUT",
help="Input file (usually $(O)/build/build-time.log)")
parser.add_argument("--output", '-o', metavar="OUTPUT", required=True,
help="Output file (.pdf or .png extension)")
args = parser.parse_args()
d = read_data(args.input)
if args.alternate_colors:
colors = alternate_colors
else:
colors = default_colors
if args.type == "histogram" or args.type is None:
if args.order == "build" or args.order == "duration" or args.order == "name":
pkg_histogram(d, args.output, args.order)
elif args.order is None:
pkg_histogram(d, args.output, "name")
else:
sys.stderr.write("Unknown ordering: %s\n" % args.order)
exit(1)
elif args.type == "pie-packages":
pkg_pie_time_per_package(d, args.output)
elif args.type == "pie-steps":
pkg_pie_time_per_step(d, args.output)
else:
sys.stderr.write("Unknown type: %s\n" % args.type)
exit(1)

View File

@@ -0,0 +1,397 @@
#!/usr/bin/python
# Usage (the graphviz package must be installed in your distribution)
# ./support/scripts/graph-depends [-p package-name] > test.dot
# dot -Tpdf test.dot -o test.pdf
#
# With no arguments, graph-depends will draw a complete graph of
# dependencies for the current configuration.
# If '-p <package-name>' is specified, graph-depends will draw a graph
# of dependencies for the given package name.
# If '-d <depth>' is specified, graph-depends will limit the depth of
# the dependency graph to 'depth' levels.
#
# Limitations
#
# * Some packages have dependencies that depend on the Buildroot
# configuration. For example, many packages have a dependency on
# openssl if openssl has been enabled. This tool will graph the
# dependencies as they are with the current Buildroot
# configuration.
#
# Copyright (C) 2010-2013 Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
import sys
import subprocess
import argparse
from fnmatch import fnmatch
import brpkgutil
# Modes of operation:
MODE_FULL = 1 # draw full dependency graph for all selected packages
MODE_PKG = 2 # draw dependency graph for a given package
mode = 0
# Limit drawing the dependency graph to this depth. 0 means 'no limit'.
max_depth = 0
# Whether to draw the transitive dependencies
transitive = True
parser = argparse.ArgumentParser(description="Graph packages dependencies")
parser.add_argument("--check-only", "-C", dest="check_only", action="store_true", default=False,
help="Only do the dependency checks (circular deps...)")
parser.add_argument("--outfile", "-o", metavar="OUT_FILE", dest="outfile",
help="File in which to generate the dot representation")
parser.add_argument("--package", '-p', metavar="PACKAGE",
help="Graph the dependencies of PACKAGE")
parser.add_argument("--depth", '-d', metavar="DEPTH", dest="depth", type=int, default=0,
help="Limit the dependency graph to DEPTH levels; 0 means no limit.")
parser.add_argument("--stop-on", "-s", metavar="PACKAGE", dest="stop_list", action="append",
help="Do not graph past this package (can be given multiple times)." \
+ " Can be a package name or a glob, " \
+ " 'virtual' to stop on virtual packages, or " \
+ "'host' to stop on host packages.")
parser.add_argument("--exclude", "-x", metavar="PACKAGE", dest="exclude_list", action="append",
help="Like --stop-on, but do not add PACKAGE to the graph.")
parser.add_argument("--colours", "-c", metavar="COLOR_LIST", dest="colours",
default="lightblue,grey,gainsboro",
help="Comma-separated list of the three colours to use" \
+ " to draw the top-level package, the target" \
+ " packages, and the host packages, in this order." \
+ " Defaults to: 'lightblue,grey,gainsboro'")
parser.add_argument("--transitive", dest="transitive", action='store_true',
default=False)
parser.add_argument("--no-transitive", dest="transitive", action='store_false',
help="Draw (do not draw) transitive dependencies")
parser.add_argument("--direct", dest="direct", action='store_true', default=True,
help="Draw direct dependencies (the default)")
parser.add_argument("--reverse", dest="direct", action='store_false',
help="Draw reverse dependencies")
args = parser.parse_args()
check_only = args.check_only
if args.outfile is None:
outfile = sys.stdout
else:
if check_only:
sys.stderr.write("don't specify outfile and check-only at the same time\n")
sys.exit(1)
outfile = open(args.outfile, "w")
if args.package is None:
mode = MODE_FULL
else:
mode = MODE_PKG
rootpkg = args.package
max_depth = args.depth
if args.stop_list is None:
stop_list = []
else:
stop_list = args.stop_list
if args.exclude_list is None:
exclude_list = []
else:
exclude_list = args.exclude_list
transitive = args.transitive
if args.direct:
get_depends_func = brpkgutil.get_depends
arrow_dir = "forward"
else:
if mode == MODE_FULL:
sys.stderr.write("--reverse needs a package\n")
sys.exit(1)
get_depends_func = brpkgutil.get_rdepends
arrow_dir = "back"
# Get the colours: we need exactly three colours,
# so there is no need to split more than 4 times
# We'll let 'dot' validate the colours...
colours = args.colours.split(',',4)
if len(colours) != 3:
sys.stderr.write("Error: incorrect colour list '%s'\n" % args.colours)
sys.exit(1)
root_colour = colours[0]
target_colour = colours[1]
host_colour = colours[2]
allpkgs = []
# Execute the "make show-targets" command to get the list of the main
# Buildroot PACKAGES and return it formatted as a Python list. This
# list is used as the starting point for full dependency graphs
def get_targets():
sys.stderr.write("Getting targets\n")
cmd = ["make", "-s", "--no-print-directory", "show-targets"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
output = p.communicate()[0].strip()
if p.returncode != 0:
return None
if output == '':
return []
return output.split(' ')
# Recursive function that builds the tree of dependencies for a given
# list of packages. The dependencies are built in a list called
# 'dependencies', which contains tuples of the form (pkg1 ->
# pkg2_on_which_pkg1_depends, pkg3 -> pkg4_on_which_pkg3_depends) and
# the function finally returns this list.
def get_all_depends(pkgs):
dependencies = []
# Filter the packages for which we already have the dependencies
filtered_pkgs = []
for pkg in pkgs:
if pkg in allpkgs:
continue
filtered_pkgs.append(pkg)
allpkgs.append(pkg)
if len(filtered_pkgs) == 0:
return []
depends = get_depends_func(filtered_pkgs)
deps = set()
for pkg in filtered_pkgs:
pkg_deps = depends[pkg]
# This package has no dependency.
if pkg_deps == []:
continue
# Add dependencies to the list of dependencies
for dep in pkg_deps:
dependencies.append((pkg, dep))
deps.add(dep)
if len(deps) != 0:
newdeps = get_all_depends(deps)
if newdeps is not None:
dependencies += newdeps
return dependencies
# The Graphviz "dot" utility doesn't like dashes in node names. So for
# node names, we strip all dashes.
def pkg_node_name(pkg):
return pkg.replace("-","")
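# For example, "host-gcc" becomes "hostgcc".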
TARGET_EXCEPTIONS = [
"target-finalize",
"target-post-image",
]
# In full mode, start with the result of get_targets() to get the main
# targets and then use get_all_depends() for all targets
if mode == MODE_FULL:
targets = get_targets()
dependencies = []
allpkgs.append('all')
filtered_targets = []
for tg in targets:
# Skip uninteresting targets
if tg in TARGET_EXCEPTIONS:
continue
dependencies.append(('all', tg))
filtered_targets.append(tg)
deps = get_all_depends(filtered_targets)
if deps is not None:
dependencies += deps
rootpkg = 'all'
# In pkg mode, start directly with get_all_depends() on the requested
# package
elif mode == MODE_PKG:
dependencies = get_all_depends([rootpkg])
# Make the dependencies a dictionary { 'pkg':[dep1, dep2, ...] }
dict_deps = {}
for dep in dependencies:
if dep[0] not in dict_deps:
dict_deps[dep[0]] = []
dict_deps[dep[0]].append(dep[1])
# Basic cache for the results of the is_dep() function, in order to
# optimize the execution time. The cache is a dict of dict of boolean
# values. The key to the primary dict is "pkg", and the key of the
# sub-dicts is "pkg2".
is_dep_cache = {}
def is_dep_cache_insert(pkg, pkg2, val):
try:
is_dep_cache[pkg].update({pkg2: val})
except KeyError:
is_dep_cache[pkg] = {pkg2: val}
# Retrieves from the cache whether pkg is a (direct or transitive)
# dependency of pkg2.
# Note: raises a KeyError exception if the dependency is not known.
def is_dep_cache_lookup(pkg, pkg2):
return is_dep_cache[pkg][pkg2]
# This function returns True if pkg is a dependency (direct or
# transitive) of pkg2, dependencies being listed in the deps
# dictionary. Returns False otherwise.
# This is the un-cached version.
def is_dep_uncached(pkg,pkg2,deps):
try:
for p in deps[pkg2]:
if pkg == p:
return True
if is_dep(pkg,p,deps):
return True
except KeyError:
pass
return False
# See is_dep_uncached() above; this is the cached version.
def is_dep(pkg,pkg2,deps):
try:
return is_dep_cache_lookup(pkg, pkg2)
except KeyError:
val = is_dep_uncached(pkg, pkg2, deps)
is_dep_cache_insert(pkg, pkg2, val)
return val
# This function eliminates transitive dependencies; for example, given
# these dependency chains: A->{B,C} and B->{C}, the A->{C} dependency is
# already covered by B->{C}, so C is a transitive dependency of A, via B.
# The function does:
# - for each dependency d[i] of the package pkg
# - if d[i] is a dependency of any of the other dependencies d[j]
# - do not keep d[i]
# - otherwise keep d[i]
def remove_transitive_deps(pkg,deps):
d = deps[pkg]
new_d = []
for i in range(len(d)):
keep_me = True
for j in range(len(d)):
if j==i:
continue
if is_dep(d[i],d[j],deps):
keep_me = False
if keep_me:
new_d.append(d[i])
return new_d
# This function removes the dependency on some 'mandatory' package, like the
# 'toolchain' package, or the 'skeleton' package
def remove_mandatory_deps(pkg,deps):
return [p for p in deps[pkg] if p not in ['toolchain', 'skeleton']]
# This function will check that there is no loop in the dependency chain
# As a side effect, it builds up the dependency cache.
def check_circular_deps(deps):
def recurse(pkg):
if not pkg in list(deps.keys()):
return
if pkg in not_loop:
return
not_loop.append(pkg)
chain.append(pkg)
for p in deps[pkg]:
if p in chain:
sys.stderr.write("\nRecursion detected for : %s\n" % (p))
while True:
_p = chain.pop()
sys.stderr.write("which is a dependency of: %s\n" % (_p))
if p == _p:
sys.exit(1)
recurse(p)
chain.pop()
not_loop = []
chain = []
for pkg in list(deps.keys()):
recurse(pkg)
# This function trims down the dependency list of all packages.
# It applies in sequence all the dependency-elimination methods.
def remove_extra_deps(deps):
for pkg in list(deps.keys()):
if not pkg == 'all':
deps[pkg] = remove_mandatory_deps(pkg,deps)
for pkg in list(deps.keys()):
if not transitive or pkg == 'all':
deps[pkg] = remove_transitive_deps(pkg,deps)
return deps
check_circular_deps(dict_deps)
if check_only:
sys.exit(0)
dict_deps = remove_extra_deps(dict_deps)
dict_version = brpkgutil.get_version([pkg for pkg in allpkgs
if pkg != "all" and not pkg.startswith("root")])
# Print the attributes of a node: label and fill-color
def print_attrs(pkg):
name = pkg_node_name(pkg)
if pkg == 'all':
label = 'ALL'
else:
label = pkg
if pkg == 'all' or (mode == MODE_PKG and pkg == rootpkg):
color = root_colour
else:
if pkg.startswith('host') \
or pkg.startswith('toolchain') \
or pkg.startswith('rootfs'):
color = host_colour
else:
color = target_colour
version = dict_version.get(pkg)
if version == "virtual":
outfile.write("%s [label = <<I>%s</I>>]\n" % (name, label))
else:
outfile.write("%s [label = \"%s\"]\n" % (name, label))
outfile.write("%s [color=%s,style=filled]\n" % (name, color))
# Print the dependency graph of a package
def print_pkg_deps(depth, pkg):
if pkg in done_deps:
return
done_deps.append(pkg)
print_attrs(pkg)
if pkg not in dict_deps:
return
for p in stop_list:
if fnmatch(pkg, p):
return
if dict_version.get(pkg) == "virtual" and "virtual" in stop_list:
return
if pkg.startswith("host-") and "host" in stop_list:
return
if max_depth == 0 or depth < max_depth:
for d in dict_deps[pkg]:
if dict_version.get(d) == "virtual" \
and "virtual" in exclude_list:
continue
if d.startswith("host-") \
and "host" in exclude_list:
continue
add = True
for p in exclude_list:
if fnmatch(d,p):
add = False
break
if add:
outfile.write("%s -> %s [dir=%s]\n" % (pkg_node_name(pkg), pkg_node_name(d), arrow_dir))
print_pkg_deps(depth+1, d)
# Start printing the graph data
outfile.write("digraph G {\n")
done_deps = []
print_pkg_deps(0, rootpkg)
outfile.write("}\n")

View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Try to hardlink a file into a directory, fallback to copy on failure.
#
# Hardlink-or-copy the source file in the first argument into the
# destination directory in the second argument, using the basename in
# the third argument as basename for the destination file. If the third
# argument is missing, use the basename of the source file as basename
# for the destination file.
#
# In either case, remove the destination prior to doing the
# hardlink-or-copy.
#
# Note that this is NOT an atomic operation.
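#
# Example (paths hypothetical):
#   hardlink-or-copy output/images/zImage /tftpboot kernel.img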
set -e
main() {
local src_file="${1}"
local dst_dir="${2}"
local dst_file="${3}"
if [ -n "${dst_file}" ]; then
dst_file="${dst_dir}/${dst_file}"
else
dst_file="${dst_dir}/${src_file##*/}"
fi
mkdir -p "${dst_dir}"
rm -f "${dst_file}"
ln -f "${src_file}" "${dst_file}" 2>/dev/null \
|| cp -f "${src_file}" "${dst_file}"
}
main "${@}"

View File

@@ -0,0 +1,45 @@
#!/bin/sh
# Generates a small Makefile used in the root of the output
# directory, to allow make to be started from there.
# The Makefile also allows for more convenient building of external modules
# Usage
# $1 - Kernel src directory
# $2 - Output directory
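#
# Example (paths hypothetical):
#   mkmakefile /path/to/kernel-src /path/to/output
# after which 'make' can be invoked directly from /path/to/output.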
test ! -r $2/Makefile -o -O $2/Makefile || exit 0
# Only overwrite automatically generated Makefiles
# (so we do not overwrite the buildroot Makefile)
if test -e $2/Makefile && ! grep -q Automatically $2/Makefile
then
exit 0
fi
echo " GEN $2/Makefile"
cat << EOF > $2/Makefile
# Automatically generated by $0: don't edit
lastword = \$(word \$(words \$(1)),\$(1))
makedir := \$(dir \$(call lastword,\$(MAKEFILE_LIST)))
MAKEARGS := -C $1
MAKEARGS += O=\$(if \$(patsubst /%,,\$(makedir)),\$(CURDIR)/)\$(patsubst %/,%,\$(makedir))
MAKEFLAGS += --no-print-directory
.PHONY: _all \$(MAKECMDGOALS)
all := \$(filter-out Makefile,\$(MAKECMDGOALS))
_all:
umask 0022 && \$(MAKE) \$(MAKEARGS) \$(all)
Makefile:;
\$(all): _all
@:
%/: _all
@:
EOF

View File

@@ -0,0 +1,434 @@
#!/usr/bin/env bash
set -e
myname="${0##*/}"
#----------------------------------------------------------------------------
# Configurable items
MIN_UID=1000
MAX_UID=1999
MIN_GID=1000
MAX_GID=1999
# No more is configurable below this point
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
error() {
local fmt="${1}"
shift
printf "%s: " "${myname}" >&2
printf "${fmt}" "${@}" >&2
}
fail() {
error "$@"
exit 1
}
#----------------------------------------------------------------------------
if [ ${#} -ne 2 ]; then
fail "usage: %s USERS_TABLE TARGET_DIR\n"
fi
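# Each non-comment line of USERS_TABLE is expected to provide nine
# fields:
#   username uid group gid passwd home shell groups comment
# For illustration (values hypothetical; -1 requests automatic uid/gid
# allocation):
#   foo -1 foo -1 - /home/foo /bin/sh - "Foo daemon"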
USERS_TABLE="${1}"
TARGET_DIR="${2}"
shift 2
PASSWD="${TARGET_DIR}/etc/passwd"
SHADOW="${TARGET_DIR}/etc/shadow"
GROUP="${TARGET_DIR}/etc/group"
# /etc/gshadow is not part of the standard skeleton, so not everybody
# will have it, but some may have it, and its content must be in sync
# with /etc/group, so any use of gshadow must be conditional.
GSHADOW="${TARGET_DIR}/etc/gshadow"
# We can't simply source ${BR2_CONFIG} as it may contain constructs
# such as:
# BR2_DEFCONFIG="$(CONFIG_DIR)/defconfig"
# which when sourced from a shell script will eventually try to execute
# a command named 'CONFIG_DIR', which is plain wrong on virtually every
# system out there.
# So, we have to scan that file instead. Sigh... :-(
PASSWD_METHOD="$( sed -r -e '/^BR2_TARGET_GENERIC_PASSWD_METHOD="(.*)"$/!d;' \
-e 's//\1/;' \
"${BR2_CONFIG}" \
)"
#----------------------------------------------------------------------------
get_uid() {
local username="${1}"
awk -F: -v username="${username}" \
'$1 == username { printf( "%d\n", $3 ); }' "${PASSWD}"
}
#----------------------------------------------------------------------------
get_ugid() {
local username="${1}"
awk -F: -v username="${username}" \
'$1 == username { printf( "%d\n", $4 ); }' "${PASSWD}"
}
#----------------------------------------------------------------------------
get_gid() {
local group="${1}"
awk -F: -v group="${group}" \
'$1 == group { printf( "%d\n", $3 ); }' "${GROUP}"
}
#----------------------------------------------------------------------------
get_username() {
local uid="${1}"
awk -F: -v uid="${uid}" \
'$3 == uid { printf( "%s\n", $1 ); }' "${PASSWD}"
}
#----------------------------------------------------------------------------
get_group() {
local gid="${1}"
awk -F: -v gid="${gid}" \
'$3 == gid { printf( "%s\n", $1 ); }' "${GROUP}"
}
#----------------------------------------------------------------------------
get_ugroup() {
local username="${1}"
local ugid
ugid="$( get_ugid "${username}" )"
if [ -n "${ugid}" ]; then
get_group "${ugid}"
fi
}
#----------------------------------------------------------------------------
# Sanity-check the new user/group:
# - check the gid is not already used for another group
# - check the group does not already exist with another gid
# - check the user does not already exist with another gid
# - check the uid is not already used for another user
# - check the user does not already exist with another uid
# - check the user does not already exist in another group
check_user_validity() {
local username="${1}"
local uid="${2}"
local group="${3}"
local gid="${4}"
local _uid _ugid _gid _username _group _ugroup
_group="$( get_group "${gid}" )"
_gid="$( get_gid "${group}" )"
_ugid="$( get_ugid "${username}" )"
_username="$( get_username "${uid}" )"
_uid="$( get_uid "${username}" )"
_ugroup="$( get_ugroup "${username}" )"
if [ "${username}" = "root" ]; then
fail "invalid username '%s\n'" "${username}"
fi
if [ ${gid} -lt -1 -o ${gid} -eq 0 ]; then
fail "invalid gid '%d' for '%s'\n" ${gid} "${username}"
elif [ ${gid} -ne -1 ]; then
# check the gid is not already used for another group
if [ -n "${_group}" -a "${_group}" != "${group}" ]; then
fail "gid '%d' for '%s' is already used by group '%s'\n" \
${gid} "${username}" "${_group}"
fi
# check the group does not already exist with another gid
# Need to split the check in two, otherwise '[' complains it
# is missing arguments when _gid is empty
if [ -n "${_gid}" ] && [ ${_gid} -ne ${gid} ]; then
fail "group '%s' for '%s' already exists with gid '%d' (wants '%d')\n" \
"${group}" "${username}" ${_gid} ${gid}
fi
# check the user does not already exist with another gid
# Need to split the check in two, otherwise '[' complains it
# is missing arguments when _ugid is empty
if [ -n "${_ugid}" ] && [ ${_ugid} -ne ${gid} ]; then
fail "user '%s' already exists with gid '%d' (wants '%d')\n" \
"${username}" ${_ugid} ${gid}
fi
fi
if [ ${uid} -lt -1 -o ${uid} -eq 0 ]; then
fail "invalid uid '%d' for '%s'\n" ${uid} "${username}"
elif [ ${uid} -ne -1 ]; then
# check the uid is not already used for another user
if [ -n "${_username}" -a "${_username}" != "${username}" ]; then
fail "uid '%d' for '%s' already used by user '%s'\n" \
${uid} "${username}" "${_username}"
fi
# check the user does not already exist with another uid
# Need to split the check in two, otherwise '[' complains it
# is missing arguments when _uid is empty
if [ -n "${_uid}" ] && [ ${_uid} -ne ${uid} ]; then
fail "user '%s' already exists with uid '%d' (wants '%d')\n" \
"${username}" ${_uid} ${uid}
fi
fi
# check the user does not already exist in another group
if [ -n "${_ugroup}" -a "${_ugroup}" != "${group}" ]; then
fail "user '%s' already exists with group '%s' (wants '%s')\n" \
"${username}" "${_ugroup}" "${group}"
fi
return 0
}
#----------------------------------------------------------------------------
# Generate a unique GID for given group. If the group already exists,
# then simply report its current GID. Otherwise, generate the lowest GID
# that is:
# - not 0
# - within [MIN_GID..MAX_GID]
# - not already used by a group
generate_gid() {
local group="${1}"
local gid
gid="$( get_gid "${group}" )"
if [ -z "${gid}" ]; then
for(( gid=MIN_GID; gid<=MAX_GID; gid++ )); do
if [ -z "$( get_group "${gid}" )" ]; then
break
fi
done
if [ ${gid} -gt ${MAX_GID} ]; then
fail "can not allocate a GID for group '%s'\n" "${group}"
fi
fi
printf "%d\n" "${gid}"
}
#----------------------------------------------------------------------------
# Add a group; if it already exists, remove it first
add_one_group() {
local group="${1}"
local gid="${2}"
local _f
# Generate a new GID if needed
if [ ${gid} -eq -1 ]; then
gid="$( generate_gid "${group}" )"
fi
# Remove any previous instance of this group, and re-add the new one
sed -i --follow-symlinks -e '/^'"${group}"':.*/d;' "${GROUP}"
printf "%s:x:%d:\n" "${group}" "${gid}" >>"${GROUP}"
# Ditto for /etc/gshadow if it exists
if [ -f "${GSHADOW}" ]; then
sed -i --follow-symlinks -e '/^'"${group}"':.*/d;' "${GSHADOW}"
printf "%s:*::\n" "${group}" >>"${GSHADOW}"
fi
}
#----------------------------------------------------------------------------
# Generate a unique UID for given username. If the username already exists,
# then simply report its current UID. Otherwise, generate the lowest UID
# that is:
# - not 0
# - within [MIN_UID..MAX_UID]
# - not already used by a user
generate_uid() {
local username="${1}"
local uid
uid="$( get_uid "${username}" )"
if [ -z "${uid}" ]; then
for(( uid=MIN_UID; uid<=MAX_UID; uid++ )); do
if [ -z "$( get_username "${uid}" )" ]; then
break
fi
done
if [ ${uid} -gt ${MAX_UID} ]; then
fail "can not allocate a UID for user '%s'\n" "${username}"
fi
fi
printf "%d\n" "${uid}"
}
#----------------------------------------------------------------------------
# Add given user to given group, if not already the case
add_user_to_group() {
local username="${1}"
local group="${2}"
local _f
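# The first sed expression below removes the user from the member list if
# it is already there, the second unconditionally appends it, and the last
# two expressions squeeze out the stray commas this can leave behind,
# e.g. "audio:x:29:alice,,bob" -> "audio:x:29:alice,bob".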
for _f in "${GROUP}" "${GSHADOW}"; do
[ -f "${_f}" ] || continue
sed -r -i --follow-symlinks \
-e 's/^('"${group}"':.*:)(([^:]+,)?)'"${username}"'(,[^:]+*)?$/\1\2\4/;' \
-e 's/^('"${group}"':.*)$/\1,'"${username}"'/;' \
-e 's/,+/,/' \
-e 's/:,/:/' \
"${_f}"
done
}
#----------------------------------------------------------------------------
# Encode a password
encode_password() {
local passwd="${1}"
mkpasswd -m "${PASSWD_METHOD}" "${passwd}"
}
#----------------------------------------------------------------------------
# Add a user; if it already exists, remove it first
add_one_user() {
local username="${1}"
local uid="${2}"
local group="${3}"
local gid="${4}"
local passwd="${5}"
local home="${6}"
local shell="${7}"
local groups="${8}"
local comment="${9}"
local _f _group _home _shell _gid _passwd
# First, sanity-check the user
check_user_validity "${username}" "${uid}" "${group}" "${gid}"
# Generate a new UID if needed
if [ ${uid} -eq -1 ]; then
uid="$( generate_uid "${username}" )"
fi
# Remove any previous instance of this user
for _f in "${PASSWD}" "${SHADOW}"; do
sed -r -i --follow-symlinks -e '/^'"${username}"':.*/d;' "${_f}"
done
_gid="$( get_gid "${group}" )"
_shell="${shell}"
if [ "${shell}" = "-" ]; then
_shell="/bin/false"
fi
case "${home}" in
-) _home="/";;
/) fail "home can not explicitly be '/'\n";;
/*) _home="${home}";;
*) fail "home must be an absolute path\n";;
esac
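# The password field in the users table follows the convention handled by
# the case below: '-' means no password, '=pass' means 'pass' gets encoded
# with mkpasswd, '!=pass' additionally prefixes the result with '!' so the
# account is locked, and anything else is used as a pre-encoded password.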
case "${passwd}" in
-)
_passwd=""
;;
!=*)
_passwd='!'"$( encode_password "${passwd#!=}" )"
;;
=*)
_passwd="$( encode_password "${passwd#=}" )"
;;
*)
_passwd="${passwd}"
;;
esac
printf "%s:x:%d:%d:%s:%s:%s\n" \
"${username}" "${uid}" "${_gid}" \
"${comment}" "${_home}" "${_shell}" \
>>"${PASSWD}"
printf "%s:%s:::::::\n" \
"${username}" "${_passwd}" \
>>"${SHADOW}"
# Add the user to its additional groups
if [ "${groups}" != "-" ]; then
for _group in ${groups//,/ }; do
add_user_to_group "${username}" "${_group}"
done
fi
# If the user has a home, chown it
# (Note: stdout goes to the fakeroot-script)
if [ "${home}" != "-" ]; then
mkdir -p "${TARGET_DIR}/${home}"
printf "chown -h -R %d:%d '%s'\n" "${uid}" "${_gid}" "${TARGET_DIR}/${home}"
fi
}
#----------------------------------------------------------------------------
main() {
local username uid group gid passwd home shell groups comment
local line
local -a ENTRIES
# Some sanity checks
if [ ${MIN_UID} -le 0 ]; then
fail "MIN_UID must be >0 (currently %d)\n" ${MIN_UID}
fi
if [ ${MIN_GID} -le 0 ]; then
fail "MIN_GID must be >0 (currently %d)\n" ${MIN_GID}
fi
# Read the whole file into memory, excluding empty lines and comments
while read line; do
ENTRIES+=( "${line}" )
done < <( sed -r -e 's/#.*//; /^[[:space:]]*$/d;' "${USERS_TABLE}" )
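# Each remaining entry has nine whitespace-separated fields:
#   username uid group gid password home shell groups comment
# e.g.: foo -1 libfoo -1 !=blabla /home/foo /bin/sh alpha,bravo Foo daemon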
# We first create groups whose gid is not -1, and then groups whose gid
# is -1 (automatic), so that if a group is defined both with a specified
# gid and an automatic gid, the specified gid is used rather than a
# different automatic gid being computed.
# First, create all the main groups whose gid is *not* automatic
for line in "${ENTRIES[@]}"; do
read username uid group gid passwd home shell groups comment <<<"${line}"
[ ${gid} -ge 0 ] || continue # Automatic gid
add_one_group "${group}" "${gid}"
done
# Then, create all the main groups whose gid *is* automatic
for line in "${ENTRIES[@]}"; do
read username uid group gid passwd home shell groups comment <<<"${line}"
[ ${gid} -eq -1 ] || continue # Non-automatic gid
add_one_group "${group}" "${gid}"
done
# Then, create all the additional groups
# If any additional group is already a main group, we should use
# the gid of that main group; otherwise, we can use any gid
for line in "${ENTRIES[@]}"; do
read username uid group gid passwd home shell groups comment <<<"${line}"
if [ "${groups}" != "-" ]; then
for g in ${groups//,/ }; do
add_one_group "${g}" -1
done
fi
done
# When adding users, we proceed as for groups, in case two packages
# create the same user, one with an automatic uid and the other with a
# specified uid: this ensures the specified uid is used rather than an
# incompatible uid being generated.
# Now, add users whose uid is *not* automatic
for line in "${ENTRIES[@]}"; do
read username uid group gid passwd home shell groups comment <<<"${line}"
[ "${username}" != "-" ] || continue # Magic string to skip user creation
[ ${uid} -ge 0 ] || continue # Automatic uid
add_one_user "${username}" "${uid}" "${group}" "${gid}" "${passwd}" \
"${home}" "${shell}" "${groups}" "${comment}"
done
# Finally, add users whose uid *is* automatic
for line in "${ENTRIES[@]}"; do
read username uid group gid passwd home shell groups comment <<<"${line}"
[ "${username}" != "-" ] || continue # Magic string to skip user creation
[ ${uid} -eq -1 ] || continue # Non-automatic uid
add_one_user "${username}" "${uid}" "${group}" "${gid}" "${passwd}" \
"${home}" "${shell}" "${groups}" "${comment}"
done
}
#----------------------------------------------------------------------------
main "${@}"

View File

@@ -0,0 +1,410 @@
#!/usr/bin/env bash
# Copyright (C) 2009 by Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# This script generates an HTML file that contains a report about all
# Buildroot packages, their usage of the different package
# infrastructures and possible cleanup actions.
#
# Run the script from the Buildroot toplevel directory:
#
# ./support/scripts/pkg-stats > /tmp/pkg.html
#
echo "<head>
<style type=\"text/css\">
table {
width: 100%;
}
td {
border: 1px solid black;
}
td.centered {
text-align: center;
}
td.wrong {
background: #ff9a69;
}
td.correct {
background: #d2ffc4;
}
td.nopatches {
background: #d2ffc4;
}
td.somepatches {
background: #ffd870;
}
td.lotsofpatches {
background: #ff9a69;
}
</style>
<title>Statistics of Buildroot packages</title>
</head>
<body>
<a href=\"#results\">Results</a><br/>
<table>
<tr>
<td>Id</td>
<td>Package</td>
<td class=\"centered\">Patch count</td>
<td class=\"centered\">Infrastructure</td>
<td class=\"centered\">License</td>
<td class=\"centered\">License files</td>
<td class=\"centered\">Hash file</td>
</tr>
"
autotools_packages=0
cmake_packages=0
kconfig_packages=0
luarocks_packages=0
perl_packages=0
python_packages=0
rebar_packages=0
virtual_packages=0
generic_packages=0
manual_packages=0
packages_with_license=0
packages_without_license=0
packages_with_license_files=0
packages_without_license_files=0
packages_with_hash_file=0
packages_without_hash_file=0
total_patch_count=0
cnt=0
for i in $(find boot/ linux/ package/ -name '*.mk' | sort) ; do
if test \
$i = "boot/common.mk" -o \
$i = "linux/linux-ext-ev3dev-linux-drivers.mk" -o \
$i = "linux/linux-ext-fbtft.mk" -o \
$i = "linux/linux-ext-xenomai.mk" -o \
$i = "linux/linux-ext-rtai.mk" -o \
$i = "package/freescale-imx/freescale-imx.mk" -o \
$i = "package/gcc/gcc.mk" -o \
$i = "package/gstreamer/gstreamer.mk" -o \
$i = "package/gstreamer1/gstreamer1.mk" -o \
$i = "package/gtk2-themes/gtk2-themes.mk" -o \
$i = "package/matchbox/matchbox.mk" -o \
$i = "package/opengl/opengl.mk" -o \
$i = "package/qt5/qt5.mk" -o \
$i = "package/x11r7/x11r7.mk" -o \
$i = "package/doc-asciidoc.mk" -o \
$i = "package/pkg-autotools.mk" -o \
$i = "package/pkg-cmake.mk" -o \
$i = "package/pkg-kconfig.mk" -o \
$i = "package/pkg-luarocks.mk" -o \
$i = "package/pkg-perl.mk" -o \
$i = "package/pkg-python.mk" -o \
$i = "package/pkg-rebar.mk" -o \
$i = "package/pkg-virtual.mk" -o \
$i = "package/pkg-download.mk" -o \
$i = "package/pkg-generic.mk" -o \
$i = "package/pkg-utils.mk" ; then
echo "skipping $i" 1>&2
continue
fi
cnt=$((cnt+1))
hashost=0
hastarget=0
infratype=""
# Determine package infrastructure
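# A package .mk file ends with a line such as "$(eval $(autotools-package))"
# or "$(eval $(host-generic-package))"; the greps below match that
# "(foo-package)" invocation to classify the package.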
if grep -E "\(host-autotools-package\)" $i > /dev/null ; then
infratype="autotools"
hashost=1
fi
if grep -E "\(autotools-package\)" $i > /dev/null ; then
infratype="autotools"
hastarget=1
fi
if grep -E "\(kconfig-package\)" $i > /dev/null ; then
infratype="kconfig"
hastarget=1
fi
if grep -E "\(host-luarocks-package\)" $i > /dev/null ; then
infratype="luarocks"
hashost=1
fi
if grep -E "\(luarocks-package\)" $i > /dev/null ; then
infratype="luarocks"
hastarget=1
fi
if grep -E "\(host-perl-package\)" $i > /dev/null ; then
infratype="perl"
hashost=1
fi
if grep -E "\(perl-package\)" $i > /dev/null ; then
infratype="perl"
hastarget=1
fi
if grep -E "\(host-python-package\)" $i > /dev/null ; then
infratype="python"
hashost=1
fi
if grep -E "\(python-package\)" $i > /dev/null ; then
infratype="python"
hastarget=1
fi
if grep -E "\(host-rebar-package\)" $i > /dev/null ; then
infratype="rebar"
hashost=1
fi
if grep -E "\(rebar-package\)" $i > /dev/null ; then
infratype="rebar"
hastarget=1
fi
if grep -E "\(host-virtual-package\)" $i > /dev/null ; then
infratype="virtual"
hashost=1
fi
if grep -E "\(virtual-package\)" $i > /dev/null ; then
infratype="virtual"
hastarget=1
fi
if grep -E "\(host-generic-package\)" $i > /dev/null ; then
infratype="generic"
hashost=1
fi
if grep -E "\(generic-package\)" $i > /dev/null ; then
infratype="generic"
hastarget=1
fi
if grep -E "\(host-cmake-package\)" $i > /dev/null ; then
infratype="cmake"
hashost=1
fi
if grep -E "\(cmake-package\)" $i > /dev/null ; then
infratype="cmake"
hastarget=1
fi
pkg=$(basename $i)
dir=$(dirname $i)
pkg=${pkg%.mk}
pkgvariable=$(echo ${pkg} | tr "a-z-" "A-Z_")
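# e.g. package/gtk2-themes/gtk2-themes.mk gives pkg="gtk2-themes"
# and pkgvariable="GTK2_THEMES"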
# Count packages per infrastructure
if [ -z "${infratype}" ] ; then
infratype="manual"
manual_packages=$(($manual_packages+1))
elif [ ${infratype} = "autotools" ]; then
autotools_packages=$(($autotools_packages+1))
elif [ ${infratype} = "cmake" ]; then
cmake_packages=$(($cmake_packages+1))
elif [ ${infratype} = "kconfig" ]; then
kconfig_packages=$(($kconfig_packages+1))
elif [ ${infratype} = "luarocks" ]; then
luarocks_packages=$(($luarocks_packages+1))
elif [ ${infratype} = "perl" ]; then
perl_packages=$(($perl_packages+1))
elif [ ${infratype} = "python" ]; then
python_packages=$(($python_packages+1))
elif [ ${infratype} = "rebar" ]; then
rebar_packages=$(($rebar_packages+1))
elif [ ${infratype} = "virtual" ]; then
virtual_packages=$(($virtual_packages+1))
elif [ ${infratype} = "generic" ]; then
generic_packages=$(($generic_packages+1))
fi
if grep -qE "^${pkgvariable}_LICENSE[ ]*=" $i ; then
packages_with_license=$(($packages_with_license+1))
license=1
else
packages_without_license=$(($packages_without_license+1))
license=0
fi
if grep -qE "^${pkgvariable}_LICENSE_FILES[ ]*=" $i ; then
packages_with_license_files=$(($packages_with_license_files+1))
license_files=1
else
packages_without_license_files=$(($packages_without_license_files+1))
license_files=0
fi
if test -f ${dir}/${pkg}.hash; then
packages_with_hash_file=$(($packages_with_hash_file+1))
hash_file=1
else
packages_without_hash_file=$(($packages_without_hash_file+1))
hash_file=0
fi
echo "<tr>"
echo "<td>$cnt</td>"
echo "<td>$i</td>"
package_dir=$(dirname $i)
patch_count=$(find ${package_dir} -name '*.patch' | wc -l)
total_patch_count=$(($total_patch_count+$patch_count))
if test $patch_count -lt 1 ; then
patch_count_class="nopatches"
elif test $patch_count -lt 5 ; then
patch_count_class="somepatches"
else
patch_count_class="lotsofpatches"
fi
echo "<td class=\"centered ${patch_count_class}\">"
echo "<b>$patch_count</b>"
echo "</td>"
if [ ${infratype} = "manual" ] ; then
echo "<td class=\"centered wrong\"><b>manual</b></td>"
else
echo "<td class=\"centered correct\">"
echo "<b>${infratype}</b><br/>"
if [ ${hashost} -eq 1 -a ${hastarget} -eq 1 ]; then
echo "target + host"
elif [ ${hashost} -eq 1 ]; then
echo "host"
else
echo "target"
fi
echo "</td>"
fi
if [ ${license} -eq 0 ] ; then
echo "<td class=\"centered wrong\">No</td>"
else
echo "<td class=\"centered correct\">Yes</td>"
fi
if [ ${license_files} -eq 0 ] ; then
echo "<td class=\"centered wrong\">No</td>"
else
echo "<td class=\"centered correct\">Yes</td>"
fi
if [ ${hash_file} -eq 0 ] ; then
echo "<td class=\"centered wrong\">No</td>"
else
echo "<td class=\"centered correct\">Yes</td>"
fi
echo "</tr>"
done
echo "</table>"
echo "<a id="results"></a>"
echo "<table>"
echo "<tr>"
echo "<td>Packages using the <i>generic</i> infrastructure</td>"
echo "<td>$generic_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>cmake</i> infrastructure</td>"
echo "<td>$cmake_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>autotools</i> infrastructure</td>"
echo "<td>$autotools_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>luarocks</i> infrastructure</td>"
echo "<td>$luarocks_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>kconfig</i> infrastructure</td>"
echo "<td>$kconfig_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>perl</i> infrastructure</td>"
echo "<td>$perl_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>python</i> infrastructure</td>"
echo "<td>$python_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>rebar</i> infrastructure</td>"
echo "<td>$rebar_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages using the <i>virtual</i> infrastructure</td>"
echo "<td>$virtual_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages not using any infrastructure</td>"
echo "<td>$manual_packages</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages having license information</td>"
echo "<td>$packages_with_license</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages not having licence information</td>"
echo "<td>$packages_without_license</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages having license files information</td>"
echo "<td>$packages_with_license_files</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages not having licence files information</td>"
echo "<td>$packages_without_license_files</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages having hash file</td>"
echo "<td>$packages_with_hash_file</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Packages not having hash file</td>"
echo "<td>$packages_without_hash_file</td>"
echo "</tr>"
echo "<tr>"
echo "<td>Number of patches in all packages</td>"
echo "<td>$total_patch_count</td>"
echo "</tr>"
echo "<tr>"
echo "<td>TOTAL</td>"
echo "<td>$cnt</td>"
echo "</tr>"
echo "</table>"
echo "<hr/>"
echo "<i>Updated on $(LANG=C date), Git commit $(git log master -n 1 --pretty=format:%H)</i>"
echo "</body>"
echo "</html>"

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env python
# Wrapper for python2 and python3 around compileall to raise an exception
# when python byte code generation fails.
#
# Inspired by:
# http://stackoverflow.com/questions/615632/how-to-detect-errors-from-compileall-compile-dir
from __future__ import print_function
import sys
import py_compile
import compileall
class ReportProblem:
def __nonzero__(self):
type, value, traceback = sys.exc_info()
if type is not None and issubclass(type, py_compile.PyCompileError):
print("Cannot compile %s" % value.file)
raise value
return 1
# __nonzero__ is the python2 truth-value hook; python3 looks up __bool__
__bool__ = __nonzero__
report_problem = ReportProblem()
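# compileall evaluates its 'quiet' argument for truthiness inside its error
# handler, where sys.exc_info() still reports the PyCompileError; that is
# what triggers __nonzero__/__bool__ above and lets the compilation error be
# re-raised instead of silently swallowed.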
compileall.compile_dir(sys.argv[1], quiet=report_problem)

View File

@@ -0,0 +1,861 @@
#!/usr/bin/env perl
# This chunk of stuff was generated by App::FatPacker. To find the original
# file's code, look for the end of this BEGIN block or the string 'FATPACK'
BEGIN {
my %fatpacked;
$fatpacked{"MetaCPAN/API/Tiny.pm"} = <<'METACPAN_API_TINY';
package MetaCPAN::API::Tiny;
{
$MetaCPAN::API::Tiny::VERSION = '1.131730';
}
use strict;
use warnings;
# ABSTRACT: A Tiny API client for MetaCPAN
use Carp;
use JSON::PP 'encode_json', 'decode_json';
use HTTP::Tiny;
sub new {
my ($class, @args) = @_;
$#_ % 2 == 0
or croak 'Arguments must be provided as name/value pairs';
my %params = @args;
die 'ua_args must be an array reference'
if $params{ua_args} && ref($params{ua_args}) ne 'ARRAY';
my $self = +{
base_url => $params{base_url} || 'http://api.metacpan.org/v0',
ua => $params{ua} || HTTP::Tiny->new(
$params{ua_args}
? @{$params{ua_args}}
: (agent => 'MetaCPAN::API::Tiny/'
. ($MetaCPAN::API::VERSION || 'xx'))),
};
return bless($self, $class);
}
sub _build_extra_params {
my $self = shift;
@_ % 2 == 0
or croak 'Incorrect number of params, must be key/value';
my %extra = @_;
my $ua = $self->{ua};
foreach my $key (keys %extra)
{
# The implementation in HTTP::Tiny uses + instead of %20, fix that
$extra{$key} = $ua->_uri_escape($extra{$key});
$extra{$key} =~ s/\+/%20/g;
}
my $params = join '&', map { "$_=" . $extra{$_} } sort keys %extra;
return $params;
}
# /source/{author}/{release}/{path}
sub source {
my $self = shift;
my %opts = @_ ? @_ : ();
my $url = '';
my $error = "Provide 'author' and 'release' and 'path'";
%opts or croak $error;
if (
defined ( my $author = $opts{'author'} ) &&
defined ( my $release = $opts{'release'} ) &&
defined ( my $path = $opts{'path'} )
) {
$url = "source/$author/$release/$path";
} else {
croak $error;
}
$url = $self->{base_url} . "/$url";
my $result = $self->{ua}->get($url);
$result->{'success'}
or croak "Failed to fetch '$url': " . $result->{'reason'};
return $result->{'content'};
}
# /release/{distribution}
# /release/{author}/{release}
sub release {
my $self = shift;
my %opts = @_ ? @_ : ();
my $url = '';
my $error = "Either provide 'distribution', or 'author' and 'release', " .
"or 'search'";
%opts or croak $error;
my %extra_opts = ();
if ( defined ( my $dist = $opts{'distribution'} ) ) {
$url = "release/$dist";
} elsif (
defined ( my $author = $opts{'author'} ) &&
defined ( my $release = $opts{'release'} )
) {
$url = "release/$author/$release";
} elsif ( defined ( my $search_opts = $opts{'search'} ) ) {
ref $search_opts && ref $search_opts eq 'HASH'
or croak $error;
%extra_opts = %{$search_opts};
$url = 'release/_search';
} else {
croak $error;
}
return $self->fetch( $url, %extra_opts );
}
# /pod/{module}
# /pod/{author}/{release}/{path}
sub pod {
my $self = shift;
my %opts = @_ ? @_ : ();
my $url = '';
my $error = "Either provide 'module' or 'author and 'release' and 'path'";
%opts or croak $error;
if ( defined ( my $module = $opts{'module'} ) ) {
$url = "pod/$module";
} elsif (
defined ( my $author = $opts{'author'} ) &&
defined ( my $release = $opts{'release'} ) &&
defined ( my $path = $opts{'path'} )
) {
$url = "pod/$author/$release/$path";
} else {
croak $error;
}
# check content-type
my %extra = ();
if ( defined ( my $type = $opts{'content-type'} ) ) {
$type =~ m{^ text/ (?: html|plain|x-pod|x-markdown ) $}x
or croak 'Incorrect content-type provided';
$extra{headers}{'content-type'} = $type;
}
$url = $self->{base_url}. "/$url";
my $result = $self->{ua}->get( $url, \%extra );
$result->{'success'}
or croak "Failed to fetch '$url': " . $result->{'reason'};
return $result->{'content'};
}
# /module/{module}
sub module {
my $self = shift;
my $name = shift;
$name or croak 'Please provide a module name';
return $self->fetch("module/$name");
}
# file() is a synonym of module
sub file { goto &module }
# /author/{author}
sub author {
my $self = shift;
my ( $pause_id, $url, %extra_opts );
if ( @_ == 1 ) {
$url = 'author/' . shift;
} elsif ( @_ == 2 ) {
my %opts = @_;
if ( defined $opts{'pauseid'} ) {
$url = "author/" . $opts{'pauseid'};
} elsif ( defined $opts{'search'} ) {
my $search_opts = $opts{'search'};
ref $search_opts && ref $search_opts eq 'HASH'
or croak "'search' key must be hashref";
%extra_opts = %{$search_opts};
$url = 'author/_search';
} else {
croak 'Unknown option given';
}
} else {
croak 'Please provide an author PAUSEID or a "search"';
}
return $self->fetch( $url, %extra_opts );
}
sub fetch {
my $self = shift;
my $url = shift;
my $extra = $self->_build_extra_params(@_);
my $base = $self->{base_url};
my $req_url = $extra ? "$base/$url?$extra" : "$base/$url";
my $result = $self->{ua}->get($req_url);
return $self->_decode_result( $result, $req_url );
}
sub post {
my $self = shift;
my $url = shift;
my $query = shift;
my $base = $self->{base_url};
defined $url
or croak 'First argument of URL must be provided';
ref $query and ref $query eq 'HASH'
or croak 'Second argument of query hashref must be provided';
my $query_json = encode_json( $query );
my $result = $self->{ua}->request(
'POST',
"$base/$url",
{
headers => { 'Content-Type' => 'application/json' },
content => $query_json,
}
);
return $self->_decode_result( $result, $url, $query_json );
}
sub _decode_result {
my $self = shift;
my ( $result, $url, $original ) = @_;
my $decoded_result;
ref $result and ref $result eq 'HASH'
or croak 'First argument must be hashref';
defined $url
or croak 'Second argument of a URL must be provided';
if ( defined ( my $success = $result->{'success'} ) ) {
my $reason = $result->{'reason'} || '';
$reason .= ( defined $original ? " (request: $original)" : '' );
$success or croak "Failed to fetch '$url': $reason";
} else {
croak 'Missing success in return value';
}
defined ( my $content = $result->{'content'} )
or croak 'Missing content in return value';
eval { $decoded_result = decode_json $content; 1 }
or do { croak "Couldn't decode '$content': $@" };
return $decoded_result;
}
1;
__END__
=pod
=head1 NAME
MetaCPAN::API::Tiny - A Tiny API client for MetaCPAN
=head1 VERSION
version 1.131730
=head1 DESCRIPTION
This is the Tiny version of L<MetaCPAN::API>. It implements a compatible API
with a few notable exceptions:
=over 4
=item Attributes are direct hash access
The attributes defined using Mo(o|u)se are now accessed via the blessed hash
directly. There are no accessors defined to access these elements.
=item Exception handling
Instead of using Try::Tiny, raw evals are used. This could potentially cause
issues, so just be aware.
=item Testing
Test::Fatal was replaced with an eval implementation of exception().
Test::TinyMocker usage is retained, but may be absorbed since it is pure perl
=back
=head1 CLASS_METHODS
=head2 new
new is the constructor for MetaCPAN::API::Tiny. In the non-tiny version of this
module, this is provided via Any::Moose built from the attributes defined. In
the tiny version, we define our own constructor. It takes the same arguments
and provides similar checks to MetaCPAN::API with regards to arguments passed.
=head1 PUBLIC_METHODS
=head2 source
my $source = $mcpan->source(
author => 'DOY',
release => 'Moose-2.0201',
path => 'lib/Moose.pm',
);
Searches MetaCPAN for a module or a specific release and returns the plain source.
=head2 release
my $result = $mcpan->release( distribution => 'Moose' );
# or
my $result = $mcpan->release( author => 'DOY', release => 'Moose-2.0001' );
Searches MetaCPAN for a dist.
You can do complex searches using 'search' parameter:
# example lifted from MetaCPAN docs
my $result = $mcpan->release(
search => {
author => "OALDERS AND ",
filter => "status:latest",
fields => "name",
size => 1,
},
);
=head2 pod
my $result = $mcpan->pod( module => 'Moose' );
# or
my $result = $mcpan->pod(
author => 'DOY',
release => 'Moose-2.0201',
path => 'lib/Moose.pm',
);
Searches MetaCPAN for a module or a specific release and returns the POD.
=head2 module
my $result = $mcpan->module('MetaCPAN::API');
Searches MetaCPAN and returns a module's ".pm" file.
=head2 file
A synonym of L</module>
=head2 author
my $result1 = $mcpan->author('XSAWYERX');
my $result2 = $mcpan->author( pauseid => 'XSAWYERX' );
Searches MetaCPAN for a specific author.
You can do complex searches using 'search' parameter:
# example lifted from MetaCPAN docs
my $result = $mcpan->author(
search => {
q => 'profile.name:twitter',
size => 1,
},
);
=head2 fetch
my $result = $mcpan->fetch('/release/distribution/Moose');
# with parameters
my $more = $mcpan->fetch(
'/release/distribution/Moose',
param => 'value',
);
This is a helper method for API implementations. It fetches a path from MetaCPAN, decodes the JSON from the content variable and returns it.
You don't really need to use it, but you can in case you want to write your own extension implementation to MetaCPAN::API.
It accepts an additional hash as "GET" parameters.
=head2 post
# /release&content={"query":{"match_all":{}},"filter":{"prefix":{"archive":"Cache-Cache-1.06"}}}
my $result = $mcpan->post(
'release',
{
query => { match_all => {} },
filter => { prefix => { archive => 'Cache-Cache-1.06' } },
},
);
The POST equivalent of the "fetch()" method. It gets the path and JSON request.
=head1 THANKS
Overall the tests and code were ripped directly from MetaCPAN::API and
tiny-fied. A big thanks to Sawyer X for writing the original module.
=head1 AUTHOR
Nicholas R. Perez <nperez@cpan.org>
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2013 by Nicholas R. Perez <nperez@cpan.org>.
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut
METACPAN_API_TINY
s/^ //mg for values %fatpacked;
unshift @INC, sub {
if (my $fat = $fatpacked{$_[1]}) {
if ($] < 5.008) {
return sub {
return 0 unless length $fat;
$fat =~ s/^([^\n]*\n?)//;
$_ = $1;
return 1;
};
}
open my $fh, '<', \$fat
or die "FatPacker error loading $_[1] (could be a perl installation issue?)";
return $fh;
}
return
};
} # END OF FATPACK CODE
use 5.010;
use strict;
use warnings;
use Fatal qw(open close);
use Getopt::Long;
use Pod::Usage;
use File::Basename;
use Module::CoreList;
use HTTP::Tiny;
use Safe;
use MetaCPAN::API::Tiny;
# Below, 5.024 should be aligned with the version of perl actually
# bundled in Buildroot:
die <<"MSG" if $] < 5.024;
This script needs a host perl with the same major version as Buildroot target perl.
Your current host perl is:
$^X
version $]
You may install a local one by running:
perlbrew install perl-5.24.0
MSG
my ($help, $man, $quiet, $force, $recommend, $test, $host);
my $target = 1;
GetOptions( 'help|?' => \$help,
'man' => \$man,
'quiet|q' => \$quiet,
'force|f' => \$force,
'host!' => \$host,
'target!' => \$target,
'recommend' => \$recommend,
'test' => \$test
) or pod2usage(-exitval => 1);
pod2usage(-exitval => 0) if $help;
pod2usage(-exitval => 0, -verbose => 2) if $man;
pod2usage(-exitval => 1) if scalar @ARGV == 0;
my %dist; # name -> metacpan data
my %need_target; # name -> 1 if target package is needed
my %need_host; # name -> 1 if host package is needed
my %need_dlopen; # name -> 1 if requires dynamic library
my %deps_build; # name -> list of host dependencies
my %deps_runtime; # name -> list of target dependencies
my %deps_optional; # name -> list of optional target dependencies
my %license_files; # name -> list of license files
my %checksum; # author -> list of checksum
my $mcpan = MetaCPAN::API::Tiny->new(base_url => 'http://fastapi.metacpan.org/v1');
my $ua = HTTP::Tiny->new();
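# A CPAN CHECKSUMS file is Perl source that defines a hash keyed by tarball
# name; get_checksum() fetches the one sitting next to the distfile and
# reval()s it in a Safe compartment to extract the md5/sha256 digests.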
sub get_checksum {
my ($url) = @_;
my($path) = $url =~ m|^[^:/?#]+://[^/?#]*([^?#]*)|;
my($basename, $dirname) = fileparse( $path );
unless ($checksum{$dirname}) {
my $response = $ua->get(qq{http://cpan.metacpan.org${dirname}CHECKSUMS});
$checksum{$dirname} = $response->{content};
}
my $chksum = Safe->new->reval($checksum{$dirname});
return $chksum->{$basename}, $basename;
}
sub get_manifest {
my ($author, $distname, $version) = @_;
my $url = qq{http://fastapi.metacpan.org/source/${author}/${distname}-${version}/MANIFEST};
my $response = $ua->get($url);
return $response->{content};
}
sub is_xs {
my ($manifest) = @_;
# This heuristic determines if a module is a native extension, by searching
# some file extension types in the MANIFEST of the distribution.
# It was inspired by http://deps.cpantesters.org/static/purity.html
return $manifest =~ m/\.(swg|xs|c|h|i)[\n\s]/;
}
sub find_license_files {
my ($manifest) = @_;
my @license_files;
foreach (split /\n/, $manifest) {
next if m|/|;
push @license_files, $_ if m/(ARTISTIC|COPYING|COPYRIGHT|LICENSE)/i;
}
if (scalar @license_files == 0 && $manifest =~ m/(README)[\n\s]/i) {
@license_files = ($1);
}
return \@license_files;
}
sub fetch {
my ($name, $need_target, $need_host, $top) = @_;
$need_target{$name} = $need_target if $need_target;
$need_host{$name} = $need_host if $need_host;
unless ($dist{$name} && !$top) {
say qq{fetch ${name}} unless $quiet;
my $result = $mcpan->release( distribution => $name );
$dist{$name} = $result;
my $manifest = get_manifest( $result->{author}, $name, $result->{version} );
$need_dlopen{$name} = is_xs( $manifest );
$license_files{$name} = find_license_files( $manifest );
my %build = ();
my %runtime = ();
my %optional = ();
foreach my $dep (@{$result->{dependency}}) {
my $modname = ${$dep}{module};
next if $modname eq q{perl};
next if $modname =~ m|^Alien|;
next if $modname =~ m|^Win32|;
next if !($test && $top) && $modname =~ m|^Test|;
next if Module::CoreList::is_core( $modname, undef, $] );
# we could use the host Module::CoreList data, because host perl and
# target perl have the same major version
next if ${$dep}{phase} eq q{develop};
next if !($test && $top) && ${$dep}{phase} eq q{test};
my $distname = $mcpan->module( $modname )->{distribution};
if (${$dep}{phase} eq q{runtime}) {
if (${$dep}{relationship} eq q{requires}) {
$runtime{$distname} = 1;
}
else {
$optional{$distname} = 1 if $recommend && $top;
}
}
else { # configure, build
$build{$distname} = 1;
}
}
$deps_build{$name} = [keys %build];
$deps_runtime{$name} = [keys %runtime];
$deps_optional{$name} = [keys %optional];
foreach my $distname (@{$deps_build{$name}}) {
fetch( $distname, 0, 1 );
}
foreach my $distname (@{$deps_runtime{$name}}) {
fetch( $distname, $need_target, $need_host );
$need_dlopen{$name} ||= $need_dlopen{$distname};
}
foreach my $distname (@{$deps_optional{$name}}) {
fetch( $distname, $need_target, $need_host );
}
}
return;
}
foreach my $distname (@ARGV) {
# Command-line's distributions
fetch( $distname, !!$target, !!$host, 1 );
}
say scalar keys %dist, q{ packages fetched.} unless $quiet;
# Buildroot package name: lowercase
sub fsname {
my $name = shift;
$name =~ s|_|-|g;
return q{perl-} . lc $name;
}
# Buildroot variable name: uppercase
sub brname {
my $name = shift;
$name =~ s|-|_|g;
return uc $name;
}
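# e.g. distribution 'Try-Tiny' gives fsname 'perl-try-tiny' and
# brname 'PERL_TRY_TINY'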
while (my ($distname, $dist) = each %dist) {
my $fsname = fsname( $distname );
my $dirname = q{package/} . $fsname;
my $cfgname = $dirname . q{/Config.in};
my $mkname = $dirname . q{/} . $fsname . q{.mk};
my $hashname = $dirname . q{/} . $fsname . q{.hash};
my $brname = brname( $fsname );
mkdir $dirname unless -d $dirname;
if ($need_target{$distname} && ($force || !-f $cfgname)) {
my $abstract = $dist->{abstract};
my $homepage = $dist->{resources}->{homepage} || qq{https://metacpan.org/release/${distname}};
say qq{write ${cfgname}} unless $quiet;
open my $fh, q{>}, $cfgname;
say {$fh} qq{config BR2_PACKAGE_${brname}};
say {$fh} qq{\tbool "${fsname}"};
say {$fh} qq{\tdepends on !BR2_STATIC_LIBS} if $need_dlopen{$distname};
foreach my $dep (sort @{$deps_runtime{$distname}}) {
my $brdep = brname( fsname( $dep ) );
say {$fh} qq{\tselect BR2_PACKAGE_${brdep}};
}
say {$fh} qq{\thelp};
say {$fh} qq{\t ${abstract}\n} if $abstract;
say {$fh} qq{\t ${homepage}};
if ($need_dlopen{$distname}) {
say {$fh} qq{\ncomment "${fsname} needs a toolchain w/ dynamic library"};
say {$fh} qq{\tdepends on BR2_STATIC_LIBS};
}
close $fh;
}
if ($force || !-f $mkname) {
my $version = $dist->{version};
my($path) = $dist->{download_url} =~ m|^[^:/?#]+://[^/?#]*([^?#]*)|;
# this URL contains only the scheme, auth and path parts (but no query and fragment parts)
# the scheme is not used, because the job is done by the BR download infrastructure
# the auth part is not used, because we use $(BR2_CPAN_MIRROR)
my($filename, $directories, $suffix) = fileparse( $path, q{tar.gz}, q{tgz} );
$directories =~ s|/$||;
my $dependencies = join q{ }, map( { q{host-} . fsname( $_ ); } sort @{$deps_build{$distname}} ),
map( { fsname( $_ ); } sort @{$deps_runtime{$distname}} );
my $host_dependencies = join q{ }, map { q{host-} . fsname( $_ ); } sort( @{$deps_build{$distname}},
@{$deps_runtime{$distname}} );
my $license = ref $dist->{license} eq 'ARRAY'
? join q{ or }, @{$dist->{license}}
: $dist->{license};
# BR requires license name as in http://spdx.org/licenses/
$license =~ s|apache_2_0|Apache-2.0|;
$license =~ s|artistic_2|Artistic-2.0|;
$license =~ s|mit|MIT|;
$license =~ s|openssl|OpenSSL|;
$license =~ s|perl_5|Artistic or GPLv1+|;
my $license_files = join q{ }, @{$license_files{$distname}};
say qq{write ${mkname}} unless $quiet;
open my $fh, q{>}, $mkname;
say {$fh} qq{################################################################################};
say {$fh} qq{#};
say {$fh} qq{# ${fsname}};
say {$fh} qq{#};
say {$fh} qq{################################################################################};
say {$fh} qq{};
say {$fh} qq{${brname}_VERSION = ${version}};
say {$fh} qq{${brname}_SOURCE = ${distname}-\$(${brname}_VERSION).${suffix}};
say {$fh} qq{${brname}_SITE = \$(BR2_CPAN_MIRROR)${directories}};
say {$fh} qq{${brname}_DEPENDENCIES = ${dependencies}} if $need_target{$distname} && $dependencies;
say {$fh} qq{HOST_${brname}_DEPENDENCIES = ${host_dependencies}} if $need_host{$distname} && $host_dependencies;
say {$fh} qq{${brname}_LICENSE = ${license}} if $license && $license ne q{unknown};
say {$fh} qq{${brname}_LICENSE_FILES = ${license_files}} if $license_files;
say {$fh} qq{};
foreach (sort @{$deps_optional{$distname}}) {
next if grep { $_ eq $distname; } @{$deps_runtime{$_}}; # avoid cyclic dependencies
my $opt_brname = brname( $_ );
my $opt_fsname = fsname( $_ );
say {$fh} qq{ifeq (\$(BR2_PACKAGE_PERL_${opt_brname}),y)};
say {$fh} qq{${brname}_DEPENDENCIES += ${opt_fsname}};
say {$fh} qq{endif};
say {$fh} qq{};
}
say {$fh} qq{\$(eval \$(perl-package))} if $need_target{$distname};
say {$fh} qq{\$(eval \$(host-perl-package))} if $need_host{$distname};
close $fh;
}
if ($force || !-f $hashname) {
my($checksum, $filename) = get_checksum($dist->{download_url});
my $md5 = $checksum->{md5};
my $sha256 = $checksum->{sha256};
say qq{write ${hashname}} unless $quiet;
open my $fh, q{>}, $hashname;
say {$fh} qq{# retrieved by scancpan from http://cpan.metacpan.org/};
say {$fh} qq{md5 ${md5} ${filename}};
say {$fh} qq{sha256 ${sha256} ${filename}};
close $fh;
}
}
my %pkg;
my $cfgname = q{package/Config.in};
if (-f $cfgname) {
open my $fh, q{<}, $cfgname;
while (<$fh>) {
chomp;
$pkg{$_} = 1 if m|package/perl-|;
}
close $fh;
}
foreach my $distname (keys %need_target) {
my $fsname = fsname( $distname );
$pkg{qq{\tsource "package/${fsname}/Config.in"}} = 1;
}
say qq{${cfgname} must contain the following lines:};
say join qq{\n}, sort keys %pkg;
__END__
=head1 NAME
support/scripts/scancpan Try-Tiny Moo
=head1 SYNOPSIS
support/scripts/scancpan [options] [distname ...]
Options:
-help
-man
-quiet
-force
-target/-notarget
-host/-nohost
-recommend
-test
=head1 OPTIONS
=over 8
=item B<-help>
Prints a brief help message and exits.
=item B<-man>
Prints the manual page and exits.
=item B<-quiet>
Executes without output
=item B<-force>
Forces the overwriting of existing files.
=item B<-target/-notarget>
Switches package generation for the target variant (the default is C<-target>).
=item B<-host/-nohost>
Switches package generation for the host variant (the default is C<-nohost>).
=item B<-recommend>
Adds I<recommended> dependencies.
=item B<-test>
Adds dependencies for test.
=back
=head1 DESCRIPTION
This script creates templates of the Buildroot package files for all the
Perl/CPAN distributions required by the specified distnames. The
dependencies and metadata are fetched from https://metacpan.org/.
After running this script, it is necessary to check the generated files.
You have to manually add the license files (PERL_FOO_LICENSE_FILES variable).
For distributions that link against a target library, you have to add the
buildroot package name for that library to the DEPENDENCIES variable.
See the Buildroot documentation for details on the usage of the Perl
infrastructure.
The major version of the host perl must be aligned with the target one,
in order to work with the right CoreList data.
=head1 LICENSE
Copyright (C) 2013-2016 by Francois Perrad <francois.perrad@gadz.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
This script is a part of Buildroot.
This script requires the module C<MetaCPAN::API::Tiny> (version 1.131730)
which was included at the beginning of this file by the tool C<fatpack>.
See L<http://search.cpan.org/~nperez/MetaCPAN-API-Tiny-1.131730/>.
See L<http://search.cpan.org/search?query=App-FatPacker&mode=dist>.
Both of these libraries are free software and may be distributed under the
same terms as perl itself.
Perl itself may be distributed under the terms of the Artistic v1 or the GPL v1 license.
=cut

View File

@@ -0,0 +1,653 @@
#!/usr/bin/python2
"""
Utility for building Buildroot packages for existing PyPI packages
Any package built by scanpypi should be manually checked for
errors.
"""
from __future__ import print_function
import argparse
import json
import urllib2
import sys
import os
import shutil
import StringIO
import tarfile
import zipfile
import errno
import hashlib
import re
import textwrap
import tempfile
import imp
from functools import wraps
def setup_decorator(func, method):
"""
Decorator for distutils.core.setup and setuptools.setup.
Stores the arguments with which setup is called in a dict, and adds
a key 'method' which is either 'setuptools' or 'distutils'.
Keyword arguments:
func -- either setuptools.setup or distutils.core.setup
method -- either 'setuptools' or 'distutils'
"""
@wraps(func)
def closure(*args, **kwargs):
# Any python package calls its setup function to be installed.
# The 'name' argument of this setup function is the package's name
BuildrootPackage.setup_args[kwargs['name']] = kwargs
BuildrootPackage.setup_args[kwargs['name']]['method'] = method
return closure
# monkey patch
import setuptools
setuptools.setup = setup_decorator(setuptools.setup, 'setuptools')
import distutils
distutils.core.setup = setup_decorator(setuptools.setup, 'distutils')
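# Note that the decorated setup never calls the real one: executing a
# package's setup.py merely records its keyword arguments in
# BuildrootPackage.setup_args without installing anything.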
def find_file_upper_case(filenames, path='./'):
"""
List generator:
Recursively find files that matches one of the specified filenames.
Returns a relative path starting with path argument.
Keyword arguments:
filenames -- List of filenames to be found
path -- Path to the directory to search
"""
for root, dirs, files in os.walk(path):
for file in files:
if file.upper() in filenames:
yield (os.path.join(root, file))
def pkg_buildroot_name(pkg_name):
"""
Returns the Buildroot package name for the PyPI package pkg_name.
Removes all non-alphanumeric characters except -
Also lowers the name and adds the 'python-' prefix
Keyword arguments:
pkg_name -- String to rename
"""
name = re.sub('[^\w-]', '', pkg_name.lower())
prefix = 'python-'
pattern = re.compile('^(?!' + prefix + ')(.+?)$')
name = pattern.sub(r'python-\1', name)
return name
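# e.g. pkg_buildroot_name('PyYAML') returns 'python-pyyaml'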
class DownloadFailed(Exception):
pass
class BuildrootPackage():
"""This class's methods are not meant to be used individually please
use them in the correct order:
__init__
fetch_package_info
download_package
extract_package
load_setup
get_requirements
create_package_mk
create_hash_file
create_config_in
"""
setup_args = {}
def __init__(self, real_name, pkg_folder):
self.real_name = real_name
self.buildroot_name = pkg_buildroot_name(self.real_name)
self.pkg_dir = os.path.join(pkg_folder, self.buildroot_name)
self.mk_name = self.buildroot_name.upper().replace('-', '_')
self.as_string = None
self.md5_sum = None
self.metadata = None
self.metadata_name = None
self.metadata_url = None
self.pkg_req = None
self.setup_metadata = None
self.tmp_extract = None
self.used_url = None
self.filename = None
self.url = None
self.version = None
def fetch_package_info(self):
"""
Fetch a package's metadata from the python package index
"""
self.metadata_url = 'https://pypi.python.org/pypi/{pkg}/json'.format(
pkg=self.real_name)
try:
pkg_json = urllib2.urlopen(self.metadata_url).read().decode()
except urllib2.HTTPError as error:
print('ERROR:', error.getcode(), error.msg, file=sys.stderr)
print('ERROR: Could not find package {pkg}.\n'
'Check syntax inside the python package index:\n'
'https://pypi.python.org/pypi/ '
.format(pkg=self.real_name))
raise
except urllib2.URLError:
print('ERROR: Could not find package {pkg}.\n'
'Check syntax inside the python package index:\n'
'https://pypi.python.org/pypi/ '
.format(pkg=self.real_name))
raise
self.metadata = json.loads(pkg_json)
self.version = self.metadata['info']['version']
self.metadata_name = self.metadata['info']['name']
def download_package(self):
"""
Download a package using metadata from pypi
"""
try:
self.metadata['urls'][0]['filename']
except IndexError:
print(
'Non-conventional package, ',
'please check carefully after creation')
self.metadata['urls'] = [{
'packagetype': 'sdist',
'url': self.metadata['info']['download_url'],
'md5_digest': None}]
# In this case, we can't get the name of the downloaded file
# from the pypi api, so we need to find it; this should work
urlpath = urllib2.urlparse.urlparse(
self.metadata['info']['download_url']).path
# urlparse().path gives something like
# /path/to/file-version.tar.gz
# We use basename to remove /path/to
self.metadata['urls'][0]['filename'] = os.path.basename(urlpath)
for download_url in self.metadata['urls']:
if 'bdist' in download_url['packagetype']:
continue
try:
print('Downloading package {pkg} from {url}...'.format(
pkg=self.real_name, url=download_url['url']))
download = urllib2.urlopen(download_url['url'])
except urllib2.HTTPError as http_error:
download = http_error
else:
self.used_url = download_url
self.as_string = download.read()
if not download_url['md5_digest']:
break
self.md5_sum = hashlib.md5(self.as_string).hexdigest()
if self.md5_sum == download_url['md5_digest']:
break
else:
if download.__class__ == urllib2.HTTPError:
raise download
raise DownloadFailed('Failed to download package {pkg}'
.format(pkg=self.real_name))
self.filename = self.used_url['filename']
self.url = self.used_url['url']
def extract_package(self, tmp_path):
"""
Extract the package contents into a directory
Keyword arguments:
tmp_path -- directory where you want the package to be extracted
"""
as_file = StringIO.StringIO(self.as_string)
if self.filename[-3:] == 'zip':
with zipfile.ZipFile(as_file) as as_zipfile:
tmp_pkg = os.path.join(tmp_path, self.buildroot_name)
try:
os.makedirs(tmp_pkg)
except OSError as exception:
if exception.errno != errno.EEXIST:
print("ERROR: ", exception.message, file=sys.stderr)
return None, None
print('WARNING:', exception.message, file=sys.stderr)
print('Removing {pkg}...'.format(pkg=tmp_pkg))
shutil.rmtree(tmp_pkg)
os.makedirs(tmp_pkg)
as_zipfile.extractall(tmp_pkg)
else:
with tarfile.open(fileobj=as_file) as as_tarfile:
tmp_pkg = os.path.join(tmp_path, self.buildroot_name)
try:
os.makedirs(tmp_pkg)
except OSError as exception:
if exception.errno != errno.EEXIST:
print("ERROR: ", exception.message, file=sys.stderr)
return None, None
print('WARNING:', exception.message, file=sys.stderr)
print('Removing {pkg}...'.format(pkg=tmp_pkg))
shutil.rmtree(tmp_pkg)
os.makedirs(tmp_pkg)
as_tarfile.extractall(tmp_pkg)
tmp_extract = '{folder}/{name}-{version}'
self.tmp_extract = tmp_extract.format(
folder=tmp_pkg,
name=self.metadata_name,
version=self.version)
def load_setup(self):
"""
Loads the corresponding setup and store its metadata
"""
current_dir = os.getcwd()
os.chdir(self.tmp_extract)
sys.path.append(self.tmp_extract)
s_file, s_path, s_desc = imp.find_module('setup', [self.tmp_extract])
setup = imp.load_module('setup', s_file, s_path, s_desc)
try:
self.setup_metadata = self.setup_args[self.metadata_name]
except KeyError:
# This means setup was not called, which most likely means that
# it is called through the if __name__ == '__main__' directive.
# In this case, we can only pray that it is called through a
# function called main() in setup.py.
setup.main([]) # Will raise AttributeError if not found
self.setup_metadata = self.setup_args[self.metadata_name]
# Here we must remove the module the hard way.
# We must do this because of a very specific case: if a package calls
# setup from the __main__ but does not come with a 'main()' function,
# for some reason setup.main([]) will successfully call the main
# function of a previous package...
sys.modules.pop('setup', None)
del setup
os.chdir(current_dir)
sys.path.remove(self.tmp_extract)
def get_requirements(self, pkg_folder):
"""
Retrieve dependencies from the metadata found in the setup.py script of
a pypi package.
Keyword Arguments:
pkg_folder -- location of the already created packages
"""
if 'install_requires' not in self.setup_metadata:
self.pkg_req = None
return set()
self.pkg_req = self.setup_metadata['install_requires']
self.pkg_req = [re.sub('([-.\w]+).*', r'\1', req)
for req in self.pkg_req]
req_not_found = self.pkg_req
self.pkg_req = map(pkg_buildroot_name, self.pkg_req)
pkg_tuples = zip(req_not_found, self.pkg_req)
# pkg_tuples is a list of tuples that looks like
# ('werkzeug','python-werkzeug') because I need both when checking if
# dependencies already exist or are already in the download list
req_not_found = set(
pkg[0] for pkg in pkg_tuples
if not os.path.isdir(pkg[1])
)
return req_not_found
def __create_mk_header(self):
"""
Create the header of the <package_name>.mk file
"""
header = ['#' * 80 + '\n']
header.append('#\n')
header.append('# {name}\n'.format(name=self.buildroot_name))
header.append('#\n')
header.append('#' * 80 + '\n')
header.append('\n')
return header
def __create_mk_download_info(self):
"""
Create the lines refering to the download information of the
<package_name>.mk file
"""
lines = []
version_line = '{name}_VERSION = {version}\n'.format(
name=self.mk_name,
version=self.version)
lines.append(version_line)
targz = self.filename.replace(
self.version,
'$({name}_VERSION)'.format(name=self.mk_name))
targz_line = '{name}_SOURCE = {filename}\n'.format(
name=self.mk_name,
filename=targz)
lines.append(targz_line)
if self.filename not in self.url:
# Sometimes the filename is in the url, sometimes it's not
site_url = self.url
else:
site_url = self.url[:self.url.find(self.filename)]
site_line = '{name}_SITE = {url}'.format(name=self.mk_name,
url=site_url)
site_line = site_line.rstrip('/') + '\n'
lines.append(site_line)
return lines
def __create_mk_setup(self):
"""
Create the line refering to the setup method of the package of the
<package_name>.mk file
There are two things you can use to make an installer
for a python package: distutils or setuptools
distutils comes with python but does not support dependencies.
distutils is mostly still there for backward support.
setuptools is what smart people use,
but it is not shipped with python :(
"""
lines = []
setup_type_line = '{name}_SETUP_TYPE = {method}\n'.format(
name=self.mk_name,
method=self.setup_metadata['method'])
lines.append(setup_type_line)
return lines
def __create_mk_license(self):
"""
Create the lines referring to the package's license informations of the
<package_name>.mk file
The license is found using the metadata from pypi.
In the metadata, the license can be found either with standard names in
the classifiers part or with naming from the packager in the "License"
part.
From the classifiers, the license is "translated" according to
buildroot standards if need be (i.e. from Apache Software License to
Apache-2.0).
From the License part, we cannot guess what formatting the packager
used. Hence, it is likely to be incorrect. (i.e. Apache License 2.0
instead of Apache-2.0).
The license's files are found by searching the package for files named
license or license.txt (case insensitive).
If more than one license file is found, the user is asked to select
which ones he wants to use.
"""
license_dict = {
'Apache Software License': 'Apache-2.0',
'BSD License': 'BSD',
'European Union Public Licence 1.0': 'EUPLv1.0',
'European Union Public Licence 1.1': 'EUPLv1.1',
"GNU General Public License": "GPL",
"GNU General Public License v2": "GPLv2",
"GNU General Public License v2 or later": "GPLv2+",
"GNU General Public License v3": "GPLv3",
"GNU General Public License v3 or later": "GPLv3+",
"GNU Lesser General Public License v2": "LGPLv2.1",
"GNU Lesser General Public License v2 or later": "LGPLv2.1+",
"GNU Lesser General Public License v3": "LGPLv3",
"GNU Lesser General Public License v3 or later": "LGPLv3+",
"GNU Library or Lesser General Public License": "LGPLv2",
"ISC License": "ISC",
"MIT License": "MIT",
"Mozilla Public License 1.0": "MPL-1.0",
"Mozilla Public License 1.1": "MPL-1.1",
"Mozilla Public License 2.0": "MPL-2.0",
"Zope Public License": "ZPL"
}
regexp = re.compile('^License :* *.* *:+ (.*)( \(.*\))?$')
classifiers_licenses = [regexp.sub(r"\1", lic)
for lic in self.metadata['info']['classifiers']
if regexp.match(lic)]
licenses = map(lambda x: license_dict[x] if x in license_dict else x,
classifiers_licenses)
lines = []
if not len(licenses):
licenses = [self.metadata['info']['license']]
print('WARNING: License has been set to "{license}". It is most'
' likely wrong, please change it if need be'.format(
license=', '.join(licenses)))
license_line = '{name}_LICENSE = {license}\n'.format(
name=self.mk_name,
license=', '.join(licenses))
lines.append(license_line)
filenames = ['LICENCE', 'LICENSE', 'LICENSE.TXT', 'COPYING',
'COPYING.TXT']
license_files = list(find_file_upper_case(filenames, self.tmp_extract))
license_files = [license.replace(self.tmp_extract, '')[1:]
for license in license_files]
if len(license_files) > 0:
if len(license_files) > 1:
print('More than one file found for license:',
', '.join(license_files))
license_file_line = ('{name}_LICENSE_FILES ='
' {files}\n'.format(
name=self.mk_name,
files=' '.join(license_files)))
lines.append(license_file_line)
else:
print('WARNING: No license file found,'
' please specify it manually afterwards')
license_file_line = '# No license file found\n'
lines.append(license_file_line)
return lines
def __create_mk_requirements(self):
"""
Create the lines referring to the dependencies of the of the
<package_name>.mk file
Keyword Arguments:
pkg_name -- name of the package
pkg_req -- dependencies of the package
"""
lines = []
dependencies_line = ('{name}_DEPENDENCIES ='
' {reqs}\n'.format(
name=self.mk_name,
reqs=' '.join(self.pkg_req)))
lines.append(dependencies_line)
return lines
def create_package_mk(self):
"""
Create the lines corresponding to the <package_name>.mk file
"""
pkg_mk = '{name}.mk'.format(name=self.buildroot_name)
path_to_mk = os.path.join(self.pkg_dir, pkg_mk)
print('Creating {file}...'.format(file=path_to_mk))
lines = self.__create_mk_header()
lines += self.__create_mk_download_info()
lines += self.__create_mk_setup()
lines += self.__create_mk_license()
lines.append('\n')
lines.append('$(eval $(python-package))')
lines.append('\n')
with open(path_to_mk, 'w') as mk_file:
mk_file.writelines(lines)
def create_hash_file(self):
"""
Create the lines corresponding to the <package_name>.hash files
"""
pkg_hash = '{name}.hash'.format(name=self.buildroot_name)
path_to_hash = os.path.join(self.pkg_dir, pkg_hash)
print('Creating {filename}...'.format(filename=path_to_hash))
lines = []
if self.used_url['md5_digest']:
md5_comment = '# md5 from {url}, sha256 locally computed\n'.format(
url=self.metadata_url)
lines.append(md5_comment)
hash_line = '{method}\t{digest} {filename}\n'.format(
method='md5',
digest=self.used_url['md5_digest'],
filename=self.filename)
lines.append(hash_line)
digest = hashlib.sha256(self.as_string).hexdigest()
hash_line = '{method}\t{digest} {filename}\n'.format(
method='sha256',
digest=digest,
filename=self.filename)
lines.append(hash_line)
with open(path_to_hash, 'w') as hash_file:
hash_file.writelines(lines)
def create_config_in(self):
"""
Creates the Config.in file of a package
"""
path_to_config = os.path.join(self.pkg_dir, 'Config.in')
print('Creating {file}...'.format(file=path_to_config))
lines = []
config_line = 'config BR2_PACKAGE_{name}\n'.format(
name=self.mk_name)
lines.append(config_line)
bool_line = '\tbool "{name}"\n'.format(name=self.buildroot_name)
lines.append(bool_line)
if self.pkg_req:
for dep in self.pkg_req:
dep_line = '\tselect BR2_PACKAGE_{req} # runtime\n'.format(
req=dep.upper().replace('-', '_'))
lines.append(dep_line)
lines.append('\thelp\n')
help_lines = textwrap.wrap(self.metadata['info']['summary'],
initial_indent='\t ',
subsequent_indent='\t ')
# make sure a help text is terminated with a full stop
if help_lines[-1][-1] != '.':
help_lines[-1] += '.'
# \t + two spaces is 3 char long
help_lines.append('')
help_lines.append('\t ' + self.metadata['info']['home_page'])
help_lines = map(lambda x: x + '\n', help_lines)
lines += help_lines
with open(path_to_config, 'w') as config_file:
config_file.writelines(lines)
def main():
# Building the parser
parser = argparse.ArgumentParser(
description="Creates buildroot packages from the metadata of "
"an existing PyPI packages and include it "
"in menuconfig")
parser.add_argument("packages",
help="list of packages to be created",
nargs='+')
parser.add_argument("-o", "--output",
help="""
Output directory for packages.
Default is ./package
""",
default='./package')
args = parser.parse_args()
packages = list(set(args.packages))
# tmp_path is where we'll extract the files later
tmp_prefix = 'scanpypi-'
pkg_folder = args.output
tmp_path = tempfile.mkdtemp(prefix=tmp_prefix)
try:
for real_pkg_name in packages:
package = BuildrootPackage(real_pkg_name, pkg_folder)
print('buildroot package name for {}:'.format(package.real_name),
package.buildroot_name)
# First we download the package
# Most of the info we need can only be found inside the package
print('Package:', package.buildroot_name)
print('Fetching package', package.real_name)
try:
package.fetch_package_info()
except (urllib2.URLError, urllib2.HTTPError):
continue
if package.metadata_name.lower() == 'setuptools':
# setuptools imports itself, which does not work very well
# with the monkey patch applied at the beginning
print('Error: setuptools cannot be built using scanPyPI')
continue
try:
package.download_package()
except urllib2.HTTPError as error:
print('Error: {code} {reason}'.format(code=error.code,
reason=error.reason))
print('Error downloading package:', package.buildroot_name)
print()
continue
# extract the tarball
try:
package.extract_package(tmp_path)
except (tarfile.ReadError, zipfile.BadZipfile):
print('Error extracting package {}'.format(package.real_name))
print()
continue
# Loading the package install info from the package
try:
package.load_setup()
except ImportError as err:
if 'buildutils' in err.message:
print('This package needs buildutils')
else:
raise
continue
except AttributeError:
print('Error: Could not install package {pkg}'.format(
pkg=package.real_name))
continue
# Package requirements are an argument of the setup function
req_not_found = package.get_requirements(pkg_folder)
req_not_found = req_not_found.difference(packages)
packages += req_not_found
if req_not_found:
print('Added packages \'{pkgs}\' as dependencies of {pkg}'
.format(pkgs=", ".join(req_not_found),
pkg=package.buildroot_name))
print('Checking if package {name} already exists...'.format(
name=package.pkg_dir))
try:
os.makedirs(package.pkg_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
print("ERROR: ", exception.message, file=sys.stderr)
continue
print('Error: Package {name} already exists'
.format(name=package.pkg_dir))
del_pkg = raw_input(
'Do you want to delete the existing package? [y/N] ')
if del_pkg.lower() == 'y':
shutil.rmtree(package.pkg_dir)
os.makedirs(package.pkg_dir)
else:
continue
package.create_package_mk()
package.create_hash_file()
package.create_config_in()
# print an empty line for visual comfort
print()
finally:
shutil.rmtree(tmp_path)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,82 @@
#!/bin/sh
#
# This script adds local version information from the version
# control systems git, mercurial (hg) and subversion (svn).
#
# If something goes wrong, send a mail to the kernel build mailing list
# (see MAINTAINERS) and CC Nico Schottelius
# <nico-linuxsetlocalversion -at- schottelius.org>.
#
#
usage() {
echo "Usage: $0 [srctree]" >&2
exit 1
}
cd "${1:-.}" || usage
# Check for git and a git repo.
if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore it,
# because this version is defined in the top level Makefile.
if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
# If we are past a tagged commit (like "v2.6.30-rc5-302-g72357d5"),
# we pretty print it.
if atag="`git describe 2>/dev/null`"; then
echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
# If we don't have a tag at all we print -g{commitish}.
else
printf '%s%s' -g $head
fi
fi
# Is this git on svn?
if git config --get svn-remote.svn.url >/dev/null; then
printf -- '-svn%s' "`git svn find-rev $head`"
fi
# Update index only on r/w media
[ -w . ] && git update-index --refresh --unmerged > /dev/null
# Check for uncommitted changes
if git diff-index --name-only HEAD | grep -v "^scripts/package" \
| read dummy; then
printf '%s' -dirty
fi
# All done with git
exit
fi
# Check for mercurial and a mercurial repo.
if hgid=`hg id 2>/dev/null`; then
tag=`printf '%s' "$hgid" | cut -d' ' -f2 --only-delimited`
# Do we have an untagged version?
if [ -z "$tag" -o "$tag" = tip ]; then
id=`printf '%s' "$hgid" | sed 's/[+ ].*//'`
printf '%s%s' -hg "$id"
fi
# Are there uncommitted changes?
# These are represented by + after the changeset id.
case "$hgid" in
*+|*+\ *) printf '%s' -dirty ;;
esac
# All done with mercurial
exit
fi
# Check for svn and a svn repo.
if rev=`LC_ALL=C svn info 2>/dev/null | grep '^Last Changed Rev'`; then
rev=`echo $rev | awk '{print $NF}'`
printf -- '-svn%s' "$rev"
# All done with svn
exit
fi
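# Not in a git, mercurial or svn working copy: print no local version.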

View File

@@ -0,0 +1,234 @@
#!/usr/bin/env python
# Copyright (C) 2014 by Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
import os.path
import argparse
import csv
import collections
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("You need python-matplotlib to generate the size graph\n")
sys.exit(1)
colors = ['#e60004', '#009836', '#2e1d86', '#ffed00',
'#0068b5', '#f28e00', '#940084', '#97c000']
#
# This function adds a new file to 'filesdict', after checking its
# size. The 'filesdict' contains the relative path of the file as the
# key, and as the value a tuple containing the name of the package to
# which the file belongs and the size of the file.
#
# filesdict: the dict to which the file is added
# relpath: relative path of the file
# abspath: absolute path to the file
# pkg: package to which the file belongs
#
def add_file(filesdict, relpath, abspath, pkg):
if not os.path.exists(abspath):
return
if os.path.islink(abspath):
return
sz = os.stat(abspath).st_size
filesdict[relpath] = (pkg, sz)
#
# This function returns a dict where each key is the path of a file in
# the root filesystem, and the value is a tuple containing two
# elements: the name of the package to which this file belongs and the
# size of the file.
#
# builddir: path to the Buildroot output directory
#
def build_package_dict(builddir):
filesdict = {}
with open(os.path.join(builddir, "build", "packages-file-list.txt")) as filelistf:
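# each line of packages-file-list.txt is of the form '<package>,./<path/to/file>'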
for l in filelistf.readlines():
pkg, fpath = l.split(",", 1)
# remove the initial './' in each file path
fpath = fpath.strip()[2:]
fullpath = os.path.join(builddir, "target", fpath)
add_file(filesdict, fpath, fullpath, pkg)
return filesdict
#
# This function builds a dictionary that contains the name of a
# package as key, and the size of the files installed by this package
# as the value.
#
# filesdict: dictionary with the name of the files as key, and as
# value a tuple containing the name of the package to which the file
# belongs, and the size of the file. As returned by
# build_package_dict.
#
# builddir: path to the Buildroot output directory
#
def build_package_size(filesdict, builddir):
pkgsize = collections.defaultdict(int)
seeninodes = set()
for root, _, files in os.walk(os.path.join(builddir, "target")):
for f in files:
fpath = os.path.join(root, f)
if os.path.islink(fpath):
continue
st = os.stat(fpath)
if st.st_ino in seeninodes:
# hard link
continue
else:
seeninodes.add(st.st_ino)
frelpath = os.path.relpath(fpath, os.path.join(builddir, "target"))
if frelpath not in filesdict:
print("WARNING: %s is not part of any package" % frelpath)
pkg = "unknown"
else:
pkg = filesdict[frelpath][0]
pkgsize[pkg] += st.st_size
return pkgsize
#
# Given a dict returned by build_package_size(), this function
# generates a pie chart of the size installed by each package.
#
# pkgsize: dictionary with the name of the package as a key, and the
# size as the value, as returned by build_package_size.
#
# outputf: output file for the graph
#
def draw_graph(pkgsize, outputf):
total = sum(pkgsize.values())
labels = []
values = []
other_value = 0
for (p, sz) in pkgsize.items():
if sz < (total * 0.01):
other_value += sz
else:
labels.append("%s (%d kB)" % (p, sz / 1000.))
values.append(sz)
labels.append("Other (%d kB)" % (other_value / 1000.))
values.append(other_value)
plt.figure()
patches, texts, autotexts = plt.pie(values, labels=labels,
autopct='%1.1f%%', shadow=True,
colors=colors)
# Reduce text size
proptease = fm.FontProperties()
proptease.set_size('xx-small')
plt.setp(autotexts, fontproperties=proptease)
plt.setp(texts, fontproperties=proptease)
plt.suptitle("Filesystem size per package", fontsize=18, y=.97)
plt.title("Total filesystem size: %d kB" % (total / 1000.), fontsize=10, y=.96)
plt.savefig(outputf)
#
# Generate a CSV file with statistics about the size of each file, its
# size contribution to the package and to the overall system.
#
# filesdict: dictionary with the name of the files as key, and as
# value a tuple containing the name of the package to which the file
# belongs, and the size of the file. As returned by
# build_package_dict.
#
# pkgsizes: dictionary with the name of the package as a key, and the
# size as the value, as returned by build_package_size.
#
# outputf: output CSV file
#
def gen_files_csv(filesdict, pkgsizes, outputf):
total = sum(pkgsizes.values())
with open(outputf, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
wr.writerow(["File name",
"Package name",
"File size",
"Package size",
"File size in package (%)",
"File size in system (%)"])
for f, (pkgname, filesize) in filesdict.items():
pkgsize = pkgsizes[pkgname]
if pkgsize == 0:
percent_pkg = 0
else:
percent_pkg = float(filesize) / pkgsize * 100
percent_total = float(filesize) / total * 100
wr.writerow([f, pkgname, filesize, pkgsize,
"%.1f" % percent_pkg,
"%.1f" % percent_total])
#
# Generate a CSV file with statistics about the size of each package,
# and their size contribution to the overall system.
#
# pkgsizes: dictionary with the name of the package as a key, and the
# size as the value, as returned by build_package_size.
#
# outputf: output CSV file
#
def gen_packages_csv(pkgsizes, outputf):
total = sum(pkgsizes.values())
with open(outputf, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
wr.writerow(["Package name", "Package size", "Package size in system (%)"])
for (pkg, size) in pkgsizes.items():
wr.writerow([pkg, size, "%.1f" % (float(size) / total * 100)])
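#
# Command-line interface. Example invocation (paths purely illustrative):
#   size-stats --builddir output --graph graph-size.pdf \
#       --file-size-csv file-size-stats.csv --package-size-csv package-size-stats.csv
#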
parser = argparse.ArgumentParser(description='Draw size statistics graphs')
parser.add_argument("--builddir", '-i', metavar="BUILDDIR", required=True,
help="Buildroot output directory")
parser.add_argument("--graph", '-g', metavar="GRAPH",
help="Graph output file (.pdf or .png extension)")
parser.add_argument("--file-size-csv", '-f', metavar="FILE_SIZE_CSV",
help="CSV output file with file size statistics")
parser.add_argument("--package-size-csv", '-p', metavar="PKG_SIZE_CSV",
help="CSV output file with package size statistics")
args = parser.parse_args()
# Find out which package installed what files
pkgdict = build_package_dict(args.builddir)
# Collect the size installed by each package
pkgsize = build_package_size(pkgdict, args.builddir)
if args.graph:
draw_graph(pkgsize, args.graph)
if args.file_size_csv:
gen_files_csv(pkgdict, pkgsize, args.file_size_csv)
if args.package_size_csv:
gen_packages_csv(pkgsize, args.package_size_csv)

View File

@@ -0,0 +1,127 @@
#!/usr/bin/env python
# Copyright (C) 2016 Thomas De Schampheleire <thomas.de.schampheleire@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# TODO (improvements)
# - support K,M,G size suffixes for threshold
# - output CSV file in addition to stdout reporting
import csv
import argparse
import sys
def read_file_size_csv(inputf, detail=None):
"""Extract package or file sizes from CSV file into size dictionary"""
sizes = {}
reader = csv.reader(inputf)
header = next(reader)
if (header[0] != 'File name' or header[1] != 'Package name' or
header[2] != 'File size' or header[3] != 'Package size'):
print(("Input file %s does not contain the expected header. Are you "
"sure this file corresponds to the file-size-stats.csv "
"file created by 'make graph-size'?") % inputf.name)
sys.exit(1)
for row in reader:
if detail:
sizes[row[0]] = int(row[2])
else:
sizes[row[1]] = int(row[3])
return sizes
def compare_sizes(old, new):
"""Return delta/added/removed dictionaries based on two input size
dictionaries"""
delta = {}
oldkeys = set(old.keys())
newkeys = set(new.keys())
# packages/files in both
for entry in newkeys.intersection(oldkeys):
delta[entry] = ('', new[entry] - old[entry])
# packages/files only in new
for entry in newkeys.difference(oldkeys):
delta[entry] = ('added', new[entry])
# packages/files only in old
for entry in oldkeys.difference(newkeys):
delta[entry] = ('removed', -old[entry])
return delta
def print_results(result, threshold):
"""Print the given result dictionary sorted by size, ignoring any entries
below or equal to threshold"""
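# six.iteritems() returns a dict-items iterator on both Python 2 and 3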
from six import iteritems
list_result = list(iteritems(result))
# result is a dictionary: name -> (flag, size difference)
# list_result is a list of tuples: (name, (flag, size difference))
for entry in sorted(list_result, key=lambda entry: entry[1][1]):
if threshold is not None and abs(entry[1][1]) <= threshold:
continue
print('%12s %7s %s' % (entry[1][1], entry[1][0], entry[0]))
# main #########################################################################
description = """
Compare rootfs size between Buildroot compilations, for example after changing
configuration options or after switching to another Buildroot release.
This script compares the file-size-stats.csv file generated by 'make graph-size'
with the corresponding file from another Buildroot compilation.
The size differences can be reported per package or per file.
Size differences smaller than or equal to a given threshold can be ignored.
"""
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', '--detail', action='store_true',
help='''report differences for individual files rather than
packages''')
parser.add_argument('-t', '--threshold', type=int,
help='''ignore size differences smaller than or equal to this
value (bytes)''')
parser.add_argument('old_file_size_csv', type=argparse.FileType('r'),
metavar='old-file-size-stats.csv',
help="""old CSV file with file and package size statistics,
generated by 'make graph-size'""")
parser.add_argument('new_file_size_csv', type=argparse.FileType('r'),
metavar='new-file-size-stats.csv',
help='new CSV file with file and package size statistics')
args = parser.parse_args()
if args.detail:
keyword = 'file'
else:
keyword = 'package'
old_sizes = read_file_size_csv(args.old_file_size_csv, args.detail)
new_sizes = read_file_size_csv(args.new_file_size_csv, args.detail)
delta = compare_sizes(old_sizes, new_sizes)
print('Size difference per %s (bytes), threshold = %s' % (keyword, args.threshold))
print(80*'-')
print_results(delta, args.threshold)
print(80*'-')
print_results({'TOTAL': ('', sum(new_sizes.values()) - sum(old_sizes.values()))},
threshold=None)

View File

@@ -0,0 +1,195 @@
#!/bin/bash
set -e
TOOLCHAINS_URL='http://autobuild.buildroot.org/toolchains/configs/toolchain-configs.csv'
main() {
local o O opts
local cfg dir pkg random toolchain
local ret nb nb_skip nb_fail
local -a toolchains
o='hc:d:p:r:'
O='help,config-snippet:,build-dir:,package:,random:'
opts="$(getopt -n "${my_name}" -o "${o}" -l "${O}" -- "${@}")"
eval set -- "${opts}"
random=0
while [ ${#} -gt 0 ]; do
case "${1}" in
(-h|--help)
help; exit 0
;;
(-c|--config-snippet)
cfg="${2}"; shift 2
;;
(-d|--build-dir)
dir="${2}"; shift 2
;;
(-p|--package)
pkg="${2}"; shift 2
;;
(-r|--random)
random="${2}"; shift 2
;;
(--)
shift; break
;;
esac
done
if [ -z "${cfg}" ]; then
printf "error: no config snippet specified\n" >&2; exit 1
fi
if [ ! -e "${cfg}" ]; then
printf "error: %s: no such file\n" "${cfg}" >&2; exit 1
fi
if [ -z "${dir}" ]; then
dir="${HOME}/br-test-pkg"
fi
# Extract the URLs of the toolchains; drop internal toolchains
# E.g.: http://server/path/to/name.config,arch,libc
# --> http://server/path/to/name.config
toolchains=($(curl -s "${TOOLCHAINS_URL}" \
|sed -r -e 's/,.*//; /internal/d;' \
|if [ ${random} -gt 0 ]; then \
sort -R |head -n ${random}
else
cat
fi |sort
)
)
if [ ${#toolchains[@]} -eq 0 ]; then
printf "error: no toolchain found (networking issue?)\n" >&2; exit 1
fi
nb=0
nb_skip=0
nb_fail=0
for toolchain in "${toolchains[@]}"; do
build_one "${dir}" "${toolchain}" "${cfg}" "${pkg}" && ret=0 || ret=${?}
case ${ret} in
(0) ;;
(1) : $((nb_skip++));;
(2) : $((nb_fail++));;
esac
: $((nb++))
done
printf "%d builds, %d skipped, %d failed\n" ${nb} ${nb_skip} ${nb_fail}
}
build_one() {
local dir="${1}"
local url="${2}"
local cfg="${3}"
local pkg="${4}"
local toolchain
# Using basename(1) on a URL works nicely
toolchain="$(basename "${url}" .config)"
printf "%40s: " "${toolchain}"
dir="${dir}/${toolchain}"
mkdir -p "${dir}"
if ! curl -s "${url}" >"${dir}/.config"; then
printf "FAILED\n"
return 2
fi
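# Force a minimal configuration (no init, no shell, no busybox, no rootfs
# tarball) so the build boils down to the package under test and its
# dependencies.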
cat >>"${dir}/.config" <<-_EOF_
BR2_INIT_NONE=y
BR2_SYSTEM_BIN_SH_NONE=y
# BR2_PACKAGE_BUSYBOX is not set
# BR2_TARGET_ROOTFS_TAR is not set
_EOF_
cat "${cfg}" >>"${dir}/.config"
if ! make O="${dir}" olddefconfig >/dev/null 2>&1; then
printf "FAILED\n"
return 2
fi
# We want all the options from the snippet to be present as-is (set
# or not set) in the actual .config; if one of them is not, it means
# some dependency from the toolchain or arch is not available, in
# which case this config is untestable and we skip it.
# We don't care about the locale to sort in, as long as both sort are
# done in the same locale.
comm -23 <(sort "${cfg}") <(sort "${dir}/.config") >"${dir}/missing.config"
if [ -s "${dir}/missing.config" ]; then
printf "SKIPPED\n"
return 1
fi
# Remove file, it's empty anyway.
rm -f "${dir}/missing.config"
if [ -n "${pkg}" ]; then
if ! make O="${dir}" "${pkg}-dirclean" >> "${dir}/logfile" 2>&1; then
printf "FAILED\n"
return 2
fi
fi
# shellcheck disable=SC2086
if ! make O="${dir}" ${pkg} >> "${dir}/logfile" 2>&1; then
printf "FAILED\n"
return 2
fi
printf "OK\n"
}
help() {
cat <<_EOF_
test-pkg: test-build a package against various toolchains and architectures
The supplied config snippet is appended to each toolchain config, the
resulting configuration is checked to ensure it still contains all options
specified in the snippet; if any is missing, the build is skipped, on the
assumption that the package under test requires a toolchain or architecture
feature that is missing.
In case failures are noticed, you can fix the package and just re-run the
same command again; it will re-run the test where it failed. If you did
specify a package (with -p), the package build dir will be removed first.
The list of toolchains is retrieved from the Buildroot autobuilders, available
at ${TOOLCHAINS_URL}.
Options:
-h, --help
Print this help.
-c CFG, --config-snippet CFG
Use the CFG file as the source for the config snippet. This file
should contain all the config options required to build a package.
-d DIR, --build-dir DIR
Do the builds in directory DIR, one sub-dir per toolchain.
-p PKG, --package PKG
Test-build the package PKG, by running 'make PKG'; if not specified,
just runs 'make'.
-r N, --random N
Limit the tests to the N randomly selected toolchains, instead of
building with all toolchains.
Example:
Testing libcec would require a config snippet that contains:
BR2_PACKAGE_LIBCEC=y
Testing libcurl with openSSL support would require a snippet such as:
BR2_PACKAGE_OPENSSL=y
BR2_PACKAGE_LIBCURL=y
_EOF_
}
my_name="${0##*/}"
main "${@}"

View File

@@ -0,0 +1,180 @@
#!/usr/bin/python
# This script generates a report on the packaging status of X.org
# releases in Buildroot. It does so by downloading the list of
# tarballs that are part of a given X.org release, and comparing that
# with the packages that are available in Buildroot.
import BeautifulSoup
import re
import os
import urllib
from distutils.version import LooseVersion
# This can be customized
XORG_VERSION = "X11R7.7"
# Key names in dictionaries
XORG_VERSION_KEY = "xorg-version"
BR_VERSION_KEY = "br-version"
BR_NAME_KEY = "br-name"
# Packages part of X.org releases that we do not want to package in
# Buildroot (old drivers for hardware unlikely to be used in embedded
# contexts).
XORG_EXCEPTIONS = [
'xf86-video-suncg6',
'xf86-video-sunffb',
]
# Get the list of tarballs of an X.org release, parse it, and return a
# dictionary of dictionaries, of the form:
#
# { <name_of_package> : { XORG_VERSION_KEY: <version_of_package> },
# <name_of_package2> : { XORG_VERSION_KEY: <version_of_package2> }}
#
def get_xorg_release_pkgs():
u = urllib.URLopener().open("http://www.x.org/releases/%s/src/everything/" % XORG_VERSION)
b = BeautifulSoup.BeautifulSoup()
b.feed(u.read())
links = b.findAll("a")
packages = {}
r = re.compile("(.*)-([0-9\.]*).tar.bz2")
# We now have a list of all links.
for link in links:
href = link.get("href")
# Skip everything but tarballs
if not href.endswith(".tar.bz2"):
continue
# Separate the name and the version
groups = r.match(href)
if not groups:
continue
name = groups.group(1)
version = groups.group(2)
# Skip packages we don't want to hear about
if name in XORG_EXCEPTIONS:
continue
packages[name] = { XORG_VERSION_KEY : version }
return packages
# Files and directories in package/x11r7/ that should be ignored in
# our processing.
BUILDROOT_EXCEPTIONS = [
"mcookie", # Code is directly in package directory
"x11r7.mk",
"Config.in",
"xdriver_xf86-input-tslib", # From Pengutronix, not part of X.org releases
]
# Prefixes of directories in package/x11r7/ that must be stripped
# before trying to match Buildroot package names with X.org tarball
# names.
BUILDROOT_PREFIXES = [
"xapp",
"xdriver",
"xfont",
"xlib",
"xserver",
"xutil",
"xproto",
]
# From a Buildroot package name, try to see if a prefix should be
# stripped from it. For example, passing "xapp_xlsfonts" as argument
# to this function will return "xlsfonts".
def buildroot_strip_prefix(dirname):
for prefix in BUILDROOT_PREFIXES:
if dirname.startswith(prefix + "_"):
return dirname[len(prefix) + 1:]
return dirname
# From a Buildroot package name, parse its .mk file to find the
# Buildroot version of the package by looking at the <foo>_VERSION
# line.
def buildroot_get_version(dirname):
f = open(os.path.join("package", "x11r7", dirname, dirname + ".mk"))
r = re.compile("^([A-Z0-9_]*)_VERSION = ([0-9\.]*)$")
for l in f.readlines():
m = r.match(l)
if m:
return m.group(2)
return None
# Augment the X.org list of packages (given as argument) with details
# about their packaging in Buildroot. Those
# details are found by looking at the contents of package/x11r7/.
def get_buildroot_pkgs(packages):
dirs = os.listdir(os.path.join(os.getcwd(), "package", "x11r7"))
for d in dirs:
# Skip exceptions
if d in BUILDROOT_EXCEPTIONS:
continue
pkgname = buildroot_strip_prefix(d)
version = buildroot_get_version(d)
if pkgname in packages:
# There is a X.org package of the same name, so we just
# add information to the existing dict entry.
packages[pkgname][BR_VERSION_KEY] = version
packages[pkgname][BR_NAME_KEY] = d
else:
# There is no X.org package with this name, so we add a
# new dict entry.
packages[pkgname] = { BR_VERSION_KEY: version,
BR_NAME_KEY : d }
return packages
def show_summary(packages):
FORMAT_STRING = "%40s | %15s | %15s | %-30s"
print FORMAT_STRING % ("Package name", "Vers in BR", "Vers in X.org", "Action")
print FORMAT_STRING % ("-" * 40, "-" * 15, "-" * 15, "-" * 30)
pkgs = sorted(packages.keys())
total_pkgs = 0
upgrade_pkgs = 0
add_pkgs = 0
remove_pkgs = 0
nothing_todo_pkgs = 0
for pkgname in pkgs:
pkg = packages[pkgname]
total_pkgs += 1
if XORG_VERSION_KEY in pkg and BR_VERSION_KEY not in pkg:
xorg_version = pkg[XORG_VERSION_KEY]
br_version = "N/A"
action = "Add to Buildroot"
add_pkgs += 1
elif XORG_VERSION_KEY not in pkg and BR_VERSION_KEY in pkg:
br_version = pkg[BR_VERSION_KEY]
xorg_version = "N/A"
action = "Remove from Buildroot"
remove_pkgs += 1
elif LooseVersion(pkg[XORG_VERSION_KEY]) > LooseVersion(pkg[BR_VERSION_KEY]):
br_version = pkg[BR_VERSION_KEY]
xorg_version = pkg[XORG_VERSION_KEY]
action = "Upgrade"
upgrade_pkgs += 1
elif LooseVersion(pkg[XORG_VERSION_KEY]) < LooseVersion(pkg[BR_VERSION_KEY]):
br_version = pkg[BR_VERSION_KEY]
xorg_version = pkg[XORG_VERSION_KEY]
action = "More recent"
nothing_todo_pkgs += 1
else:
br_version = pkg[BR_VERSION_KEY]
xorg_version = pkg[XORG_VERSION_KEY]
action = ""
nothing_todo_pkgs += 1
print FORMAT_STRING % (pkgname, br_version.center(15), xorg_version.center(15), action)
print FORMAT_STRING % ("-" * 40, "-" * 15, "-" * 15, "-" * 30)
STAT_FORMAT_STRING = "%40s : %3d"
print STAT_FORMAT_STRING % ("Total number of packages", total_pkgs)
print STAT_FORMAT_STRING % ("Packages to upgrade", upgrade_pkgs)
print STAT_FORMAT_STRING % ("Packages to add", add_pkgs)
print STAT_FORMAT_STRING % ("Packages to remove", remove_pkgs)
print STAT_FORMAT_STRING % ("Packages with nothing to do", nothing_todo_pkgs)
packages = get_xorg_release_pkgs()
packages = get_buildroot_pkgs(packages)
# print packages
show_summary(packages)