Upgrade buildroot from 2020.05 to 2021.08.2, upgrade nwipe to 0.32.014

PartialVolume
2021-12-08 17:15:20 +00:00
parent 6fc224af1a
commit 46237109fc
936 changed files with 18 additions and 72065 deletions

View File

@@ -2,4 +2,4 @@
exclude=
# copied from the kernel sources
utils/diffconfig
-max-line-length=80
+max-line-length=132

View File

@@ -1,520 +1,23 @@
# Configuration for Gitlab-CI.
# Builds appear on https://gitlab.com/buildroot.org/buildroot/pipelines
# The .gitlab-ci.yml file is generated from .gitlab-ci.yml.in.
# It needs to be regenerated every time a defconfig is added, using
# "make .gitlab-ci.yml".
image: buildroot/base:20191027.2027
image: buildroot/base:20200814.2228
.check_base:
except:
- /^.*-.*_defconfig$/
- /^.*-tests\..*$/
stages:
- generate-gitlab-ci
- build
check-DEVELOPERS:
extends: .check_base
# get-developers should print just "No action specified"; if it prints
# anything else, it's a parse error.
# The initial ! is removed by YAML so we need to quote it.
script:
- "! utils/get-developers | grep -v 'No action specified'"
generate-gitlab-ci-yml:
stage: generate-gitlab-ci
script: ./support/scripts/generate-gitlab-ci-yml support/misc/gitlab-ci.yml.in > generated-gitlab-ci.yml
artifacts:
paths:
- generated-gitlab-ci.yml
check-flake8:
extends: .check_base
before_script:
# Help flake8 to find the Python files without .py extension.
- find * -type f -name '*.py' > files.txt
- find * -type f -print0 | xargs -0 file | grep 'Python script' | cut -d':' -f1 >> files.txt
- sort -u files.txt | tee files.processed
script:
- python -m flake8 --statistics --count --max-line-length=132 $(cat files.processed)
after_script:
- wc -l files.processed
check-gitlab-ci.yml:
extends: .check_base
script:
- mv .gitlab-ci.yml .gitlab-ci.yml.orig
- make .gitlab-ci.yml
- diff -u .gitlab-ci.yml.orig .gitlab-ci.yml
check-package:
extends: .check_base
script:
- make check-package
.defconfig_base:
script:
- echo "Configure Buildroot for ${DEFCONFIG_NAME}"
- make ${DEFCONFIG_NAME}
- echo 'Build buildroot'
- |
make > >(tee build.log |grep '>>>') 2>&1 || {
echo 'Failed build last output'
tail -200 build.log
exit 1
}
artifacts:
when: always
expire_in: 2 weeks
paths:
- .config
- build.log
- output/images/
- output/build/build-time.log
- output/build/packages-file-list.txt
- output/build/*/.config
.defconfig:
extends: .defconfig_base
# Running the defconfigs for every push is too much, so limit to
# explicit triggers through the API.
only:
- triggers
- tags
- /-defconfigs$/
before_script:
- DEFCONFIG_NAME=${CI_JOB_NAME}
one-defconfig:
extends: .defconfig_base
only:
- /^.*-.*_defconfig$/
before_script:
- DEFCONFIG_NAME=$(echo ${CI_COMMIT_REF_NAME} | sed -e 's,^.*-,,g')
.runtime_test_base:
# Keep build directories so the rootfs can be an artifact of the job. The
# runner will clean up those files for us.
# Multiply every emulator timeout by 10 to avoid sporadic failures in
# elastic runners.
script:
- echo "Starting runtime test ${TEST_CASE_NAME}"
- ./support/testing/run-tests -o test-output/ -d test-dl/ -k --timeout-multiplier 10 ${TEST_CASE_NAME}
artifacts:
when: always
expire_in: 2 weeks
paths:
- test-output/*.log
- test-output/*/.config
- test-output/*/images/*
.runtime_test:
extends: .runtime_test_base
# Running the runtime tests for every push is too much, so limit to
# explicit triggers through the API.
only:
- triggers
- tags
- /-runtime-tests$/
before_script:
- TEST_CASE_NAME=${CI_JOB_NAME}
one-runtime_test:
extends: .runtime_test_base
only:
- /^.*-tests\..*$/
before_script:
- TEST_CASE_NAME=$(echo ${CI_COMMIT_REF_NAME} | sed -e 's,^.*-,,g')
aarch64_efi_defconfig: { extends: .defconfig }
acmesystems_aria_g25_128mb_defconfig: { extends: .defconfig }
acmesystems_aria_g25_256mb_defconfig: { extends: .defconfig }
acmesystems_arietta_g25_128mb_defconfig: { extends: .defconfig }
acmesystems_arietta_g25_256mb_defconfig: { extends: .defconfig }
amarula_a64_relic_defconfig: { extends: .defconfig }
amarula_vyasa_rk3288_defconfig: { extends: .defconfig }
andes_ae3xx_defconfig: { extends: .defconfig }
arcturus_ucls1012a_defconfig: { extends: .defconfig }
arcturus_ucp1020_defconfig: { extends: .defconfig }
arm_foundationv8_defconfig: { extends: .defconfig }
arm_juno_defconfig: { extends: .defconfig }
armadeus_apf27_defconfig: { extends: .defconfig }
armadeus_apf28_defconfig: { extends: .defconfig }
armadeus_apf51_defconfig: { extends: .defconfig }
asus_tinker_rk3288_defconfig: { extends: .defconfig }
at91sam9260eknf_defconfig: { extends: .defconfig }
at91sam9g20dfc_defconfig: { extends: .defconfig }
at91sam9g45m10ek_defconfig: { extends: .defconfig }
at91sam9rlek_defconfig: { extends: .defconfig }
at91sam9x5ek_defconfig: { extends: .defconfig }
at91sam9x5ek_dev_defconfig: { extends: .defconfig }
at91sam9x5ek_mmc_defconfig: { extends: .defconfig }
at91sam9x5ek_mmc_dev_defconfig: { extends: .defconfig }
atmel_sama5d27_som1_ek_mmc_dev_defconfig: { extends: .defconfig }
atmel_sama5d2_xplained_mmc_defconfig: { extends: .defconfig }
atmel_sama5d2_xplained_mmc_dev_defconfig: { extends: .defconfig }
atmel_sama5d3_xplained_defconfig: { extends: .defconfig }
atmel_sama5d3_xplained_dev_defconfig: { extends: .defconfig }
atmel_sama5d3_xplained_mmc_defconfig: { extends: .defconfig }
atmel_sama5d3_xplained_mmc_dev_defconfig: { extends: .defconfig }
atmel_sama5d3xek_defconfig: { extends: .defconfig }
atmel_sama5d4_xplained_defconfig: { extends: .defconfig }
atmel_sama5d4_xplained_dev_defconfig: { extends: .defconfig }
atmel_sama5d4_xplained_mmc_defconfig: { extends: .defconfig }
atmel_sama5d4_xplained_mmc_dev_defconfig: { extends: .defconfig }
bananapi_m1_defconfig: { extends: .defconfig }
bananapi_m2_plus_defconfig: { extends: .defconfig }
bananapi_m2_ultra_defconfig: { extends: .defconfig }
bananapi_m64_defconfig: { extends: .defconfig }
bananapro_defconfig: { extends: .defconfig }
beagleboardx15_defconfig: { extends: .defconfig }
beaglebone_defconfig: { extends: .defconfig }
beaglebone_qt5_defconfig: { extends: .defconfig }
beagleboneai_defconfig: { extends: .defconfig }
beelink_gs1_defconfig: { extends: .defconfig }
chromebook_snow_defconfig: { extends: .defconfig }
ci20_defconfig: { extends: .defconfig }
csky_gx6605s_defconfig: { extends: .defconfig }
cubieboard2_defconfig: { extends: .defconfig }
engicam_imx6qdl_icore_defconfig: { extends: .defconfig }
engicam_imx6qdl_icore_qt5_defconfig: { extends: .defconfig }
engicam_imx6qdl_icore_rqs_defconfig: { extends: .defconfig }
engicam_imx6ul_geam_defconfig: { extends: .defconfig }
engicam_imx6ul_isiot_defconfig: { extends: .defconfig }
freescale_imx28evk_defconfig: { extends: .defconfig }
freescale_imx6dlsabreauto_defconfig: { extends: .defconfig }
freescale_imx6dlsabresd_defconfig: { extends: .defconfig }
freescale_imx6qsabreauto_defconfig: { extends: .defconfig }
freescale_imx6qsabresd_defconfig: { extends: .defconfig }
freescale_imx6sxsabresd_defconfig: { extends: .defconfig }
freescale_imx6ullevk_defconfig: { extends: .defconfig }
freescale_imx7dsabresd_defconfig: { extends: .defconfig }
freescale_imx8mmevk_defconfig: { extends: .defconfig }
freescale_imx8mqevk_defconfig: { extends: .defconfig }
freescale_imx8qmmek_defconfig: { extends: .defconfig }
freescale_imx8qxpmek_defconfig: { extends: .defconfig }
freescale_p1025twr_defconfig: { extends: .defconfig }
freescale_t1040d4rdb_defconfig: { extends: .defconfig }
freescale_t2080_qds_rdb_defconfig: { extends: .defconfig }
friendlyarm_nanopi_a64_defconfig: { extends: .defconfig }
friendlyarm_nanopi_neo2_defconfig: { extends: .defconfig }
friendlyarm_nanopi_neo_plus2_defconfig: { extends: .defconfig }
galileo_defconfig: { extends: .defconfig }
grinn_chiliboard_defconfig: { extends: .defconfig }
grinn_liteboard_defconfig: { extends: .defconfig }
hifive_unleashed_defconfig: { extends: .defconfig }
imx23evk_defconfig: { extends: .defconfig }
imx6-sabreauto_defconfig: { extends: .defconfig }
imx6-sabresd_defconfig: { extends: .defconfig }
imx6-sabresd_qt5_defconfig: { extends: .defconfig }
imx6slevk_defconfig: { extends: .defconfig }
imx6sx-sdb_defconfig: { extends: .defconfig }
imx6ulevk_defconfig: { extends: .defconfig }
imx6ulpico_defconfig: { extends: .defconfig }
imx7d-sdb_defconfig: { extends: .defconfig }
imx7dpico_defconfig: { extends: .defconfig }
imx8mmpico_defconfig: { extends: .defconfig }
imx8mpico_defconfig: { extends: .defconfig }
lafrite_defconfig: { extends: .defconfig }
lego_ev3_defconfig: { extends: .defconfig }
licheepi_zero_defconfig: { extends: .defconfig }
linksprite_pcduino_defconfig: { extends: .defconfig }
minnowboard_max-graphical_defconfig: { extends: .defconfig }
minnowboard_max_defconfig: { extends: .defconfig }
mx25pdk_defconfig: { extends: .defconfig }
mx51evk_defconfig: { extends: .defconfig }
mx53loco_defconfig: { extends: .defconfig }
mx6cubox_defconfig: { extends: .defconfig }
mx6sx_udoo_neo_defconfig: { extends: .defconfig }
mx6udoo_defconfig: { extends: .defconfig }
nanopi_m1_defconfig: { extends: .defconfig }
nanopi_m1_plus_defconfig: { extends: .defconfig }
nanopi_neo_defconfig: { extends: .defconfig }
nexbox_a95x_defconfig: { extends: .defconfig }
nitrogen6sx_defconfig: { extends: .defconfig }
nitrogen6x_defconfig: { extends: .defconfig }
nitrogen7_defconfig: { extends: .defconfig }
nitrogen8m_defconfig: { extends: .defconfig }
nitrogen8mm_defconfig: { extends: .defconfig }
odroidxu4_defconfig: { extends: .defconfig }
olimex_a10_olinuxino_lime_defconfig: { extends: .defconfig }
olimex_a13_olinuxino_defconfig: { extends: .defconfig }
olimex_a20_olinuxino_lime2_defconfig: { extends: .defconfig }
olimex_a20_olinuxino_lime_defconfig: { extends: .defconfig }
olimex_a20_olinuxino_micro_defconfig: { extends: .defconfig }
olimex_a33_olinuxino_defconfig: { extends: .defconfig }
olimex_a64_olinuxino_defconfig: { extends: .defconfig }
olimex_imx233_olinuxino_defconfig: { extends: .defconfig }
openblocks_a6_defconfig: { extends: .defconfig }
orangepi_lite2_defconfig: { extends: .defconfig }
orangepi_lite_defconfig: { extends: .defconfig }
orangepi_one_defconfig: { extends: .defconfig }
orangepi_one_plus_defconfig: { extends: .defconfig }
orangepi_pc2_defconfig: { extends: .defconfig }
orangepi_pc_defconfig: { extends: .defconfig }
orangepi_pc_plus_defconfig: { extends: .defconfig }
orangepi_plus_defconfig: { extends: .defconfig }
orangepi_prime_defconfig: { extends: .defconfig }
orangepi_r1_defconfig: { extends: .defconfig }
orangepi_win_defconfig: { extends: .defconfig }
orangepi_zero_defconfig: { extends: .defconfig }
orangepi_zero_plus2_defconfig: { extends: .defconfig }
pandaboard_defconfig: { extends: .defconfig }
pc_x86_64_bios_defconfig: { extends: .defconfig }
pc_x86_64_efi_defconfig: { extends: .defconfig }
pine64_defconfig: { extends: .defconfig }
pine64_sopine_defconfig: { extends: .defconfig }
qemu_aarch64_virt_defconfig: { extends: .defconfig }
qemu_arm_versatile_defconfig: { extends: .defconfig }
qemu_arm_versatile_nommu_defconfig: { extends: .defconfig }
qemu_arm_vexpress_defconfig: { extends: .defconfig }
qemu_arm_vexpress_tz_defconfig: { extends: .defconfig }
qemu_csky610_virt_defconfig: { extends: .defconfig }
qemu_csky807_virt_defconfig: { extends: .defconfig }
qemu_csky810_virt_defconfig: { extends: .defconfig }
qemu_csky860_virt_defconfig: { extends: .defconfig }
qemu_m68k_mcf5208_defconfig: { extends: .defconfig }
qemu_m68k_q800_defconfig: { extends: .defconfig }
qemu_microblazebe_mmu_defconfig: { extends: .defconfig }
qemu_microblazeel_mmu_defconfig: { extends: .defconfig }
qemu_mips32r2_malta_defconfig: { extends: .defconfig }
qemu_mips32r2el_malta_defconfig: { extends: .defconfig }
qemu_mips32r6_malta_defconfig: { extends: .defconfig }
qemu_mips32r6el_malta_defconfig: { extends: .defconfig }
qemu_mips64_malta_defconfig: { extends: .defconfig }
qemu_mips64el_malta_defconfig: { extends: .defconfig }
qemu_mips64r6_malta_defconfig: { extends: .defconfig }
qemu_mips64r6el_malta_defconfig: { extends: .defconfig }
qemu_nios2_10m50_defconfig: { extends: .defconfig }
qemu_or1k_defconfig: { extends: .defconfig }
qemu_ppc64_e5500_defconfig: { extends: .defconfig }
qemu_ppc64_pseries_defconfig: { extends: .defconfig }
qemu_ppc64le_pseries_defconfig: { extends: .defconfig }
qemu_ppc_g3beige_defconfig: { extends: .defconfig }
qemu_ppc_mac99_defconfig: { extends: .defconfig }
qemu_ppc_mpc8544ds_defconfig: { extends: .defconfig }
qemu_ppc_virtex_ml507_defconfig: { extends: .defconfig }
qemu_riscv32_virt_defconfig: { extends: .defconfig }
qemu_riscv64_virt_defconfig: { extends: .defconfig }
qemu_sh4_r2d_defconfig: { extends: .defconfig }
qemu_sh4eb_r2d_defconfig: { extends: .defconfig }
qemu_sparc64_sun4u_defconfig: { extends: .defconfig }
qemu_sparc_ss10_defconfig: { extends: .defconfig }
qemu_x86_64_defconfig: { extends: .defconfig }
qemu_x86_defconfig: { extends: .defconfig }
qemu_xtensa_lx60_defconfig: { extends: .defconfig }
qemu_xtensa_lx60_nommu_defconfig: { extends: .defconfig }
raspberrypi0_defconfig: { extends: .defconfig }
raspberrypi0w_defconfig: { extends: .defconfig }
raspberrypi2_defconfig: { extends: .defconfig }
raspberrypi3_64_defconfig: { extends: .defconfig }
raspberrypi3_defconfig: { extends: .defconfig }
raspberrypi3_qt5we_defconfig: { extends: .defconfig }
raspberrypi4_64_defconfig: { extends: .defconfig }
raspberrypi4_defconfig: { extends: .defconfig }
raspberrypi_defconfig: { extends: .defconfig }
riotboard_defconfig: { extends: .defconfig }
roc_pc_rk3399_defconfig: { extends: .defconfig }
rock64_defconfig: { extends: .defconfig }
roseapplepi_defconfig: { extends: .defconfig }
s6lx9_microboard_defconfig: { extends: .defconfig }
sheevaplug_defconfig: { extends: .defconfig }
snps_aarch64_vdk_defconfig: { extends: .defconfig }
snps_arc700_axs101_defconfig: { extends: .defconfig }
snps_archs38_axs103_defconfig: { extends: .defconfig }
snps_archs38_haps_defconfig: { extends: .defconfig }
snps_archs38_hsdk_defconfig: { extends: .defconfig }
snps_archs38_vdk_defconfig: { extends: .defconfig }
socrates_cyclone5_defconfig: { extends: .defconfig }
solidrun_clearfog_defconfig: { extends: .defconfig }
solidrun_clearfog_gt_8k_defconfig: { extends: .defconfig }
solidrun_macchiatobin_mainline_defconfig: { extends: .defconfig }
solidrun_macchiatobin_marvell_defconfig: { extends: .defconfig }
stm32f429_disco_defconfig: { extends: .defconfig }
stm32f469_disco_defconfig: { extends: .defconfig }
stm32mp157a_dk1_defconfig: { extends: .defconfig }
stm32mp157c_dk2_defconfig: { extends: .defconfig }
toradex_apalis_imx6_defconfig: { extends: .defconfig }
ts4900_defconfig: { extends: .defconfig }
ts5500_defconfig: { extends: .defconfig }
ts7680_defconfig: { extends: .defconfig }
wandboard_defconfig: { extends: .defconfig }
warp7_defconfig: { extends: .defconfig }
warpboard_defconfig: { extends: .defconfig }
zynq_microzed_defconfig: { extends: .defconfig }
zynq_zc706_defconfig: { extends: .defconfig }
zynq_zed_defconfig: { extends: .defconfig }
zynqmp_zcu106_defconfig: { extends: .defconfig }
tests.boot.test_atf.TestATFAllwinner: { extends: .runtime_test }
tests.boot.test_atf.TestATFMarvell: { extends: .runtime_test }
tests.boot.test_atf.TestATFVexpress: { extends: .runtime_test }
tests.boot.test_syslinux.TestSysLinuxX86EFI: { extends: .runtime_test }
tests.boot.test_syslinux.TestSysLinuxX86LegacyBios: { extends: .runtime_test }
tests.boot.test_syslinux.TestSysLinuxX86_64EFI: { extends: .runtime_test }
tests.boot.test_syslinux.TestSysLinuxX86_64LegacyBios: { extends: .runtime_test }
tests.core.test_file_capabilities.TestFileCapabilities: { extends: .runtime_test }
tests.core.test_hardening.TestFortifyConserv: { extends: .runtime_test }
tests.core.test_hardening.TestFortifyNone: { extends: .runtime_test }
tests.core.test_hardening.TestRelro: { extends: .runtime_test }
tests.core.test_hardening.TestRelroPartial: { extends: .runtime_test }
tests.core.test_hardening.TestSspNone: { extends: .runtime_test }
tests.core.test_hardening.TestSspStrong: { extends: .runtime_test }
tests.core.test_post_scripts.TestPostScripts: { extends: .runtime_test }
tests.core.test_root_password.TestRootPassword: { extends: .runtime_test }
tests.core.test_rootfs_overlay.TestRootfsOverlay: { extends: .runtime_test }
tests.core.test_timezone.TestGlibcAllTimezone: { extends: .runtime_test }
tests.core.test_timezone.TestGlibcNonDefaultLimitedTimezone: { extends: .runtime_test }
tests.core.test_timezone.TestNoTimezone: { extends: .runtime_test }
tests.download.test_git.TestGitHash: { extends: .runtime_test }
tests.download.test_git.TestGitRefs: { extends: .runtime_test }
tests.fs.test_ext.TestExt2: { extends: .runtime_test }
tests.fs.test_ext.TestExt2r1: { extends: .runtime_test }
tests.fs.test_ext.TestExt3: { extends: .runtime_test }
tests.fs.test_ext.TestExt4: { extends: .runtime_test }
tests.fs.test_f2fs.TestF2FS: { extends: .runtime_test }
tests.fs.test_iso9660.TestIso9660Grub2External: { extends: .runtime_test }
tests.fs.test_iso9660.TestIso9660Grub2ExternalCompress: { extends: .runtime_test }
tests.fs.test_iso9660.TestIso9660Grub2Internal: { extends: .runtime_test }
tests.fs.test_iso9660.TestIso9660SyslinuxExternal: { extends: .runtime_test }
tests.fs.test_iso9660.TestIso9660SyslinuxExternalCompress: { extends: .runtime_test }
tests.fs.test_iso9660.TestIso9660SyslinuxInternal: { extends: .runtime_test }
tests.fs.test_jffs2.TestJffs2: { extends: .runtime_test }
tests.fs.test_squashfs.TestSquashfs: { extends: .runtime_test }
tests.fs.test_ubi.TestUbi: { extends: .runtime_test }
tests.fs.test_yaffs2.TestYaffs2: { extends: .runtime_test }
tests.init.test_busybox.TestInitSystemBusyboxRo: { extends: .runtime_test }
tests.init.test_busybox.TestInitSystemBusyboxRoNet: { extends: .runtime_test }
tests.init.test_busybox.TestInitSystemBusyboxRw: { extends: .runtime_test }
tests.init.test_busybox.TestInitSystemBusyboxRwNet: { extends: .runtime_test }
tests.init.test_none.TestInitSystemNone: { extends: .runtime_test }
tests.init.test_openrc.TestInitSystemOpenrcRoFull: { extends: .runtime_test }
tests.init.test_openrc.TestInitSystemOpenrcRwFull: { extends: .runtime_test }
tests.init.test_systemd.TestInitSystemSystemdRoFull: { extends: .runtime_test }
tests.init.test_systemd.TestInitSystemSystemdRoIfupdown: { extends: .runtime_test }
tests.init.test_systemd.TestInitSystemSystemdRoNetworkd: { extends: .runtime_test }
tests.init.test_systemd.TestInitSystemSystemdRwFull: { extends: .runtime_test }
tests.init.test_systemd.TestInitSystemSystemdRwIfupdown: { extends: .runtime_test }
tests.init.test_systemd.TestInitSystemSystemdRwNetworkd: { extends: .runtime_test }
tests.package.test_atop.TestAtop: { extends: .runtime_test }
tests.package.test_crudini.TestCrudiniPy2: { extends: .runtime_test }
tests.package.test_crudini.TestCrudiniPy3: { extends: .runtime_test }
tests.package.test_docker_compose.TestDockerCompose: { extends: .runtime_test }
tests.package.test_dropbear.TestDropbear: { extends: .runtime_test }
tests.package.test_glxinfo.TestGlxinfo: { extends: .runtime_test }
tests.package.test_gst1_python.TestGst1Python: { extends: .runtime_test }
tests.package.test_ipython.TestIPythonPy3: { extends: .runtime_test }
tests.package.test_libftdi1.TestPythonPy2Libftdi1: { extends: .runtime_test }
tests.package.test_libftdi1.TestPythonPy3Libftdi1: { extends: .runtime_test }
tests.package.test_lpeg.TestLuaLPeg: { extends: .runtime_test }
tests.package.test_lpeg.TestLuajitLPeg: { extends: .runtime_test }
tests.package.test_lsqlite3.TestLuaLsqlite3: { extends: .runtime_test }
tests.package.test_lsqlite3.TestLuajitLsqlite3: { extends: .runtime_test }
tests.package.test_lua.TestLua: { extends: .runtime_test }
tests.package.test_lua.TestLuajit: { extends: .runtime_test }
tests.package.test_lua_cqueues.TestLuaLuaCqueues: { extends: .runtime_test }
tests.package.test_lua_cqueues.TestLuajitLuaCqueues: { extends: .runtime_test }
tests.package.test_lua_curl.TestLuaLuacURL: { extends: .runtime_test }
tests.package.test_lua_curl.TestLuajitLuacURL: { extends: .runtime_test }
tests.package.test_lua_gd.TestLuaLuaGD: { extends: .runtime_test }
tests.package.test_lua_gd.TestLuajitLuaGD: { extends: .runtime_test }
tests.package.test_lua_http.TestLuaHttp: { extends: .runtime_test }
tests.package.test_lua_http.TestLuajitHttp: { extends: .runtime_test }
tests.package.test_lua_lyaml.TestLuaLuaLyaml: { extends: .runtime_test }
tests.package.test_lua_lyaml.TestLuajitLuaLyaml: { extends: .runtime_test }
tests.package.test_lua_sdl2.TestLuaLuaSDL2: { extends: .runtime_test }
tests.package.test_lua_sdl2.TestLuajitLuaSDL2: { extends: .runtime_test }
tests.package.test_lua_utf8.TestLuaUtf8: { extends: .runtime_test }
tests.package.test_lua_utf8.TestLuajitUtf8: { extends: .runtime_test }
tests.package.test_luaexpat.TestLuaLuaExpat: { extends: .runtime_test }
tests.package.test_luaexpat.TestLuajitLuaExpat: { extends: .runtime_test }
tests.package.test_luafilesystem.TestLuaLuaFileSystem: { extends: .runtime_test }
tests.package.test_luafilesystem.TestLuajitLuaFileSystem: { extends: .runtime_test }
tests.package.test_luaossl.TestLuaLuaossl: { extends: .runtime_test }
tests.package.test_luaossl.TestLuajitLuaossl: { extends: .runtime_test }
tests.package.test_luaposix.TestLuaLuaPosix: { extends: .runtime_test }
tests.package.test_luaposix.TestLuajitLuaPosix: { extends: .runtime_test }
tests.package.test_luasec.TestLuaLuaSec: { extends: .runtime_test }
tests.package.test_luasec.TestLuajitLuaSec: { extends: .runtime_test }
tests.package.test_luasocket.TestLuaLuaSocket: { extends: .runtime_test }
tests.package.test_luasocket.TestLuajitLuaSocket: { extends: .runtime_test }
tests.package.test_luasyslog.TestLuaLuasyslog: { extends: .runtime_test }
tests.package.test_luasyslog.TestLuajitLuasyslog: { extends: .runtime_test }
tests.package.test_luvi.TestLuvi: { extends: .runtime_test }
tests.package.test_lxc.TestLxc: { extends: .runtime_test }
tests.package.test_lzlib.TestLuaLzlib: { extends: .runtime_test }
tests.package.test_netdata.TestNetdata: { extends: .runtime_test }
tests.package.test_openjdk.TestOpenJdk: { extends: .runtime_test }
tests.package.test_opkg.TestOpkg: { extends: .runtime_test }
tests.package.test_perl.TestPerl: { extends: .runtime_test }
tests.package.test_perl_class_load.TestPerlClassLoad: { extends: .runtime_test }
tests.package.test_perl_dbd_mysql.TestPerlDBDmysql: { extends: .runtime_test }
tests.package.test_perl_encode_detect.TestPerlEncodeDetect: { extends: .runtime_test }
tests.package.test_perl_gdgraph.TestPerlGDGraph: { extends: .runtime_test }
tests.package.test_perl_html_parser.TestPerlHTMLParser: { extends: .runtime_test }
tests.package.test_perl_io_socket_multicast.TestPerlIOSocketMulticast: { extends: .runtime_test }
tests.package.test_perl_io_socket_ssl.TestPerlIOSocketSSL: { extends: .runtime_test }
tests.package.test_perl_libwww_perl.TestPerllibwwwperl: { extends: .runtime_test }
tests.package.test_perl_lwp_protocol_https.TestPerlLWPProtocolhttps: { extends: .runtime_test }
tests.package.test_perl_mail_dkim.TestPerlMailDKIM: { extends: .runtime_test }
tests.package.test_perl_x10.TestPerlX10: { extends: .runtime_test }
tests.package.test_perl_xml_libxml.TestPerlXMLLibXML: { extends: .runtime_test }
tests.package.test_prosody.TestProsodyLua51: { extends: .runtime_test }
tests.package.test_prosody.TestProsodyLuajit: { extends: .runtime_test }
tests.package.test_python.TestPython2: { extends: .runtime_test }
tests.package.test_python.TestPython3: { extends: .runtime_test }
tests.package.test_python_argh.TestPythonPy2Argh: { extends: .runtime_test }
tests.package.test_python_argh.TestPythonPy3Argh: { extends: .runtime_test }
tests.package.test_python_attrs.TestPythonPy2Attrs: { extends: .runtime_test }
tests.package.test_python_attrs.TestPythonPy3Attrs: { extends: .runtime_test }
tests.package.test_python_autobahn.TestPythonPy3Autobahn: { extends: .runtime_test }
tests.package.test_python_automat.TestPythonPy2Automat: { extends: .runtime_test }
tests.package.test_python_automat.TestPythonPy3Automat: { extends: .runtime_test }
tests.package.test_python_avro.TestPythonAvro: { extends: .runtime_test }
tests.package.test_python_bitstring.TestPythonPy2Bitstring: { extends: .runtime_test }
tests.package.test_python_bitstring.TestPythonPy3Bitstring: { extends: .runtime_test }
tests.package.test_python_can.TestPythonPy2Can: { extends: .runtime_test }
tests.package.test_python_can.TestPythonPy3Can: { extends: .runtime_test }
tests.package.test_python_cbor.TestPythonPy2Cbor: { extends: .runtime_test }
tests.package.test_python_cbor.TestPythonPy3Cbor: { extends: .runtime_test }
tests.package.test_python_click.TestPythonPy2Click: { extends: .runtime_test }
tests.package.test_python_click.TestPythonPy3Click: { extends: .runtime_test }
tests.package.test_python_constantly.TestPythonPy2Constantly: { extends: .runtime_test }
tests.package.test_python_constantly.TestPythonPy3Constantly: { extends: .runtime_test }
tests.package.test_python_crossbar.TestPythonPy3Crossbar: { extends: .runtime_test }
tests.package.test_python_cryptography.TestPythonPy2Cryptography: { extends: .runtime_test }
tests.package.test_python_cryptography.TestPythonPy3Cryptography: { extends: .runtime_test }
tests.package.test_python_django.TestPythonPy3Django: { extends: .runtime_test }
tests.package.test_python_gitdb2.TestPythonPy2Gitdb2: { extends: .runtime_test }
tests.package.test_python_gitdb2.TestPythonPy3Gitdb2: { extends: .runtime_test }
tests.package.test_python_gobject.TestPythonPy3Gobject: { extends: .runtime_test }
tests.package.test_python_incremental.TestPythonPy2Incremental: { extends: .runtime_test }
tests.package.test_python_incremental.TestPythonPy3Incremental: { extends: .runtime_test }
tests.package.test_python_passlib.TestPythonPy2Passlib: { extends: .runtime_test }
tests.package.test_python_passlib.TestPythonPy3Passlib: { extends: .runtime_test }
tests.package.test_python_pexpect.TestPythonPy2Pexpect: { extends: .runtime_test }
tests.package.test_python_pexpect.TestPythonPy3Pexpect: { extends: .runtime_test }
tests.package.test_python_pynacl.TestPythonPy2Pynacl: { extends: .runtime_test }
tests.package.test_python_pynacl.TestPythonPy3Pynacl: { extends: .runtime_test }
tests.package.test_python_pyyaml.TestPythonPy2Pyyaml: { extends: .runtime_test }
tests.package.test_python_pyyaml.TestPythonPy3Pyyaml: { extends: .runtime_test }
tests.package.test_python_service_identity.TestPythonPy2ServiceIdentity: { extends: .runtime_test }
tests.package.test_python_service_identity.TestPythonPy3ServiceIdentity: { extends: .runtime_test }
tests.package.test_python_smmap2.TestPythonPy2Smmap2: { extends: .runtime_test }
tests.package.test_python_smmap2.TestPythonPy3Smmap2: { extends: .runtime_test }
tests.package.test_python_subprocess32.TestPythonPy2Subprocess32: { extends: .runtime_test }
tests.package.test_python_treq.TestPythonPy2Treq: { extends: .runtime_test }
tests.package.test_python_treq.TestPythonPy3Treq: { extends: .runtime_test }
tests.package.test_python_twisted.TestPythonPy2Twisted: { extends: .runtime_test }
tests.package.test_python_twisted.TestPythonPy3Twisted: { extends: .runtime_test }
tests.package.test_python_txaio.TestPythonPy2Txaio: { extends: .runtime_test }
tests.package.test_python_txaio.TestPythonPy3Txaio: { extends: .runtime_test }
tests.package.test_python_txtorcon.TestPythonPy2Txtorcon: { extends: .runtime_test }
tests.package.test_python_txtorcon.TestPythonPy3Txtorcon: { extends: .runtime_test }
tests.package.test_python_ubjson.TestPythonPy2Ubjson: { extends: .runtime_test }
tests.package.test_python_ubjson.TestPythonPy3Ubjson: { extends: .runtime_test }
tests.package.test_rings.TestLuaRings: { extends: .runtime_test }
tests.package.test_rings.TestLuajitRings: { extends: .runtime_test }
tests.package.test_rust.TestRust: { extends: .runtime_test }
tests.package.test_rust.TestRustBin: { extends: .runtime_test }
tests.package.test_syslog_ng.TestSyslogNg: { extends: .runtime_test }
tests.package.test_tmux.TestTmux: { extends: .runtime_test }
tests.package.test_turbolua.TestLuajitTurbolua: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainBuildrootMusl: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainBuildrootuClibc: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainCCache: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainCtngMusl: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainLinaroArm: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainSourceryArmv4: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainSourceryArmv5: { extends: .runtime_test }
tests.toolchain.test_external.TestExternalToolchainSourceryArmv7: { extends: .runtime_test }
tests.utils.test_check_package.TestCheckPackage: { extends: .runtime_test }
buildroot-pipeline:
stage: build
trigger:
include:
- artifact: generated-gitlab-ci.yml
job: generate-gitlab-ci-yml
strategy: depend
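
For reference, the .defconfig_base build step above relies on bash process substitution: tee writes the full build output to build.log while only the top-level stage markers (lines containing '>>>') reach the CI console, and the || branch dumps the tail of the log on failure. A minimal standalone sketch of the same idiom, using a hypothetical long_build command:

long_build() { for i in 1 2 3; do echo ">>> step $i"; echo "verbose detail for step $i"; done; }
long_build > >(tee build.log | grep '>>>') 2>&1 || {
    echo 'Failed build, last output:'
    tail -200 build.log
    exit 1
}
# build.log contains every line; the console only shows the ">>> step N" markers.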

View File

@@ -1,32 +0,0 @@
image boot.vfat {
vfat {
files = {
"MLO",
"u-boot.img",
"zImage",
"uEnv.txt",
"am335x-evm.dtb",
"am335x-evmsk.dtb",
"am335x-bone.dtb",
"am335x-boneblack.dtb",
}
}
size = 16M
}
image sdcard.img {
hdimage {
}
partition u-boot {
partition-type = 0xC
bootable = "true"
image = "boot.vfat"
}
partition rootfs {
partition-type = 0x83
image = "rootfs.ext4"
size = 512M
}
}
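
For reference, Buildroot feeds a genimage.cfg like the one above to the genimage tool, normally through support/scripts/genimage.sh run as a post-image script. A hedged sketch of invoking genimage by hand against an existing output directory; the config path is a guess based on the am335x device trees listed, and the other paths are illustrative:

genimage --config board/beaglebone/genimage.cfg \
         --rootpath output/target \
         --inputpath output/images \
         --outputpath output/images \
         --tmppath /tmp/genimage.tmp

genimage first assembles boot.vfat from the listed files, then stitches boot.vfat and rootfs.ext4 into sdcard.img according to the partition table above.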

View File

@@ -1,39 +0,0 @@
From 523ab5be1a84e9aa15fb62c3a15a6338b01d3961 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= <peron.clem@gmail.com>
Date: Tue, 9 Apr 2019 00:15:06 +0200
Subject: [PATCH] plat: allwinner: common: use r_wdog instead of wdog
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Some Allwinner H6 boards have a broken watchdog that doesn't
make the SoC reboot.
Use the R_WATCHDOG instead.
Signed-off-by: Clément Péron <peron.clem@gmail.com>
Change-Id: Ie95cc30a80ed517b60b30d6bc2e655a1b53f18ba
---
plat/allwinner/common/sunxi_pm.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/plat/allwinner/common/sunxi_pm.c b/plat/allwinner/common/sunxi_pm.c
index 1d2dc938..13e13532 100644
--- a/plat/allwinner/common/sunxi_pm.c
+++ b/plat/allwinner/common/sunxi_pm.c
@@ -20,9 +20,9 @@
#include <sunxi_mmap.h>
#include <sunxi_private.h>
-#define SUNXI_WDOG0_CTRL_REG (SUNXI_WDOG_BASE + 0x0010)
-#define SUNXI_WDOG0_CFG_REG (SUNXI_WDOG_BASE + 0x0014)
-#define SUNXI_WDOG0_MODE_REG (SUNXI_WDOG_BASE + 0x0018)
+#define SUNXI_WDOG0_CTRL_REG (SUNXI_R_WDOG_BASE + 0x0010)
+#define SUNXI_WDOG0_CFG_REG (SUNXI_R_WDOG_BASE + 0x0014)
+#define SUNXI_WDOG0_MODE_REG (SUNXI_R_WDOG_BASE + 0x0018)
#define mpidr_is_valid(mpidr) ( \
MPIDR_AFFLVL3_VAL(mpidr) == 0 && \
--
2.20.1

View File

@@ -1,279 +0,0 @@
From 99cade8743158889b3e8db93c003b3318ebd4bda Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= <peron.clem@gmail.com>
Date: Sun, 11 Aug 2019 22:38:57 +0200
Subject: [PATCH] arm: dts: sync dts for Allwinner H6
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Sync Kernel DTS for Allwinner H6 boards.
Drop /omit-if-no-ref/ keyword as it's not supported by U-boot.
commit <d45331b00ddb> Linux 5.3-rc4
Signed-off-by: Clément Péron <peron.clem@gmail.com>
---
arch/arm/dts/sun50i-h6-beelink-gs1.dts | 76 ++++++++++++++++++++++++++
arch/arm/dts/sun50i-h6-pine-h64.dts | 12 ++++
arch/arm/dts/sun50i-h6.dtsi | 46 +++++++++++++++-
3 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/arch/arm/dts/sun50i-h6-beelink-gs1.dts b/arch/arm/dts/sun50i-h6-beelink-gs1.dts
index 54b0882bed..0dc33c90dd 100644
--- a/arch/arm/dts/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm/dts/sun50i-h6-beelink-gs1.dts
@@ -14,6 +14,7 @@
compatible = "azw,beelink-gs1", "allwinner,sun50i-h6";
aliases {
+ ethernet0 = &emac;
serial0 = &uart0;
};
@@ -21,6 +22,17 @@
stdout-path = "serial0:115200n8";
};
+ connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
@@ -41,6 +53,40 @@
};
};
+&de {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ext_rgmii_pins>;
+ phy-mode = "rgmii";
+ phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_aldo2>;
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&mdio {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
&mmc0 {
vmmc-supply = <&reg_cldo1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
@@ -57,6 +103,15 @@
status = "okay";
};
+&ohci0 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pd-supply = <&reg_cldo1>;
+ vcc-pg-supply = <&reg_aldo1>;
+};
+
&r_i2c {
status = "okay";
@@ -177,8 +232,29 @@
};
};
+&r_pio {
+ /*
+ * PL0 and PL1 are used for PMIC I2C
+ * don't enable the pl-supply else
+ * it will fail at boot
+ *
+ * vcc-pl-supply = <&reg_aldo1>;
+ */
+ vcc-pm-supply = <&reg_aldo1>;
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_ph_pins>;
status = "okay";
};
+
+&usb2otg {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb2phy {
+ usb0_vbus-supply = <&reg_vcc5v>;
+ status = "okay";
+};
diff --git a/arch/arm/dts/sun50i-h6-pine-h64.dts b/arch/arm/dts/sun50i-h6-pine-h64.dts
index 4802902e12..1898345183 100644
--- a/arch/arm/dts/sun50i-h6-pine-h64.dts
+++ b/arch/arm/dts/sun50i-h6-pine-h64.dts
@@ -127,6 +127,12 @@
status = "okay";
};
+&pio {
+ vcc-pc-supply = <&reg_bldo2>;
+ vcc-pd-supply = <&reg_cldo1>;
+ vcc-pg-supply = <&reg_aldo1>;
+};
+
&r_i2c {
status = "okay";
@@ -243,10 +249,16 @@
pcf8563: rtc@51 {
compatible = "nxp,pcf8563";
reg = <0x51>;
+ interrupt-parent = <&r_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <0>;
};
};
+&r_pio {
+ vcc-pm-supply = <&reg_aldo1>;
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_ph_pins>;
diff --git a/arch/arm/dts/sun50i-h6.dtsi b/arch/arm/dts/sun50i-h6.dtsi
index e0dc4a05c1..a117f479ae 100644
--- a/arch/arm/dts/sun50i-h6.dtsi
+++ b/arch/arm/dts/sun50i-h6.dtsi
@@ -101,7 +101,7 @@
#size-cells = <1>;
ranges;
- display-engine@1000000 {
+ bus@1000000 {
compatible = "allwinner,sun50i-h6-de3",
"allwinner,sun50i-a64-de2";
reg = <0x1000000 0x400000>;
@@ -203,11 +203,32 @@
#reset-cells = <1>;
};
+ dma: dma-controller@3002000 {
+ compatible = "allwinner,sun50i-h6-dma";
+ reg = <0x03002000 0x1000>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DMA>, <&ccu CLK_MBUS_DMA>;
+ clock-names = "bus", "mbus";
+ dma-channels = <16>;
+ dma-requests = <46>;
+ resets = <&ccu RST_BUS_DMA>;
+ #dma-cells = <1>;
+ };
+
sid: sid@3006000 {
compatible = "allwinner,sun50i-h6-sid";
reg = <0x03006000 0x400>;
};
+ watchdog: watchdog@30090a0 {
+ compatible = "allwinner,sun50i-h6-wdt",
+ "allwinner,sun6i-a31-wdt";
+ reg = <0x030090a0 0x20>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ /* Broken on some H6 boards */
+ status = "disabled";
+ };
+
pio: pinctrl@300b000 {
compatible = "allwinner,sun50i-h6-pinctrl";
reg = <0x0300b000 0x400>;
@@ -243,6 +264,18 @@
bias-pull-up;
};
+ /*
+ * /omit-if-no-ref/ isn't supported by U-boot
+ * keep this comment to avoid bad sync with Linux
+ */
+ mmc1_pins: mmc1-pins {
+ pins = "PG0", "PG1", "PG2", "PG3",
+ "PG4", "PG5";
+ function = "mmc1";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
mmc2_pins: mmc2-pins {
pins = "PC1", "PC4", "PC5", "PC6",
"PC7", "PC8", "PC9", "PC10",
@@ -294,6 +327,8 @@
resets = <&ccu RST_BUS_MMC1>;
reset-names = "ahb";
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -445,7 +480,6 @@
resets = <&ccu RST_BUS_OHCI3>,
<&ccu RST_BUS_EHCI3>;
phys = <&usb2phy 3>;
- phy-names = "usb";
status = "disabled";
};
@@ -457,7 +491,6 @@
<&ccu CLK_USB_OHCI3>;
resets = <&ccu RST_BUS_OHCI3>;
phys = <&usb2phy 3>;
- phy-names = "usb";
status = "disabled";
};
@@ -613,6 +646,13 @@
#reset-cells = <1>;
};
+ r_watchdog: watchdog@7020400 {
+ compatible = "allwinner,sun50i-h6-wdt",
+ "allwinner,sun6i-a31-wdt";
+ reg = <0x07020400 0x20>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
r_intc: interrupt-controller@7021000 {
compatible = "allwinner,sun50i-h6-r-intc",
"allwinner,sun6i-a31-r-intc";
--
2.20.1

View File

@@ -1,69 +0,0 @@
#!/bin/sh
# This script makes a minimal bootable SD card image for the Chromebook.
# The resulting file is called bootsd.img. It should be written directly
# to the card:
#
# SD=/dev/mmcblk1 # check your device name!
# dd if=output/images/bootsd.img of=$SD
#
# The partitions are created just large enough to hold the kernel and
# the rootfs image. Most of the card will be empty, and the secondary
# GPT will not be in its proper location.
# cgpt does not create a protective MBR, and the kernel refuses to read
# GPT unless there's some kind of MBR in sector 0. So we need parted
# to write that single sector before doing anything with the GPT.
cgpt=$HOST_DIR/bin/cgpt
parted=$HOST_DIR/sbin/parted
kernel=$BINARIES_DIR/uImage.kpart
rootfs=$BINARIES_DIR/rootfs.ext2
run() { echo "$@"; "$@"; }
die() { echo "$@" >&2; exit 1; }
test -f $kernel || die "No kernel image found"
test -f $rootfs || die "No rootfs image found"
test -x $cgpt || die "cgpt not found (host-vboot-utils has not been built?)"
# True file sizes in bytes
kernelsize=`stat -t $kernel | cut -d\ -f2`
rootfssize=`stat -t $rootfs | cut -d\ -f2`
# The card is partitioned in sectors of 8KB.
# 4 sectors are reserved for MBR+GPT. Their actual size turns out
# to be 33 512-byte blocks, which is just over 2 sectors, but we align
# it to a nice round number.
sec=8192
kernelsec=$(((kernelsize+8191)>>13))
rootfssec=$(((rootfssize+8191)>>13))
headersec=4
# There is also a copy of MBR+GPT at the end of the image.
# It's going to be useless but both tools assume it's there.
imagesec=$((2*headersec+kernelsec+rootfssec))
bootsd="$BINARIES_DIR/bootsd.img"
run dd bs=$sec count=$imagesec if=/dev/zero of=$bootsd
# cgpt needs offsets and sizes in 512-byte blocks.
block=512
kernelstart=$((headersec<<4))
kernelblocks=$((kernelsec<<4))
rootfsblocks=$((rootfssec<<4))
rootfsstart=$((kernelstart+kernelblocks))
# This command initializes both GPT and MBR
run $parted -s $bootsd mklabel gpt
# The kernel partition must be marked as bootable, that's why -S -T -P
run $cgpt add -i 1 -b $kernelstart -s $kernelblocks \
-t kernel -l kernel \
-S 1 -T 1 -P 10 $bootsd
# It does not really matter where the rootfs partition is located as long
# as the kernel can find it.
# However, if anything is changed here, kernel.args must be updated as well.
run $cgpt add -i 2 -b $rootfsstart -s $rootfsblocks \
-t data -l rootfs $bootsd
run dd bs=$block if=$kernel of=$bootsd seek=$kernelstart
run dd bs=$block if=$rootfs of=$bootsd seek=$rootfsstart
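
For reference, the >>13 arithmetic above rounds each image up to whole 8 KiB sectors (8192 = 2^13 bytes) before sizing the overall image. A worked example with hypothetical image sizes:

# Illustrative values only: a 4 MiB kernel partition image and a 60 MiB rootfs
kernelsize=$((4*1024*1024)); rootfssize=$((60*1024*1024))
kernelsec=$(((kernelsize+8191)>>13))    # 512 sectors of 8 KiB
rootfssec=$(((rootfssize+8191)>>13))    # 7680 sectors of 8 KiB
imagesec=$((2*4+kernelsec+rootfssec))   # 8200 sectors, i.e. a bootsd.img of roughly 64 MiB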

View File

@@ -1,37 +0,0 @@
From e9b507b695331ef6fa941b471be0a7f284ec6980 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Vincent=20Stehl=C3=A9?= <vincent.stehle@freescale.com>
Date: Tue, 12 Aug 2014 10:17:31 +0200
Subject: [PATCH] mx6qsabre_common: boot Linux to /init in mfgtools mode
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Buildroot supplies a nice /init wrapper script to use when booting from a
ramdisk.
This patch tells u-boot to tell the kernel to boot into /init (instead of
/linuxrc) on i.MX6, when booting in mfgtools mode. This way we can boot a
buildroot system entirely through USB.
Signed-off-by: Vincent Stehlé <vincent.stehle@freescale.com>
Signed-off-by: Julien Olivain <julien.olivain@oss.nxp.com>
---
include/configs/imx_env.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/configs/imx_env.h b/include/configs/imx_env.h
index 234af33e55..7e31d208dc 100644
--- a/include/configs/imx_env.h
+++ b/include/configs/imx_env.h
@@ -20,7 +20,7 @@
#define CONFIG_MFG_ENV_SETTINGS_DEFAULT \
"mfgtool_args=setenv bootargs console=${console},${baudrate} " \
- "rdinit=/linuxrc " \
+ "rdinit=/init " \
"clk_ignore_unused "\
"\0" \
"kboot="MFG_BOOT_CMD"\0"\
--
2.25.1

View File

@@ -1,176 +0,0 @@
ODROIDC2-UBOOT-CONFIG
########################################################################
# Changes made to this are overwritten every time there's a new upgrade
# To make your changes permanent change it on
# boot.ini.default
# After changing it on boot.ini.default run the bootini command to
# rewrite this file with your personal permanent settings.
# Documentation: http://odroid.com/dokuwiki/doku.php?id=en:c2_persistent_bootini
########################################################################
# Possible screen resolutions
# Uncomment only a single line (the one starting with setenv).
# At least one mode must be selected.
# Custom modeline!
# To use custom modeline you need to disable all the below resolutions
# and setup your own!
# For more information check our wiki:
# http://odroid.com/dokuwiki/doku.php?id=en:c2_hdmi_autosetting
# Example below:
# setenv m "custombuilt"
# setenv modeline "1920,1200,154000,74040,60,1920,1968,2000,2080,1200,1202,1208,1235,1,0,1"
# 480 Lines (720x480)
# setenv m "480i60hz" # Interlaced 60Hz
# setenv m "480i_rpt" # Interlaced for Rear Projection Televisions 60Hz
# setenv m "480p60hz" # 480 Progressive 60Hz
# setenv m "480p_rpt" # 480 Progressive for Rear Projection Televisions 60Hz
# 576 Lines (720x576)
# setenv m "576i50hz" # Interlaced 50Hz
# setenv m "576i_rpt" # Interlaced for Rear Projection Televisions 50Hz
# setenv m "576p50hz" # Progressive 50Hz
# setenv m "576p_rpt" # Progressive for Rear Projection Televisions 50Hz
# 720 Lines (1280x720)
# setenv m "720p50hz" # 50Hz
# setenv m "720p60hz" # 60Hz
# 1080 Lines (1920x1080)
# setenv m "1080i60hz" # Interlaced 60Hz
setenv m "1080p60hz" # Progressive 60Hz
# setenv m "1080i50hz" # Interlaced 50Hz
# setenv m "1080p50hz" # Progressive 50Hz
# setenv m "1080p24hz" # Progressive 24Hz
# 4K (3840x2160)
# setenv m "2160p30hz" # Progressive 30Hz
# setenv m "2160p25hz" # Progressive 25Hz
# setenv m "2160p24hz" # Progressive 24Hz
# setenv m "smpte24hz" # Progressive 24Hz SMPTE
# setenv m "2160p50hz" # Progressive 50Hz
# setenv m "2160p60hz" # Progressive 60Hz
# setenv m "2160p50hz420" # Progressive 50Hz with YCbCr 4:2:0 (Requires TV/Monitor that supports it)
# setenv m "2160p60hz420" # Progressive 60Hz with YCbCr 4:2:0 (Requires TV/Monitor that supports it)
### VESA modes ###
# setenv m "640x480p60hz"
# setenv m "800x480p60hz"
# setenv m "480x800p60hz"
# setenv m "800x600p60hz"
# setenv m "1024x600p60hz"
# setenv m "1024x768p60hz"
# setenv m "1280x800p60hz"
# setenv m "1280x1024p60hz"
# setenv m "1360x768p60hz"
# setenv m "1440x900p60hz"
# setenv m "1600x900p60hz"
# setenv m "1680x1050p60hz"
# setenv m "1600x1200p60hz"
# setenv m "1920x1200p60hz"
# setenv m "2560x1080p60hz"
# setenv m "2560x1440p60hz"
# setenv m "2560x1600p60hz"
# setenv m "3440x1440p60hz"
# HDMI BPP Mode
setenv m_bpp "32"
# setenv m_bpp "24"
# setenv m_bpp "16"
# HDMI DVI/VGA modes
# By default it's set to HDMI; change it below if needed.
# Uncomment only a single Line.
# setenv vout "dvi"
# setenv vout "vga"
# HDMI HotPlug Detection control
# Allows you to force HDMI into thinking that the cable is connected.
# true = HDMI will believe that cable is always connected
# false = will let board/monitor negotiate the connection status
setenv hpd "true"
# setenv hpd "false"
# Monitor output
# Controls if HDMI PHY should output anything to the monitor
setenv monitor_onoff "false" # true or false
# Server Mode (aka. No Graphics)
# Setting nographics to 1 will disable the entire video subsystem
# This mode is ideal for server-type usage. (Saves ~300MB of RAM)
setenv nographics "0"
# Meson Timer
# 1 - Meson Timer
# 0 - Arch Timer
# Using meson_timer improves video playback; however, it breaks KVM (virtualization).
# Using the arch timer allows KVM/virtualization to work; however, you'll experience poor video playback.
setenv mesontimer "1"
# UHS (Ultra High Speed) MicroSD mode enable/disable
setenv disableuhs "false"
# MicroSD Card Detection enable/disable
# Force the MMC controller to believe that a card is connected.
setenv mmc_removable "true"
# USB Multi WebCam tweak
# Only enable this if you use it.
setenv usbmulticam "false"
# Default Console Device Setting
setenv condev "console=ttyS0,115200n8 console=tty0" # on both
# CPU Frequency / Cores control
###########################################
### WARNING!!! WARNING!!! WARNING!!!
# Before changing anything here please read the wiki entry:
# http://odroid.com/dokuwiki/doku.php?id=en:c2_set_cpu_freq
#
# MAX CPU's
# setenv maxcpus "1"
# setenv maxcpus "2"
# setenv maxcpus "3"
setenv maxcpus "4"
# MAX Frequency
# setenv max_freq "2016" # 2.016GHz
# setenv max_freq "1944" # 1.944GHz
# setenv max_freq "1944" # 1.944GHz
# setenv max_freq "1920" # 1.920GHz
# setenv max_freq "1896" # 1.896GHz
# setenv max_freq "1752" # 1.752GHz
# setenv max_freq "1680" # 1.680GHz
# setenv max_freq "1656" # 1.656GHz
setenv max_freq "1536" # 1.536GHz
###########################################
# Boot Arguments
if test "${m}" = "custombuilt"; then setenv cmode "modeline=${modeline}"; fi
setenv bootargs "root=/dev/mmcblk0p2 rootwait ro ${condev} no_console_suspend hdmimode=${m} ${cmode} m_bpp=${m_bpp} vout=${vout} fsck.repair=yes net.ifnames=0 elevator=noop disablehpd=${hpd} max_freq=${max_freq} maxcpus=${maxcpus} monitor_onoff=${monitor_onoff} disableuhs=${disableuhs} mmc_removable=${mmc_removable} usbmulticam=${usbmulticam}"
# Booting
setenv loadaddr "0x11000000"
setenv dtb_loadaddr "0x1000000"
setenv initrd_loadaddr "0x13000000"
fatload mmc 0:1 ${loadaddr} Image
fatload mmc 0:1 ${dtb_loadaddr} meson64_odroidc2.dtb
fdt addr ${dtb_loadaddr}
if test "${mesontimer}" = "0"; then fdt rm /meson_timer; fdt rm /cpus/cpu@0/timer; fdt rm /cpus/cpu@1/timer; fdt rm /cpus/cpu@2/timer; fdt rm /cpus/cpu@3/timer; fi
if test "${mesontimer}" = "1"; then fdt rm /timer; fi
if test "${nographics}" = "1"; then fdt rm /reserved-memory; fdt rm /aocec; fi
if test "${nographics}" = "1"; then fdt rm /meson-fb; fdt rm /amhdmitx; fdt rm /picdec; fdt rm /ppmgr; fi
if test "${nographics}" = "1"; then fdt rm /meson-vout; fdt rm /mesonstream; fdt rm /meson-fb; fi
if test "${nographics}" = "1"; then fdt rm /deinterlace; fdt rm /codec_mm; fi
booti ${loadaddr} - ${dtb_loadaddr}
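
For reference, the fdt commands above edit the device tree in memory after it is loaded and before booti consumes it; nodes removed with fdt rm simply never reach the kernel. A hedged sketch of checking the effect interactively from the U-Boot prompt, reusing the addresses and node names from the script above:

setenv dtb_loadaddr "0x1000000"
fatload mmc 0:1 ${dtb_loadaddr} meson64_odroidc2.dtb
fdt addr ${dtb_loadaddr}
fdt print /meson-fb      # node is present in the unmodified blob
fdt rm /meson-fb         # same removal the nographics branch performs
fdt print /meson-fb      # should now fail, since the node is gone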

View File

@@ -1,11 +0,0 @@
diff -Nur linux-3.14.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c linux-3.14/drivers/net/ethernet/xilinx/xilinx_emaclite.c
--- linux-3.14.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2014-03-31 05:40:15.000000000 +0200
+++ linux-3.14/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2014-04-11 22:53:42.000000000 +0200
@@ -1249,6 +1249,7 @@
{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
+ { .compatible = "xlnx,xps-ethernetlite-2.00.b", },
{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
{ .compatible = "xlnx,xps-ethernetlite-3.00.a", },
{ /* end of list */ },

View File

@@ -1,11 +0,0 @@
diff -Nur linux-3.14.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c linux-3.14/drivers/net/ethernet/xilinx/xilinx_emaclite.c
--- linux-3.14.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2014-03-31 05:40:15.000000000 +0200
+++ linux-3.14/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2014-04-11 22:53:42.000000000 +0200
@@ -1249,6 +1249,7 @@
{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
+ { .compatible = "xlnx,xps-ethernetlite-2.00.b", },
{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
{ .compatible = "xlnx,xps-ethernetlite-3.00.a", },
{ /* end of list */ },

View File

@@ -1,36 +0,0 @@
CONFIG_44x=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_EBONY is not set
CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y
CONFIG_MATH_EMULATION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_XILINX_SYSACE=y
CONFIG_NETDEVICES=y
CONFIG_SERIO_XILINX_XPS_PS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_XILINX_HWICAP=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_XILINX=y
CONFIG_FB=y
CONFIG_FB_XILINX=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y

View File

@@ -1,7 +0,0 @@
Run the emulation with:
qemu-system-ppc -M virtex-ml507 -kernel output/images/vmlinux -m 256 -nographic -append "console=ttyS0" -dtb output/images/virtex440-ml507.dtb
The login prompt will appear in the terminal that started Qemu.
Tested with QEMU 2.12.0

View File

@@ -1,66 +0,0 @@
From 15f62343916fcb3cae82e618da28eaa82bc8c007 Mon Sep 17 00:00:00 2001
From: Alistair Francis <alistair.francis@wdc.com>
Date: Tue, 16 Apr 2019 09:03:41 -0700
Subject: [PATCH] Revert "riscv: Use latest system call ABI"
This reverts commit d4c08b9776b392e20efc6198ebe1bc8ec1911d9b.
The latest RISC-V 32bit glibc submission doesn't work with this patch,
so let's revert it. This revert can be reverted when the glibc
submission is updated to work on the 5.1 kernel.
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
arch/riscv/Kconfig | 1 +
arch/riscv/include/uapi/asm/unistd.h | 5 ++++-
arch/riscv/kernel/vdso/Makefile | 2 --
3 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index eb56c82d8aa1..43dd2680c696 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -11,6 +11,7 @@ config 32BIT
config RISCV
def_bool y
+ select ARCH_32BIT_OFF_T if !64BIT
# even on 32-bit, physical (and DMA) addresses are > 32-bits
select PHYS_ADDR_T_64BIT
select OF
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 0e2eeeb1fd27..486a288b454c 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -17,8 +17,11 @@
#ifdef __LP64__
#define __ARCH_WANT_NEW_STAT
-#define __ARCH_WANT_SET_GET_RLIMIT
#endif /* __LP64__ */
+#define __ARCH_WANT_SET_GET_RLIMIT
+#ifndef __LP64__
+#define __ARCH_WANT_TIME32_SYSCALLS
+#endif
#include <asm-generic/unistd.h>
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index fec62b24df89..eed1c137f618 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -2,11 +2,9 @@
# Symbols present in the vdso
vdso-syms = rt_sigreturn
-ifdef CONFIG_64BIT
vdso-syms += gettimeofday
vdso-syms += clock_gettime
vdso-syms += clock_getres
-endif
vdso-syms += getcpu
vdso-syms += flush_icache
--
2.21.0

View File

@@ -1,61 +0,0 @@
From aaaced6d2d6f796ed77e4725ed7ccbedb189c180 Mon Sep 17 00:00:00 2001
From: Romain Naour <romain.naour@gmail.com>
Date: Fri, 15 Jun 2018 17:43:38 +0200
Subject: [PATCH] Revert: serial: sh-sci: increase RX FIFO trigger defaults for
(H)SCIF
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This reverts commit 18e8cf159177100e69d528293f8cf6875c0b1bca (kernel)
The last Qemu kernel update [1] introduced a regression in the sh4 SCIF
serial device. Some keyboard presses are very slow to be taken into
account, perhaps not even taken into account at all. This would
explain why our test infrastructure doesn't manage to log in as root
[2][3][4].
git bisect reported a kernel patch from 4.11, increasing the RX FIFO
trigger default values for sh-sci (H)SCIF. The kernel patch itself
looks good but the Qemu emulation is not ready to handle this new
setting.
From Qemu (2.12.0): target/sh4/README.sh4
"Configuration of the second serial port (SCIF) is supported. FIFO
handling infrastructure has been started but is not completed yet."
In order to be able to test sh4 architecture with newer kernel,
revert to the old behaviour.
[1] https://git.buildroot.net/buildroot/commit/?id=03fb00f2175cdb4565e26fcb9b3da1c1059de1bd
[2] https://gitlab.com/free-electrons/toolchains-builder/-/jobs/72006425
[3] https://gitlab.com/free-electrons/toolchains-builder/-/jobs/72006427
[4] https://gitlab.com/free-electrons/toolchains-builder/-/jobs/72006426
Signed-off-by: Romain Naour <romain.naour@gmail.com>
Cc: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
---
drivers/tty/serial/sh-sci.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ab757546c6db..138e4dec44fb 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2793,11 +2793,8 @@ static int sci_init_single(struct platform_device *dev,
sci_port->rx_trigger = 32;
break;
case PORT_SCIF:
- if (p->regtype == SCIx_SH7705_SCIF_REGTYPE)
- /* RX triggering not implemented for this IP */
- sci_port->rx_trigger = 1;
- else
- sci_port->rx_trigger = 8;
+ /* RX triggering not implemented in Qemu emulation */
+ sci_port->rx_trigger = 1;
break;
default:
sci_port->rx_trigger = 1;
--
2.14.4

View File

@@ -1,61 +0,0 @@
From aaaced6d2d6f796ed77e4725ed7ccbedb189c180 Mon Sep 17 00:00:00 2001
From: Romain Naour <romain.naour@gmail.com>
Date: Fri, 15 Jun 2018 17:43:38 +0200
Subject: [PATCH] Revert: serial: sh-sci: increase RX FIFO trigger defaults for
(H)SCIF
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This reverts commit 18e8cf159177100e69d528293f8cf6875c0b1bca (kernel)
The last Qemu kernel update [1] introduced a regression in the sh4 SCIF
serial device. Some keyboard presses are very slow to be taken into
account, perhaps not even taken into account at all. This would
explain why our test infrastructure doesn't manage to log in as root
[2][3][4].
git bisect reported a kernel patch from 4.11, increasing the RX FIFO
trigger default values for sh-sci (H)SCIF. The kernel patch itself
looks good but the Qemu emulation is not ready to handle this new
setting.
From Qemu (2.12.0): target/sh4/README.sh4
"Configuration of the second serial port (SCIF) is supported. FIFO
handling infrastructure has been started but is not completed yet."
In order to be able to test sh4 architecture with newer kernel,
revert to the old behaviour.
[1] https://git.buildroot.net/buildroot/commit/?id=03fb00f2175cdb4565e26fcb9b3da1c1059de1bd
[2] https://gitlab.com/free-electrons/toolchains-builder/-/jobs/72006425
[3] https://gitlab.com/free-electrons/toolchains-builder/-/jobs/72006427
[4] https://gitlab.com/free-electrons/toolchains-builder/-/jobs/72006426
Signed-off-by: Romain Naour <romain.naour@gmail.com>
Cc: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
---
drivers/tty/serial/sh-sci.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ab757546c6db..138e4dec44fb 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2793,11 +2793,8 @@ static int sci_init_single(struct platform_device *dev,
sci_port->rx_trigger = 32;
break;
case PORT_SCIF:
- if (p->regtype == SCIx_SH7705_SCIF_REGTYPE)
- /* RX triggering not implemented for this IP */
- sci_port->rx_trigger = 1;
- else
- sci_port->rx_trigger = 8;
+ /* RX triggering not implemented in Qemu emulation */
+ sci_port->rx_trigger = 1;
break;
default:
sci_port->rx_trigger = 1;
--
2.14.4

View File

@@ -1,31 +0,0 @@
[PATCH] unbreak xhci-trace compilation
When commit 84a99f6fc5d4 (xhci: add traces for debug messages in
xhci_address_device) was backported to the owl tree as part of commit
5b2ea66f15396 (usb: add owl usb driver), the specific CFLAGS needed for
xhci-trace.c were missed, causing build breakage:
CC drivers/usb/host/xhci-trace.o
In file included from drivers/usb/host/xhci-trace.h:151:0,
from drivers/usb/host/xhci-trace.c:15:
include/trace/define_trace.h:79:43: fatal error: ./xhci-trace.h: No such file or directory
Signed-off-by: Peter Korsgaard <peter@korsgaard.com>
---
drivers/usb/host/Makefile | 3 +++
1 file changed, 3 insertions(+)
Index: linux-59dbf6a4998e967eb9c6bdcc9b506c0d96acb26b/drivers/usb/host/Makefile
===================================================================
--- linux-59dbf6a4998e967eb9c6bdcc9b506c0d96acb26b.orig/drivers/usb/host/Makefile
+++ linux-59dbf6a4998e967eb9c6bdcc9b506c0d96acb26b/drivers/usb/host/Makefile
@@ -4,6 +4,9 @@
ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
+# tell define_trace.h where to find the xhci trace header
+CFLAGS_xhci-trace.o := -I$(src)
+
isp1760-y := isp1760-hcd.o isp1760-if.o
fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o

View File

@@ -1,99 +0,0 @@
From 96b4fb75d15be65edc5494579e4a944534042f99 Mon Sep 17 00:00:00 2001
From: Sasha Levin <sasha.levin@oracle.com>
Date: Mon, 13 Oct 2014 15:51:05 -0700
Subject: [PATCH] kernel: add support for gcc 5
We're missing include/linux/compiler-gcc5.h which is required now
because gcc branched off to v5 in trunk.
Just copy the relevant bits out of include/linux/compiler-gcc4.h,
no new code is added as of now.
This fixes a build error when using gcc 5.
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 71458cfc782eafe4b27656e078d379a34e472adf)
Signed-off-by: Peter Korsgaard <peter@korsgaard.com>
---
include/linux/compiler-gcc5.h | 66 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 66 insertions(+)
create mode 100644 include/linux/compiler-gcc5.h
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644
index 0000000..cdd1cc2
--- /dev/null
+++ b/include/linux/compiler-gcc5.h
@@ -0,0 +1,66 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
--
2.10.2

View File

@@ -1,54 +0,0 @@
From 8ac9a3f6dbf00d861134bea3f2c930defed6311a Mon Sep 17 00:00:00 2001
From: Behan Webster <behanw@converseincode.com>
Date: Wed, 24 Sep 2014 01:06:46 +0100
Subject: [PATCH] ARM: 8158/1: LLVMLinux: use static inline in ARM ftrace.h
With compilers which follow the C99 standard (like modern versions of gcc and
clang), "extern inline" does the wrong thing (emits code for an externally
linkable version of the inline function). In this case using static inline
and removing the NULL version of return_address in return_address.c does
the right thing.
Signed-off-by: Behan Webster <behanw@converseincode.com>
Reviewed-by: Mark Charlebois <charlebm@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
(cherry picked from commit aeea3592a13bf12861943e44fc48f1f270941f8d)
Signed-off-by: Peter Korsgaard <peter@korsgaard.com>
---
arch/arm/include/asm/ftrace.h | 2 +-
arch/arm/kernel/return_address.c | 5 -----
2 files changed, 1 insertion(+), 6 deletions(-)
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index f89515a..2bb8cac 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -45,7 +45,7 @@ void *return_address(unsigned int);
#else
-extern inline void *return_address(unsigned int level)
+static inline void *return_address(unsigned int level)
{
return NULL;
}
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 2f8f523..a3b587f 100755
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -63,11 +63,6 @@ void *return_address(unsigned int level)
//#warning "TODO: return_address should use unwind tables"
//#endif
-void *return_address(unsigned int level)
-{
- return NULL;
-}
-
#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
EXPORT_SYMBOL_GPL(return_address);
--
2.10.2

View File

@@ -1,121 +0,0 @@
From ef34609395c72a1e777ef9c3fb6ce60aa92976bb Mon Sep 17 00:00:00 2001
From: Marco Franceschetti <vonfritz1@gmail.com>
Date: Sun, 13 Mar 2016 09:44:54 +0100
Subject: [PATCH] Fix compile errors with gcc5
(cherry picked from commit e824d45043efd49607f66c89c921b07c1523bf9e)
Signed-off-by: Peter Korsgaard <peter@korsgaard.com>
---
drivers/net/wireless/actions/rtl8188etv/include/ieee80211.h | 6 +++---
drivers/net/wireless/actions/rtl8723bs/include/ieee80211.h | 6 +++---
drivers/net/wireless/actions/rtl8723bs_vq0/include/ieee80211.h | 6 +++---
drivers/net/wireless/actions/rtl8723bu/include/ieee80211.h | 6 +++---
4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/net/wireless/actions/rtl8188etv/include/ieee80211.h b/drivers/net/wireless/actions/rtl8188etv/include/ieee80211.h
index fc293c4..0898cd4 100755
--- a/drivers/net/wireless/actions/rtl8188etv/include/ieee80211.h
+++ b/drivers/net/wireless/actions/rtl8188etv/include/ieee80211.h
@@ -1314,18 +1314,18 @@ enum ieee80211_state {
(((Addr[2]) & 0xff) == 0xff) && (((Addr[3]) & 0xff) == 0xff) && (((Addr[4]) & 0xff) == 0xff) && \
(((Addr[5]) & 0xff) == 0xff))
#else
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static __inline int is_multicast_mac_addr(const u8 *addr)
{
return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static __inline int is_broadcast_mac_addr(const u8 *addr)
{
return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
}
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static __inline int is_zero_mac_addr(const u8 *addr)
{
return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/net/wireless/actions/rtl8723bs/include/ieee80211.h b/drivers/net/wireless/actions/rtl8723bs/include/ieee80211.h
index 09475ef..56e0f5a 100755
--- a/drivers/net/wireless/actions/rtl8723bs/include/ieee80211.h
+++ b/drivers/net/wireless/actions/rtl8723bs/include/ieee80211.h
@@ -1318,18 +1318,18 @@ enum ieee80211_state {
(((Addr[2]) & 0xff) == 0xff) && (((Addr[3]) & 0xff) == 0xff) && (((Addr[4]) & 0xff) == 0xff) && \
(((Addr[5]) & 0xff) == 0xff))
#else
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static __inline int is_multicast_mac_addr(const u8 *addr)
{
return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static __inline int is_broadcast_mac_addr(const u8 *addr)
{
return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
}
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static __inline int is_zero_mac_addr(const u8 *addr)
{
return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/net/wireless/actions/rtl8723bs_vq0/include/ieee80211.h b/drivers/net/wireless/actions/rtl8723bs_vq0/include/ieee80211.h
index 5dfc421..95144b6 100755
--- a/drivers/net/wireless/actions/rtl8723bs_vq0/include/ieee80211.h
+++ b/drivers/net/wireless/actions/rtl8723bs_vq0/include/ieee80211.h
@@ -1314,18 +1314,18 @@ enum ieee80211_state {
(((Addr[2]) & 0xff) == 0xff) && (((Addr[3]) & 0xff) == 0xff) && (((Addr[4]) & 0xff) == 0xff) && \
(((Addr[5]) & 0xff) == 0xff))
#else
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static __inline int is_multicast_mac_addr(const u8 *addr)
{
return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static __inline int is_broadcast_mac_addr(const u8 *addr)
{
return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
}
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static __inline int is_zero_mac_addr(const u8 *addr)
{
return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/net/wireless/actions/rtl8723bu/include/ieee80211.h b/drivers/net/wireless/actions/rtl8723bu/include/ieee80211.h
index 09475ef..56e0f5a 100755
--- a/drivers/net/wireless/actions/rtl8723bu/include/ieee80211.h
+++ b/drivers/net/wireless/actions/rtl8723bu/include/ieee80211.h
@@ -1318,18 +1318,18 @@ enum ieee80211_state {
(((Addr[2]) & 0xff) == 0xff) && (((Addr[3]) & 0xff) == 0xff) && (((Addr[4]) & 0xff) == 0xff) && \
(((Addr[5]) & 0xff) == 0xff))
#else
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static __inline int is_multicast_mac_addr(const u8 *addr)
{
return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static __inline int is_broadcast_mac_addr(const u8 *addr)
{
return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
}
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static __inline int is_zero_mac_addr(const u8 *addr)
{
return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
--
2.10.2

View File

@@ -1,363 +0,0 @@
From 21be6b635320321216dde9201fa57a3aed60ee12 Mon Sep 17 00:00:00 2001
From: Joe Perches <joe@perches.com>
Date: Thu, 25 Jun 2015 15:01:02 -0700
Subject: [PATCH] compiler-gcc: integrate the various compiler-gcc[345].h files
As gcc major version numbers are going to advance rather rapidly in the
future, there's no real value in separate files for each compiler
version.
Deduplicate some of the macros #defined in each file too.
Neaten comments using normal kernel commenting style.
Signed-off-by: Joe Perches <joe@perches.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Michal Marek <mmarek@suse.cz>
Cc: Segher Boessenkool <segher@kernel.crashing.org>
Cc: Sasha Levin <levinsasha928@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Alan Modra <amodra@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit cb984d101b30eb7478d32df56a0023e4603cba7f)
Signed-off-by: Peter Korsgaard <peter@korsgaard.com>
---
include/linux/compiler-gcc.h | 120 ++++++++++++++++++++++++++++++++++++++++--
include/linux/compiler-gcc3.h | 23 --------
include/linux/compiler-gcc4.h | 88 -------------------------------
include/linux/compiler-gcc5.h | 66 -----------------------
4 files changed, 116 insertions(+), 181 deletions(-)
delete mode 100644 include/linux/compiler-gcc3.h
delete mode 100644 include/linux/compiler-gcc4.h
delete mode 100644 include/linux/compiler-gcc5.h
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 24545cd90a25..0c5d746850c2 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -97,10 +97,122 @@
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
-#define __gcc_header(x) #x
-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
-#define gcc_header(x) _gcc_header(x)
-#include gcc_header(__GNUC__)
+/* gcc version specific checks */
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION < 30300
+# define __used __attribute__((__unused__))
+#else
+# define __used __attribute__((__used__))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
+#if GCC_VERSION >= 30400
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#if GCC_VERSION >= 40000
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __compiler_offsetof(a, b) \
+ __builtin_offsetof(a, b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ * to them will be unlikely. This means a lot of manual unlikely()s
+ * are unnecessary now for any paths leading to the usual suspects
+ * like BUG(), printk(), panic() etc. [but let's keep them for now for
+ * older compilers]
+ *
+ * Early snapshots of gcc 4.3 don't support this and we can't detect this
+ * in the preprocessor, but we can live with this because they're unreleased.
+ * Maketime probing would be overkill here.
+ *
+ * gcc also has a __attribute__((__hot__)) to move hot functions into
+ * a special section, but I don't see any sense in this right now in
+ * the kernel context
+ */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 50000
+#define KASAN_ABI_VERSION 4
+#elif GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
+
+#endif /* gcc version >= 40000 specific checks */
#if !defined(__noclone)
#define __noclone /* not needed */
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
deleted file mode 100644
index 7d89febe4d79..000000000000
--- a/include/linux/compiler-gcc3.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-#if GCC_VERSION >= 30300
-# define __used __attribute__((__used__))
-#else
-# define __used __attribute__((__unused__))
-#endif
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
deleted file mode 100644
index 2507fd2a1eb4..000000000000
--- a/include/linux/compiler-gcc4.h
+++ /dev/null
@@ -1,88 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
-#endif
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
-
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#endif
-
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
deleted file mode 100644
index cdd1cc202d51..000000000000
--- a/include/linux/compiler-gcc5.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#define __HAVE_BUILTIN_BSWAP16__
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
--
2.11.0

View File

@@ -1,17 +0,0 @@
image sdcard.img {
hdimage {
gpt = true
}
partition bootloader {
image = "fw_payload.bin"
offset = 1M
size = 32M
partition-type-uuid = 2E54B353-1271-4842-806F-E436D6AF6985
}
partition rootfs {
image = "rootfs.ext4"
partition-type-uuid = 0FC63DAF-8483-4772-8E79-3D69D8477DE4
}
}

View File

@@ -1,7 +0,0 @@
setenv finduuid "part uuid mmc 0:1 uuid"
run finduuid
run findfdt
setenv bootargs "console=ttymxc0,115200 root=PARTUUID=${uuid} rootwait rootfstype=ext4"
load mmc 0:1 ${fdt_addr} boot/${fdtfile}
load mmc 0:1 ${loadaddr} boot/zImage
bootz ${loadaddr} - ${fdt_addr}

View File

@@ -1,29 +0,0 @@
# Minimal SD card image for the MX6 Cubox/Hummingboard
#
# U-Boot SPL and u-boot.img are flashed in the first 1MB raw partition.
# A single root filesystem partition is used (Ext4 in this case).
#
image sdcard.img {
hdimage {
}
partition spl {
in-partition-table = "no"
image = "SPL"
offset = 1K
}
partition u-boot {
in-partition-table = "no"
image = "u-boot.img"
offset = 69K
}
partition rootfs {
partition-type = 0x83
image = "rootfs.ext4"
offset = 1M
size = 512M
}
}

View File

@@ -1,8 +0,0 @@
#!/bin/sh
BOARD_DIR="$(dirname $0)"
$HOST_DIR/bin/mkimage -A arm -O linux -T script -C none \
-n "boot script" -d $BOARD_DIR/boot.scr.txt $BOARD_DIR/boot.scr
install -m 0644 -D $BOARD_DIR/boot.scr $TARGET_DIR/boot/boot.scr

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
BOARD_DIR="$(dirname $0)"
GENIMAGE_CFG="${BOARD_DIR}/genimage.cfg"
GENIMAGE_TMP="${BUILD_DIR}/genimage.tmp"
rm -rf "${GENIMAGE_TMP}"
genimage \
--rootpath "${TARGET_DIR}" \
--tmppath "${GENIMAGE_TMP}" \
--inputpath "${BINARIES_DIR}" \
--outputpath "${BINARIES_DIR}" \
--config "${GENIMAGE_CFG}"

View File

@@ -1,23 +0,0 @@
image sdcard.img {
hdimage {
gpt = "true"
}
partition fsbl1 {
image = "u-boot-spl.stm32"
}
partition fsbl2 {
image = "u-boot-spl.stm32"
}
partition ssbl {
image = "u-boot.img"
}
partition rootfs {
image = "rootfs.ext4"
partition-type = 0x83
bootable = "yes"
}
}

View File

@@ -1,33 +0,0 @@
From c8f8f33c2f0460a34c9545b01a7972a7ed2df0e9 Mon Sep 17 00:00:00 2001
From: Christophe Priouzeau <christophe.priouzeau@st.com>
Date: Mon, 29 May 2017 13:38:16 +0200
Subject: [PATCH] ARM: stm32f249-disco: don't force init= in /chosen/bootargs
There is no reason to override the kernel's default init= value, as
this breaks userspace that assumes the kernel default of /init is
used. Since stm32 is often used with a minimal bootloader
(afboot-stm32) that doesn't provide any mechanism to override the DTB,
we need to adjust the kernel command line in the Device Tree source.
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Christophe Priouzeau <christophe.priouzeau@st.com>
---
arch/arm/boot/dts/stm32f469-disco.dts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index 0dd56ef..93ee1b2 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -53,7 +53,7 @@
compatible = "st,stm32f469i-disco", "st,stm32f469";
chosen {
- bootargs = "root=/dev/ram rdinit=/linuxrc";
+ bootargs = "root=/dev/ram";
stdout-path = "serial0:115200n8";
};
--
2.7.4

View File

@@ -1 +0,0 @@
# CONFIG_STM32MP_WATCHDOG is not set

View File

@@ -1 +0,0 @@
# CONFIG_STM32MP_WATCHDOG is not set

View File

@@ -1 +0,0 @@
# CONFIG_USB is not set

View File

@@ -1,7 +0,0 @@
setenv finduuid "part uuid mmc 0:1 uuid"
run finduuid
run findfdt
setenv bootargs "console=ttymxc0,115200 root=PARTUUID=${uuid} rootwait rootfstype=ext4"
load mmc 0:1 ${fdt_addr} boot/${fdtfile}
load mmc 0:1 ${loadaddr} boot/zImage
bootz ${loadaddr} - ${fdt_addr}

View File

@@ -1,32 +0,0 @@
# Minimal SD card image for the MX6SX Udoo Neo board
#
# The SD card must have at least 1 MB free at the beginning.
# U-Boot and its environment are dumped as is.
# A single root filesystem partition is required (Ext4 in this case).
#
# For details about the layout, see:
# http://wiki.wandboard.org/index.php/Boot-process
image sdcard.img {
hdimage {
}
partition spl {
in-partition-table = "no"
image = "SPL"
offset = 1K
}
partition u-boot {
in-partition-table = "no"
image = "u-boot.img"
offset = 69K
}
partition rootfs {
partition-type = 0x83
image = "rootfs.ext4"
offset = 1M
size = 512M
}
}

View File

@@ -1,8 +0,0 @@
#!/bin/sh
BOARD_DIR="$(dirname $0)"
$HOST_DIR/bin/mkimage -A arm -O linux -T script -C none \
-n "boot script" -d $BOARD_DIR/boot.scr.txt $BOARD_DIR/boot.scr
install -m 0644 -D $BOARD_DIR/boot.scr $TARGET_DIR/boot/boot.scr

View File

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
BOARD_DIR="$(dirname $0)"
GENIMAGE_CFG="${BOARD_DIR}/genimage.cfg"
GENIMAGE_TMP="${BUILD_DIR}/genimage.tmp"
rm -rf "${GENIMAGE_TMP}"
genimage \
--rootpath "${TARGET_DIR}" \
--tmppath "${GENIMAGE_TMP}" \
--inputpath "${BINARIES_DIR}" \
--outputpath "${BINARIES_DIR}" \
--config "${GENIMAGE_CFG}"

View File

@@ -1,7 +0,0 @@
setenv finduuid "part uuid mmc 0:1 uuid"
run finduuid
run findfdt
setenv bootargs "console=ttymxc0,115200 root=PARTUUID=${uuid} rootwait rootfstype=ext4"
load mmc 0:1 ${fdt_addr} boot/${fdtfile}
load mmc 0:1 ${loadaddr} boot/zImage
bootz ${loadaddr} - ${fdt_addr}

View File

@@ -1,32 +0,0 @@
# Minimal SD card image for the Wandboard
#
# The SD card must have at least 1 MB free at the beginning.
# U-Boot and its environment are dumped as is.
# A single root filesystem partition is required (Ext4 in this case).
#
# For details about the layout, see:
# http://wiki.wandboard.org/index.php/Boot-process
image sdcard.img {
hdimage {
}
partition spl {
in-partition-table = "no"
image = "SPL"
offset = 1024
}
partition u-boot {
in-partition-table = "no"
image = "u-boot.img"
offset = 70656
}
partition rootfs {
partition-type = 0x83
image = "rootfs.ext4"
offset = 1M
size = 512M
}
}

View File

@@ -1,8 +0,0 @@
#!/bin/sh
BOARD_DIR="$(dirname $0)"
$HOST_DIR/bin/mkimage -A arm -O linux -T script -C none \
-n "boot script" -d $BOARD_DIR/boot.scr.txt $BOARD_DIR/boot.scr
install -m 0644 -D $BOARD_DIR/boot.scr $TARGET_DIR/boot/boot.scr

View File

@@ -1,31 +0,0 @@
From 5867fcad6e88af3d843bfa831648d84a53732d57 Mon Sep 17 00:00:00 2001
From: Heinrich Schuchardt <xypron.glpk@gmx.de>
Date: Wed, 19 Dec 2018 17:17:13 +0100
Subject: [PATCH] mv_ddr: mv_ddr4_training_leveling.c: uninitialized variable
With GCC 8.2 uninitialized variables lead to a build error.
Signed-off-by: Heinrich Schuchardt <xypron.glpk@gmx.de>
Signed-off-by: Baruch Siach <baruch@tkos.co.il>
---
Upstream status: commit 5867fcad6e8
mv_ddr4_training_leveling.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mv_ddr4_training_leveling.c b/mv_ddr4_training_leveling.c
index 144e21a03c01..cb95389f5466 100644
--- a/mv_ddr4_training_leveling.c
+++ b/mv_ddr4_training_leveling.c
@@ -368,7 +368,7 @@ static int mv_ddr4_dynamic_pb_wl_supp(u32 dev_num, enum mv_wl_supp_mode ecc_mode
u32 subphy_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
u8 compare_result = 0;
u32 orig_phase;
- u32 rd_data, wr_data;
+ u32 rd_data, wr_data = 0;
u32 flag, step;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
u32 ecc_phy_access_id;
--
2.20.1

View File

@@ -1,37 +0,0 @@
From f5871e1f3650d6c8a032928cb5d8ca00c275c377 Mon Sep 17 00:00:00 2001
From: Alistair Francis <alistair.francis@wdc.com>
Date: Fri, 15 Feb 2019 14:57:41 -0800
Subject: [PATCH] Makefile: Don't specify mabi or march
To avoid
can't link double-float modules with soft-float modules
errors when building 32-bit openSBI, don't specify mabi or march.
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
Makefile | 2 --
1 file changed, 2 deletions(-)
diff --git a/Makefile b/Makefile
index ae68f55..10851fc 100644
--- a/Makefile
+++ b/Makefile
@@ -145,7 +145,6 @@ GENFLAGS += $(firmware-genflags-y)
CFLAGS = -g -Wall -Werror -nostdlib -fno-strict-aliasing -O2
CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
CFLAGS += -mno-save-restore -mstrict-align
-CFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)
CFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL)
CFLAGS += $(GENFLAGS)
CFLAGS += $(platform-cflags-y)
@@ -158,7 +157,6 @@ CPPFLAGS += $(firmware-cppflags-y)
ASFLAGS = -g -Wall -nostdlib -D__ASSEMBLY__
ASFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
ASFLAGS += -mno-save-restore -mstrict-align
-ASFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)
ASFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL)
ASFLAGS += $(GENFLAGS)
ASFLAGS += $(platform-asflags-y)
--
2.20.1

View File

@@ -1,44 +0,0 @@
# Architecture
BR2_arm=y
BR2_cortex_a7=y
BR2_ARM_FPU_VFPV4=y
# Linux headers same as kernel, a 5.0 series
BR2_PACKAGE_HOST_LINUX_HEADERS_CUSTOM_5_0=y
# System configuration
BR2_TARGET_GENERIC_HOSTNAME="nanopi-m1"
BR2_TARGET_GENERIC_ISSUE="Welcome to Buildroot for the NanoPi M1"
# Bootloaders
BR2_TARGET_UBOOT=y
BR2_TARGET_UBOOT_BUILD_SYSTEM_KCONFIG=y
BR2_TARGET_UBOOT_CUSTOM_VERSION=y
BR2_TARGET_UBOOT_CUSTOM_VERSION_VALUE="2019.01"
BR2_TARGET_UBOOT_BOARD_DEFCONFIG="nanopi_m1"
BR2_TARGET_UBOOT_NEEDS_DTC=y
BR2_TARGET_UBOOT_NEEDS_PYLIBFDT=y
BR2_TARGET_UBOOT_FORMAT_CUSTOM=y
BR2_TARGET_UBOOT_FORMAT_CUSTOM_NAME="u-boot-sunxi-with-spl.bin"
# Kernel
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="5.0"
BR2_LINUX_KERNEL_DEFCONFIG="sunxi"
BR2_LINUX_KERNEL_DTS_SUPPORT=y
BR2_LINUX_KERNEL_INTREE_DTS_NAME="sun8i-h3-nanopi-m1"
# Filesystem
BR2_TARGET_ROOTFS_EXT2=y
BR2_TARGET_ROOTFS_EXT2_4=y
BR2_TARGET_UBOOT_BOOT_SCRIPT=y
BR2_TARGET_UBOOT_BOOT_SCRIPT_SOURCE="board/friendlyarm/nanopi-m1/boot.cmd"
BR2_ROOTFS_POST_IMAGE_SCRIPT="support/scripts/genimage.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="-c board/friendlyarm/nanopi-m1/genimage.cfg"
# BR2_TARGET_ROOTFS_TAR is not set
# Additional tools
BR2_PACKAGE_HOST_DOSFSTOOLS=y
BR2_PACKAGE_HOST_GENIMAGE=y
BR2_PACKAGE_HOST_MTOOLS=y

View File

@@ -1,44 +0,0 @@
# Architecture
BR2_arm=y
BR2_cortex_a7=y
BR2_ARM_FPU_VFPV4=y
# Linux headers same as kernel, a 5.0 series
BR2_PACKAGE_HOST_LINUX_HEADERS_CUSTOM_5_0=y
# System configuration
BR2_TARGET_GENERIC_HOSTNAME="nanopi-m1-plus"
BR2_TARGET_GENERIC_ISSUE="Welcome to Buildroot for the NanoPi M1 Plus"
# Bootloaders
BR2_TARGET_UBOOT=y
BR2_TARGET_UBOOT_BUILD_SYSTEM_KCONFIG=y
BR2_TARGET_UBOOT_CUSTOM_VERSION=y
BR2_TARGET_UBOOT_CUSTOM_VERSION_VALUE="2019.01"
BR2_TARGET_UBOOT_BOARD_DEFCONFIG="nanopi_m1_plus"
BR2_TARGET_UBOOT_NEEDS_DTC=y
BR2_TARGET_UBOOT_NEEDS_PYLIBFDT=y
BR2_TARGET_UBOOT_FORMAT_CUSTOM=y
BR2_TARGET_UBOOT_FORMAT_CUSTOM_NAME="u-boot-sunxi-with-spl.bin"
# Kernel
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="5.0"
BR2_LINUX_KERNEL_DEFCONFIG="sunxi"
BR2_LINUX_KERNEL_DTS_SUPPORT=y
BR2_LINUX_KERNEL_INTREE_DTS_NAME="sun8i-h3-nanopi-m1-plus"
# Filesystem
BR2_TARGET_ROOTFS_EXT2=y
BR2_TARGET_ROOTFS_EXT2_4=y
BR2_TARGET_UBOOT_BOOT_SCRIPT=y
BR2_TARGET_UBOOT_BOOT_SCRIPT_SOURCE="board/friendlyarm/nanopi-m1-plus/boot.cmd"
BR2_ROOTFS_POST_IMAGE_SCRIPT="support/scripts/genimage.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="-c board/friendlyarm/nanopi-m1-plus/genimage.cfg"
# BR2_TARGET_ROOTFS_TAR is not set
# Additional tools
BR2_PACKAGE_HOST_DOSFSTOOLS=y
BR2_PACKAGE_HOST_GENIMAGE=y
BR2_PACKAGE_HOST_MTOOLS=y

View File

@@ -1,42 +0,0 @@
BR2_arm=y
BR2_cortex_a7=y
BR2_ARM_FPU_VFPV4=y
BR2_TARGET_GENERIC_HOSTNAME="nanopi-neo"
BR2_TARGET_GENERIC_ISSUE="Welcome to Buildroot for the NanoPi NEO"
# Linux headers same as kernel, a 5.3 series
BR2_PACKAGE_HOST_LINUX_HEADERS_CUSTOM_5_3=y
# Use a -rc kernel to get the DTS
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="5.3.13"
BR2_LINUX_KERNEL_DEFCONFIG="sunxi"
BR2_LINUX_KERNEL_DTS_SUPPORT=y
BR2_LINUX_KERNEL_INTREE_DTS_NAME="sun8i-h3-nanopi-neo"
# Use an -rc tag because the defconfig is very recent
BR2_TARGET_UBOOT=y
BR2_TARGET_UBOOT_BUILD_SYSTEM_KCONFIG=y
BR2_TARGET_UBOOT_CUSTOM_VERSION=y
BR2_TARGET_UBOOT_CUSTOM_VERSION_VALUE="2019.10"
BR2_TARGET_UBOOT_BOARD_DEFCONFIG="nanopi_neo"
BR2_TARGET_UBOOT_NEEDS_DTC=y
BR2_TARGET_UBOOT_NEEDS_PYLIBFDT=y
BR2_TARGET_UBOOT_FORMAT_CUSTOM=y
BR2_TARGET_UBOOT_FORMAT_CUSTOM_NAME="u-boot-sunxi-with-spl.bin"
# Build an sdcard image
BR2_TARGET_ROOTFS_EXT2=y
BR2_TARGET_ROOTFS_EXT2_4=y
BR2_TARGET_UBOOT_BOOT_SCRIPT=y
BR2_TARGET_UBOOT_BOOT_SCRIPT_SOURCE="board/friendlyarm/nanopi-neo/boot.cmd"
BR2_ROOTFS_POST_IMAGE_SCRIPT="support/scripts/genimage.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="-c board/friendlyarm/nanopi-neo/genimage.cfg"
# BR2_TARGET_ROOTFS_TAR is not set
# Additional tools
BR2_PACKAGE_HOST_DOSFSTOOLS=y
BR2_PACKAGE_HOST_GENIMAGE=y
BR2_PACKAGE_HOST_MTOOLS=y

View File

@@ -1,30 +0,0 @@
# Architecture
BR2_powerpc=y
BR2_powerpc_440=y
# Filesystem
# BR2_TARGET_ROOTFS_TAR is not set
BR2_TARGET_ROOTFS_INITRAMFS=y
# Linux headers same as kernel, a 4.19 series
BR2_PACKAGE_HOST_LINUX_HEADERS_CUSTOM_4_19=y
# Use soft float
BR2_SOFT_FLOAT=y
# Kernel
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="4.19.91"
BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG=y
BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE="board/qemu/ppc-virtex-ml507/linux.config"
BR2_LINUX_KERNEL_VMLINUX=y
BR2_LINUX_KERNEL_DTS_SUPPORT=y
BR2_LINUX_KERNEL_INTREE_DTS_NAME="virtex440-ml507"
# Kernel needs mkimage
BR2_PACKAGE_HOST_UBOOT_TOOLS=y
# host-qemu for gitlab testing
BR2_PACKAGE_HOST_QEMU=y
BR2_PACKAGE_HOST_QEMU_SYSTEM_MODE=y

View File

@@ -1,46 +0,0 @@
BR2_aarch64=y
BR2_PACKAGE_HOST_LINUX_HEADERS_CUSTOM_5_2=y
BR2_TARGET_GENERIC_HOSTNAME="mcbin"
BR2_TARGET_GENERIC_ISSUE="Welcome to Buildroot for Marvell MacchiatoBin"
# Firmware
BR2_TARGET_ARM_TRUSTED_FIRMWARE=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_CUSTOM_GIT=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_CUSTOM_REPO_URL="https://github.com/MarvellEmbeddedProcessors/atf-marvell.git"
# This commit corresponds to release armada-18.12.2
BR2_TARGET_ARM_TRUSTED_FIRMWARE_CUSTOM_REPO_VERSION="9484123bfe29011ed44b717a23eb53a70b97ce3d"
BR2_TARGET_ARM_TRUSTED_FIRMWARE_PLATFORM="a80x0_mcbin"
BR2_TARGET_ARM_TRUSTED_FIRMWARE_FIP=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_UBOOT_AS_BL33=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_ADDITIONAL_VARIABLES="USE_COHERENT_MEM=0"
BR2_TARGET_BINARIES_MARVELL=y
BR2_TARGET_MV_DDR_MARVELL=y
# U-Boot
BR2_TARGET_UBOOT=y
BR2_TARGET_UBOOT_CUSTOM_VERSION=y
BR2_TARGET_UBOOT_CUSTOM_VERSION_VALUE="2019.07"
BR2_TARGET_UBOOT_BUILD_SYSTEM_KCONFIG=y
BR2_TARGET_UBOOT_USE_DEFCONFIG=y
BR2_TARGET_UBOOT_BOARD_DEFCONFIG="mvebu_mcbin-88f8040"
BR2_TARGET_UBOOT_CONFIG_FRAGMENT_FILES="board/solidrun/macchiatobin/uboot-fragment.config"
# Kernel
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="5.2.14"
BR2_LINUX_KERNEL_USE_ARCH_DEFAULT_CONFIG=y
BR2_LINUX_KERNEL_DTS_SUPPORT=y
BR2_LINUX_KERNEL_INTREE_DTS_NAME="marvell/armada-8040-mcbin"
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="board/solidrun/macchiatobin/linux-extras.config"
BR2_LINUX_KERNEL_INSTALL_TARGET=y
BR2_LINUX_KERNEL_NEEDS_HOST_OPENSSL=y
# rootfs
BR2_TARGET_ROOTFS_TAR=y
BR2_TARGET_ROOTFS_EXT2=y
BR2_TARGET_ROOTFS_EXT2_4=y
BR2_PACKAGE_HOST_GENIMAGE=y
BR2_ROOTFS_POST_IMAGE_SCRIPT="support/scripts/genimage.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="-c board/solidrun/macchiatobin/genimage.cfg"
BR2_ROOTFS_POST_BUILD_SCRIPT="board/solidrun/macchiatobin/post-build-mainline.sh"

View File

@@ -1,44 +0,0 @@
BR2_aarch64=y
BR2_PACKAGE_HOST_LINUX_HEADERS_CUSTOM_4_4=y
BR2_TARGET_GENERIC_HOSTNAME="mcbin"
BR2_TARGET_GENERIC_ISSUE="Welcome to Buildroot for Marvell MacchiatoBin"
# Firmware
BR2_TARGET_ARM_TRUSTED_FIRMWARE=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_CUSTOM_GIT=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_CUSTOM_REPO_URL="https://github.com/MarvellEmbeddedProcessors/atf-marvell.git"
# This commit corresponds to release armada-18.09.4
BR2_TARGET_ARM_TRUSTED_FIRMWARE_CUSTOM_REPO_VERSION="711ecd32afe465b38052b5ba374c825b158eea18"
BR2_TARGET_ARM_TRUSTED_FIRMWARE_PLATFORM="a80x0_mcbin"
BR2_TARGET_ARM_TRUSTED_FIRMWARE_FIP=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_UBOOT_AS_BL33=y
BR2_TARGET_ARM_TRUSTED_FIRMWARE_ADDITIONAL_VARIABLES="USE_COHERENT_MEM=0"
BR2_TARGET_BINARIES_MARVELL=y
BR2_TARGET_MV_DDR_MARVELL=y
# U-Boot
BR2_TARGET_UBOOT=y
BR2_TARGET_UBOOT_CUSTOM_GIT=y
BR2_TARGET_UBOOT_CUSTOM_REPO_URL="https://github.com/MarvellEmbeddedProcessors/u-boot-marvell"
# This commit corresponds to armada-18.09.1, based on u-boot 2018.03
BR2_TARGET_UBOOT_CUSTOM_REPO_VERSION="8fe403172c58440bcfbb3724242301c0108eff5b"
BR2_TARGET_UBOOT_BOARDNAME="mvebu_mcbin-88f8040"
BR2_TARGET_UBOOT_NEEDS_DTC=y
# Kernel
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_TARBALL=y
BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,MarvellEmbeddedProcessors,linux-marvell,linux-4.4.52-armada-17.10)/linux-linux-4.4.120-armada-18.09.tar.gz"
BR2_LINUX_KERNEL_DEFCONFIG="mvebu_v8_lsp"
BR2_LINUX_KERNEL_DTS_SUPPORT=y
BR2_LINUX_KERNEL_INTREE_DTS_NAME="marvell/armada-8040-mcbin"
BR2_LINUX_KERNEL_INSTALL_TARGET=y
# rootfs
BR2_TARGET_ROOTFS_TAR=y
BR2_TARGET_ROOTFS_EXT2=y
BR2_TARGET_ROOTFS_EXT2_4=y
BR2_PACKAGE_HOST_GENIMAGE=y
BR2_ROOTFS_POST_IMAGE_SCRIPT="support/scripts/genimage.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="-c board/solidrun/macchiatobin/genimage.cfg"
BR2_ROOTFS_POST_BUILD_SCRIPT="board/solidrun/macchiatobin/post-build.sh"

View File

@@ -1,22 +0,0 @@
BR2_x86_64=y
BR2_TOOLCHAIN_BUILDROOT_WCHAR=y
BR2_TOOLCHAIN_BUILDROOT_CXX=y
BR2_TARGET_GENERIC_HOSTNAME="shredos"
BR2_TARGET_GENERIC_ISSUE="Welcome To ShredOS"
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_DEFCONFIG="buildroot"
BR2_PACKAGE_BUSYBOX_SHOW_OTHERS=y
BR2_PACKAGE_BUSYBOX_INDIVIDUAL_BINARIES=y
BR2_PACKAGE_DMIDECODE=y
BR2_PACKAGE_HDPARM=y
BR2_PACKAGE_KBD=y
BR2_PACKAGE_NVME=y
BR2_PACKAGE_SMARTMONTOOLS=y
BR2_PACKAGE_TRIGGERHAPPY=y
BR2_PACKAGE_COREUTILS=y
BR2_PACKAGE_COREUTILS_INDIVIDUAL_BINARIES=y
BR2_PACKAGE_VIM=y
BR2_TARGET_ROOTFS_CPIO=y
BR2_TARGET_ROOTFS_EXT2=y
BR2_TARGET_ROOTFS_EXT2_SIZE="512M"
BR2_TARGET_GRUB2=y

View File

@@ -1 +0,0 @@
website/images

View File

@@ -1,38 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[adding-board-support]]
== Adding support for a particular board
Buildroot contains basic configurations for several publicly available
hardware boards, so that users of such a board can easily build a system
that is known to work. You are welcome to add support for other boards
to Buildroot too.
To do so, you need to create a normal Buildroot configuration that
builds a basic system for the hardware: toolchain, kernel, bootloader,
filesystem and a simple BusyBox-only userspace. No specific package
should be selected: the configuration should be as minimal as
possible, and should only build a working basic BusyBox system for the
target platform. You can of course use more complicated configurations
for your internal projects, but the Buildroot project will only
integrate basic board configurations. This is because package
selections are highly application-specific.
Once you have a known working configuration, run +make
savedefconfig+. This will generate a minimal +defconfig+ file at the
root of the Buildroot source tree. Move this file into the +configs/+
directory, and rename it +<boardname>_defconfig+.
It is recommended to use upstream versions of the Linux kernel and
bootloaders as much as possible, and to use the default kernel and
bootloader configurations as much as possible. If they are incorrect for your
board, or no default exists, we encourage you to send fixes to the
corresponding upstream projects.
However, in the meantime, you may want to store kernel or bootloader
configuration or patches specific to your target platform. To do so,
create a directory +board/<manufacturer>+ and a subdirectory
+board/<manufacturer>/<boardname>+. You can then store your patches
and configurations in these directories, and reference them from the main
Buildroot configuration. Refer to xref:customize[] for more details.

View File

@@ -1,175 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for autotools-based packages
[[autotools-package-tutorial]]
==== +autotools-package+ tutorial
First, let's see how to write a +.mk+ file for an autotools-based
package, with an example :
------------------------
01: ################################################################################
02: #
03: # libfoo
04: #
05: ################################################################################
06:
07: LIBFOO_VERSION = 1.0
08: LIBFOO_SOURCE = libfoo-$(LIBFOO_VERSION).tar.gz
09: LIBFOO_SITE = http://www.foosoftware.org/download
10: LIBFOO_INSTALL_STAGING = YES
11: LIBFOO_INSTALL_TARGET = NO
12: LIBFOO_CONF_OPTS = --disable-shared
13: LIBFOO_DEPENDENCIES = libglib2 host-pkgconf
14:
15: $(eval $(autotools-package))
------------------------
On line 7, we declare the version of the package.
On lines 8 and 9, we declare the name of the tarball (xz-ed tarball recommended)
and the location of the tarball on the Web. Buildroot will automatically
download the tarball from this location.
On line 10, we tell Buildroot to install the package to the staging
directory. The staging directory, located in +output/staging/+
is the directory where all the packages are installed, including their
development files, etc. By default, packages are not installed to the
staging directory, since usually, only libraries need to be installed in
the staging directory: their development files are needed to compile
other libraries or applications depending on them. Also by default, when
staging installation is enabled, packages are installed in this location
using the +make install+ command.
On line 11, we tell Buildroot to not install the package to the
target directory. This directory contains what will become the root
filesystem running on the target. For purely static libraries, it is
not necessary to install them in the target directory because they will
not be used at runtime. By default, target installation is enabled; setting
this variable to NO is almost never needed. Also by default, packages are
installed in this location using the +make install+ command.
On line 12, we tell Buildroot to pass a custom configure option, which
will be passed to the +./configure+ script before configuring
and building the package.
On line 13, we declare our dependencies, so that they are built
before the build process of our package starts.
Finally, on line 15, we invoke the +autotools-package+
macro that generates all the Makefile rules that actually allow the
package to be built.
[[autotools-package-reference]]
==== +autotools-package+ reference
The main macro of the autotools package infrastructure is
+autotools-package+. It is similar to the +generic-package+ macro. The ability to
have target and host packages is also available, with the
+host-autotools-package+ macro.
Just like the generic infrastructure, the autotools infrastructure
works by defining a number of variables before calling the
+autotools-package+ macro.
First, all the package metadata information variables that exist in the
generic infrastructure also exist in the autotools infrastructure:
+LIBFOO_VERSION+, +LIBFOO_SOURCE+,
+LIBFOO_PATCH+, +LIBFOO_SITE+,
+LIBFOO_SUBDIR+, +LIBFOO_DEPENDENCIES+,
+LIBFOO_INSTALL_STAGING+, +LIBFOO_INSTALL_TARGET+.
A few additional variables, specific to the autotools infrastructure,
can also be defined. Many of them are only useful in very specific
cases; typical packages will therefore only use a few of them.
* +LIBFOO_SUBDIR+ may contain the name of a subdirectory
inside the package that contains the configure script. This is useful
if, for example, the main configure script is not at the root of the
tree extracted from the tarball. If +HOST_LIBFOO_SUBDIR+ is
not specified, it defaults to +LIBFOO_SUBDIR+.
* +LIBFOO_CONF_ENV+, to specify additional environment
variables to pass to the configure script. By default, empty.
* +LIBFOO_CONF_OPTS+, to specify additional configure
options to pass to the configure script. By default, empty.
* +LIBFOO_MAKE+, to specify an alternate +make+
command. This is typically useful when parallel make is enabled in
the configuration (using +BR2_JLEVEL+) but this
feature should be disabled for the given package, for one reason or
another. By default, set to +$(MAKE)+. If parallel building
is not supported by the package, then it should be set to
+LIBFOO_MAKE=$(MAKE1)+.
* +LIBFOO_MAKE_ENV+, to specify additional environment
variables to pass to make in the build step. These are passed before
the +make+ command. By default, empty.
* +LIBFOO_MAKE_OPTS+, to specify additional variables to
pass to make in the build step. These are passed after the
+make+ command. By default, empty.
* +LIBFOO_AUTORECONF+ tells whether the package should
be autoreconfigured or not (i.e. if the configure script and
Makefile.in files should be re-generated by re-running autoconf,
automake, libtool, etc.). Valid values are +YES+ and
+NO+. By default, the value is +NO+.
* +LIBFOO_AUTORECONF_ENV+, to specify additional environment
variables to pass to the 'autoreconf' program if
+LIBFOO_AUTORECONF=YES+. These are passed in the environment of
the 'autoreconf' command. By default, empty.
* +LIBFOO_AUTORECONF_OPTS+ to specify additional options
passed to the 'autoreconf' program if
+LIBFOO_AUTORECONF=YES+. By default, empty.
* +LIBFOO_GETTEXTIZE+ tells whether the package should be
gettextized or not (i.e. if the package uses a different gettext
version than the one Buildroot provides, so that it is necessary to run
'gettextize'). Only valid when +LIBFOO_AUTORECONF=YES+. Valid
values are +YES+ and +NO+. The default is +NO+.
* +LIBFOO_GETTEXTIZE_OPTS+, to specify additional options passed to
the 'gettextize' program, if +LIBFOO_GETTEXTIZE=YES+. You may
use that if, for example, the +.po+ files are not located in the
standard place (i.e. in +po/+ at the root of the package.) By
default, '-f'.
* +LIBFOO_LIBTOOL_PATCH+ tells whether the Buildroot
patch to fix libtool cross-compilation issues should be applied or
not. Valid values are +YES+ and +NO+. By
default, the value is +YES+.
* +LIBFOO_INSTALL_STAGING_OPTS+ contains the make options
used to install the package to the staging directory. By default, the
value is +DESTDIR=$(STAGING_DIR) install+, which is
correct for most autotools packages. It is still possible to override
it.
* +LIBFOO_INSTALL_TARGET_OPTS+ contains the make options
used to install the package to the target directory. By default, the
value is +DESTDIR=$(TARGET_DIR) install+. The default
value is correct for most autotools packages, but it is still possible
to override it if needed.
With the autotools infrastructure, all the steps required to build
and install the packages are already defined, and they generally work
well for most autotools-based packages. However, when required, it is
still possible to customize what is done in any particular step:
* By adding a post-operation hook (after extract, patch, configure,
build or install). See xref:hooks[] for details.
* By overriding one of the steps. For example, even if the autotools
infrastructure is used, if the package +.mk+ file defines its
own +LIBFOO_CONFIGURE_CMDS+ variable, it will be used
instead of the default autotools one. However, using this method
should be restricted to very specific cases. Do not use it in the
general case.
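As a purely illustrative sketch (the hook name, the permission fix and the
configure options shown are assumptions, not taken from a real package), the
two mechanisms could look like this for the fictional libfoo package:
------------------------
# Hypothetical sketch only: a post-patch hook, plus a fully overridden
# configure step, for the fictional libfoo package.

# Hook run after the patch step: ensure the configure script is
# executable (some tarballs ship it without the executable bit).
define LIBFOO_FIX_CONFIGURE_PERMS
	chmod +x $(@D)/configure
endef
LIBFOO_POST_PATCH_HOOKS += LIBFOO_FIX_CONFIGURE_PERMS

# Completely replace the default autotools configure step; the options
# shown here are illustrative only.
define LIBFOO_CONFIGURE_CMDS
	cd $(@D) && \
		$(TARGET_CONFIGURE_OPTS) ./configure \
			--prefix=/usr \
			--disable-shared
endef
------------------------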

View File

@@ -1,110 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Integration of Cargo-based packages
Cargo is the package manager for the Rust programming language. It allows the
user to build programs or libraries written in Rust, but it also downloads and
manages their dependencies, to ensure repeatable builds. Cargo packages are
called "crates".
[[cargo-package-tutorial]]
==== Cargo-based package's +Config.in+ file
The +Config.in+ file of Cargo-based package 'foo' should contain:
---------------------------
01: config BR2_PACKAGE_FOO
02: bool "foo"
03: depends on BR2_PACKAGE_HOST_RUSTC_TARGET_ARCH_SUPPORTS
04: select BR2_PACKAGE_HOST_CARGO
05: help
06: This is a comment that explains what foo is.
07:
08: http://foosoftware.org/foo/
---------------------------
==== Cargo-based package's +.mk+ file
Buildroot does not (yet) provide a dedicated package infrastructure for
Cargo-based packages. So, we will explain how to write a +.mk+ file for such a
package. Let's start with an example:
------------------------------
01: ################################################################################
02: #
03: # foo
04: #
05: ################################################################################
06:
07: FOO_VERSION = 1.0
08: FOO_SOURCE = foo-$(FOO_VERSION).tar.gz
09: FOO_SITE = http://www.foosoftware.org/download
10: FOO_LICENSE = GPL-3.0+
11: FOO_LICENSE_FILES = COPYING
12:
13: FOO_DEPENDENCIES = host-cargo
14:
15: FOO_CARGO_ENV = CARGO_HOME=$(HOST_DIR)/share/cargo
16: FOO_CARGO_MODE = $(if $(BR2_ENABLE_DEBUG),debug,release)
17:
18: FOO_BIN_DIR = target/$(RUSTC_TARGET_NAME)/$(FOO_CARGO_MODE)
19:
20: FOO_CARGO_OPTS = \
21: --$(FOO_CARGO_MODE) \
22: --target=$(RUSTC_TARGET_NAME) \
23: --manifest-path=$(@D)/Cargo.toml
24:
25: define FOO_BUILD_CMDS
26: $(TARGET_MAKE_ENV) $(FOO_CARGO_ENV) \
27: cargo build $(FOO_CARGO_OPTS)
28: endef
29:
30: define FOO_INSTALL_TARGET_CMDS
31: $(INSTALL) -D -m 0755 $(@D)/$(FOO_BIN_DIR)/foo \
32: $(TARGET_DIR)/usr/bin/foo
33: endef
34:
35: $(eval $(generic-package))
--------------------------------
The Makefile starts with the definition of the standard variables for package
declaration (lines 7 to 11).
As seen in line 35, it is based on the
xref:generic-package-tutorial[+generic-package+ infrastructure]. So, it defines
the variables required by this particular infrastructure, where Cargo is
invoked:
* +FOO_BUILD_CMDS+: Cargo is invoked to perform the build. The options required
to configure the cross-compilation of the package are passed via
+FOO_CARGO_OPTS+.
* +FOO_INSTALL_TARGET_CMDS+: The binary executable generated is installed on
the target.
In order to have Cargo available for the build, +FOO_DEPENDENCIES+ needs to
contain +host-cargo+.
To sum it up, to add a new Cargo-based package, the Makefile example can be
copied verbatim, then edited to replace all occurrences of +FOO+ with the
uppercase name of the new package and update the values of the standard
variables.
==== About Dependencies Management
A crate can depend on other libraries from crates.io or git repositories, listed
in its Cargo.toml file. Before starting a build, Cargo usually downloads
them automatically. This step can also be performed independently, via the
+cargo fetch+ command.
Cargo maintains a local cache of the registry index and of git checkouts of the
crates, whose location is given by +$CARGO_HOME+. As seen in the package
Makefile example at line 15, this environment variable is set to
+$(HOST_DIR)/share/cargo+.
This dependency download mechanism is not convenient when performing an offline
build, as Cargo will fail to fetch the dependencies. In that case, it is advised
to generate a tarball of the dependencies using the +cargo vendor+ command and to add it to
+FOO_EXTRA_DOWNLOADS+.
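A minimal sketch of what this could look like for the fictional foo package is
shown below; the tarball name and the post-extract hook are illustrative
assumptions, and depending on the crate a +.cargo/config+ file pointing at the
vendored sources may also be required (omitted here).
------------------------
# Hypothetical vendored-dependencies tarball, generated offline with
# "cargo vendor"; the file name is illustrative only.
FOO_EXTRA_DOWNLOADS = foo-vendor-$(FOO_VERSION).tar.gz

# Unpack the vendored crates into the extracted source tree, so that
# Cargo can build without network access.
define FOO_UNPACK_VENDORED_CRATES
	tar xf $(FOO_DL_DIR)/foo-vendor-$(FOO_VERSION).tar.gz -C $(@D)
endef
FOO_POST_EXTRACT_HOOKS += FOO_UNPACK_VENDORED_CRATES
------------------------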

View File

@@ -1,152 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for CMake-based packages
[[cmake-package-tutorial]]
==== +cmake-package+ tutorial
First, let's see how to write a +.mk+ file for a CMake-based package,
with an example :
------------------------
01: ################################################################################
02: #
03: # libfoo
04: #
05: ################################################################################
06:
07: LIBFOO_VERSION = 1.0
08: LIBFOO_SOURCE = libfoo-$(LIBFOO_VERSION).tar.gz
09: LIBFOO_SITE = http://www.foosoftware.org/download
10: LIBFOO_INSTALL_STAGING = YES
11: LIBFOO_INSTALL_TARGET = NO
12: LIBFOO_CONF_OPTS = -DBUILD_DEMOS=ON
13: LIBFOO_DEPENDENCIES = libglib2 host-pkgconf
14:
15: $(eval $(cmake-package))
------------------------
On line 7, we declare the version of the package.
On lines 8 and 9, we declare the name of the tarball (xz-ed tarball recommended)
and the location of the tarball on the Web. Buildroot will automatically
download the tarball from this location.
On line 10, we tell Buildroot to install the package to the staging
directory. The staging directory, located in +output/staging/+
is the directory where all the packages are installed, including their
development files, etc. By default, packages are not installed to the
staging directory, since usually, only libraries need to be installed in
the staging directory: their development files are needed to compile
other libraries or applications depending on them. Also by default, when
staging installation is enabled, packages are installed in this location
using the +make install+ command.
On line 11, we tell Buildroot to not install the package to the
target directory. This directory contains what will become the root
filesystem running on the target. For purely static libraries, it is
not necessary to install them in the target directory because they will
not be used at runtime. By default, target installation is enabled; setting
this variable to NO is almost never needed. Also by default, packages are
installed in this location using the +make install+ command.
On line 12, we tell Buildroot to pass custom options to CMake when it is
configuring the package.
On line 13, we declare our dependencies, so that they are built
before the build process of our package starts.
Finally, on line 15, we invoke the +cmake-package+
macro that generates all the Makefile rules that actually allow the
package to be built.
[[cmake-package-reference]]
==== +cmake-package+ reference
The main macro of the CMake package infrastructure is
+cmake-package+. It is similar to the +generic-package+ macro. The ability to
have target and host packages is also available, with the
+host-cmake-package+ macro.
Just like the generic infrastructure, the CMake infrastructure works
by defining a number of variables before calling the +cmake-package+
macro.
First, all the package metadata information variables that exist in
the generic infrastructure also exist in the CMake infrastructure:
+LIBFOO_VERSION+, +LIBFOO_SOURCE+, +LIBFOO_PATCH+, +LIBFOO_SITE+,
+LIBFOO_SUBDIR+, +LIBFOO_DEPENDENCIES+, +LIBFOO_INSTALL_STAGING+,
+LIBFOO_INSTALL_TARGET+.
A few additional variables, specific to the CMake infrastructure, can
also be defined. Many of them are only useful in very specific cases;
typical packages will therefore only use a few of them.
* +LIBFOO_SUBDIR+ may contain the name of a subdirectory inside the
package that contains the main CMakeLists.txt file. This is useful
if, for example, the main CMakeLists.txt file is not at the root of
the tree extracted from the tarball. If +HOST_LIBFOO_SUBDIR+ is not
specified, it defaults to +LIBFOO_SUBDIR+.
* +LIBFOO_CONF_ENV+, to specify additional environment variables to
pass to CMake. By default, empty.
* +LIBFOO_CONF_OPTS+, to specify additional configure options to pass
to CMake. By default, empty. A number of common CMake options are
set by the +cmake-package+ infrastructure; so it is normally not
necessary to set them in the package's +*.mk+ file unless you want
to override them:
** +CMAKE_BUILD_TYPE+ is driven by +BR2_ENABLE_DEBUG+;
** +CMAKE_INSTALL_PREFIX+;
** +BUILD_SHARED_LIBS+ is driven by +BR2_STATIC_LIBS+;
** +BUILD_DOC+, +BUILD_DOCS+ are disabled;
** +BUILD_EXAMPLE+, +BUILD_EXAMPLES+ are disabled;
** +BUILD_TEST+, +BUILD_TESTS+, +BUILD_TESTING+ are disabled.
* +LIBFOO_SUPPORTS_IN_SOURCE_BUILD = NO+ should be set when the package
cannot be built inside the source tree but needs a separate build
directory.
* +LIBFOO_MAKE+, to specify an alternate +make+ command. This is
typically useful when parallel make is enabled in the configuration
(using +BR2_JLEVEL+) but this feature should be disabled for the
given package, for one reason or another. By default, set to
+$(MAKE)+. If parallel building is not supported by the package,
then it should be set to +LIBFOO_MAKE=$(MAKE1)+.
* +LIBFOO_MAKE_ENV+, to specify additional environment variables to
pass to make in the build step. These are passed before the +make+
command. By default, empty.
* +LIBFOO_MAKE_OPTS+, to specify additional variables to pass to make
in the build step. These are passed after the +make+ command. By
default, empty.
* +LIBFOO_INSTALL_STAGING_OPTS+ contains the make options used to
install the package to the staging directory. By default, the value
is +DESTDIR=$(STAGING_DIR) install+, which is correct for most
CMake packages. It is still possible to override it.
* +LIBFOO_INSTALL_TARGET_OPTS+ contains the make options used to
install the package to the target directory. By default, the value
is +DESTDIR=$(TARGET_DIR) install+. The default value is correct
for most CMake packages, but it is still possible to override it if
needed.
With the CMake infrastructure, all the steps required to build and
install the packages are already defined, and they generally work well
for most CMake-based packages. However, when required, it is still
possible to customize what is done in any particular step:
* By adding a post-operation hook (after extract, patch, configure,
build or install). See xref:hooks[] for details.
* By overriding one of the steps. For example, even if the CMake
infrastructure is used, if the package +.mk+ file defines its own
+LIBFOO_CONFIGURE_CMDS+ variable, it will be used instead of the
default CMake one. However, using this method should be restricted
to very specific cases. Do not use it in the general case.
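For instance, a post-operation hook for the +libfoo+ example above could be
used to drop unwanted files from the target after installation (a minimal
sketch; the removed path is hypothetical, and the hook must be defined before
the +$(eval $(cmake-package))+ line):
----------------------
# Drop the demo programs enabled by -DBUILD_DEMOS=ON from the target.
define LIBFOO_REMOVE_DEMOS
	rm -rf $(TARGET_DIR)/usr/share/libfoo/demos
endef
LIBFOO_POST_INSTALL_TARGET_HOOKS += LIBFOO_REMOVE_DEMOS
----------------------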
View File
@@ -1,13 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Conclusion
As you can see, adding a software package to Buildroot is simply a
matter of writing a Makefile using an existing example and modifying it
according to the compilation process required by the package.
If you package software that might be useful for other people, don't
forget to send a patch to the Buildroot mailing list (see
xref:submitting-patches[])!
View File
@@ -1,556 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Package directory
First of all, create a directory under the +package+ directory for
your software, for example +libfoo+.
Some packages have been grouped by topic in a sub-directory:
+x11r7+, +qt5+ and +gstreamer+. If your package fits in
one of these categories, then create your package directory in these.
New subdirectories are discouraged, however.
=== Config files
For the package to be displayed in the configuration tool, you need to
create a Config file in your package directory. There are two types:
+Config.in+ and +Config.in.host+.
==== +Config.in+ file
For packages used on the target, create a file named +Config.in+. This
file will contain the option descriptions related to our +libfoo+ software
that will be used and displayed in the configuration tool. It should basically
contain:
---------------------------
config BR2_PACKAGE_LIBFOO
bool "libfoo"
help
This is a comment that explains what libfoo is. The help text
should be wrapped.
http://foosoftware.org/libfoo/
---------------------------
The +bool+ line, +help+ line and other metadata information about the
configuration option must be indented with one tab. The help text
itself should be indented with one tab and two spaces; lines should
be wrapped to fit 72 columns, where a tab counts for 8, so 62 characters
in the text itself. The help text must mention the upstream URL of the
project after an empty line.
As a convention specific to Buildroot, the ordering of the attributes
is as follows:
1. The type of option: +bool+, +string+... with the prompt
2. If needed, the +default+ value(s)
3. Any dependencies on the target in +depends on+ form
4. Any dependencies on the toolchain in +depends on+ form
5. Any dependencies on other packages in +depends on+ form
6. Any dependency of the +select+ form
7. The help keyword and help text.
You can add other sub-options into an +if BR2_PACKAGE_LIBFOO...endif+
statement to configure particular things in your software. You can look at
examples in other packages. The syntax of the +Config.in+ file is the same
as the one for the kernel Kconfig file. The documentation for this syntax is
available at http://kernel.org/doc/Documentation/kbuild/kconfig-language.txt[]
Finally you have to add your new +libfoo/Config.in+ to
+package/Config.in+ (or in a category subdirectory if you decided to
put your package in one of the existing categories). The files
included there are 'sorted alphabetically' per category and are 'NOT'
supposed to contain anything but the 'bare' name of the package.
--------------------------
source "package/libfoo/Config.in"
--------------------------
==== +Config.in.host+ file
Some packages also need to be built for the host system. There are two
options here:
* The host package is only required to satisfy build-time
dependencies of one or more target packages. In this case, add
+host-foo+ to the target package's +BAR_DEPENDENCIES+ variable. No
+Config.in.host+ file should be created.
* The host package should be explicitly selectable by the user from
the configuration menu. In this case, create a +Config.in.host+ file
for that host package:
+
---------------------------
config BR2_PACKAGE_HOST_FOO
bool "host foo"
help
This is a comment that explains what foo for the host is.
http://foosoftware.org/foo/
---------------------------
+
The same coding style and options as for the +Config.in+ file are valid.
+
Finally you have to add your new +libfoo/Config.in.host+ to
+package/Config.in.host+. The files included there are 'sorted alphabetically'
and are 'NOT' supposed to contain anything but the 'bare' name of the package.
+
--------------------------
source "package/foo/Config.in.host"
--------------------------
+
The host package will then be available from the +Host utilities+ menu.
[[depends-on-vs-select]]
==== Choosing +depends on+ or +select+
The +Config.in+ file of your package must also ensure that
dependencies are enabled. Typically, Buildroot uses the following
rules:
* Use a +select+ type of dependency for dependencies on
libraries. These dependencies are generally not obvious and it
therefore makes sense to have the kconfig system ensure that the
dependencies are selected. For example, the _libgtk2_ package uses
+select BR2_PACKAGE_LIBGLIB2+ to make sure this library is also
enabled.
The +select+ keyword expresses the dependency with a backward
semantic.
* Use a +depends on+ type of dependency when the user really needs to
be aware of the dependency. Typically, Buildroot uses this type of
dependency for dependencies on target architecture, MMU support and
toolchain options (see xref:dependencies-target-toolchain-options[]),
or for dependencies on "big" things, such as the X.org system.
The +depends on+ keyword expresses the dependency with a forward
semantic.
.Note
The current problem with the _kconfig_ language is that these two
dependency semantics are not internally linked. Therefore, it may be
possible to select a package even though one of its dependencies or
requirements is not met.
An example illustrates both the usage of +select+ and +depends on+.
--------------------------
config BR2_PACKAGE_RRDTOOL
bool "rrdtool"
depends on BR2_USE_WCHAR
select BR2_PACKAGE_FREETYPE
select BR2_PACKAGE_LIBART
select BR2_PACKAGE_LIBPNG
select BR2_PACKAGE_ZLIB
help
RRDtool is the OpenSource industry standard, high performance
data logging and graphing system for time series data.
http://oss.oetiker.ch/rrdtool/
comment "rrdtool needs a toolchain w/ wchar"
depends on !BR2_USE_WCHAR
--------------------------
Note that these two dependency types are only transitive with the
dependencies of the same kind.
This means, in the following example:
--------------------------
config BR2_PACKAGE_A
bool "Package A"
config BR2_PACKAGE_B
bool "Package B"
depends on BR2_PACKAGE_A
config BR2_PACKAGE_C
bool "Package C"
depends on BR2_PACKAGE_B
config BR2_PACKAGE_D
bool "Package D"
select BR2_PACKAGE_B
config BR2_PACKAGE_E
bool "Package E"
select BR2_PACKAGE_D
--------------------------
* +Package C+ will only be visible if +Package B+ has been
selected, which in turn is only visible if +Package A+ has been
selected.
* Selecting +Package E+ will select +Package D+, which will select
+Package B+; it will not check for the dependencies of +Package B+,
so it will not select +Package A+.
* Since +Package B+ is selected but +Package A+ is not, this violates
the dependency of +Package B+ on +Package A+. Therefore, in such a
situation, the transitive dependency has to be added explicitly:
--------------------------
config BR2_PACKAGE_D
bool "Package D"
select BR2_PACKAGE_B
depends on BR2_PACKAGE_A
config BR2_PACKAGE_E
bool "Package E"
select BR2_PACKAGE_D
depends on BR2_PACKAGE_A
--------------------------
Overall, for package library dependencies, +select+ should be
preferred.
Note that such dependencies will ensure that the dependency option
is also enabled, but not necessarily built before your package. To do
so, the dependency also needs to be expressed in the +.mk+ file of the
package.
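For example, if +libfoo+ uses +select BR2_PACKAGE_LIBGLIB2+ in its
+Config.in+, its +.mk+ file should also list the dependency so that
+libglib2+ is built before +libfoo+ (a minimal sketch):
----------------------
LIBFOO_DEPENDENCIES += libglib2
----------------------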
Further formatting details: see xref:writing-rules-config-in[the
coding style].
[[dependencies-target-toolchain-options]]
==== Dependencies on target and toolchain options
Many packages depend on certain options of the toolchain: the choice of
C library, C++ support, thread support, RPC support, wchar support,
or dynamic library support. Some packages can only be built on certain
target architectures, or if an MMU is available in the processor.
These dependencies have to be expressed with the appropriate 'depends
on' statements in the Config.in file. Additionally, for dependencies on
toolchain options, a +comment+ should be displayed when the option is
not enabled, so that the user knows why the package is not available.
Dependencies on target architecture or MMU support should not be
made visible in a comment: since it is unlikely that the user can
freely choose another target, it makes little sense to show these
dependencies explicitly.
The +comment+ should only be visible if the +config+ option itself would
be visible when the toolchain option dependencies are met. This means
that all other dependencies of the package (including dependencies on
target architecture and MMU support) have to be repeated on the
+comment+ definition. To keep it clear, the +depends on+ statement for
these non-toolchain options should be kept separate from the +depends on+
statement for the toolchain options.
If there is a dependency on a config option in that same file (typically
the main package) it is preferable to have a global +if ... endif+
construct rather than repeating the +depends on+ statement on the
comment and other config options.
The general format of a dependency +comment+ for package foo is:
--------------------------
foo needs a toolchain w/ featA, featB, featC
--------------------------
for example:
--------------------------
mpd needs a toolchain w/ C++, threads, wchar
--------------------------
or
--------------------------
crda needs a toolchain w/ threads
--------------------------
Note that this text is kept brief on purpose, so that it will fit on an
80-character terminal.
The rest of this section enumerates the different target and toolchain
options, the corresponding config symbols to depend on, and the text to
use in the comment.
* Target architecture
** Dependency symbol: +BR2_powerpc+, +BR2_mips+, ... (see +arch/Config.in+)
** Comment string: no comment to be added
* MMU support
** Dependency symbol: +BR2_USE_MMU+
** Comment string: no comment to be added
* Gcc +__sync_*+ built-ins used for atomic operations. They are
available in variants operating on 1 byte, 2 bytes, 4 bytes and 8
bytes. Since different architectures support atomic operations on
different sizes, one dependency symbol is available for each size:
** Dependency symbol: +BR2_TOOLCHAIN_HAS_SYNC_1+ for 1 byte,
+BR2_TOOLCHAIN_HAS_SYNC_2+ for 2 bytes,
+BR2_TOOLCHAIN_HAS_SYNC_4+ for 4 bytes, +BR2_TOOLCHAIN_HAS_SYNC_8+
for 8 bytes.
** Comment string: no comment to be added
* Gcc +__atomic_*+ built-ins used for atomic operations.
** Dependency symbol: +BR2_TOOLCHAIN_HAS_ATOMIC+.
** Comment string: no comment to be added
* Kernel headers
** Dependency symbol: +BR2_TOOLCHAIN_HEADERS_AT_LEAST_X_Y+, (replace
+X_Y+ with the proper version, see +toolchain/Config.in+)
** Comment string: +headers >= X.Y+ and/or `headers <= X.Y` (replace
+X.Y+ with the proper version)
* GCC version
** Dependency symbol: +BR2_TOOLCHAIN_GCC_AT_LEAST_X_Y+, (replace
+X_Y+ with the proper version, see +toolchain/Config.in+)
** Comment string: +gcc >= X.Y+ and/or `gcc <= X.Y` (replace
+X.Y+ with the proper version)
* Host GCC version
** Dependency symbol: +BR2_HOST_GCC_AT_LEAST_X_Y+, (replace
+X_Y+ with the proper version, see +Config.in+)
** Comment string: no comment to be added
** Note that it is usually not the package itself that has a minimum
host GCC version, but rather a host-package on which it depends.
* C library
** Dependency symbol: +BR2_TOOLCHAIN_USES_GLIBC+,
+BR2_TOOLCHAIN_USES_MUSL+, +BR2_TOOLCHAIN_USES_UCLIBC+
** Comment string: for the C library, a slightly different comment text
is used: +foo needs a glibc toolchain+, or `foo needs a glibc
toolchain w/ C++`
* C++ support
** Dependency symbol: +BR2_INSTALL_LIBSTDCPP+
** Comment string: `C++`
* D support
** Dependency symbol: +BR2_TOOLCHAIN_HAS_DLANG+
** Comment string: `Dlang`
* Fortran support
** Dependency symbol: +BR2_TOOLCHAIN_HAS_FORTRAN+
** Comment string: `fortran`
* thread support
** Dependency symbol: +BR2_TOOLCHAIN_HAS_THREADS+
** Comment string: +threads+ (unless +BR2_TOOLCHAIN_HAS_THREADS_NPTL+
is also needed, in which case, specifying only +NPTL+ is sufficient)
* NPTL thread support
** Dependency symbol: +BR2_TOOLCHAIN_HAS_THREADS_NPTL+
** Comment string: +NPTL+
* RPC support
** Dependency symbol: +BR2_TOOLCHAIN_HAS_NATIVE_RPC+
** Comment string: +RPC+
* wchar support
** Dependency symbol: +BR2_USE_WCHAR+
** Comment string: +wchar+
* dynamic library
** Dependency symbol: +!BR2_STATIC_LIBS+
** Comment string: +dynamic library+
==== Dependencies on a Linux kernel built by buildroot
Some packages need a Linux kernel to be built by buildroot. These are
typically kernel modules or firmware. A comment should be added in the
Config.in file to express this dependency, similar to dependencies on
toolchain options. The general format is:
--------------------------
foo needs a Linux kernel to be built
--------------------------
If there is a dependency on both toolchain options and the Linux
kernel, use this format:
--------------------------
foo needs a toolchain w/ featA, featB, featC and a Linux kernel to be built
--------------------------
==== Dependencies on udev /dev management
If a package needs udev /dev management, it should depend on symbol
+BR2_PACKAGE_HAS_UDEV+, and the following comment should be added:
--------------------------
foo needs udev /dev management
--------------------------
If there is a dependency on both toolchain options and udev /dev
management, use this format:
--------------------------
foo needs udev /dev management and a toolchain w/ featA, featB, featC
--------------------------
==== Dependencies on features provided by virtual packages
Some features can be provided by more than one package, such as the
OpenGL libraries.
See xref:virtual-package-tutorial[] for more on the virtual packages.
=== The +.mk+ file
[[adding-packages-mk]]
Finally, here's the hardest part. Create a file named +libfoo.mk+. It
describes how the package should be downloaded, configured, built,
installed, etc.
Depending on the package type, the +.mk+ file must be written in a
different way, using different infrastructures:
* *Makefiles for generic packages* (not using autotools or CMake):
These are based on an infrastructure similar to the one used for
autotools-based packages, but require a little more work from the
developer. They specify what should be done for the configuration,
compilation and installation of the package. This
infrastructure must be used for all packages that do not use the
autotools as their build system. In the future, other specialized
infrastructures might be written for other build systems. We cover
them through a xref:generic-package-tutorial[tutorial] and a
xref:generic-package-reference[reference].
* *Makefiles for autotools-based software* (autoconf, automake, etc.):
We provide a dedicated infrastructure for such packages, since
autotools is a very common build system. This infrastructure 'must'
be used for new packages that rely on the autotools as their build
system. We cover them through a xref:autotools-package-tutorial[tutorial]
and xref:autotools-package-reference[reference].
* *Makefiles for cmake-based software*: We provide a dedicated
infrastructure for such packages, as CMake is a more and more
commonly used build system and has a standardized behaviour. This
infrastructure 'must' be used for new packages that rely on
CMake. We cover them through a xref:cmake-package-tutorial[tutorial]
and xref:cmake-package-reference[reference].
* *Makefiles for Python modules*: We have a dedicated infrastructure
for Python modules that use either the +distutils+ or the
+setuptools+ mechanism. We cover them through a
xref:python-package-tutorial[tutorial] and a
xref:python-package-reference[reference].
* *Makefiles for Lua modules*: We have a dedicated infrastructure for
Lua modules available through the LuaRocks web site. We cover them
through a xref:luarocks-package-tutorial[tutorial] and a
xref:luarocks-package-reference[reference].
Further formatting details: see xref:writing-rules-mk[the writing
rules].
[[adding-packages-hash]]
=== The +.hash+ file
When possible, you must add a third file, named +libfoo.hash+, that
contains the hashes of the downloaded files for the +libfoo+
package. The only reason for not adding a +.hash+ file is when hash
checking is not possible due to how the package is downloaded.
When a package has a version selection choice, then the hash file may be
stored in a subdirectory named after the version, e.g.
+package/libfoo/1.2.3/libfoo.hash+. This is especially important if the
different versions have different licensing terms, but they are stored
in the same file. Otherwise, the hash file should stay in the package's
directory.
The hashes stored in that file are used to validate the integrity of the
downloaded files and of the license files.
The format of this file is one line for each file for which to check the
hash, each line with the following three fields separated by two spaces:
* the type of hash, one of:
** +md5+, +sha1+, +sha224+, +sha256+, +sha384+, +sha512+, +none+
* the hash of the file:
** for +none+, one or more non-space chars, usually just the string +xxx+
** for +md5+, 32 hexadecimal characters
** for +sha1+, 40 hexadecimal characters
** for +sha224+, 56 hexadecimal characters
** for +sha256+, 64 hexadecimal characters
** for +sha384+, 96 hexadecimal characters
** for +sha512+, 128 hexadecimal characters
* the name of the file:
** for a source archive: the basename of the file, without any directory
component,
** for a license file: the path as it appears in +FOO_LICENSE_FILES+.
Lines starting with a +#+ sign are considered comments, and ignored. Empty
lines are ignored.
There can be more than one hash for a single file, each on its own line. In
this case, all hashes must match.
.Note
Ideally, the hashes stored in this file should match the hashes published by
upstream, e.g. on their website, in the e-mail announcement... If upstream
provides more than one type of hash (e.g. +sha1+ and +sha512+), then it is
best to add all those hashes in the +.hash+ file. If upstream does not
provide any hash, or only provides an +md5+ hash, then compute at least one
strong hash yourself (preferably +sha256+, but not +md5+), and mention
this in a comment line above the hashes.
.Note
The hashes for license files are used to detect a license change when a
package version is bumped. The hashes are checked during the make legal-info
target run. For a package with multiple versions (like Qt5),
create the hash file in a subdirectory +<packageversion>+ of that package
(see also xref:patch-apply-order[]).
The +none+ hash type is reserved for archives downloaded from a
repository, like a 'git clone' or a 'subversion checkout'...
The example below defines a +sha1+ and a +sha256+ published by upstream for
the main +libfoo-1.2.3.tar.bz2+ tarball, an +md5+ hash from upstream and a
locally-computed +sha256+ hash for a binary blob, a +sha256+ for a
downloaded patch, and an archive with no hash:
----
# Hashes from: http://www.foosoftware.org/download/libfoo-1.2.3.tar.bz2.{sha1,sha256}:
sha1 486fb55c3efa71148fe07895fd713ea3a5ae343a libfoo-1.2.3.tar.bz2
sha256 efc8103cc3bcb06bda6a781532d12701eb081ad83e8f90004b39ab81b65d4369 libfoo-1.2.3.tar.bz2
# md5 from: http://www.foosoftware.org/download/libfoo-1.2.3.tar.bz2.md5, sha256 locally computed:
md5 2d608f3c318c6b7557d551a5a09314f03452f1a1 libfoo-data.bin
sha256 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b libfoo-data.bin
# Locally computed:
sha256 ff52101fb90bbfc3fe9475e425688c660f46216d7e751c4bbdb1dc85cdccacb9 libfoo-fix-blabla.patch
# No hash for 1234:
none xxx libfoo-1234.tar.gz
# Hash for license files:
sha256 a45a845012742796534f7e91fe623262ccfb99460a2bd04015bd28d66fba95b8 COPYING
sha256 01b1f9f2c8ee648a7a596a1abe8aa4ed7899b1c9e5551bda06da6e422b04aa55 doc/COPYING.LGPL
----
If the +.hash+ file is present, and it contains one or more hashes for a
downloaded file, the hash(es) computed by Buildroot (after download) must
match the hash(es) stored in the +.hash+ file. If one or more hashes do
not match, Buildroot considers this an error, deletes the downloaded file,
and aborts.
If the +.hash+ file is present, but it does not contain a hash for a
downloaded file, Buildroot considers this an error and aborts. However,
the downloaded file is left in the download directory since this
typically indicates that the +.hash+ file is wrong but the downloaded
file is probably OK.
Hashes are currently checked for files fetched from http/ftp servers,
Git repositories, files copied using scp and local files. Hashes are
not checked for other version control systems (such as Subversion,
CVS, etc.) because Buildroot currently does not generate reproducible
tarballs when source code is fetched from such version control
systems.
Hashes should only be added in +.hash+ files for files that are
guaranteed to be stable. For example, patches auto-generated by Github
are not guaranteed to be stable, and therefore their hashes can change
over time. Such patches should not be downloaded, and instead be added
locally to the package folder.
If the +.hash+ file is missing, then no check is done at all.
View File
@@ -1,618 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for packages with specific build systems
By 'packages with specific build systems' we mean all the packages
whose build system is not one of the standard ones, such as
'autotools' or 'CMake'. This typically includes packages whose build
system is based on hand-written Makefiles or shell scripts.
[[generic-package-tutorial]]
==== +generic-package+ tutorial
------------------------------
01: ################################################################################
02: #
03: # libfoo
04: #
05: ################################################################################
06:
07: LIBFOO_VERSION = 1.0
08: LIBFOO_SOURCE = libfoo-$(LIBFOO_VERSION).tar.gz
09: LIBFOO_SITE = http://www.foosoftware.org/download
10: LIBFOO_LICENSE = GPL-3.0+
11: LIBFOO_LICENSE_FILES = COPYING
12: LIBFOO_INSTALL_STAGING = YES
13: LIBFOO_CONFIG_SCRIPTS = libfoo-config
14: LIBFOO_DEPENDENCIES = host-libaaa libbbb
15:
16: define LIBFOO_BUILD_CMDS
17: $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) all
18: endef
19:
20: define LIBFOO_INSTALL_STAGING_CMDS
21: $(INSTALL) -D -m 0755 $(@D)/libfoo.a $(STAGING_DIR)/usr/lib/libfoo.a
22: $(INSTALL) -D -m 0644 $(@D)/foo.h $(STAGING_DIR)/usr/include/foo.h
23: $(INSTALL) -D -m 0755 $(@D)/libfoo.so* $(STAGING_DIR)/usr/lib
24: endef
25:
26: define LIBFOO_INSTALL_TARGET_CMDS
27: $(INSTALL) -D -m 0755 $(@D)/libfoo.so* $(TARGET_DIR)/usr/lib
28: $(INSTALL) -d -m 0755 $(TARGET_DIR)/etc/foo.d
29: endef
30:
31: define LIBFOO_USERS
32: foo -1 libfoo -1 * - - - LibFoo daemon
33: endef
34:
35: define LIBFOO_DEVICES
36: /dev/foo c 666 0 0 42 0 - - -
37: endef
38:
39: define LIBFOO_PERMISSIONS
40: /bin/foo f 4755 foo libfoo - - - - -
41: endef
42:
43: $(eval $(generic-package))
--------------------------------
The Makefile begins on lines 7 to 11 with metadata information: the
version of the package (+LIBFOO_VERSION+), the name of the
tarball containing the package (+LIBFOO_SOURCE+) (xz-ed tarball recommended),
the Internet location from which the tarball can be downloaded
(+LIBFOO_SITE+), the license (+LIBFOO_LICENSE+) and the file with the
license text (+LIBFOO_LICENSE_FILES+). All variables must start with
the same prefix, +LIBFOO_+ in this case. This prefix is always the
uppercased version of the package name (see below to understand where
the package name is defined).
On line 12, we specify that this package wants to install something to
the staging space. This is often needed for libraries, since they must
install header files and other development files in the staging space.
This will ensure that the commands listed in the
+LIBFOO_INSTALL_STAGING_CMDS+ variable will be executed.
On line 13, we specify that there is some fixing to be done to some
of the 'libfoo-config' files that were installed during the
+LIBFOO_INSTALL_STAGING_CMDS+ phase.
These *-config files are executable shell scripts that are
located in the '$(STAGING_DIR)/usr/bin' directory and are executed
by other 3rd-party packages to find out the location and the linking
flags of this particular package.
The problem is that, by default, all these *-config files give wrong,
host-system linking flags that are unsuitable for cross-compiling.
For example: '-I/usr/include' instead of '-I$(STAGING_DIR)/usr/include'
or: '-L/usr/lib' instead of '-L$(STAGING_DIR)/usr/lib'
So some sed magic is done to these scripts to make them give correct
flags.
The argument to be given to +LIBFOO_CONFIG_SCRIPTS+ is the file name(s)
of the shell script(s) needing fixing. All these names are relative to
'$(STAGING_DIR)/usr/bin' and if needed multiple names can be given.
In addition, the scripts listed in +LIBFOO_CONFIG_SCRIPTS+ are removed
from +$(TARGET_DIR)/usr/bin+, since they are not needed on the target.
.Config script: 'divine' package
================================
Package divine installs shell script '$(STAGING_DIR)/usr/bin/divine-config'.
So its fixup would be:
--------------------------------
DIVINE_CONFIG_SCRIPTS = divine-config
--------------------------------
================================
.Config script: 'imagemagick' package
================================
Package imagemagick installs the following scripts:
'$(STAGING_DIR)/usr/bin/{Magick,Magick++,MagickCore,MagickWand,Wand}-config'
So its fixup would be:
--------------------------------
IMAGEMAGICK_CONFIG_SCRIPTS = \
Magick-config Magick++-config \
MagickCore-config MagickWand-config Wand-config
--------------------------------
================================
On line 14, we specify the list of dependencies this package relies
on. These dependencies are listed in terms of lower-case package names,
which can be packages for the target (without the +host-+
prefix) or packages for the host (with the +host-+ prefix).
Buildroot will ensure that all these packages are built and installed
'before' the current package starts its configuration.
The rest of the Makefile, lines 16..29, defines what should be done
at the different steps of the package configuration, compilation and
installation.
+LIBFOO_BUILD_CMDS+ tells what steps should be performed to
build the package. +LIBFOO_INSTALL_STAGING_CMDS+ tells what
steps should be performed to install the package in the staging space.
+LIBFOO_INSTALL_TARGET_CMDS+ tells what steps should be
performed to install the package in the target space.
All these steps rely on the +$(@D)+ variable, which
contains the directory where the source code of the package has been
extracted.
On lines 31..33, we define a user that is used by this package (e.g.
to run a daemon as non-root) (+LIBFOO_USERS+).
On lines 35..37, we define a device-node file used by this package
(+LIBFOO_DEVICES+).
On lines 39..41, we define the permissions to set on specific files
installed by this package (+LIBFOO_PERMISSIONS+).
Finally, on line 43, we call the +generic-package+ function, which
generates, according to the variables defined previously, all the
Makefile code necessary to make your package work.
[[generic-package-reference]]
==== +generic-package+ reference
There are two variants of the generic target. The +generic-package+ macro is
used for packages to be cross-compiled for the target. The
+host-generic-package+ macro is used for host packages, natively compiled
for the host. It is possible to call both of them in a single +.mk+
file: once to create the rules to generate a target
package and once to create the rules to generate a host package:
----------------------
$(eval $(generic-package))
$(eval $(host-generic-package))
----------------------
This might be useful if the compilation of the target package requires
some tools to be installed on the host. If the package name is
+libfoo+, then the name of the package for the target is also
+libfoo+, while the name of the package for the host is
+host-libfoo+. These names should be used in the DEPENDENCIES
variables of other packages, if they depend on +libfoo+ or
+host-libfoo+.
The call to the +generic-package+ and/or +host-generic-package+ macro
*must* be at the end of the +.mk+ file, after all variable definitions.
The call to +host-generic-package+ *must* be after the call to
+generic-package+, if any.
For the target package, the +generic-package+ uses the variables defined by
the .mk file and prefixed by the uppercased package name:
+LIBFOO_*+. +host-generic-package+ uses the +HOST_LIBFOO_*+ variables. For
'some' variables, if the +HOST_LIBFOO_+ prefixed variable doesn't
exist, the package infrastructure uses the corresponding variable
prefixed by +LIBFOO_+. This is done for variables that are likely to
have the same value for both the target and host packages. See below
for details.
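For instance, a package built both for the target and for the host could
define the following variables (a minimal sketch focusing on the metadata
discussed here; the dependencies are illustrative and the build and install
commands are omitted):
----------------------
LIBFOO_VERSION = 1.0
LIBFOO_SOURCE = libfoo-$(LIBFOO_VERSION).tar.gz
LIBFOO_SITE = http://www.foosoftware.org/download
# The target package needs a tool built by its own host variant.
LIBFOO_DEPENDENCIES = host-libfoo libbbb
HOST_LIBFOO_DEPENDENCIES = host-libaaa

$(eval $(generic-package))
$(eval $(host-generic-package))
----------------------
Since +HOST_LIBFOO_VERSION+, +HOST_LIBFOO_SOURCE+ and +HOST_LIBFOO_SITE+ are
not defined, the host package reuses the corresponding +LIBFOO_*+ values.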
The list of variables that can be set in a +.mk+ file to give metadata
information is (assuming the package name is +libfoo+) :
* +LIBFOO_VERSION+, mandatory, must contain the version of the
package. Note that if +HOST_LIBFOO_VERSION+ doesn't exist, it is
assumed to be the same as +LIBFOO_VERSION+. It can also be a
revision number or a tag for packages that are fetched directly
from their version control system. Examples:
** a version for a release tarball: +LIBFOO_VERSION = 0.1.2+
** a sha1 for a git tree: +LIBFOO_VERSION = cb9d6aa9429e838f0e54faa3d455bcbab5eef057+
** a tag for a git tree +LIBFOO_VERSION = v0.1.2+
+
.Note:
Using a branch name as +FOO_VERSION+ is not supported, because it does
not and cannot work as people would expect:
+
1. due to local caching, Buildroot will not re-fetch the repository,
so people who expect to be able to follow the remote repository
would be quite surprised and disappointed;
2. because two builds can never be perfectly simultaneous, and because
the remote repository may get new commits on the branch anytime,
two users, using the same Buildroot tree and building the same
configuration, may get different source code, thus rendering the build
non-reproducible, and people would be quite surprised and
disappointed.
* +LIBFOO_SOURCE+ may contain the name of the tarball of the package,
which Buildroot will use to download the tarball from
+LIBFOO_SITE+. If +HOST_LIBFOO_SOURCE+ is not specified, it defaults
to +LIBFOO_SOURCE+. If none are specified, then the value is assumed
to be +libfoo-$(LIBFOO_VERSION).tar.gz+. +
Example: +LIBFOO_SOURCE = foobar-$(LIBFOO_VERSION).tar.bz2+
* +LIBFOO_PATCH+ may contain a space-separated list of patch file
names, that Buildroot will download and apply to the package source
code. If an entry contains +://+, then Buildroot will assume it is a
full URL and download the patch from this location. Otherwise,
Buildroot will assume that the patch should be downloaded from
+LIBFOO_SITE+. If +HOST_LIBFOO_PATCH+ is not specified, it defaults
to +LIBFOO_PATCH+. Note that patches that are included in Buildroot
itself use a different mechanism: all files of the form
+*.patch+ present in the package directory inside
Buildroot will be applied to the package after extraction (see
xref:patch-policy[patching a package]). Finally, patches listed in
the +LIBFOO_PATCH+ variable are applied _before_ the patches stored
in the Buildroot package directory.
* +LIBFOO_SITE+ provides the location of the package, which can be a
URL or a local filesystem path. HTTP, FTP and SCP are supported URL
types for retrieving package tarballs. In these cases don't include a
trailing slash: it will be added by Buildroot between the directory
and the filename as appropriate. Git, Subversion, Mercurial,
and Bazaar are supported URL types for retrieving packages directly
from source code management systems. There is a helper function to make
it easier to download source tarballs from GitHub (refer to
xref:github-download-url[] for details). A filesystem path may be used
to specify either a tarball or a directory containing the package
source code. See +LIBFOO_SITE_METHOD+ below for more details on how
retrieval works. +
Note that SCP URLs should be of the form
+scp://[user@]host:filepath+, and that filepath is relative to the
user's home directory, so you may want to prepend the path with a
slash for absolute paths:
+scp://[user@]host:/absolutepath+. +
If +HOST_LIBFOO_SITE+ is not specified, it defaults to
+LIBFOO_SITE+.
Examples: +
+LIBFOO_SITE=http://www.libfoosoftware.org/libfoo+ +
+LIBFOO_SITE=http://svn.xiph.org/trunk/Tremor+ +
+LIBFOO_SITE=/opt/software/libfoo.tar.gz+ +
+LIBFOO_SITE=$(TOPDIR)/../src/libfoo+
* +LIBFOO_DL_OPTS+ is a space-separated list of additional options to
pass to the downloader. Useful for retrieving documents with
server-side checking for user logins and passwords, or to use a proxy.
All download methods valid for +LIBFOO_SITE_METHOD+ are supported;
valid options depend on the download method (consult the man page
for the respective download utilities).
* +LIBFOO_EXTRA_DOWNLOADS+ is a space-separated list of additional
files that Buildroot should download. If an entry contains +://+
then Buildroot will assume it is a complete URL and will download
the file using this URL. Otherwise, Buildroot will assume the file
to be downloaded is located at +LIBFOO_SITE+. Buildroot will not do
anything with those additional files, except download them: it will
be up to the package recipe to use them from +$(LIBFOO_DL_DIR)+.
* +LIBFOO_SITE_METHOD+ determines the method used to fetch or copy the
package source code. In many cases, Buildroot guesses the method
from the contents of +LIBFOO_SITE+ and setting +LIBFOO_SITE_METHOD+
is unnecessary. When +HOST_LIBFOO_SITE_METHOD+ is not specified, it
defaults to the value of +LIBFOO_SITE_METHOD+. +
The possible values of +LIBFOO_SITE_METHOD+ are:
** +wget+ for normal FTP/HTTP downloads of tarballs. Used by
default when +LIBFOO_SITE+ begins with +http://+, +https://+ or
+ftp://+.
** +scp+ for downloads of tarballs over SSH with scp. Used by
default when +LIBFOO_SITE+ begins with +scp://+.
** +svn+ for retrieving source code from a Subversion repository.
Used by default when +LIBFOO_SITE+ begins with +svn://+. When a
+http://+ Subversion repository URL is specified in
+LIBFOO_SITE+, one 'must' specify +LIBFOO_SITE_METHOD=svn+.
Buildroot performs a checkout which is preserved as a tarball in
the download cache; subsequent builds use the tarball instead of
performing another checkout.
** +cvs+ for retrieving source code from a CVS repository.
Used by default when +LIBFOO_SITE+ begins with +cvs://+.
The downloaded source code is cached as with the +svn+ method.
Anonymous pserver mode is assumed unless otherwise explicitly defined
in +LIBFOO_SITE+. Both
+LIBFOO_SITE=cvs://libfoo.net:/cvsroot/libfoo+ and
+LIBFOO_SITE=cvs://:ext:libfoo.net:/cvsroot/libfoo+
are accepted; for the former, anonymous pserver access mode is
assumed.
+LIBFOO_SITE+ 'must' contain the source URL as well as the remote
repository directory. The module is the package name.
+LIBFOO_VERSION+ is 'mandatory' and 'must' be a tag, a branch, or
a date (e.g. "2014-10-20", "2014-10-20 13:45", "2014-10-20
13:45+01" see "man cvs" for further details).
** +git+ for retrieving source code from a Git repository. Used by
default when +LIBFOO_SITE+ begins with +git://+. The downloaded
source code is cached as with the +svn+
method.
** +hg+ for retrieving source code from a Mercurial repository. One
'must' specify +LIBFOO_SITE_METHOD=hg+ when +LIBFOO_SITE+
contains a Mercurial repository URL. The downloaded source code
is cached as with the +svn+ method.
** +bzr+ for retrieving source code from a Bazaar repository. Used
by default when +LIBFOO_SITE+ begins with +bzr://+. The
downloaded source code is cached as with the +svn+ method.
** +file+ for a local tarball. One should use this when
+LIBFOO_SITE+ specifies a package tarball as a local filename.
Useful for software that isn't available publicly or in version
control.
** +local+ for a local source code directory. One should use this
when +LIBFOO_SITE+ specifies a local directory path containing
the package source code. Buildroot copies the contents of the
source directory into the package's build directory. Note that
for +local+ packages, no patches are applied. If you still need to
patch the source code, use +LIBFOO_POST_RSYNC_HOOKS+, see
xref:hooks-rsync[].
* +LIBFOO_GIT_SUBMODULES+ can be set to +YES+ to create an archive
with the git submodules in the repository. This is only available
for packages downloaded with git (i.e. when
+LIBFOO_SITE_METHOD=git+). Note that we try not to use such git
submodules when they contain bundled libraries, in which case we
prefer to use those libraries from their own package.
* +LIBFOO_STRIP_COMPONENTS+ is the number of leading components
(directories) that tar must strip from file names on extraction.
The tarball for most packages has one leading component named
"<pkg-name>-<pkg-version>", thus Buildroot passes
--strip-components=1 to tar to remove it.
For non-standard packages that don't have this component, or
that have more than one leading component to strip, set this
variable with the value to be passed to tar. Default: 1.
* +LIBFOO_EXCLUDES+ is a space-separated list of patterns to exclude
when extracting the archive. Each item from that list is passed to
tar as an +--exclude+ option. By default, empty.
* +LIBFOO_DEPENDENCIES+ lists the dependencies (in terms of package
name) that are required for the current target package to
compile. These dependencies are guaranteed to be compiled and
installed before the configuration of the current package starts.
However, modifications to the configuration of these dependencies will
not force a rebuild of the current package. In a similar way,
+HOST_LIBFOO_DEPENDENCIES+ lists the dependencies for the current
host package.
* +LIBFOO_EXTRACT_DEPENDENCIES+ lists the dependencies (in terms of
package name) that are required for the current target package to be
extracted. These dependencies are guaranteed to be compiled and
installed before the extract step of the current package
starts. This is only used internally by the package infrastructure,
and should typically not be used directly by packages.
* +LIBFOO_PATCH_DEPENDENCIES+ lists the dependencies (in terms of
package name) that are required for the current package to be
patched. These dependencies are guaranteed to be extracted and
patched (but not necessarily built) before the current package is
patched. In a similar way, +HOST_LIBFOO_PATCH_DEPENDENCIES+ lists
the dependencies for the current host package.
This is seldom used; usually, +LIBFOO_DEPENDENCIES+ is what you
really want to use.
* +LIBFOO_PROVIDES+ lists all the virtual packages +libfoo+ is an
implementation of. See xref:virtual-package-tutorial[].
* +LIBFOO_INSTALL_STAGING+ can be set to +YES+ or +NO+ (default). If
set to +YES+, then the commands in the +LIBFOO_INSTALL_STAGING_CMDS+
variables are executed to install the package into the staging
directory.
* +LIBFOO_INSTALL_TARGET+ can be set to +YES+ (default) or +NO+. If
set to +YES+, then the commands in the +LIBFOO_INSTALL_TARGET_CMDS+
variables are executed to install the package into the target
directory.
* +LIBFOO_INSTALL_IMAGES+ can be set to +YES+ or +NO+ (default). If
set to +YES+, then the commands in the +LIBFOO_INSTALL_IMAGES_CMDS+
variable are executed to install the package into the images
directory.
* +LIBFOO_CONFIG_SCRIPTS+ lists the names of the files in
'$(STAGING_DIR)/usr/bin' that need some special fixing to make them
cross-compiling friendly. Multiple file names separated by space can
be given and all are relative to '$(STAGING_DIR)/usr/bin'. The files
listed in +LIBFOO_CONFIG_SCRIPTS+ are also removed from
+$(TARGET_DIR)/usr/bin+ since they are not needed on the target.
* +LIBFOO_DEVICES+ lists the device files to be created by Buildroot
when using the static device table. The syntax to use is the
makedevs one. You can find some documentation for this syntax in the
xref:makedev-syntax[]. This variable is optional.
* +LIBFOO_PERMISSIONS+ lists the changes of permissions to be done at
the end of the build process. The syntax is once again the makedevs one.
You can find some documentation for this syntax in the xref:makedev-syntax[].
This variable is optional.
* +LIBFOO_USERS+ lists the users to create for this package, if it installs
a program you want to run as a specific user (e.g. as a daemon, or as a
cron-job). The syntax is similar in spirit to the makedevs one, and is
described in the xref:makeuser-syntax[]. This variable is optional.
* +LIBFOO_LICENSE+ defines the license (or licenses) under which the package
is released.
This name will appear in the manifest file produced by +make legal-info+.
If the license appears in https://spdx.org/licenses/[the SPDX License List],
use the SPDX short identifier to make the manifest file uniform.
Otherwise, describe the license in a precise and concise way, avoiding
ambiguous names such as +BSD+ which actually name a family of licenses.
This variable is optional. If it is not defined, +unknown+ will appear in
the +license+ field of the manifest file for this package. +
The expected format for this variable must comply with the following rules:
** If different parts of the package are released under different
licenses, then +comma+ separate licenses (e.g. +`LIBFOO_LICENSE =
GPL-2.0+, LGPL-2.1+`+). If there is clear distinction between which
component is licensed under what license, then annotate the license
with that component, between parenthesis (e.g. +`LIBFOO_LICENSE =
GPL-2.0+ (programs), LGPL-2.1+ (libraries)`+).
** If some licenses are conditioned on a sub-option being enabled, append
the conditional licenses with a comma (e.g.: `FOO_LICENSE += , GPL-2.0+
(programs)`); the infrastructure will internally remove the space before
the comma.
** If the package is dual licensed, then separate licenses with the
+or+ keyword (e.g. +`LIBFOO_LICENSE = AFL-2.1 or GPL-2.0+`+).
* +LIBFOO_LICENSE_FILES+ is a space-separated list of files in the package
tarball that contain the license(s) under which the package is released.
+make legal-info+ copies all of these files in the +legal-info+ directory.
See xref:legal-info[] for more information.
This variable is optional. If it is not defined, a warning will be produced
to let you know, and +not saved+ will appear in the +license files+ field
of the manifest file for this package.
* +LIBFOO_ACTUAL_SOURCE_TARBALL+ only applies to packages whose
+LIBFOO_SITE+ / +LIBFOO_SOURCE+ pair points to an archive that does
not actually contain source code, but binary code. This is a very
uncommon case, only known to apply to external toolchains which come
already compiled, although theoretically it might apply to other
packages. In such cases a separate tarball is usually available with
the actual source code. Set +LIBFOO_ACTUAL_SOURCE_TARBALL+ to the
name of the actual source code archive and Buildroot will download
it and use it when you run +make legal-info+ to collect
legally-relevant material. Note this file will not be downloaded
during regular builds nor by +make source+.
* +LIBFOO_ACTUAL_SOURCE_SITE+ provides the location of the actual
source tarball. The default value is +LIBFOO_SITE+, so you don't
need to set this variable if the binary and source archives are
hosted on the same directory. If +LIBFOO_ACTUAL_SOURCE_TARBALL+ is
not set, it doesn't make sense to define
+LIBFOO_ACTUAL_SOURCE_SITE+.
* +LIBFOO_REDISTRIBUTE+ can be set to +YES+ (default) or +NO+ to indicate if
the package source code is allowed to be redistributed. Set it to +NO+ for
non-opensource packages: Buildroot will not save the source code for this
package when collecting the +legal-info+.
* +LIBFOO_FLAT_STACKSIZE+ defines the stack size of an application built into
the FLAT binary format. The application stack size on the NOMMU architecture
processors can't be enlarged at run time. The default stack size for the
FLAT binary format is only 4k bytes. If the application needs a larger
stack, set the required size here.
* +LIBFOO_BIN_ARCH_EXCLUDE+ is a space-separated list of paths (relative
to the target directory) to ignore when checking that the package
installs correctly cross-compiled binaries. You seldom need to set this
variable, unless the package installs binary blobs outside the default
locations, `/lib/firmware`, `/usr/lib/firmware`, `/lib/modules`,
`/usr/lib/modules`, and `/usr/share`, which are automatically excluded.
* +LIBFOO_IGNORE_CVES+ is a space-separated list of CVEs that tells
Buildroot CVE tracking tools which CVEs should be ignored for this
package. This is typically used when the CVE is fixed by a patch in
the package, or when the CVE for some reason does not affect the
Buildroot package. A Makefile comment must always precede the
addition of a CVE to this variable. Example:
----------------------
# 0001-fix-cve-2020-12345.patch
LIBFOO_IGNORE_CVES += CVE-2020-12345
# only when built with libbaz, which Buildroot doesn't support
LIBFOO_IGNORE_CVES += CVE-2020-54321
----------------------
The recommended way to define these variables is to use the following
syntax:
----------------------
LIBFOO_VERSION = 2.32
----------------------
Now, the variables that define what should be performed at the
different steps of the build process.
* +LIBFOO_EXTRACT_CMDS+ lists the actions to be performed to extract
the package. This is generally not needed as tarballs are
automatically handled by Buildroot. However, if the package uses a
non-standard archive format, such as a ZIP or RAR file, or has a
tarball with a non-standard organization, this variable allows
overriding the default behavior of the package infrastructure.
* +LIBFOO_CONFIGURE_CMDS+ lists the actions to be performed to
configure the package before its compilation.
* +LIBFOO_BUILD_CMDS+ lists the actions to be performed to
compile the package.
* +HOST_LIBFOO_INSTALL_CMDS+ lists the actions to be performed
to install the package, when the package is a host package. The
package must install its files to the directory given by
+$(HOST_DIR)+. All files, including development files such as
headers, should be installed, since other packages might be compiled
on top of this package.
* +LIBFOO_INSTALL_TARGET_CMDS+ lists the actions to be
performed to install the package to the target directory, when the
package is a target package. The package must install its files to
the directory given by +$(TARGET_DIR)+. Only the files required for
'execution' of the package have to be
installed. Header files, static libraries and documentation will be
removed again when the target filesystem is finalized.
* +LIBFOO_INSTALL_STAGING_CMDS+ lists the actions to be
performed to install the package to the staging directory, when the
package is a target package. The package must install its files to
the directory given by +$(STAGING_DIR)+. All development files
should be installed, since they might be needed to compile other
packages.
* +LIBFOO_INSTALL_IMAGES_CMDS+ lists the actions to be performed to
install the package to the images directory, when the package is a
target package. The package must install its files to the directory
given by +$(BINARIES_DIR)+. Only binary image files that do not belong
in the +TARGET_DIR+ but are necessary for booting the board should be
placed here. For example, a package should use this step if it has
binaries similar to the kernel image, the bootloader or root
filesystem images.
* +LIBFOO_INSTALL_INIT_SYSV+, +LIBFOO_INSTALL_INIT_OPENRC+ and
+LIBFOO_INSTALL_INIT_SYSTEMD+ list the actions to install init
scripts either for the SysV-like init systems (busybox, sysvinit,
etc.), for openrc, or for the systemd units (see the sketch after
this list). These commands will be run only when the relevant init
system is installed (i.e. if systemd is selected as the init system
in the configuration, only +LIBFOO_INSTALL_INIT_SYSTEMD+ will be
run). The only exception is when openrc is chosen as the init system
and +LIBFOO_INSTALL_INIT_OPENRC+ has not been set; in such a
situation, +LIBFOO_INSTALL_INIT_SYSV+ will be called, since openrc
supports sysv init scripts.
When systemd is used as the init system, buildroot will automatically enable
all services using the +systemctl preset-all+ command in the final phase of
image building. You can add preset files to prevent a particular unit from
being automatically enabled by buildroot.
* +LIBFOO_HELP_CMDS+ lists the actions to print the package help, which
is included in the main +make help+ output. These commands can print
anything in any format.
This is seldom used, as packages rarely have custom rules. *Do not use
this variable*, unless you really know that you need to print help.
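As an example of the init-script installation variables above, a typical
SysV init script installation could look like this (a sketch; the
+S50libfoo+ script shipped in the package directory is hypothetical, and
+$(LIBFOO_PKGDIR)+ is described below):
----------------------
define LIBFOO_INSTALL_INIT_SYSV
	$(INSTALL) -D -m 0755 $(LIBFOO_PKGDIR)/S50libfoo \
		$(TARGET_DIR)/etc/init.d/S50libfoo
endef
----------------------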
The preferred way to define these variables is:
----------------------
define LIBFOO_CONFIGURE_CMDS
action 1
action 2
action 3
endef
----------------------
In the action definitions, you can use the following variables:
* +$(LIBFOO_PKGDIR)+ contains the path to the directory containing the
+libfoo.mk+ and +Config.in+ files. This variable is useful when it is
necessary to install a file bundled in Buildroot, like a runtime
configuration file, a splashscreen image...
* +$(@D)+, which contains the directory in which the package source
code has been uncompressed.
* +$(LIBFOO_DL_DIR)+ contains the path to the directory where all the downloads
made by Buildroot for +libfoo+ are stored.
* +$(TARGET_CC)+, +$(TARGET_LD)+, etc. to get the target
cross-compilation utilities
* +$(TARGET_CROSS)+ to get the cross-compilation toolchain prefix
* Of course the +$(HOST_DIR)+, +$(STAGING_DIR)+ and +$(TARGET_DIR)+
variables to install the packages properly. Those variables point to
the global _host_, _staging_ and _target_ directories, unless
_per-package directory_ support is used, in which case they point to
the current package _host_, _staging_ and _target_ directories. In
both cases, it doesn't make any difference from the package point of
view: it should simply use +HOST_DIR+, +STAGING_DIR+ and
+TARGET_DIR+. See xref:top-level-parallel-build[] for more details
about _per-package directory_ support.
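Putting a few of these variables together, a hypothetical target installation
step could look like this (a sketch; the binary and configuration file names
are illustrative):
----------------------
define LIBFOO_INSTALL_TARGET_CMDS
	$(INSTALL) -D -m 0755 $(@D)/foo $(TARGET_DIR)/usr/bin/foo
	$(INSTALL) -D -m 0644 $(LIBFOO_PKGDIR)/foo.conf $(TARGET_DIR)/etc/foo.conf
endef
----------------------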
Finally, you can also use hooks. See xref:hooks[] for more information.
View File
@@ -1,62 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Gettext integration and interaction with packages
Many packages that support internationalization use the gettext
library. Dependencies for this library are fairly complicated and
therefore deserve some explanation.
The 'glibc' C library integrates a full-blown implementation of
'gettext', supporting translation. Native Language Support is
therefore built into 'glibc'.
On the other hand, the 'uClibc' and 'musl' C libraries only provide a
stub implementation of the gettext functionality, which makes it possible to
compile libraries and programs using gettext functions, but without
providing the translation capabilities of a full-blown gettext
implementation. With such C libraries, if real Native Language Support
is necessary, it can be provided by the +libintl+ library of the
+gettext+ package.
Due to this, and in order to make sure that Native Language Support is
properly handled, packages in Buildroot that can use NLS support
should:
1. Ensure NLS support is enabled when +BR2_SYSTEM_ENABLE_NLS=y+. This
is done automatically for 'autotools' packages and therefore should
only be done for packages using other package infrastructures.
1. Add +$(TARGET_NLS_DEPENDENCIES)+ to the package
+<pkg>_DEPENDENCIES+ variable. This addition should be done
unconditionally: the value of this variable is automatically
adjusted by the core infrastructure to contain the relevant list of
packages. If NLS support is disabled, this variable is empty. If
NLS support is enabled, this variable contains +host-gettext+ so
that tools needed to compile translation files are available on the
host. In addition, if 'uClibc' or 'musl' are used, this variable
also contains +gettext+ in order to get the full-blown 'gettext'
implementation.
1. If needed, add +$(TARGET_NLS_LIBS)+ to the linker flags, so that
the package gets linked with +libintl+. This is generally not
needed with 'autotools' packages as they usually detect
automatically that they should link with +libintl+. However,
packages using other build systems, or problematic autotools-based
packages may need this. +$(TARGET_NLS_LIBS)+ should be added
unconditionally to the linker flags, as the core automatically
makes it empty or defined to +-lintl+ depending on the
configuration.
No changes should be made to the +Config.in+ file to support NLS.
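For a package +foo+ using the generic infrastructure, the last two points
could translate to something like this (a sketch; it assumes the package's
own Makefile honors a +LIBS+ variable passed on the +make+ command line):
----------------------
FOO_DEPENDENCIES += $(TARGET_NLS_DEPENDENCIES)
FOO_MAKE_OPTS += LIBS=$(TARGET_NLS_LIBS)
----------------------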
Finally, certain packages need some gettext utilities on the target,
such as the +gettext+ program itself, which allows retrieving
translated strings from the command line. In such a case, the package
should:
* use +select BR2_PACKAGE_GETTEXT+ in their +Config.in+ file,
indicating in a comment above that it's a runtime dependency only.
* not add any +gettext+ dependency in the +DEPENDENCIES+ variable of
their +.mk+ file.
View File
@@ -1,124 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for Go packages
This infrastructure applies to Go packages that use the standard
build system and use bundled dependencies.
[[golang-package-tutorial]]
==== +golang-package+ tutorial
First, let's see how to write a +.mk+ file for a Go package,
with an example:
------------------------
01: ################################################################################
02: #
03: # foo
04: #
05: ################################################################################
06:
07: FOO_VERSION = 1.0
08: FOO_SITE = $(call github,bar,foo,$(FOO_VERSION))
09: FOO_LICENSE = BSD-3-Clause
10: FOO_LICENSE_FILES = LICENSE
11:
12: $(eval $(golang-package))
------------------------
On line 7, we declare the version of the package.
On line 8, we declare the upstream location of the package, here
fetched from GitHub, since a large number of Go packages are hosted on
GitHub.
On lines 9 and 10, we give licensing details about the package.
Finally, on line 12, we invoke the +golang-package+ macro that
generates all the Makefile rules that actually allow the package to be
built.
[[golang-package-reference]]
==== +golang-package+ reference
In their +Config.in+ file, packages using the +golang-package+
infrastructure should depend on +BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS+
because Buildroot will automatically add a dependency on +host-go+
to such packages.
If you need CGO support in your package, you must add a dependency on
+BR2_PACKAGE_HOST_GO_TARGET_CGO_LINKING_SUPPORTS+.
The main macro of the Go package infrastructure is
+golang-package+. It is similar to the +generic-package+ macro. The
ability to build host packages is also available, with the
+host-golang-package+ macro.
Host packages built by the +host-golang-package+ macro should depend on
+BR2_PACKAGE_HOST_GO_HOST_ARCH_SUPPORTS+.
Just like the generic infrastructure, the Go infrastructure works
by defining a number of variables before calling the +golang-package+.
All the package metadata information variables that exist in the
xref:generic-package-reference[generic package infrastructure] also
exist in the Go infrastructure: +FOO_VERSION+, +FOO_SOURCE+,
+FOO_PATCH+, +FOO_SITE+, +FOO_SUBDIR+, +FOO_DEPENDENCIES+,
+FOO_LICENSE+, +FOO_LICENSE_FILES+, +FOO_INSTALL_STAGING+, etc.
Note that it is not necessary to add +host-go+ in the
+FOO_DEPENDENCIES+ variable of a package, since this basic dependency
is automatically added as needed by the Go package infrastructure.
A few additional variables, specific to the Go infrastructure, can
optionally be defined, depending on the package's needs. Many of them
are only useful in very specific cases, typical packages will
therefore only use a few of them, or none.
* If your package needs a custom +GOPATH+ to be compiled in, you can
use the +FOO_WORKSPACE+ variable. The +GOPATH+ being used will be
+<package-srcdir>/<FOO_WORKSPACE>+. If +FOO_WORKSPACE+ is not
specified, it defaults to +_gopath+.
* +FOO_SRC_SUBDIR+ is the sub-directory where your source will be
compiled, relative to the +GOPATH+. An example value is
+github.com/bar/foo+. If +FOO_SRC_SUBDIR+ is not specified, it
defaults to a value inferred from the +FOO_SITE+ variable.
* +FOO_LDFLAGS+ and +FOO_TAGS+ can be used to pass, respectively, the
+LDFLAGS+ and the +TAGS+ to the +go+ build command.
* +FOO_BUILD_TARGETS+ can be used to pass the list of targets that
should be built. If +FOO_BUILD_TARGETS+ is not specified, it
defaults to +.+. We then have two cases:
** +FOO_BUILD_TARGETS+ is +.+. In this case, we assume only one binary
will be produced, and that by default we name it after the package
name. If that is not appropriate, the name of the produced binary
can be overridden using +FOO_BIN_NAME+.
** +FOO_BUILD_TARGETS+ is not +.+. In this case, we iterate over the
values to build each target, and for each one produce a binary named
after the non-directory component of the target. For example if
+FOO_BUILD_TARGETS = cmd/docker cmd/dockerd+ the binaries produced
are +docker+ and +dockerd+.
* +FOO_INSTALL_BINS+ can be used to pass the list of binaries that
should be installed in +/usr/bin+ on the target. If
+FOO_INSTALL_BINS+ is not specified, it defaults to the lower-case
name of the package. A sketch combining several of these variables is
shown after this list.
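Here is a sketch for a hypothetical package producing two binaries (all
values are made up for the example):
------------------------
FOO_VERSION = 2.1.0
FOO_SITE = $(call github,bar,foo,v$(FOO_VERSION))
FOO_LICENSE = Apache-2.0
FOO_LICENSE_FILES = LICENSE

# Build two commands, with custom link flags and build tags.
FOO_BUILD_TARGETS = cmd/foo cmd/food
FOO_INSTALL_BINS = foo food
FOO_LDFLAGS = -X main.Version=$(FOO_VERSION)
FOO_TAGS = netgo

$(eval $(golang-package))
------------------------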
With the Go infrastructure, all the steps required to build and
install the packages are already defined, and they generally work well
for most Go-based packages. However, when required, it is still
possible to customize what is done in any particular step:
* By adding a post-operation hook (after extract, patch, configure,
build or install). See xref:hooks[] for details.
* By overriding one of the steps. For example, even if the Go
infrastructure is used, if the package +.mk+ file defines its own
+FOO_BUILD_CMDS+ variable, it will be used instead of the default Go
one. However, using this method should be restricted to very
specific cases. Do not use it in the general case.

View File

@@ -1,87 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[hooks]]
=== Hooks available in the various build steps
The generic infrastructure (and as a result also the derived autotools
and cmake infrastructures) allows packages to specify hooks.
These define further actions to perform after existing steps.
Most hooks aren't really useful for generic packages, since the +.mk+
file already has full control over the actions performed in each step
of the package construction.
The following hook points are available:
* +LIBFOO_PRE_DOWNLOAD_HOOKS+
* +LIBFOO_POST_DOWNLOAD_HOOKS+
* +LIBFOO_PRE_EXTRACT_HOOKS+
* +LIBFOO_POST_EXTRACT_HOOKS+
* +LIBFOO_PRE_RSYNC_HOOKS+
* +LIBFOO_POST_RSYNC_HOOKS+
* +LIBFOO_PRE_PATCH_HOOKS+
* +LIBFOO_POST_PATCH_HOOKS+
* +LIBFOO_PRE_CONFIGURE_HOOKS+
* +LIBFOO_POST_CONFIGURE_HOOKS+
* +LIBFOO_PRE_BUILD_HOOKS+
* +LIBFOO_POST_BUILD_HOOKS+
* +LIBFOO_PRE_INSTALL_HOOKS+ (for host packages only)
* +LIBFOO_POST_INSTALL_HOOKS+ (for host packages only)
* +LIBFOO_PRE_INSTALL_STAGING_HOOKS+ (for target packages only)
* +LIBFOO_POST_INSTALL_STAGING_HOOKS+ (for target packages only)
* +LIBFOO_PRE_INSTALL_TARGET_HOOKS+ (for target packages only)
* +LIBFOO_POST_INSTALL_TARGET_HOOKS+ (for target packages only)
* +LIBFOO_PRE_INSTALL_IMAGES_HOOKS+
* +LIBFOO_POST_INSTALL_IMAGES_HOOKS+
* +LIBFOO_PRE_LEGAL_INFO_HOOKS+
* +LIBFOO_POST_LEGAL_INFO_HOOKS+
These variables are 'lists' of variable names containing actions to be
performed at this hook point. This allows several hooks to be
registered at a given hook point. Here is an example:
----------------------
define LIBFOO_POST_PATCH_FIXUP
action1
action2
endef
LIBFOO_POST_PATCH_HOOKS += LIBFOO_POST_PATCH_FIXUP
----------------------
[[hooks-rsync]]
==== Using the +POST_RSYNC+ hook
The +POST_RSYNC+ hook is run only for packages that use a local source,
either through the +local+ site method or the +OVERRIDE_SRCDIR+
mechanism. In this case, package sources are copied using +rsync+ from
the local location into the buildroot build directory. The +rsync+
command does not copy all files from the source directory, though.
Files belonging to a version control system, like the directories
+.git+, +.hg+, etc. are not copied. For most packages this is
sufficient, but a given package can perform additional actions using
the +POST_RSYNC+ hook.
In principle, the hook can contain any command you want. One specific
use case, though, is the intentional copying of the version control
directory using +rsync+, as illustrated in the sketch after this list.
The +rsync+ command you use in the hook can use, among others, the
following variables:
* +$(SRCDIR)+: the path to the overridden source directory
* +$(@D)+: the path to the build directory
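For example, a hypothetical package whose build system derives its version
string from git could copy the +.git+ directory into the build directory
with a hook like this sketch:
----------------------
define LIBFOO_SYNC_GIT
	rsync -au $(SRCDIR)/.git $(@D)
endef
LIBFOO_POST_RSYNC_HOOKS += LIBFOO_SYNC_GIT
----------------------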
==== Target-finalize hook
Packages may also register hooks in +LIBFOO_TARGET_FINALIZE_HOOKS+.
These hooks are run after all packages are built, but before the
filesystem images are generated. They are seldom used, and your
package probably does not need them.
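If one is needed, it is registered in the same way as the other hooks; a
minimal sketch, with a purely illustrative action:
----------------------
define LIBFOO_REMOVE_EXAMPLES
	rm -rf $(TARGET_DIR)/usr/share/libfoo/examples
endef
LIBFOO_TARGET_FINALIZE_HOOKS += LIBFOO_REMOVE_EXAMPLES
----------------------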

View File

@@ -1,93 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for packages using kconfig for configuration files
A popular way for a software package to handle user-specified
configuration is +kconfig+. Among others, it is used by the Linux
kernel, Busybox, and Buildroot itself. The presence of a .config file
and a +menuconfig+ target are two well-known symptoms of kconfig being
used.
Buildroot features an infrastructure for packages that use kconfig for
their configuration. This infrastructure provides the necessary logic to
expose the package's +menuconfig+ target as +foo-menuconfig+ in
Buildroot, and to handle the copying back and forth of the configuration
file in a correct way.
The +kconfig-package+ infrastructure is based on the +generic-package+
infrastructure. All variables supported by +generic-package+ are
available in +kconfig-package+ as well. See
xref:generic-package-reference[] for more details.
In order to use the +kconfig-package+ infrastructure for a Buildroot
package, the minimally required lines in the +.mk+ file, in addition to
the variables required by the +generic-package+ infrastructure, are:
------------------------------
FOO_KCONFIG_FILE = reference-to-source-configuration-file
$(eval $(kconfig-package))
------------------------------
This snippet creates the following make targets:
* +foo-menuconfig+, which calls the package's +menuconfig+ target
* +foo-update-config+, which copies the configuration back to the
source configuration file. It is not possible to use this target
when fragment files are set.
* +foo-update-defconfig+, which copies the configuration back to the
source configuration file. The configuration file will only list the
options that differ from the default values. It is not possible to
use this target when fragment files are set.
* +foo-diff-config+, which outputs the differences between the current
configuration and the one defined in the Buildroot configuration for
this kconfig package. The output is useful to identify the
configuration changes that may have to be propagated to
configuration fragments for example.
and ensures that the source configuration file is copied to the build
directory at the right moment.
There are two options to specify a configuration file to use, either
+FOO_KCONFIG_FILE+ (as in the example above) or +FOO_KCONFIG_DEFCONFIG+.
It is mandatory to provide one of them, but not both:
* +FOO_KCONFIG_FILE+ specifies the path to a defconfig or full-config file
to be used to configure the package.
* +FOO_KCONFIG_DEFCONFIG+ specifies the defconfig 'make' rule to call to
configure the package.
In addition to these minimally required lines, several optional variables can
be set to suit the needs of the package under consideration:
* +FOO_KCONFIG_EDITORS+: a space-separated list of kconfig editors to
support, for example 'menuconfig xconfig'. By default, 'menuconfig'.
* +FOO_KCONFIG_FRAGMENT_FILES+: a space-separated list of configuration
fragment files that are merged to the main configuration file.
Fragment files are typically used when there is a desire to stay in sync
with an upstream (def)config file, with some minor modifications.
* +FOO_KCONFIG_OPTS+: extra options to pass when calling the kconfig
editors. This may need to include '$(FOO_MAKE_OPTS)', for example. By
default, empty.
* +FOO_KCONFIG_FIXUP_CMDS+: a list of shell commands needed to fixup the
configuration file after copying it or running a kconfig editor. Such
commands may be needed to ensure a configuration consistent with other
configuration of Buildroot, for example. By default, empty.
* +FOO_KCONFIG_DOTCONFIG+: path (with filename) of the +.config+ file,
relative to the package source tree. The default, +.config+, should
be well suited for all packages that use the standard kconfig
infrastructure as inherited from the Linux kernel; some packages use
a derivative of kconfig that uses a different location.
* +FOO_KCONFIG_DEPENDENCIES+: the list of packages (most probably, host
packages) that need to be built before this package's kconfig is
interpreted. Seldom used. By default, empty.
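Combining several of these optional variables, a hypothetical package could
look like the following sketch (the file names and values are illustrative
only):
------------------------------
FOO_KCONFIG_FILE = package/foo/foo.config
FOO_KCONFIG_FRAGMENT_FILES = package/foo/foo-debug.fragment
FOO_KCONFIG_EDITORS = menuconfig xconfig
FOO_KCONFIG_OPTS = $(FOO_MAKE_OPTS)

$(eval $(kconfig-package))
------------------------------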

View File

@@ -1,143 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for packages building kernel modules
Buildroot offers a helper infrastructure to make it easy to write packages that
build and install Linux kernel modules. Some packages only contain a kernel
module, other packages contain programs and libraries in addition to kernel
modules. Buildroot's helper infrastructure supports either case.
[[kernel-module-tutorial]]
==== +kernel-module+ tutorial
Let's start with an example of how to prepare a simple package that only
builds a kernel module, and no other component:
----
01: ################################################################################
02: #
03: # foo
04: #
05: ################################################################################
06:
07: FOO_VERSION = 1.2.3
08: FOO_SOURCE = foo-$(FOO_VERSION).tar.xz
09: FOO_SITE = http://www.foosoftware.org/download
10: FOO_LICENSE = GPL-2.0
11: FOO_LICENSE_FILES = COPYING
12:
13: $(eval $(kernel-module))
14: $(eval $(generic-package))
----
Lines 7-11 define the usual metadata to specify the version, archive name,
remote URI where to find the package source, and licensing information.
On line 13, we invoke the +kernel-module+ helper infrastructure, which
generates all the appropriate Makefile rules and variables to build
that kernel module.
Finally, on line 14, we invoke the
xref:generic-package-tutorial[+generic-package+ infrastructure].
The dependency on +linux+ is automatically added, so there is no need to
specify it in +FOO_DEPENDENCIES+.
What you may have noticed is that, unlike other package infrastructures,
we explicitly invoke a second infrastructure. This allows a package to
build a kernel module, but also, if needed, use any of the other package
infrastructures to build normal userland components (libraries,
executables...). Using the +kernel-module+ infrastructure on its own is
not sufficient; another package infrastructure *must* be used.
Let's look at a more complex example:
----
01: ################################################################################
02: #
03: # foo
04: #
05: ################################################################################
06:
07: FOO_VERSION = 1.2.3
08: FOO_SOURCE = foo-$(FOO_VERSION).tar.xz
09: FOO_SITE = http://www.foosoftware.org/download
10: FOO_LICENSE = GPL-2.0
11: FOO_LICENSE_FILES = COPYING
12:
13: FOO_MODULE_SUBDIRS = driver/base
14: FOO_MODULE_MAKE_OPTS = KVERSION=$(LINUX_VERSION_PROBED)
15:
16: ifeq ($(BR2_PACKAGE_LIBBAR),y)
17: FOO_DEPENDENCIES = libbar
18: FOO_CONF_OPTS = --enable-bar
19: FOO_MODULE_SUBDIRS += driver/bar
20: else
21: FOO_CONF_OPTS = --disable-bar
22: endif
23:
24: $(eval $(kernel-module))
25:
26: $(eval $(autotools-package))
----
Here, we see that we have an autotools-based package that also builds
the kernel module located in sub-directory +driver/base+ and, if libbar
is enabled, the kernel module located in sub-directory +driver/bar+. It
also defines the variable +KVERSION+ to be passed to the Linux build
system when building the module(s).
[[kernel-module-reference]]
==== +kernel-module+ reference
The main macro for the kernel module infrastructure is +kernel-module+.
Unlike other package infrastructures, it is not stand-alone, and requires
one of the other +*-package+ macros to be called after it.
The +kernel-module+ macro defines post-build and post-target-install
hooks to build the kernel modules. If the package's +.mk+ needs access
to the built kernel modules, it should do so in a post-build hook,
*registered after* the call to +kernel-module+. Similarly, if the
package's +.mk+ needs access to the kernel module after it has been
installed, it should do so in a post-install hook, *registered after*
the call to +kernel-module+. Here's an example:
----
$(eval $(kernel-module))
define FOO_DO_STUFF_WITH_KERNEL_MODULE
# Do something with it...
endef
FOO_POST_BUILD_HOOKS += FOO_DO_STUFF_WITH_KERNEL_MODULE
$(eval $(generic-package))
----
Finally, unlike the other package infrastructures, there is no
+host-kernel-module+ variant to build a host kernel module.
The following additional variables can optionally be defined to further
configure the build of the kernel module:
* +FOO_MODULE_SUBDIRS+ may be set to one or more sub-directories (relative
to the package source top-directory) where the kernel module sources are.
If empty or not set, the sources for the kernel module(s) are considered
to be located at the top of the package source tree.
* +FOO_MODULE_MAKE_OPTS+ may be set to contain extra variable definitions
to pass to the Linux buildsystem.
[[kernel-variables]]
You may also reference (but you may *not* set!) those variables:
* +LINUX_DIR+ contains the path to where the Linux kernel has been
extracted and built.
* +LINUX_VERSION+ contains the version string as configured by the user.
* +LINUX_VERSION_PROBED+ contains the real version string of the kernel,
retrieved by running `make -C $(LINUX_DIR) kernelrelease`.
* +KERNEL_ARCH+ contains the name of the current architecture, like `arm`,
`mips`...

View File

@@ -1,152 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[linux-kernel-specific-infra]]
=== Infrastructure specific to the Linux kernel package
The Linux kernel package can use some specific infrastructures, based on
package hooks, for building Linux kernel tools and/or Linux kernel
extensions.
[[linux-kernel-tools]]
==== linux-kernel-tools
Buildroot offers a helper infrastructure to build, for the target, some
of the userspace tools available within the Linux kernel sources. Since
their source code is part of the kernel source code, a special package,
+linux-tools+, exists and re-uses the sources of the Linux kernel that
runs on the target.
Let's look at an example of a Linux tool. For a new Linux tool named
+foo+, create a new menu entry in the existing
+package/linux-tools/Config.in+. This file will contain the option
descriptions related to each kernel tool that will be used and
displayed in the configuration tool. It would basically look like:
------------------------------
01: config BR2_PACKAGE_LINUX_TOOLS_FOO
02: bool "foo"
03: select BR2_PACKAGE_LINUX_TOOLS
04: help
05: This is a comment that explains what foo kernel tool is.
06:
07: http://foosoftware.org/foo/
------------------------------
The name of the option starts with the prefix +BR2_PACKAGE_LINUX_TOOLS_+,
followed by the uppercase name of the tool (as is done for packages).
.Note
Unlike other packages, the +linux-tools+ package options appear in the
+linux+ kernel menu, under the `Linux Kernel Tools` sub-menu, not under
the `Target packages` main menu.
Then, for each Linux tool, add a new +.mk.in+ file named
+package/linux-tools/linux-tool-foo.mk.in+. It would basically look like:
------------------------------
01: ################################################################################
02: #
03: # foo
04: #
05: ################################################################################
06:
07: LINUX_TOOLS += foo
08:
09: FOO_DEPENDENCIES = libbbb
10:
11: define FOO_BUILD_CMDS
12: $(TARGET_MAKE_ENV) $(MAKE) -C $(LINUX_DIR)/tools foo
13: endef
14:
15: define FOO_INSTALL_STAGING_CMDS
16: $(TARGET_MAKE_ENV) $(MAKE) -C $(LINUX_DIR)/tools \
17: DESTDIR=$(STAGING_DIR) \
18: foo_install
19: endef
20:
21: define FOO_INSTALL_TARGET_CMDS
22: $(TARGET_MAKE_ENV) $(MAKE) -C $(LINUX_DIR)/tools \
23: DESTDIR=$(TARGET_DIR) \
24: foo_install
25: endef
--------------------------------
On line 7, we add the Linux tool +foo+ to the list of available
Linux tools.
On line 9, we specify the list of dependencies this tool relies on. These
dependencies are added to the Linux package dependencies list only when the
+foo+ tool is selected.
The rest of the Makefile, lines 11-25, defines what should be done at the
different steps of the Linux tool build process, just like for a
xref:generic-package-tutorial[+generic package+]. These definitions are
only used when the +foo+ tool is selected. The only supported commands are
+_BUILD_CMDS+, +_INSTALL_STAGING_CMDS+ and +_INSTALL_TARGET_CMDS+.
.Note
One *must not* call +$(eval $(generic-package))+ or any other
package infrastructure! Linux tools are not packages by themselves,
they are part of the +linux-tools+ package.
[[linux-kernel-ext]]
==== linux-kernel-extensions
Some packages provide new features that require the Linux kernel tree
to be modified. This can be in the form of patches to be applied on
the kernel tree, or in the form of new files to be added to the
tree. Buildroot's Linux kernel extensions infrastructure provides
a simple solution to do this automatically, just after the kernel
sources are extracted and before the kernel patches are
applied. Examples of extensions packaged using this mechanism are the
real-time extensions Xenomai and RTAI, as well as the set of
out-of-tree LCD screen drivers +fbtft+.
Let's look at an example of how to add a new Linux extension +foo+.
First, create the package +foo+ that provides the extension: this
package is a standard package; see the previous chapters on how to
create such a package. This package is in charge of downloading the
source archive, checking the hash, defining the license information
and building user space tools, if any.
Then create the 'Linux extension' proper: create a new menu entry in
the existing +linux/Config.ext.in+. This file contains the option
descriptions related to each kernel extension that will be used and
displayed in the configuration tool. It would basically look like:
------------------------------
01: config BR2_LINUX_KERNEL_EXT_FOO
02: bool "foo"
03: help
04: This is a comment that explains what foo kernel extension is.
05:
06: http://foosoftware.org/foo/
------------------------------
Then, for each Linux extension, add a new +.mk+ file named
+linux/linux-ext-foo.mk+. It should basically contain:
------------------------------
01: ################################################################################
02: #
03: # foo
04: #
05: ################################################################################
06:
07: LINUX_EXTENSIONS += foo
08:
09: define FOO_PREPARE_KERNEL
10: $(FOO_DIR)/prepare-kernel-tree.sh --linux-dir=$(@D)
11: endef
--------------------------------
On line 7, we add the Linux extension +foo+ to the list of available
Linux extensions.
On lines 9-11, we define what should be done by the extension to modify
the Linux kernel tree; this is specific to the Linux extension and can
use the variables defined by the +foo+ package, like: +$(FOO_DIR)+ or
+$(FOO_VERSION)+... as well as all the Linux variables, like:
+$(LINUX_VERSION)+ or +$(LINUX_VERSION_PROBED)+, +$(KERNEL_ARCH)+...
See the xref:kernel-variables[definition of those kernel variables].

View File

@@ -1,111 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for LuaRocks-based packages
[[luarocks-package-tutorial]]
==== +luarocks-package+ tutorial
First, let's see how to write a +.mk+ file for a LuaRocks-based package,
with an example:
------------------------
01: ################################################################################
02: #
03: # lua-foo
04: #
05: ################################################################################
06:
07: LUA_FOO_VERSION = 1.0.2-1
08: LUA_FOO_NAME_UPSTREAM = foo
09: LUA_FOO_DEPENDENCIES = bar
10:
11: LUA_FOO_BUILD_OPTS += BAR_INCDIR=$(STAGING_DIR)/usr/include
12: LUA_FOO_BUILD_OPTS += BAR_LIBDIR=$(STAGING_DIR)/usr/lib
13: LUA_FOO_LICENSE = luaFoo license
14: LUA_FOO_LICENSE_FILES = $(LUA_FOO_SUBDIR)/COPYING
15:
16: $(eval $(luarocks-package))
------------------------
On line 7, we declare the version of the package (the same as in the rockspec,
which is the concatenation of the upstream version and the rockspec revision,
separated by a hyphen '-').
On line 8, we declare that the package is called "foo" on LuaRocks. In
Buildroot, we give Lua-related packages a name that starts with "lua", so the
Buildroot name is different from the upstream name. +LUA_FOO_NAME_UPSTREAM+
makes the link between the two names.
On line 9, we declare our dependencies against native libraries, so that they
are built before the build process of our package starts.
On lines 11-12, we tell Buildroot to pass custom options to LuaRocks when it is
building the package.
On lines 13-14, we specify the licensing terms for the package.
Finally, on line 16, we invoke the +luarocks-package+
macro that generates all the Makefile rules that actually allow the
package to be built.
Most of these details can be retrieved from the +rock+ and +rockspec+.
So, this file and the Config.in file can be generated by running the
command +luarocks buildroot foo lua-foo+ in the Buildroot
directory. This command runs a specific Buildroot addon of +luarocks+
that will automatically generate a Buildroot package. The result must
still be manually inspected and possibly modified.
* The +package/Config.in+ file has to be updated manually to include the
generated Config.in files.
[[luarocks-package-reference]]
==== +luarocks-package+ reference
LuaRocks is a deployment and management system for Lua modules, and supports
various +build.type+: +builtin+, +make+ and +cmake+. In the context of
Buildroot, the +luarocks-package+ infrastructure only supports the +builtin+
mode. LuaRocks packages that use the +make+ or +cmake+ build mechanisms
should instead be packaged using the +generic-package+ and +cmake-package+
infrastructures in Buildroot, respectively.
The main macro of the LuaRocks package infrastructure is +luarocks-package+:
like +generic-package+ it works by defining a number of variables providing
metadata information about the package, and then calling +luarocks-package+. It
is worth mentioning that building LuaRocks packages for the host is not
supported, so the macro +host-luarocks-package+ is not implemented.
Just like the generic infrastructure, the LuaRocks infrastructure works
by defining a number of variables before calling the +luarocks-package+
macro.
First, all the package metadata information variables that exist in
the generic infrastructure also exist in the LuaRocks infrastructure:
+LUA_FOO_VERSION+, +LUA_FOO_SOURCE+, +LUA_FOO_SITE+,
+LUA_FOO_DEPENDENCIES+, +LUA_FOO_LICENSE+, +LUA_FOO_LICENSE_FILES+.
Two of them are populated by the LuaRocks infrastructure (for the
+download+ step). If your package is not hosted on the LuaRocks mirror
+$(BR2_LUAROCKS_MIRROR)+, you can override them:
* +LUA_FOO_SITE+, which defaults to +$(BR2_LUAROCKS_MIRROR)+
* +LUA_FOO_SOURCE+, which defaults to
+$(lowercase LUA_FOO_NAME_UPSTREAM)-$(LUA_FOO_VERSION).src.rock+
A few additional variables, specific to the LuaRocks infrastructure, are
also defined. They can be overridden in specific cases.
* +LUA_FOO_NAME_UPSTREAM+, which defaults to +lua-foo+, i.e. the Buildroot
package name
* +LUA_FOO_ROCKSPEC+, which defaults to
+$(lowercase LUA_FOO_NAME_UPSTREAM)-$(LUA_FOO_VERSION).rockspec+
* +LUA_FOO_SUBDIR+, which defaults to
+$(LUA_FOO_NAME_UPSTREAM)-$(LUA_FOO_VERSION_WITHOUT_ROCKSPEC_REVISION)+
* +LUA_FOO_BUILD_OPTS+ contains additional build options for the
+luarocks build+ call.

View File

@@ -1,133 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for Meson-based packages
[[meson-package-tutorial]]
==== +meson-package+ tutorial
http://mesonbuild.com[Meson] is an open source build system meant to be both
extremely fast, and, even more importantly, as user friendly as possible. It
uses https://ninja-build.org[Ninja] as a companion tool to perform the actual
build operations.
Let's see how to write a +.mk+ file for a Meson-based package, with an example:
------------------------------
01: ################################################################################
02: #
03: # foo
04: #
05: ################################################################################
06:
07: FOO_VERSION = 1.0
08: FOO_SOURCE = foo-$(FOO_VERSION).tar.gz
09: FOO_SITE = http://www.foosoftware.org/download
10: FOO_LICENSE = GPL-3.0+
11: FOO_LICENSE_FILES = COPYING
12: FOO_INSTALL_STAGING = YES
13:
14: FOO_DEPENDENCIES = host-pkgconf bar
15:
16: ifeq ($(BR2_PACKAGE_BAZ),y)
17: FOO_CONF_OPTS += -Dbaz=true
18: FOO_DEPENDENCIES += baz
19: else
20: FOO_CONF_OPTS += -Dbaz=false
21: endif
22:
23: $(eval $(meson-package))
--------------------------------
The Makefile starts with the definition of the standard variables for package
declaration (lines 7 to 11).
On line 23, we invoke the +meson-package+ macro that generates all the
Makefile rules that actually allow the package to be built.
In the example, +host-pkgconf+ and +bar+ are declared as dependencies in
+FOO_DEPENDENCIES+ at line 14 because the Meson build file of +foo+ uses
`pkg-config` to determine the compilation flags and libraries of package +bar+.
Note that it is not necessary to add +host-meson+ in the +FOO_DEPENDENCIES+
variable of a package, since this basic dependency is automatically added as
needed by the Meson package infrastructure.
If the "baz" package is selected, then support for the "baz" feature in "foo" is
activated by adding +-Dbaz=true+ to +FOO_CONF_OPTS+ at line 17, as specified in
the +meson_options.txt+ file in "foo" source tree. The "baz" package is also
added to +FOO_DEPENDENCIES+. Note that the support for +baz+ is explicitly
disabled at line 20, if the package is not selected.
To sum it up, to add a new meson-based package, the Makefile example can be
copied verbatim then edited to replace all occurrences of +FOO+ with the
uppercase name of the new package and update the values of the standard
variables.
[[meson-package-reference]]
==== +meson-package+ reference
The main macro of the Meson package infrastructure is +meson-package+. It is
similar to the +generic-package+ macro. The ability to have target and host
packages is also available, with the +host-meson-package+ macro.
Just like the generic infrastructure, the Meson infrastructure works by defining
a number of variables before calling the +meson-package+ macro.
First, all the package metadata information variables that exist in the generic
infrastructure also exist in the Meson infrastructure: +FOO_VERSION+,
+FOO_SOURCE+, +FOO_PATCH+, +FOO_SITE+, +FOO_SUBDIR+, +FOO_DEPENDENCIES+,
+FOO_INSTALL_STAGING+, +FOO_INSTALL_TARGET+.
A few additional variables, specific to the Meson infrastructure, can also be
defined. Many of them are only useful in very specific cases, typical packages
will therefore only use a few of them.
* +FOO_SUBDIR+ may contain the name of a subdirectory inside the
package that contains the main meson.build file. This is useful if,
for example, the main meson.build file is not at the root of
the tree extracted from the tarball. If +HOST_FOO_SUBDIR+ is not
specified, it defaults to +FOO_SUBDIR+.
* +FOO_CONF_ENV+, to specify additional environment variables to pass to
+meson+ for the configuration step. By default, empty.
* +FOO_CONF_OPTS+, to specify additional options to pass to +meson+ for the
configuration step. By default, empty.
* +FOO_CFLAGS+, to specify compiler arguments added to the package specific
+cross-compile.conf+ file +c_args+ property. By default, the value of
+TARGET_CFLAGS+.
* +FOO_CXXFLAGS+, to specify compiler arguments added to the package specific
+cross-compile.conf+ file +cpp_args+ property. By default, the value of
+TARGET_CXXFLAGS+.
* +FOO_LDFLAGS+, to specify compiler arguments added to the package specific
+cross-compile.conf+ file +c_link_args+ and +cpp_link_args+ properties. By
default, the value of +TARGET_LDFLAGS+.
* +FOO_MESON_EXTRA_BINARIES+, to specify a space-separated list of programs
to add to the `[binaries]` section of the meson `cross-compilation.conf`
configuration file. The format is `program-name='/path/to/program'`, with
no space around the +=+ sign, and with the path of the program between
single quotes. By default, empty. Note that Buildroot already sets the
correct values for +c+, +cpp+, +ar+, +strip+, and +pkgconfig+.
* +FOO_MESON_EXTRA_PROPERTIES+, to specify a space-separated list of
properties to add to the `[properties]` section of the meson
`cross-compilation.conf` configuration file. The format is
`property-name=<value>` with no space around the +=+ sign, and with
single quotes around string values. By default, empty. Note that
Buildroot already sets values for +needs_exe_wrapper+, +c_args+,
+c_link_args+, +cpp_args+, +cpp_link_args+, +sys_root+, and
+pkg_config_libdir+.
* +FOO_NINJA_ENV+, to specify additional environment variables to pass to
+ninja+, the Meson companion tool in charge of the build operations. By
default, empty.
* +FOO_NINJA_OPTS+, to specify a space-separated list of targets to build. By
default, empty, to build the default target(s).
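As an illustration of the expected format, a hypothetical package that
needs an extra host program in the `[binaries]` section and a custom
property could use the following sketch (both values are assumptions made
for the example):
------------------------------
FOO_MESON_EXTRA_BINARIES = foo-codegen='$(HOST_DIR)/bin/foo-codegen'
FOO_MESON_EXTRA_PROPERTIES = platform='buildroot'
------------------------------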

View File

@@ -1,129 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for Perl/CPAN packages
[[perl-package-tutorial]]
==== +perl-package+ tutorial
First, let's see how to write a +.mk+ file for a Perl/CPAN package,
with an example:
------------------------
01: ################################################################################
02: #
03: # perl-foo-bar
04: #
05: ################################################################################
06:
07: PERL_FOO_BAR_VERSION = 0.02
08: PERL_FOO_BAR_SOURCE = Foo-Bar-$(PERL_FOO_BAR_VERSION).tar.gz
09: PERL_FOO_BAR_SITE = $(BR2_CPAN_MIRROR)/authors/id/M/MO/MONGER
10: PERL_FOO_BAR_DEPENDENCIES = perl-strictures
11: PERL_FOO_BAR_LICENSE = Artistic or GPL-1.0+
12: PERL_FOO_BAR_LICENSE_FILES = LICENSE
13: PERL_FOO_BAR_DISTNAME = Foo-Bar
14:
15: $(eval $(perl-package))
------------------------
On line 7, we declare the version of the package.
On lines 8 and 9, we declare the name of the tarball and the location
of the tarball on a CPAN server. Buildroot will automatically download
the tarball from this location.
On line 10, we declare our dependencies, so that they are built
before the build process of our package starts.
On lines 11 and 12, we give licensing details about the package (its
license on line 11, and the file containing the license text on line
12).
On line 13, we declare the name of the distribution as needed by the script
+utils/scancpan+ (in order to regenerate/upgrade these package files).
Finally, on line 15, we invoke the +perl-package+ macro that
generates all the Makefile rules that actually allow the package to be
built.
Most of this data can be retrieved from https://metacpan.org/.
So, this file and the Config.in can be generated by running
the script +utils/scancpan Foo-Bar+ in the Buildroot directory
(or in a br2-external tree).
This script creates a Config.in file and foo-bar.mk file for the
requested package, and also recursively for all dependencies specified by
CPAN. You should still manually edit the result. In particular, the
following things should be checked.
* If the perl module links with a shared library that is provided by
another (non-perl) package, this dependency is not added automatically.
It has to be added manually to +PERL_FOO_BAR_DEPENDENCIES+.
* The +package/Config.in+ file has to be updated manually to include the
generated Config.in files. As a hint, the +scancpan+ script prints out
the required +source "..."+ statements, sorted alphabetically.
[[perl-package-reference]]
==== +perl-package+ reference
As a policy, packages that provide Perl/CPAN modules should all be
named +perl-<something>+ in Buildroot.
This infrastructure handles various Perl build systems:
+ExtUtils-MakeMaker+ (EUMM), +Module-Build+ (MB) and +Module-Build-Tiny+.
+Build.PL+ is preferred by default when a package provides both a
+Makefile.PL+ and a +Build.PL+.
The main macro of the Perl/CPAN package infrastructure is
+perl-package+. It is similar to the +generic-package+ macro. The ability to
have target and host packages is also available, with the
+host-perl-package+ macro.
Just like the generic infrastructure, the Perl/CPAN infrastructure
works by defining a number of variables before calling the
+perl-package+ macro.
First, all the package metadata information variables that exist in the
generic infrastructure also exist in the Perl/CPAN infrastructure:
+PERL_FOO_VERSION+, +PERL_FOO_SOURCE+,
+PERL_FOO_PATCH+, +PERL_FOO_SITE+,
+PERL_FOO_SUBDIR+, +PERL_FOO_DEPENDENCIES+,
+PERL_FOO_INSTALL_TARGET+.
Note that setting +PERL_FOO_INSTALL_STAGING+ to +YES+ has no effect
unless a +PERL_FOO_INSTALL_STAGING_CMDS+ variable is defined. The perl
infrastructure doesn't define these commands since Perl modules generally
don't need to be installed to the +staging+ directory.
A few additional variables, specific to the Perl/CPAN infrastructure,
can also be defined. Many of them are only useful in very specific
cases, typical packages will therefore only use a few of them.
* +PERL_FOO_PREFER_INSTALLER+/+HOST_PERL_FOO_PREFER_INSTALLER+,
specifies the preferred installation method. Possible values are
+EUMM+ (for +Makefile.PL+ based installation using
+ExtUtils-MakeMaker+) and +MB+ (for +Build.PL+ based installation
using +Module-Build+). This variable is only used when the package
provides both installation methods.
* +PERL_FOO_CONF_ENV+/+HOST_PERL_FOO_CONF_ENV+, to specify additional
environment variables to pass to the +perl Makefile.PL+ or +perl Build.PL+.
By default, empty.
* +PERL_FOO_CONF_OPTS+/+HOST_PERL_FOO_CONF_OPTS+, to specify additional
configure options to pass to the +perl Makefile.PL+ or +perl Build.PL+.
By default, empty.
* +PERL_FOO_BUILD_OPTS+/+HOST_PERL_FOO_BUILD_OPTS+, to specify additional
options to pass to +make pure_all+ or +perl Build build+ in the build step.
By default, empty.
* +PERL_FOO_INSTALL_TARGET_OPTS+, to specify additional options to
pass to +make pure_install+ or +perl Build install+ in the install step.
By default, empty.
* +HOST_PERL_FOO_INSTALL_OPTS+, to specify additional options to
pass to +make pure_install+ or +perl Build install+ in the install step.
By default, empty.
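For instance, a hypothetical package that ships both a +Makefile.PL+ and a
+Build.PL+ but must be built with +Module-Build+, and that needs an extra
environment variable, could add the following sketch (the values are
illustrative):
------------------------
PERL_FOO_BAR_PREFER_INSTALLER = MB
PERL_FOO_BAR_CONF_ENV = PERL_MM_USE_DEFAULT=1
------------------------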

View File

@@ -1,261 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for Python packages
This infrastructure applies to Python packages that use the standard
Python setuptools mechanism as their build system, generally
recognizable by the usage of a +setup.py+ script.
[[python-package-tutorial]]
==== +python-package+ tutorial
First, let's see how to write a +.mk+ file for a Python package,
with an example:
------------------------
01: ################################################################################
02: #
03: # python-foo
04: #
05: ################################################################################
06:
07: PYTHON_FOO_VERSION = 1.0
08: PYTHON_FOO_SOURCE = python-foo-$(PYTHON_FOO_VERSION).tar.xz
09: PYTHON_FOO_SITE = http://www.foosoftware.org/download
10: PYTHON_FOO_LICENSE = BSD-3-Clause
11: PYTHON_FOO_LICENSE_FILES = LICENSE
12: PYTHON_FOO_ENV = SOME_VAR=1
13: PYTHON_FOO_DEPENDENCIES = libmad
14: PYTHON_FOO_SETUP_TYPE = distutils
15:
16: $(eval $(python-package))
------------------------
On line 7, we declare the version of the package.
On lines 8 and 9, we declare the name of the tarball (xz-ed tarball
recommended) and the location of the tarball on the Web. Buildroot
will automatically download the tarball from this location.
On lines 10 and 11, we give licensing details about the package (its
license on line 10, and the file containing the license text on line
11).
On line 12, we tell Buildroot to pass custom environment variables to
the Python +setup.py+ script when it is run to build and install the
package.
On line 13, we declare our dependencies, so that they are built
before the build process of our package starts.
On line 14, we declare the specific Python build system being used. In
this case the +distutils+ Python build system is used. The two
supported ones are +distutils+ and +setuptools+.
Finally, on line 16, we invoke the +python-package+ macro that
generates all the Makefile rules that actually allow the package to be
built.
[[python-package-reference]]
==== +python-package+ reference
As a policy, packages that merely provide Python modules should all be
named +python-<something>+ in Buildroot. Other packages that use the
Python build system, but are not Python modules, can freely choose
their name (existing examples in Buildroot are +scons+ and
+supervisor+).
Packages that are only compatible with one version of Python (as in:
Python 2 or Python 3) should depend on that version explicitly in
their +Config.in+ file (+BR2_PACKAGE_PYTHON+ for Python 2,
+BR2_PACKAGE_PYTHON3+ for Python 3). Packages that are compatible
with both versions should not explicitly depend on them in their
+Config.in+ file, since that condition is already expressed for the
whole "External python modules" menu.
The main macro of the Python package infrastructure is
+python-package+. It is similar to the +generic-package+ macro. It is
also possible to create Python host packages with the
+host-python-package+ macro.
Just like the generic infrastructure, the Python infrastructure works
by defining a number of variables before calling the +python-package+
or +host-python-package+ macros.
All the package metadata information variables that exist in the
xref:generic-package-reference[generic package infrastructure] also
exist in the Python infrastructure: +PYTHON_FOO_VERSION+,
+PYTHON_FOO_SOURCE+, +PYTHON_FOO_PATCH+, +PYTHON_FOO_SITE+,
+PYTHON_FOO_SUBDIR+, +PYTHON_FOO_DEPENDENCIES+, +PYTHON_FOO_LICENSE+,
+PYTHON_FOO_LICENSE_FILES+, +PYTHON_FOO_INSTALL_STAGING+, etc.
Note that:
* It is not necessary to add +python+ or +host-python+ in the
+PYTHON_FOO_DEPENDENCIES+ variable of a package, since these basic
dependencies are automatically added as needed by the Python
package infrastructure.
* Similarly, it is not needed to add +host-setuptools+ and/or
+host-distutilscross+ dependencies to +PYTHON_FOO_DEPENDENCIES+ for
setuptools-based packages, since these are automatically added by
the Python infrastructure as needed.
One variable specific to the Python infrastructure is mandatory:
* +PYTHON_FOO_SETUP_TYPE+, to define which Python build system is used
by the package. The two supported values are +distutils+ and
+setuptools+. If you don't know which one is used in your package,
look at the +setup.py+ file in your package source code, and see
whether it imports things from the +distutils+ module or the
+setuptools+ module.
A few additional variables, specific to the Python infrastructure, can
optionally be defined, depending on the package's needs. Many of them
are only useful in very specific cases, typical packages will
therefore only use a few of them, or none.
* +PYTHON_FOO_SUBDIR+ may contain the name of a subdirectory inside the
package that contains the main +setup.py+ file. This is useful if,
for example, the main +setup.py+ file is not at the root of
the tree extracted from the tarball. If +HOST_PYTHON_FOO_SUBDIR+ is not
specified, it defaults to +PYTHON_FOO_SUBDIR+.
* +PYTHON_FOO_ENV+, to specify additional environment variables to
pass to the Python +setup.py+ script (for both the build and install
steps). Note that the infrastructure is automatically passing
several standard variables, defined in +PKG_PYTHON_DISTUTILS_ENV+
(for distutils target packages), +HOST_PKG_PYTHON_DISTUTILS_ENV+
(for distutils host packages), +PKG_PYTHON_SETUPTOOLS_ENV+ (for
setuptools target packages) and +HOST_PKG_PYTHON_SETUPTOOLS_ENV+
(for setuptools host packages).
* +PYTHON_FOO_BUILD_OPTS+, to specify additional options to pass to the
Python +setup.py+ script during the build step. For target distutils
packages, the +PKG_PYTHON_DISTUTILS_BUILD_OPTS+ options are already
passed automatically by the infrastructure.
* +PYTHON_FOO_INSTALL_TARGET_OPTS+, +PYTHON_FOO_INSTALL_STAGING_OPTS+,
+HOST_PYTHON_FOO_INSTALL_OPTS+ to specify additional options to pass
to the Python +setup.py+ script during the target installation step,
the staging installation step or the host installation,
respectively. Note that the infrastructure is automatically passing
some options, defined in +PKG_PYTHON_DISTUTILS_INSTALL_TARGET_OPTS+
or +PKG_PYTHON_DISTUTILS_INSTALL_STAGING_OPTS+ (for target distutils
packages), +HOST_PKG_PYTHON_DISTUTILS_INSTALL_OPTS+ (for host
distutils packages), +PKG_PYTHON_SETUPTOOLS_INSTALL_TARGET_OPTS+ or
+PKG_PYTHON_SETUPTOOLS_INSTALL_STAGING_OPTS+ (for target setuptools
packages) and +HOST_PKG_PYTHON_SETUPTOOLS_INSTALL_OPTS+ (for host
setuptools packages).
* +HOST_PYTHON_FOO_NEEDS_HOST_PYTHON+, to define the host python
interpreter. The usage of this variable is limited to host
packages. The two supported values are +python2+ and +python3+. It
will ensure the right host python package is available and will
invoke it for the build. If some build steps are overloaded, the
right python interpreter must be explicitly called in the commands.
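As a short illustration, a hypothetical setuptools-based package that needs
an extra environment variable and an extra install option could use the
following sketch (both values are made up for the example):
------------------------
PYTHON_FOO_SETUP_TYPE = setuptools
PYTHON_FOO_ENV = FOO_DISABLE_CEXT=1
PYTHON_FOO_INSTALL_TARGET_OPTS = --install-data=/usr/share/foo
------------------------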
With the Python infrastructure, all the steps required to build and
install the packages are already defined, and they generally work well
for most Python-based packages. However, when required, it is still
possible to customize what is done in any particular step:
* By adding a post-operation hook (after extract, patch, configure,
build or install). See xref:hooks[] for details.
* By overriding one of the steps. For example, even if the Python
infrastructure is used, if the package +.mk+ file defines its own
+PYTHON_FOO_BUILD_CMDS+ variable, it will be used instead of the
default Python one. However, using this method should be restricted
to very specific cases. Do not use it in the general case.
[[scanpypi]]
==== Generating a +python-package+ from a PyPI repository
If the Python package for which you would like to create a Buildroot
package is available on PyPI, you may want to use the +scanpypi+ tool
located in +utils/+ to automate the process.
You can find the list of existing PyPI packages
https://pypi.python.org[here].
+scanpypi+ requires Python's +setuptools+ package to be installed on
your host.
When at the root of your Buildroot directory, just run:
-----------------------
utils/scanpypi foo bar -o package
-----------------------
This will generate packages +python-foo+ and +python-bar+ in the package
folder if they exist on https://pypi.python.org.
Find the +external python modules+ menu and insert your package inside.
Keep in mind that the items inside a menu should be in alphabetical order.
Please keep in mind that you'll most likely have to manually check the
package for any mistakes as there are things that cannot be guessed by
the generator (e.g. dependencies on any of the python core modules
such as BR2_PACKAGE_PYTHON_ZLIB). Also, please take note that the
license and license files are guessed and must be checked. You also
need to manually add the package to the +package/Config.in+ file.
If your Buildroot package is not in the official Buildroot tree but in
a br2-external tree, use the -o flag as follows:
-----------------------
utils/scanpypi foo bar -o other_package_dir
-----------------------
This will generate packages +python-foo+ and +python-bar+ in the
+other_package_dir+ directory instead of +package+.
Option +-h+ will list the available options:
-----------------------
utils/scanpypi -h
-----------------------
[[python-package-cffi-backend]]
==== +python-package+ CFFI backend
C Foreign Function Interface for Python (CFFI) provides a convenient
and reliable way to call compiled C code from Python using interface
declarations written in C. Python packages relying on this backend can
be identified by the appearance of a +cffi+ dependency in the
+install_requires+ field of their +setup.py+ file.
Such a package should:
* add +python-cffi+ as a runtime dependency in order to install the
compiled C library wrapper on the target. This is achieved by adding
+select BR2_PACKAGE_PYTHON_CFFI+ to the package +Config.in+.
------------------------
config BR2_PACKAGE_PYTHON_FOO
bool "python-foo"
select BR2_PACKAGE_PYTHON_CFFI # runtime
------------------------
* add +host-python-cffi+ as a build-time dependency in order to
cross-compile the C wrapper. This is achieved by adding
+host-python-cffi+ to the +PYTHON_FOO_DEPENDENCIES+ variable.
------------------------
################################################################################
#
# python-foo
#
################################################################################
...
PYTHON_FOO_DEPENDENCIES = host-python-cffi
$(eval $(python-package))
------------------------

View File

@@ -1,81 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for QMake-based packages
[[qmake-package-tutorial]]
==== +qmake-package+ tutorial
First, let's see how to write a +.mk+ file for a QMake-based package, with
an example:
------------------------
01: ################################################################################
02: #
03: # libfoo
04: #
05: ################################################################################
06:
07: LIBFOO_VERSION = 1.0
08: LIBFOO_SOURCE = libfoo-$(LIBFOO_VERSION).tar.gz
09: LIBFOO_SITE = http://www.foosoftware.org/download
10: LIBFOO_CONF_OPTS = QT_CONFIG+=bar QT_CONFIG-=baz
11: LIBFOO_DEPENDENCIES = bar
12:
13: $(eval $(qmake-package))
------------------------
On line 7, we declare the version of the package.
On lines 8 and 9, we declare the name of the tarball (xz-ed tarball
recommended) and the location of the tarball on the Web. Buildroot
will automatically download the tarball from this location.
On line 10, we tell Buildroot what options to enable for libfoo.
On line 11, we tell Buildroot the dependencies of libfoo.
Finally, on line 13, we invoke the +qmake-package+
macro that generates all the Makefile rules that actually allow the
package to be built.
[[qmake-package-reference]]
==== +qmake-package+ reference
The main macro of the QMake package infrastructure is +qmake-package+.
It is similar to the +generic-package+ macro.
Just like the generic infrastructure, the QMake infrastructure works
by defining a number of variables before calling the +qmake-package+
macro.
First, all the package metadata information variables that exist in
the generic infrastructure also exist in the QMake infrastructure:
+LIBFOO_VERSION+, +LIBFOO_SOURCE+, +LIBFOO_PATCH+, +LIBFOO_SITE+,
+LIBFOO_SUBDIR+, +LIBFOO_DEPENDENCIES+, +LIBFOO_INSTALL_STAGING+,
+LIBFOO_INSTALL_TARGET+.
A few additional variables, specific to the QMake infrastructure, can
also be defined.
* +LIBFOO_CONF_ENV+, to specify additional environment variables to
pass to the +qmake+ script for the configuration step. By default, empty.
* +LIBFOO_CONF_OPTS+, to specify additional options to pass to the
+qmake+ script for the configuration step. By default, empty.
* +LIBFOO_MAKE_ENV+, to specify additional environment variables to the
+make+ command during the build and install steps. By default, empty.
* +LIBFOO_MAKE_OPTS+, to specify additional targets to pass to the
+make+ command during the build step. By default, empty.
* +LIBFOO_INSTALL_STAGING_OPTS+, to specify additional targets to pass
to the +make+ command during the staging installation step. By default,
+install+.
* +LIBFOO_INSTALL_TARGET_OPTS+, to specify additional targets to pass
to the +make+ command during the target installation step. By default,
+install+.
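For instance, a hypothetical project whose +.pro+ file honors an extra
environment variable and defines an additional install target could set
(both values are assumptions made for the example):
------------------------
LIBFOO_MAKE_ENV = FOO_FEATURE=1
LIBFOO_INSTALL_TARGET_OPTS = install install_docs
------------------------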

View File

@@ -1,108 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for rebar-based packages
[[rebar-package-tutorial]]
==== +rebar-package+ tutorial
First, let's see how to write a +.mk+ file for a rebar-based package,
with an example:
------------------------------
01: ################################################################################
02: #
03: # erlang-foobar
04: #
05: ################################################################################
06:
07: ERLANG_FOOBAR_VERSION = 1.0
08: ERLANG_FOOBAR_SOURCE = erlang-foobar-$(ERLANG_FOOBAR_VERSION).tar.xz
09: ERLANG_FOOBAR_SITE = http://www.foosoftware.org/download
10: ERLANG_FOOBAR_DEPENDENCIES = host-libaaa libbbb
11:
12: $(eval $(rebar-package))
--------------------------------
On line 7, we declare the version of the package.
On lines 8 and 9, we declare the name of the tarball (xz-ed tarball
recommended) and the location of the tarball on the Web. Buildroot
will automatically download the tarball from this location.
On line 10, we declare our dependencies, so that they are built
before the build process of our package starts.
Finally, on line 12, we invoke the +rebar-package+ macro that
generates all the Makefile rules that actually allow the package to
be built.
[[rebar-package-reference]]
==== +rebar-package+ reference
The main macro of the +rebar+ package infrastructure is
+rebar-package+. It is similar to the +generic-package+ macro. The
ability to have host packages is also available, with the
+host-rebar-package+ macro.
Just like the generic infrastructure, the +rebar+ infrastructure works
by defining a number of variables before calling the +rebar-package+
macro.
First, all the package metadata information variables that exist in
the generic infrastructure also exist in the +rebar+ infrastructure:
+ERLANG_FOOBAR_VERSION+, +ERLANG_FOOBAR_SOURCE+,
+ERLANG_FOOBAR_PATCH+, +ERLANG_FOOBAR_SITE+,
+ERLANG_FOOBAR_SUBDIR+, +ERLANG_FOOBAR_DEPENDENCIES+,
+ERLANG_FOOBAR_INSTALL_STAGING+, +ERLANG_FOOBAR_INSTALL_TARGET+,
+ERLANG_FOOBAR_LICENSE+ and +ERLANG_FOOBAR_LICENSE_FILES+.
A few additional variables, specific to the +rebar+ infrastructure,
can also be defined. Many of them are only useful in very specific
cases, typical packages will therefore only use a few of them.
* +ERLANG_FOOBAR_USE_AUTOCONF+, to specify that the package uses
_autoconf_ at the configuration step. When a package sets this
variable to +YES+, the +autotools+ infrastructure is used.
+
.Note
You can also use some of the variables from the +autotools+
infrastructure: +ERLANG_FOOBAR_CONF_ENV+, +ERLANG_FOOBAR_CONF_OPTS+,
+ERLANG_FOOBAR_AUTORECONF+, +ERLANG_FOOBAR_AUTORECONF_ENV+ and
+ERLANG_FOOBAR_AUTORECONF_OPTS+.
* +ERLANG_FOOBAR_USE_BUNDLED_REBAR+, to specify that the package has
a bundled version of _rebar_ *and* that it shall be used. Valid
values are +YES+ or +NO+ (the default).
+
.Note
If the package bundles a _rebar_ utility, but can use the generic
one that Buildroot provides, just say +NO+ (i.e., do not specify
this variable). Only set it to +YES+ if it is mandatory to use the _rebar_
utility bundled in this package.
* +ERLANG_FOOBAR_REBAR_ENV+, to specify additional environment
variables to pass to the _rebar_ utility.
* +ERLANG_FOOBAR_KEEP_DEPENDENCIES+, to keep the dependencies
described in the rebar.config file. Valid values are +YES+ or +NO+
(the default). Unless this variable is set to +YES+, the _rebar_
infrastructure removes such dependencies in a post-patch hook to
ensure rebar does not download or compile them.
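For example, a hypothetical package that relies on autoconf for its
configuration step and needs an extra environment variable for _rebar_
could add the following sketch (the values are illustrative):
------------------------------
ERLANG_FOOBAR_USE_AUTOCONF = YES
ERLANG_FOOBAR_CONF_OPTS = --with-libbbb=$(STAGING_DIR)/usr
ERLANG_FOOBAR_REBAR_ENV = ERL_COMPILER_OPTIONS="[warnings_as_errors]"
------------------------------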
With the rebar infrastructure, all the steps required to build
and install the packages are already defined, and they generally work
well for most rebar-based packages. However, when required, it is
still possible to customize what is done in any particular step:
* By adding a post-operation hook (after extract, patch, configure,
build or install). See xref:hooks[] for details.
* By overriding one of the steps. For example, even if the rebar
infrastructure is used, if the package +.mk+ file defines its
own +ERLANG_FOOBAR_BUILD_CMDS+ variable, it will be used instead
of the default rebar one. However, using this method should be
restricted to very specific cases. Do not use it in the general
case.

View File

@@ -1,206 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Tips and tricks
[[package-name-variable-relation]]
==== Package name, config entry name and makefile variable relationship
In Buildroot, there is some relationship between:
* the _package name_, which is the package directory name (and the
name of the +*.mk+ file);
* the config entry name that is declared in the +Config.in+ file;
* the makefile variable prefix.
It is mandatory to maintain consistency between these elements,
using the following rules:
* the package directory and the +*.mk+ name are the _package name_
itself (e.g.: +package/foo-bar_boo/foo-bar_boo.mk+);
* the _make_ target name is the _package name_ itself (e.g.:
+foo-bar_boo+);
* the config entry is the upper case _package name_ with `.` and `-`
characters substituted with `_`, prefixed with +BR2_PACKAGE_+ (e.g.:
+BR2_PACKAGE_FOO_BAR_BOO+);
* the +*.mk+ file variable prefix is the upper case _package name_
with `.` and `-` characters substituted with `_` (e.g.:
+FOO_BAR_BOO_VERSION+).
[[check-package]]
==== How to check the coding style
Buildroot provides a script in +utils/check-package+ that checks new or
changed files for coding style. It is not a complete language validator,
but it catches many common mistakes. It is meant to be run on the
files you created or modified, before creating the patch for submission.
This script can be used for packages, filesystem makefiles, Config.in
files, etc. It does not check the files defining the package
infrastructures and some other files containing similar common code.
To use it, run the +check-package+ script, telling it which files you
created or changed:
----
$ ./utils/check-package package/new-package/*
----
If you have the +utils+ directory in your path you can also run:
----
$ cd package/new-package/
$ check-package *
----
The tool can also be used for packages in a br2-external:
----
$ check-package -b /path/to/br2-ext-tree/package/my-package/*
----
[[testing-package]]
==== How to test your package
Once you have added your new package, it is important that you test it
under various conditions: does it build for all architectures? Does it
build with the different C libraries? Does it need threads, NPTL? And
so on...
Buildroot runs http://autobuild.buildroot.org/[autobuilders] which
continuously test random configurations. However, these only build the
`master` branch of the git tree, and your new fancy package is not yet
there.
Buildroot provides a script in +utils/test-pkg+ that uses the same base
configurations as used by the autobuilders so you can test your package
in the same conditions.
First, create a config snippet that contains all the necessary options
needed to enable your package, but without any architecture or toolchain
option. For example, let's create a config snippet that just enables
+libcurl+, without any TLS backend:
----
$ cat libcurl.config
BR2_PACKAGE_LIBCURL=y
----
If your package needs more configuration options, you can add them to the
config snippet. For example, here's how you would test +libcurl+ with
+openssl+ as a TLS backend and the +curl+ program:
----
$ cat libcurl.config
BR2_PACKAGE_LIBCURL=y
BR2_PACKAGE_LIBCURL_CURL=y
BR2_PACKAGE_OPENSSL=y
----
Then run the +test-pkg+ script, by telling it what config snippet to use
and what package to test:
----
$ ./utils/test-pkg -c libcurl.config -p libcurl
----
By default, +test-pkg+ will build your package against a subset of the
toolchains used by the autobuilders, which has been selected by the
Buildroot developers as being the most useful and representative
subset. If you want to test all toolchains, pass the +-a+ option. Note
that in any case, internal toolchains are excluded as they take too
long to build.
The output lists all toolchains that are tested and the corresponding
result (excerpt, results are fake):
----
$ ./utils/test-pkg -c libcurl.config -p libcurl
armv5-ctng-linux-gnueabi [ 1/11]: OK
armv7-ctng-linux-gnueabihf [ 2/11]: OK
br-aarch64-glibc [ 3/11]: SKIPPED
br-arcle-hs38 [ 4/11]: SKIPPED
br-arm-basic [ 5/11]: FAILED
br-arm-cortex-a9-glibc [ 6/11]: OK
br-arm-cortex-a9-musl [ 7/11]: FAILED
br-arm-cortex-m4-full [ 8/11]: OK
br-arm-full [ 9/11]: OK
br-arm-full-nothread [10/11]: FAILED
br-arm-full-static [11/11]: OK
11 builds, 2 skipped, 2 build failed, 1 legal-info failed
----
The results mean:
* `OK`: the build was successful.
* `SKIPPED`: one or more configuration options listed in the config
snippet were not present in the final configuration. This is due to
options having dependencies not satisfied by the toolchain, such as
for example a package that +depends on BR2_USE_MMU+ with a noMMU
toolchain. The missing options are reported in +missing.config+ in
the output build directory (+~/br-test-pkg/TOOLCHAIN_NAME/+ by
default).
* `FAILED`: the build failed. Inspect the +logfile+ file in the output
build directory to see what went wrong:
** the actual build failed,
** the legal-info failed,
** one of the preliminary steps (downloading the config file, applying
the configuration, running `dirclean` for the package) failed.
When there are failures, you can just re-run the script with the same
options (after you fixed your package); the script will attempt to
re-build the package specified with +-p+ for all toolchains, without
the need to re-build all the dependencies of that package.
The +test-pkg+ script accepts a few options, for which you can get some
help by running:
----
$ ./utils/test-pkg -h
----
[[github-download-url]]
==== How to add a package from GitHub
Packages on GitHub often don't have a download area with release tarballs.
However, it is possible to download tarballs directly from the repository
on GitHub. As GitHub is known to have changed download mechanisms in the
past, the 'github' helper function should be used as shown below.
------------------------
# Use a tag or a full commit ID
FOO_VERSION = v1.0
FOO_SITE = $(call github,<user>,<package>,$(FOO_VERSION))
------------------------
.Notes
- The FOO_VERSION can either be a tag or a commit ID.
- The tarball name generated by github matches the default one from
Buildroot (e.g.: +foo-f6fb6654af62045239caed5950bc6c7971965e60.tar.gz+),
so it is not necessary to specify it in the +.mk+ file.
- When using a commit ID as version, you should use the full 40 hex characters.
If the package you wish to add does have a release section on GitHub, the
maintainer may have uploaded a release tarball, or the release may just point
to the automatically generated tarball from the git tag. If there is a
release tarball uploaded by the maintainer, we prefer to use that since it
may be slightly different (e.g. it contains a configure script so we don't
need to do AUTORECONF).
You can see on the release page if it's an uploaded tarball or a git tag:
image::github_hash_mongrel2.png[]
- If it looks like the image above, then it was uploaded by the
maintainer and you should use that link (in that example:
'mongrel2-v1.9.2.tar.bz2') to specify +FOO_SITE+, and not use the
'github' helper.
- On the other hand, if there is *only* the "Source code" link, then
it's an automatically generated tarball and you should use the
'github' helper function.

View File

@@ -1,144 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for virtual packages
[[virtual-package-tutorial]]
In Buildroot, a virtual package is a package whose functionalities are
provided by one or more packages, referred to as 'providers'. The virtual
package management is an extensible mechanism allowing the user to choose
the provider used in the rootfs.
For example, 'OpenGL ES' is an API for 2D and 3D graphics on embedded systems.
The implementation of this API is different for the 'Allwinner Tech Sunxi' and
the 'Texas Instruments OMAP35xx' platforms. So +libgles+ will be a virtual
package and +sunxi-mali+ and +ti-gfx+ will be the providers.
==== +virtual-package+ tutorial
In the following example, we will explain how to add a new virtual package
('something-virtual') and a provider for it ('some-provider').
First, let's create the virtual package.
==== Virtual package's +Config.in+ file
The +Config.in+ file of virtual package 'something-virtual' should contain:
---------------------------
01: config BR2_PACKAGE_HAS_SOMETHING_VIRTUAL
02: bool
03:
04: config BR2_PACKAGE_PROVIDES_SOMETHING_VIRTUAL
05: depends on BR2_PACKAGE_HAS_SOMETHING_VIRTUAL
06: string
---------------------------
In this file, we declare two options, +BR2_PACKAGE_HAS_SOMETHING_VIRTUAL+ and
+BR2_PACKAGE_PROVIDES_SOMETHING_VIRTUAL+, whose values will be used by the
providers.
==== Virtual package's +.mk+ file
The +.mk+ for the virtual package should just evaluate the +virtual-package+ macro:
---------------------------
01: ################################################################################
02: #
03: # something-virtual
04: #
05: ################################################################################
06:
07: $(eval $(virtual-package))
---------------------------
The ability to have target and host packages is also available, with the
+host-virtual-package+ macro.
==== Provider's +Config.in+ file
When adding a package as a provider, only the +Config.in+ file requires some
modifications.
The +Config.in+ file of the package 'some-provider', which provides the
functionalities of 'something-virtual', should contain:
---------------------------
01: config BR2_PACKAGE_SOME_PROVIDER
02: bool "some-provider"
03: select BR2_PACKAGE_HAS_SOMETHING_VIRTUAL
04: help
05: This is a comment that explains what some-provider is.
06:
07: http://foosoftware.org/some-provider/
08:
09: if BR2_PACKAGE_SOME_PROVIDER
10: config BR2_PACKAGE_PROVIDES_SOMETHING_VIRTUAL
11: default "some-provider"
12: endif
---------------------------
On line 3, we select +BR2_PACKAGE_HAS_SOMETHING_VIRTUAL+, and on line 11, we
set the value of +BR2_PACKAGE_PROVIDES_SOMETHING_VIRTUAL+ to the name of the
provider, but only if it is selected.
==== Provider's +.mk+ file
The +.mk+ file should also declare an additional variable
+SOME_PROVIDER_PROVIDES+ to contain the names of all the virtual
packages it is an implementation of:
---------------------------
01: SOME_PROVIDER_PROVIDES = something-virtual
---------------------------
Of course, do not forget to add the proper build and runtime dependencies for
this package!
==== Notes on depending on a virtual package
When adding a package that requires a certain +FEATURE+ provided by a virtual
package, you have to use +depends on BR2_PACKAGE_HAS_FEATURE+, like so:
---------------------------
config BR2_PACKAGE_HAS_FEATURE
bool
config BR2_PACKAGE_FOO
bool "foo"
depends on BR2_PACKAGE_HAS_FEATURE
---------------------------
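In the +.mk+ file of such a package, the dependency is then expressed
on the virtual package itself, so that whichever provider is selected
gets built first (a minimal sketch, assuming the virtual package
providing +FEATURE+ is named 'feature'):
---------------------------
FOO_DEPENDENCIES += feature
---------------------------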
==== Notes on depending on a specific provider
If your package really requires a specific provider, then you'll have to
make your package +depends on+ this provider; you can _not_ +select+ a
provider.
Let's take an example with two providers for a +FEATURE+:
---------------------------
config BR2_PACKAGE_HAS_FEATURE
bool
config BR2_PACKAGE_FOO
bool "foo"
select BR2_PACKAGE_HAS_FEATURE
config BR2_PACKAGE_BAR
bool "bar"
select BR2_PACKAGE_HAS_FEATURE
---------------------------
And you are adding a package that needs +FEATURE+ as provided by +foo+,
but not as provided by +bar+.
If you were to use +select BR2_PACKAGE_FOO+, then the user would still
be able to select +BR2_PACKAGE_BAR+ in the menuconfig. This would create
a configuration inconsistency, whereby two providers of the same +FEATURE+
would be enabled at once, one explicitly set by the user, the other
implicitly by your +select+.
Instead, you have to use +depends on BR2_PACKAGE_FOO+, which avoids any
implicit configuration inconsistency.
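A minimal sketch of the +Config.in+ entry of a hypothetical package
'baz' that needs +FEATURE+ as provided by +foo+ specifically:
---------------------------
config BR2_PACKAGE_BAZ
	bool "baz"
	depends on BR2_PACKAGE_FOO
---------------------------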

View File

@@ -1,91 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Infrastructure for Waf-based packages
[[waf-package-tutorial]]
==== +waf-package+ tutorial
First, let's see how to write a +.mk+ file for a Waf-based package, with
an example:
------------------------
01: ################################################################################
02: #
03: # libfoo
04: #
05: ################################################################################
06:
07: LIBFOO_VERSION = 1.0
08: LIBFOO_SOURCE = libfoo-$(LIBFOO_VERSION).tar.gz
09: LIBFOO_SITE = http://www.foosoftware.org/download
10: LIBFOO_CONF_OPTS = --enable-bar --disable-baz
11: LIBFOO_DEPENDENCIES = bar
12:
13: $(eval $(waf-package))
------------------------
On line 7, we declare the version of the package.
On lines 8 and 9, we declare the name of the tarball (xz-ed tarball
recommended) and the location of the tarball on the Web. Buildroot
will automatically download the tarball from this location.
On line 10, we tell Buildroot what options to enable for libfoo.
On line 11, we tell Buildroot the dependencies of libfoo.
Finally, on line 13, we invoke the +waf-package+
macro that generates all the Makefile rules that actually allow the
package to be built.
[[waf-package-reference]]
==== +waf-package+ reference
The main macro of the Waf package infrastructure is +waf-package+.
It is similar to the +generic-package+ macro.
Just like the generic infrastructure, the Waf infrastructure works
by defining a number of variables before calling the +waf-package+
macro.
First, all the package metadata information variables that exist in
the generic infrastructure also exist in the Waf infrastructure:
+LIBFOO_VERSION+, +LIBFOO_SOURCE+, +LIBFOO_PATCH+, +LIBFOO_SITE+,
+LIBFOO_SUBDIR+, +LIBFOO_DEPENDENCIES+, +LIBFOO_INSTALL_STAGING+,
+LIBFOO_INSTALL_TARGET+.
A few additional variables, specific to the Waf infrastructure, can
also be defined.
* +LIBFOO_SUBDIR+ may contain the name of a subdirectory inside the
package that contains the main wscript file. This is useful if,
for example, the main wscript file is not at the root of
the tree extracted by the tarball. If +HOST_LIBFOO_SUBDIR+ is not
specified, it defaults to +LIBFOO_SUBDIR+.
* +LIBFOO_NEEDS_EXTERNAL_WAF+ can be set to +YES+ or +NO+ to tell
Buildroot whether the package needs an external +waf+ tool instead of
the one bundled in the package source tree. If set to +NO+ (the
default), Buildroot will use the +waf+ executable provided in the
package source tree; if set to +YES+, Buildroot will download and
install +waf+ as a host tool and use it to build the package.
* +LIBFOO_WAF_OPTS+, to specify additional options to pass to the
+waf+ script at every step of the package build process: configure,
build and installation. By default, empty.
* +LIBFOO_CONF_OPTS+, to specify additional options to pass to the
+waf+ script for the configuration step. By default, empty.
* +LIBFOO_BUILD_OPTS+, to specify additional options to pass to the
+waf+ script during the build step. By default, empty.
* +LIBFOO_INSTALL_STAGING_OPTS+, to specify additional options to pass
to the +waf+ script during the staging installation step. By default,
empty.
* +LIBFOO_INSTALL_TARGET_OPTS+, to specify additional options to pass
to the +waf+ script during the target installation step. By default,
empty.
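As an illustration, a hedged sketch combining some of these variables
(the option values are invented):
------------------------
LIBFOO_NEEDS_EXTERNAL_WAF = YES
LIBFOO_WAF_OPTS = --verbose
LIBFOO_CONF_OPTS = --disable-examples
------------------------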

View File

@@ -1,58 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[adding-packages]]
== Adding new packages to Buildroot
This section covers how new packages (userspace libraries or
applications) can be integrated into Buildroot. It also shows how
existing packages are integrated, which is needed for fixing issues or
tuning their configuration.
When you add a new package, be sure to test it in various conditions
(see xref:testing-package[]) and also check it for coding style (see
xref:check-package[]).
include::adding-packages-directory.txt[]
include::adding-packages-generic.txt[]
include::adding-packages-autotools.txt[]
include::adding-packages-cmake.txt[]
include::adding-packages-python.txt[]
include::adding-packages-luarocks.txt[]
include::adding-packages-perl.txt[]
include::adding-packages-virtual.txt[]
include::adding-packages-kconfig.txt[]
include::adding-packages-rebar.txt[]
include::adding-packages-waf.txt[]
include::adding-packages-meson.txt[]
include::adding-packages-cargo.txt[]
include::adding-packages-golang.txt[]
include::adding-packages-qmake.txt[]
include::adding-packages-kernel-module.txt[]
include::adding-packages-asciidoc.txt[]
include::adding-packages-linux-kernel-spec-infra.txt[]
include::adding-packages-hooks.txt[]
include::adding-packages-gettext.txt[]
include::adding-packages-tips.txt[]
include::adding-packages-conclusion.txt[]

View File

@@ -1,16 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Advanced usage
include::using-buildroot-toolchain.txt[]
include::using-buildroot-debugger.txt[]
include::ccache-support.txt[]
include::download-location.txt[]
include::package-make-target.txt[]
include::using-buildroot-development.txt[]

View File

@@ -1,6 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
include::makedev-syntax.txt[]
include::makeusers-syntax.txt[]
include::migrating.txt[]

View File

@@ -1,66 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
== Beyond Buildroot
=== Boot the generated images
==== NFS boot
To achieve NFS-boot, enable _tar root filesystem_ in the _Filesystem
images_ menu.
After a complete build, just run the following command to set up the
NFS-root directory:
-------------------
sudo tar -xavf /path/to/output_dir/rootfs.tar -C /path/to/nfs_root_dir
-------------------
Remember to add this path to +/etc/exports+.
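Such an entry in +/etc/exports+ could look like the following sketch
(path and export options are illustrative; adapt them to your network):
-------------------
/path/to/nfs_root_dir 192.168.0.0/24(rw,no_root_squash,no_subtree_check)
-------------------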
Then, you can execute an NFS boot from your target.
==== Live CD
To build a live CD image, enable the _iso image_ option in the
_Filesystem images_ menu. Note that this option is only available on
the x86 and x86-64 architectures, and if you are building your kernel
with Buildroot.
You can build a live CD image with either IsoLinux, Grub or Grub 2 as
a bootloader, but only IsoLinux supports making this image usable both
as a live CD and live USB (through the _Build hybrid image_ option).
You can test your live CD image using QEMU:
-------------------
qemu-system-i386 -cdrom output/images/rootfs.iso9660
-------------------
Or use it as a hard-drive image if it is a hybrid ISO:
-------------------
qemu-system-i386 -hda output/images/rootfs.iso9660
-------------------
It can be easily flashed to a USB drive with +dd+:
-------------------
dd if=output/images/rootfs.iso9660 of=/dev/sdb
-------------------
=== Chroot
If you want to chroot into a generated image, then there are a few
things you should be aware of:
* you should setup the new root from the _tar root filesystem_ image;
* either the selected target architecture is compatible with your host
machine, or you should use some +qemu-*+ binary and correctly set it
within the +binfmt+ properties to be able to run the binaries built
for the target on your host machine;
* Buildroot does not currently provide +host-qemu+ and +binfmt+
correctly built and set for that kind of use.
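For the simplest case, where the target architecture is compatible with
the host, a hedged sketch of such a chroot session (paths are
illustrative):
-------------------
mkdir /tmp/new_root
sudo tar -xf output/images/rootfs.tar -C /tmp/new_root
sudo chroot /tmp/new_root /bin/sh
-------------------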

View File

@@ -1,55 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[ccache]]
==== Using +ccache+ in Buildroot
http://ccache.samba.org[ccache] is a compiler cache. It stores the
object files resulting from each compilation process, and is able to
skip future compilation of the same source file (with same compiler
and same arguments) by using the pre-existing object files. When doing
almost identical builds from scratch a number of times, it can nicely
speed up the build process.
+ccache+ support is integrated in Buildroot. You just have to enable
+Enable compiler cache+ in +Build options+. This will automatically
build +ccache+ and use it for every host and target compilation.
The cache is located in +$HOME/.buildroot-ccache+. It is stored
outside of Buildroot output directory so that it can be shared by
separate Buildroot builds. If you want to get rid of the cache, simply
remove this directory.
You can get statistics on the cache (its size, number of hits,
misses, etc.) by running +make ccache-stats+.
The make target +ccache-options+ and the +CCACHE_OPTIONS+ variable
provide more generic access to ccache. For example:
-----------------
# set cache limit size
make CCACHE_OPTIONS="--max-size=5G" ccache-options
# zero statistics counters
make CCACHE_OPTIONS="--zero-stats" ccache-options
-----------------
+ccache+ makes a hash of the source files and of the compiler options.
If a compiler option is different, the cached object file will not be
used. Many compiler options, however, contain an absolute path to the
staging directory. Because of this, building in a different output
directory would lead to many cache misses.
To avoid this issue, Buildroot has the +Use relative paths+ option
(+BR2_CCACHE_USE_BASEDIR+). This will rewrite all absolute paths that
point inside the output directory into relative paths. Thus, changing
the output directory no longer leads to cache misses.
A disadvantage of the relative paths is that they also end up as
relative paths in the object file. Therefore, for example, the debugger
will no longer find the file, unless you cd to the output directory
first.
See https://ccache.samba.org/manual.html#_compiling_in_different_directories[the
ccache manual's section on "Compiling in different directories"] for
more details about this rewriting of absolute paths.

View File

@@ -1,399 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
== General Buildroot usage
include::make-tips.txt[]
include::rebuilding-packages.txt[]
=== Offline builds
If you intend to do an offline build and just want to download
all sources that you previously selected in the configurator
('menuconfig', 'nconfig', 'xconfig' or 'gconfig'), then issue:
--------------------
$ make source
--------------------
You can now disconnect or copy the content of your +dl+
directory to the build-host.
=== Building out-of-tree
By default, everything built by Buildroot is stored in the directory
+output+ in the Buildroot tree.
Buildroot also supports building out of tree with a syntax similar to
the Linux kernel. To use it, add +O=<directory>+ to the make command
line:
--------------------
$ make O=/tmp/build
--------------------
Or:
--------------------
$ cd /tmp/build; make O=$PWD -C path/to/buildroot
--------------------
All the output files will be located under +/tmp/build+. If the +O+
path does not exist, Buildroot will create it.
*Note:* the +O+ path can be either an absolute or a relative path, but if it's
passed as a relative path, it is important to note that it is interpreted
relative to the main Buildroot source directory, *not* the current working
directory.
When using out-of-tree builds, the Buildroot +.config+ and temporary
files are also stored in the output directory. This means that you can
safely run multiple builds in parallel using the same source tree as
long as they use unique output directories.
For ease of use, Buildroot generates a Makefile wrapper in the output
directory - so after the first run, you no longer need to pass +O=<...>+
and +-C <...>+, simply run (in the output directory):
--------------------
$ make <target>
--------------------
[[env-vars]]
=== Environment variables
Buildroot also honors some environment variables, when they are passed
to +make+ or set in the environment:
* +HOSTCXX+, the host C++ compiler to use
* +HOSTCC+, the host C compiler to use
* +UCLIBC_CONFIG_FILE=<path/to/.config>+, path to
the uClibc configuration file, used to compile uClibc, if an
internal toolchain is being built.
+
Note that the uClibc configuration file can also be set from the
configuration interface, so through the Buildroot +.config+ file; this
is the recommended way of setting it.
+
* +BUSYBOX_CONFIG_FILE=<path/to/.config>+, path to
the BusyBox configuration file.
+
Note that the BusyBox configuration file can also be set from the
configuration interface, so through the Buildroot +.config+ file; this
is the recommended way of setting it.
+
* +BR2_CCACHE_DIR+ to override the directory where
Buildroot stores the cached files when using ccache.
+
* +BR2_DL_DIR+ to override the directory in which
Buildroot stores/retrieves downloaded files.
+
Note that the Buildroot download directory can also be set from the
configuration interface, so through the Buildroot +.config+ file. See
xref:download-location[] for more details on how you can set the download
directory.
* +BR2_GRAPH_ALT+, if set and non-empty, to use an alternate color-scheme in
build-time graphs
* +BR2_GRAPH_OUT+ to set the filetype of generated graphs, either +pdf+ (the
default), or +png+.
* +BR2_GRAPH_DEPS_OPTS+ to pass extra options to the dependency graph; see
xref:graph-depends[] for the accepted options
* +BR2_GRAPH_DOT_OPTS+ is passed verbatim as options to the +dot+ utility to
draw the dependency graph.
* +BR2_GRAPH_SIZE_OPTS+ to pass extra options to the size graph; see
xref:graph-size[] for the accepted options
An example that uses config files located in the toplevel directory and
in your $HOME:
--------------------
$ make UCLIBC_CONFIG_FILE=uClibc.config BUSYBOX_CONFIG_FILE=$HOME/bb.config
--------------------
If you want to use a compiler other than the default +gcc+
or +g+++ for building helper-binaries on your host, then do
--------------------
$ make HOSTCXX=g++-4.3-HEAD HOSTCC=gcc-4.3-HEAD
--------------------
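Similarly, the download directory can be overridden directly on the
+make+ command line; a minimal sketch, using an illustrative path:
--------------------
$ make BR2_DL_DIR=$HOME/buildroot-dl
--------------------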
=== Dealing efficiently with filesystem images
Filesystem images can get pretty big, depending on the filesystem you choose,
the number of packages, whether you provisioned free space... Yet, some
locations in the filesystem images may just be _empty_ (e.g. a long run of
'zeroes'); such a file is called a _sparse_ file.
Most tools can handle sparse files efficiently, and will only store or write
those parts of a sparse file that are not empty.
For example:
* +tar+ accepts the +-S+ option to tell it to only store non-zero blocks
of sparse files:
** +tar cf archive.tar -S [files...]+ will efficiently store sparse files
in a tarball
** +tar xf archive.tar -S+ will efficiently store on disk the sparse files
extracted from a tarball
* +cp+ accepts the +--sparse=WHEN+ option (+WHEN+ is one of +auto+,
+never+ or +always+):
** +cp --sparse=always source.file dest.file+ will make +dest.file+ a
sparse file if +source.file+ has long runs of zeroes
Other tools may have similar options. Please consult their respective man
pages.
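As an illustration, a short sketch combining these options to transfer
the images directory (file names are illustrative):
--------------------
# store the images in a tarball, keeping sparse files sparse
tar -C output/images -cSf images.tar .
# copy a single image elsewhere, preserving sparseness
cp --sparse=always output/images/rootfs.ext4 /mnt/share/rootfs.ext4
--------------------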
You can use sparse files if you need to store the filesystem images (e.g.
to transfer from one machine to another), or if you need to send them (e.g.
to the QA team).
Note however that flashing a filesystem image to a device while using the
sparse mode of +dd+ may result in a broken filesystem (e.g. the block bitmap
of an ext2 filesystem may be corrupted; or, if you have sparse files in
your filesystem, those parts may not be all-zeroes when read back). You
should only use sparse files when handling files on the build machine, not
when transferring them to an actual device that will be used on the target.
=== Graphing the dependencies between packages
[[graph-depends]]
One of Buildroot's jobs is to know the dependencies between packages,
and make sure they are built in the right order. These dependencies
can sometimes be quite complicated, and for a given system, it is
often not easy to understand why a particular package was brought into
the build by Buildroot.
To help you understand the dependencies, and therefore better
understand the role of the different components in your
embedded Linux system, Buildroot is capable of generating dependency
graphs.
To generate a dependency graph of the full system you have compiled,
simply run:
------------------------
make graph-depends
------------------------
You will find the generated graph in
+output/graphs/graph-depends.pdf+.
If your system is quite large, the dependency graph may be too complex
and difficult to read. It is therefore possible to generate the
dependency graph just for a given package:
------------------------
make <pkg>-graph-depends
------------------------
You will find the generated graph in
+output/graphs/<pkg>-graph-depends.pdf+.
Note that the dependency graphs are generated using the +dot+ tool
from the _Graphviz_ project, which you must have installed on your
system to use this feature. In most distributions, it is available as
the +graphviz+ package.
By default, the dependency graphs are generated in the PDF
format. However, by passing the +BR2_GRAPH_OUT+ environment variable, you
can switch to other output formats, such as PNG, PostScript or
SVG. All formats supported by the +-T+ option of the +dot+ tool are
supported.
--------------------------------
BR2_GRAPH_OUT=svg make graph-depends
--------------------------------
The +graph-depends+ behaviour can be controlled by setting options in the
+BR2_GRAPH_DEPS_OPTS+ environment variable. The accepted options are:
* +--depth N+, +-d N+, to limit the dependency depth to +N+ levels. The
default, +0+, means no limit.
* +--stop-on PKG+, +-s PKG+, to stop the graph on the package +PKG+.
+PKG+ can be an actual package name, a glob, the keyword 'virtual'
(to stop on virtual packages), or the keyword 'host' (to stop on
host packages). The package is still present on the graph, but its
dependencies are not.
* +--exclude PKG+, +-x PKG+, like +--stop-on+, but also omits +PKG+ from
the graph.
* +--transitive+, +--no-transitive+, to draw (or not) the transitive
dependencies. The default is to not draw transitive dependencies.
* +--colors R,T,H+, the comma-separated list of colors to draw the
root package (+R+), the target packages (+T+) and the host packages
(+H+). Defaults to: +lightblue,grey,gainsboro+
--------------------------------
BR2_GRAPH_DEPS_OPTS='-d 3 --no-transitive --colors=red,green,blue' make graph-depends
--------------------------------
=== Graphing the build duration
[[graph-duration]]
When the build of a system takes a long time, it is sometimes useful
to be able to understand which packages are the longest to build, to
see if anything can be done to speed up the build. In order to help
such build time analysis, Buildroot collects the build time of each
step of each package, and allows you to generate graphs from this data.
To generate the build time graph after a build, run:
----------------
make graph-build
----------------
This will generate a set of files in +output/graphs+:
* +build.hist-build.pdf+, a histogram of the build time for each
package, ordered in the build order;
* +build.hist-duration.pdf+, a histogram of the build time for each
package, ordered by duration (longest first);
* +build.hist-name.pdf+, a histogram of the build time for each
package, ordered by package name;
* +build.pie-packages.pdf+, a pie chart of the build time per package;
* +build.pie-steps.pdf+, a pie chart of the global time spent in each
step of the packages' build process.
This +graph-build+ target requires the Python Matplotlib and Numpy
libraries to be installed (+python-matplotlib+ and +python-numpy+ on
most distributions), and also the +argparse+ module if you're using a
Python version older than 2.7 (+python-argparse+ on most
distributions).
By default, the output format for the graph is PDF, but a different
format can be selected using the +BR2_GRAPH_OUT+ environment variable. The
only other format supported is PNG:
----------------
BR2_GRAPH_OUT=png make graph-build
----------------
[[graph-size]]
=== Graphing the filesystem size contribution of packages
When your target system grows, it is sometimes useful to understand
how much each Buildroot package is contributing to the overall root
filesystem size. To help with such an analysis, Buildroot collects
data about files installed by each package and, using this data,
generates a graph and CSV files detailing the size contribution of
the different packages.
To generate these data after a build, run:
----------------
make graph-size
----------------
This will generate:
* +output/graphs/graph-size.pdf+, a pie chart of the contribution of
each package to the overall root filesystem size
* +output/graphs/package-size-stats.csv+, a CSV file giving the size
contribution of each package to the overall root filesystem size
* +output/graphs/file-size-stats.csv+, a CSV file giving the size
contribution of each installed file to the package it belongs to, and
to the overall filesystem size.
This +graph-size+ target requires the Python Matplotlib library to be
installed (+python-matplotlib+ on most distributions), and also the
+argparse+ module if you're using a Python version older than 2.7
(+python-argparse+ on most distributions).
Just like for the duration graph, a +BR2_GRAPH_OUT+ environment variable
is supported to adjust the output file format. See xref:graph-depends[]
for details about this environment variable.
Additionally, one may set the environment variable +BR2_GRAPH_SIZE_OPTS+
to further control the generated graph. Accepted options are:
* `--size-limit X`, `-l X`, will group all packages whose individual
contribution is below the fraction `X` of the total, into a single
entry labelled _Others_ in the graph. By default, `X=0.01`, which means
packages each contributing less than 1% are grouped under _Others_.
Accepted values are in the range `[0.0..1.0]`.
* `--iec`, `--binary`, `--si`, `--decimal`, to use IEC (binary, powers
of 1024) or SI (decimal, powers of 1000; the default) prefixes.
* `--biggest-first`, to sort packages in decreasing size order, rather
than in increasing size order.
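For example, an illustrative invocation combining several of these
options:
--------------------------------
BR2_GRAPH_SIZE_OPTS='--size-limit 0.02 --iec --biggest-first' make graph-size
--------------------------------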
.Note
The collected filesystem size data is only meaningful after a complete
clean rebuild. Be sure to run +make clean all+ before using +make
graph-size+.
To compare the root filesystem size of two different Buildroot compilations,
for example after adjusting the configuration or when switching to another
Buildroot release, use the +size-stats-compare+ script. It takes two
+file-size-stats.csv+ files (produced by +make graph-size+) as input.
Refer to the help text of this script for more details:
----------------
utils/size-stats-compare -h
----------------
[[top-level-parallel-build]]
=== Top-level parallel build
.Note
This section deals with a very experimental feature, which is known to
break even in some non-unusual situations. Use at your own risk.
Buildroot has always been capable of using parallel build on a per
package basis: each package is built by Buildroot using +make -jN+ (or
the equivalent invocation for non-make-based build systems). The level
of parallelism is by default the number of CPUs + 1, but it can be
adjusted using the +BR2_JLEVEL+ configuration option.
Until 2020.02, Buildroot was however building packages in a serial
fashion: each package was built one after the other, without
parallelization of the build between packages. As of 2020.02,
Buildroot has experimental support for *top-level parallel build*,
which allows some significant build time savings by building packages
that have no dependency relationship in parallel. This feature is
however marked as experimental and is known not to work in some cases.
In order to use top-level parallel build, one must:
. Enable the option +BR2_PER_PACKAGE_DIRECTORIES+ in the Buildroot
configuration
. Use +make -jN+ when starting the Buildroot build
Internally, the +BR2_PER_PACKAGE_DIRECTORIES+ option will enable a mechanism
called *per-package directories*, which will have the following
effects:
* Instead of a global _target_ directory and a global _host_ directory
common to all packages, per-package _target_ and _host_ directories
will be used, in +$(O)/per-package/<pkg>/target/+ and
+$(O)/per-package/<pkg>/host/+ respectively. Those folders will be
populated from the corresponding folders of the package dependencies
at the beginning of +<pkg>+ build. The compiler and all other tools
will therefore only be able to see and access files installed by
dependencies explicitly listed by +<pkg>+.
* At the end of the build, the global _target_ and _host_ directories
will be populated, located in +$(O)/target+ and +$(O)/host+
respectively. This means that during the build, those folders will
be empty and it's only at the very end of the build that they will
be populated.
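A minimal sketch of these two steps from the command line ('menuconfig'
is interactive; the comment only indicates what to enable there):
--------------------
$ make menuconfig   # enable BR2_PER_PACKAGE_DIRECTORIES
$ make -j$(nproc)
--------------------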
include::eclipse-integration.txt[]
include::advanced.txt[]

View File

@@ -1,60 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
== Configuration of other components
Before attempting to modify any of the components below, make sure you
have already configured Buildroot itself, and have enabled the
corresponding package.
BusyBox::
+
If you already have a BusyBox configuration file, you can directly
specify this file in the Buildroot configuration, using
+BR2_PACKAGE_BUSYBOX_CONFIG+. Otherwise, Buildroot will start from a
default BusyBox configuration file.
+
To make subsequent changes to the configuration, use +make
busybox-menuconfig+ to open the BusyBox configuration editor.
+
It is also possible to specify a BusyBox configuration file through an
environment variable, although this is not recommended. Refer to
xref:env-vars[] for more details.
uClibc::
+
Configuration of uClibc is done in the same way as for BusyBox. The
configuration variable to specify an existing configuration file is
+BR2_UCLIBC_CONFIG+. The command to make subsequent changes is +make
uclibc-menuconfig+.
Linux kernel::
+
If you already have a kernel configuration file, you can directly
specify this file in the Buildroot configuration, using
+BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG+.
+
If you do not yet have a kernel configuration file, you can either start
by specifying a defconfig in the Buildroot configuration, using
+BR2_LINUX_KERNEL_USE_DEFCONFIG+, or start by creating an empty file and
specifying it as custom configuration file, using
+BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG+.
+
To make subsequent changes to the configuration, use +make
linux-menuconfig+ to open the Linux configuration editor.
Barebox::
+
Configuration of Barebox is done in the same way as for the Linux
kernel. The corresponding configuration variables are
+BR2_TARGET_BAREBOX_USE_CUSTOM_CONFIG+ and
+BR2_TARGET_BAREBOX_USE_DEFCONFIG+. To open the configuration editor,
use +make barebox-menuconfig+.
U-Boot::
+
Configuration of U-Boot (version 2015.04 or newer) is done in the same
way as for the Linux kernel. The corresponding configuration variables
are +BR2_TARGET_UBOOT_USE_CUSTOM_CONFIG+ and
+BR2_TARGET_UBOOT_USE_DEFCONFIG+. To open the configuration editor,
use +make uboot-menuconfig+.

View File

@@ -1,657 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
== Contributing to Buildroot
There are many ways in which you can contribute to Buildroot: analyzing
and fixing bugs, analyzing and fixing package build failures detected by
the autobuilders, testing and reviewing patches sent by other
developers, working on the items in our TODO list and sending your own
improvements to Buildroot or its manual. The following sections give a
little more detail on each of these items.
If you are interested in contributing to Buildroot, the first thing you
should do is to subscribe to the Buildroot mailing list. This list is
the main way of interacting with other Buildroot developers and to send
contributions to. If you aren't subscribed yet, then refer to
xref:community-resources[] for the subscription link.
If you are going to touch the code, it is highly recommended to use a
git repository of Buildroot, rather than starting from an extracted
source code tarball. Git is the easiest way to develop from and directly
send your patches to the mailing list. Refer to xref:getting-buildroot[]
for more information on obtaining a Buildroot git tree.
=== Reproducing, analyzing and fixing bugs
A first way of contributing is to have a look at the open bug reports in
the https://bugs.buildroot.org/buglist.cgi?product=buildroot[Buildroot bug
tracker]. As we strive to keep the bug count as small as possible, all
help in reproducing, analyzing and fixing reported bugs is more than
welcome. Don't hesitate to add a comment to bug reports reporting your
findings, even if you don't yet see the full picture.
=== Analyzing and fixing autobuild failures
The Buildroot autobuilders are a set of build machines that continuously
run Buildroot builds based on random configurations. This is done for
all architectures supported by Buildroot, with various toolchains, and
with a random selection of packages. With the large commit activity on
Buildroot, these autobuilders are a great help in detecting problems
very early after commit.
All build results are available at http://autobuild.buildroot.org[],
statistics are at http://autobuild.buildroot.org/stats.php[]. Every day,
an overview of all failed packages is sent to the mailing list.
Detecting problems is great, but obviously these problems have to be
fixed as well. Your contribution is very welcome here! There are
basically two things that can be done:
- Analyzing the problems. The daily summary mails do not contain details
about the actual failures: in order to see what's going on you have to
open the build log and check the last output. Having someone do
this for all packages in the mail is very useful for other developers,
as they can make a quick initial analysis based on this output alone.
- Fixing a problem. When fixing autobuild failures, you should follow
these steps:
. Check if you can reproduce the problem by building with the same
configuration. You can do this manually, or use the
http://git.buildroot.org/buildroot-test/tree/utils/br-reproduce-build[br-reproduce-build]
script that will automatically clone a Buildroot git repository,
checkout the correct revision, download and set the right
configuration, and start the build.
. Analyze the problem and create a fix.
. Verify that the problem is really fixed by starting from a clean
Buildroot tree and only applying your fix.
. Send the fix to the Buildroot mailing list (see
xref:submitting-patches[]). In case you created a patch against the
package sources, you should also send the patch upstream so that the
problem will be fixed in a later release, and the patch in Buildroot
can be removed.
In the commit message of a patch fixing an autobuild failure, add a
reference to the build result directory, as follows:
---------------------
Fixes: http://autobuild.buildroot.org/results/51000a9d4656afe9e0ea6f07b9f8ed374c2e4069
---------------------
=== Reviewing and testing patches
With the amount of patches sent to the mailing list each day, the
maintainer has a very hard job to judge which patches are ready to apply
and which ones aren't. Contributors can greatly help here by reviewing
and testing these patches.
In the review process, do not hesitate to respond to patch submissions
for remarks, suggestions or anything that will help everyone to
understand the patches and make them better. Please use internet
style replies in plain text emails when responding to patch
submissions.
To indicate approval of a patch, there are three formal tags that keep
track of this approval. To add your tag to a patch, reply to it with the
approval tag below the original author's Signed-off-by line. These tags
will be picked up automatically by patchwork (see
xref:apply-patches-patchwork[]) and will be part of the commit log when
the patch is accepted.
Tested-by:: Indicates that the patch has been tested successfully.
You are encouraged to specify what kind of testing you performed
(compile-test on architecture X and Y, runtime test on target A,
...). This additional information helps other testers and the
maintainer.
Reviewed-by:: Indicates that you code-reviewed the patch and did your
best in spotting problems, but you are not sufficiently familiar with
the area touched to provide an Acked-by tag. This means that there
may be remaining problems in the patch that would be spotted by
someone with more experience in that area. Should such problems be
detected, your Reviewed-by tag remains appropriate and you cannot
be blamed.
Acked-by:: Indicates that you code-reviewed the patch and you are
familiar enough with the area touched to feel that the patch can be
committed as-is (no additional changes required). In case it later
turns out that something is wrong with the patch, your Acked-by could
be considered inappropriate. The difference between Acked-by and
Reviewed-by is thus mainly that you are prepared to take the blame on
Acked patches, but not on Reviewed ones.
If you reviewed a patch and have comments on it, you should simply reply
to the patch stating these comments, without providing a Reviewed-by or
Acked-by tag. These tags should only be provided if you judge the patch
to be good as it is.
It is important to note that neither Reviewed-by nor Acked-by imply
that testing has been performed. To indicate that you both reviewed and
tested the patch, provide two separate tags (Reviewed/Acked-by and
Tested-by).
Note also that _any developer_ can provide Tested/Reviewed/Acked-by
tags, without exception, and we encourage everyone to do this. Buildroot
does not have a defined group of _core_ developers, it just so happens
that some developers are more active than others. The maintainer will
value tags according to the track record of their submitter. Tags
provided by a regular contributor will naturally be trusted more than
tags provided by a newcomer. As you provide tags more regularly, your
'trustworthiness' (in the eyes of the maintainer) will go up, but _any_
tag provided is valuable.
Buildroot's Patchwork website can be used to pull in patches for testing
purposes. Please see xref:apply-patches-patchwork[] for more
information on using Buildroot's Patchwork website to apply patches.
[[apply-patches-patchwork]]
==== Applying Patches from Patchwork
The main use of Buildroot's Patchwork website for a developer is for
pulling in patches into their local git repository for testing
purposes.
When browsing patches in the patchwork management interface, an +mbox+
link is provided at the top of the page. Copy this link address and
run the following commands:
---------------------
$ git checkout -b <test-branch-name>
$ wget -O - <mbox-url> | git am
---------------------
Another option for applying patches is to create a bundle. A bundle is
a set of patches that you can group together using the patchwork
interface. Once the bundle is created and the bundle is made public,
you can copy the +mbox+ link for the bundle and apply the bundle
using the above commands.
=== Work on items from the TODO list
If you want to contribute to Buildroot but don't know where to start,
and you don't like any of the above topics, you can always work on items
from the http://elinux.org/Buildroot#Todo_list[Buildroot TODO list].
Don't hesitate to discuss an item first on the mailing list or on IRC.
Do edit the wiki to indicate when you start working on an item, so we
avoid duplicate efforts.
[[submitting-patches]]
=== Submitting patches
[NOTE]
_Please, do not attach patches to bugs, send them to the mailing list
instead_.
If you made some changes to Buildroot and you would like to contribute
them to the Buildroot project, proceed as follows.
==== The formatting of a patch
We expect patches to be formatted in a specific way. This is necessary
to make it easy to review patches, to be able to apply them easily to
the git repository, to make it easy to find back in the history how
and why things have changed, and to make it possible to use +git
bisect+ to locate the origin of a problem.
First of all, it is essential that the patch has a good commit
message. The commit message should start with a separate line with a
brief summary of the change, prefixed by the area touched by the
patch. A few examples of good commit titles:
* +package/linuxptp: bump version to 2.0+
* +configs/imx23evk: bump Linux version to 4.19+
* +package/pkg-generic: postpone evaluation of dependency conditions+
* +boot/uboot: needs host-{flex,bison}+
* +support/testing: add python-ubjson tests+
The description that follows the prefix should start with a lower case
letter (i.e "bump", "needs", "postpone", "add" in the above examples).
Second, the body of the commit message should describe _why_ this
change is needed, and if necessary also give details about _how_ it
was done. When writing the commit message, think of how the reviewers
will read it, but also think about how you will read it when you look
at this change again a few years down the line.
Third, the patch itself should do only one change, but do it
completely. Two unrelated or weakly related changes should usually be
done in two separate patches. This usually means that a patch affects
only a single package. If several changes are related, it is often
still possible to split them up in small patches and apply them in a
specific order. Small patches make it easier to review, and often
make it easier to understand afterwards why a change was done.
However, each patch must be complete. It is not allowed that the
build is broken when only the first but not the second patch is
applied. This is necessary to be able to use +git bisect+ afterwards.
Of course, while you're doing your development, you're probably going
back and forth between packages, and certainly not committing things
immediately in a way that is clean enough for submission. So most
developers rewrite the history of commits to produce a clean set of
commits that is appropriate for submission. To do this, you need to
use _interactive rebasing_. You can learn about it
https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History[in the Pro
Git book]. Sometimes, it is even easier to discard your history with
+git reset --soft origin/master+ and select individual changes with
+git add -i+ or +git add -p+.
Finally, the patch should be signed off. This is done by adding
+Signed-off-by: Your Real Name <your@email.address>+ at the end of the
commit message. +git commit -s+ does that for you, if configured
properly. The +Signed-off-by+ tag means that you publish the patch
under the Buildroot license (i.e. GPL-2.0+, except for package patches,
which have the upstream license), and that you are allowed to do so.
See http://developercertificate.org/[the Developer Certificate of
Origin] for details.
When adding new packages, you should submit every package in a
separate patch. This patch should have the update to
+package/Config.in+, the package +Config.in+ file, the +.mk+ file, the
+.hash+ file, any init script, and all package patches. If the package
has many sub-options, these are sometimes better added as separate
follow-up patches. The summary line should be something like
+<packagename>: new package+. The body of the commit message can be
empty for simple packages, or it can contain the description of the
package (like the Config.in help text). If anything special has to be
done to build the package, this should also be explained explicitly in
the commit message body.
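As an illustration, a hedged sketch of such a commit message for a
hypothetical package 'libfoo':
---------------------
package/libfoo: new package

libfoo is a library that provides foo facilities to applications.

http://www.foosoftware.org/libfoo/

Signed-off-by: John DOE <john.doe@example.net>
---------------------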
When you bump a package to a new version, you should also submit a
separate patch for each package. Don't forget to update the +.hash+
file, or add it if it doesn't exist yet. Also don't forget to check if
the +_LICENSE+ and +_LICENSE_FILES+ are still valid. The summary line
should be something like +<packagename>: bump to version <new
version>+. If the new version only contains security updates compared
to the existing one, the summary should be +<packagename>: security
bump to version <new version>+ and the commit message body should show
the CVE numbers that are fixed. If some package patches can be removed
in the new version, it should be explained explicitly why they can be
removed, preferably with the upstream commit ID. Also any other
required changes should be explained explicitly, like configure
options that no longer exist or are no longer needed.
If you are interested in getting notified of build failures and of
further changes in the packages you added or modified, please add
yourself to the DEVELOPERS file. This should be done in the same patch
creating or modifying the package. See xref:DEVELOPERS[the DEVELOPERS file]
for more information.
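A hedged sketch of such a +DEVELOPERS+ entry, assuming the usual +N:+
(name) and +F:+ (files) layout of that file and a hypothetical package:
---------------------
N:	John DOE <john.doe@example.net>
F:	package/libfoo/
---------------------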
Buildroot provides a handy tool to check for common coding style
mistakes on files you created or modified, called +check-package+ (see
xref:check-package[] for more information).
==== Preparing a patch series
Starting from the changes committed in your local git view, _rebase_
your development branch on top of the upstream tree before generating
a patch set. To do so, run:
---------------------
$ git fetch --all --tags
$ git rebase origin/master
---------------------
Now, you are ready to generate then submit your patch set.
To generate it, run:
---------------------
$ git format-patch -M -n -s -o outgoing origin/master
---------------------
This will generate patch files in the +outgoing+ subdirectory,
automatically adding the +Signed-off-by+ line.
Once patch files are generated, you can review/edit the commit message
before submitting them, using your favorite text editor.
Buildroot provides a handy tool to know to whom your patches should be
sent, called +get-developers+ (see xref:DEVELOPERS[] for more
information). This tool reads your patches and outputs the appropriate
+git send-email+ command to use:
---------------------
$ ./utils/get-developers outgoing/*
---------------------
Use the output of +get-developers+ to send your patches:
---------------------
$ git send-email --to buildroot@buildroot.org --cc bob --cc alice outgoing/*
---------------------
Alternatively, +get-developers -e+ can be used directly with the
+--cc-cmd+ argument to +git send-email+ to automatically CC the
affected developers:
---------------------
$ git send-email --to buildroot@buildroot.org \
--cc-cmd './utils/get-developers -e' origin/master
---------------------
+git+ can be configured to automatically do this out of the box with:
---------------------
$ git config sendemail.to buildroot@buildroot.org
$ git config sendemail.ccCmd "$(pwd)/utils/get-developers -e"
---------------------
And then just do:
---------------------
$ git send-email origin/master
---------------------
Note that +git+ should be configured to use your mail account.
To configure +git+, see +man git-send-email+ or google it.
If you do not use +git send-email+, make sure posted *patches are not
line-wrapped*, otherwise they cannot easily be applied. In such a case,
fix your e-mail client, or better yet, learn to use +git send-email+.
==== Cover letter
If you want to present the whole patch set in a separate mail, add
+--cover-letter+ to the +git format-patch+ command (see +man
git-format-patch+ for further information). This will generate a
template for an introduction e-mail to your patch series.
A 'cover letter' may be useful to introduce the changes you propose
in the following cases:
* large number of commits in the series;
* deep impact of the changes in the rest of the project;
* RFC footnote:[RFC: (Request for comments) change proposal];
* whenever you feel it will help presenting your work, your choices,
the review process, etc.
==== Patch revision changelog
When improvements are requested, the new revision of each commit
should include a changelog of the modifications between each
submission. Note that when your patch series is introduced by a cover
letter, an overall changelog may be added to the cover letter in
addition to the changelog in the individual commits.
The best way to rework a patch series is interactive rebasing:
+git rebase -i origin/master+. Consult the git manual for more
information.
When added to the individual commits, this changelog is added when
editing the commit message. Below the +Signed-off-by+ section, add
+---+ and your changelog.
Although the changelog will be visible to the reviewers in the mail
thread, as well as in http://patchwork.buildroot.org[patchwork], +git+
will automatically ignore lines below +---+ when the patch is
merged. This is the intended behavior: the changelog is not meant to
be preserved forever in the +git+ history of the project.
Hereafter the recommended layout:
---------------
Patch title: short explanation, max 72 chars
A paragraph that explains the problem, and how it manifests itself. If
the problem is complex, it is OK to add more paragraphs. All paragraphs
should be wrapped at 72 characters.
A paragraph that explains the root cause of the problem. Again, more
than one paragraph is OK.
Finally, one or more paragraphs that explain how the problem is solved.
Don't hesitate to explain complex solutions in detail.
Signed-off-by: John DOE <john.doe@example.net>
---
Changes v2 -> v3:
- foo bar (suggested by Jane)
- bar buz
Changes v1 -> v2:
- alpha bravo (suggested by John)
- charly delta
---------------
Any patch revision should include the version number. The version number
is simply composed of the letter +v+ followed by an +integer+ greater
than or equal to two (e.g. "PATCH v2", "PATCH v3" ...).
This can be easily handled with +git format-patch+ by using the option
+--subject-prefix+:
---------------------
$ git format-patch --subject-prefix "PATCH v4" \
-M -s -o outgoing origin/master
---------------------
Since git version 1.8.1, you can also use +-v <n>+ (where <n> is the
version number):
---------------------
$ git format-patch -v4 -M -s -o outgoing origin/master
---------------------
When you provide a new version of a patch, please mark the old one as
superseded in http://patchwork.buildroot.org[patchwork]. You need to
create an account on http://patchwork.buildroot.org[patchwork] to be
able to modify the status of your patches. Note that you can only change
the status of patches you submitted yourself, which means the email
address you register in http://patchwork.buildroot.org[patchwork] should
match the one you use for sending patches to the mailing list.
You can also add the +--in-reply-to <message-id>+ option when
submitting a patch to the mailing list. The id of the mail to reply to
can be found under the "Message Id" tag on
http://patchwork.buildroot.org[patchwork]. The advantage of
*in-reply-to* is that patchwork will automatically mark the previous
version of the patch as superseded.
[[reporting-bugs]]
=== Reporting issues/bugs or getting help
Before reporting any issue, please check in
xref:community-resources[the mailing list archive] whether someone has
already reported and/or fixed a similar problem.
However you choose to report bugs or get help, either by
opening a bug in the xref:community-resources[bug tracker] or by
xref:community-resources[sending a mail to the mailing list], there are
a number of details to provide in order to help people reproduce and
find a solution to the issue.
Try to think as if you were trying to help someone else; in
that case, what would you need?
Here is a short list of details to provide in such a case:
* host machine (OS/release)
* version of Buildroot
* target for which the build fails
* package(s) for which the build fails
* the command that fails and its output
* any other information you think may be relevant
Additionally, you should add the +.config+ file (or if you know how, a
+defconfig+; see xref:customize-store-buildroot-config[]).
If some of these details are too large, do not hesitate to use a
pastebin service. Note that not all available pastebin services will
preserve Unix-style line terminators when downloading raw pastes.
The following pastebin services are known to work correctly:
- https://gist.github.com/
- http://code.bulix.org/
=== Using the run-tests framework
Buildroot includes a run-time testing framework called run-tests built
upon Python scripting and QEMU runtime execution. There are two types of
test cases within the framework: build-time tests, and run-time tests
that have a QEMU dependency. The goals of the framework are
the following:
* build a well defined configuration
* optionally, verify some properties of the build output
* if it is a run-time test:
** boot it under QEMU
** run some test condition to verify that a given feature is working
The run-tests tool has a series of options, documented in the tool's +-h+
help output. Common options include setting the download folder, the
output folder, keeping the build output and, when running multiple test
cases, setting the JLEVEL for each.
Here is an example walk through of running a test case.
* As a first step, let us see which test cases are available. They can be
listed by executing +support/testing/run-tests -l+. During test development,
these tests can all be run from the console, either one at a time or
selectively as a subset.
---------------------
$ support/testing/run-tests -l
List of tests
test_run (tests.utils.test_check_package.TestCheckPackage)
Test the various ways the script can be called in a simple top to ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainBuildrootMusl) ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainBuildrootuClibc) ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainCCache) ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainCtngMusl) ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainLinaroArm) ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainSourceryArmv4) ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainSourceryArmv5) ... ok
test_run (tests.toolchain.test_external.TestExternalToolchainSourceryArmv7) ... ok
[snip]
test_run (tests.init.test_systemd.TestInitSystemSystemdRoFull) ... ok
test_run (tests.init.test_systemd.TestInitSystemSystemdRoIfupdown) ... ok
test_run (tests.init.test_systemd.TestInitSystemSystemdRoNetworkd) ... ok
test_run (tests.init.test_systemd.TestInitSystemSystemdRwFull) ... ok
test_run (tests.init.test_systemd.TestInitSystemSystemdRwIfupdown) ... ok
test_run (tests.init.test_systemd.TestInitSystemSystemdRwNetworkd) ... ok
test_run (tests.init.test_busybox.TestInitSystemBusyboxRo) ... ok
test_run (tests.init.test_busybox.TestInitSystemBusyboxRoNet) ... ok
test_run (tests.init.test_busybox.TestInitSystemBusyboxRw) ... ok
test_run (tests.init.test_busybox.TestInitSystemBusyboxRwNet) ... ok
Ran 157 tests in 0.021s
OK
---------------------
Those runtime tests are regularly executed by the Buildroot GitLab CI
infrastructure, see +.gitlab-ci.yml+ and https://gitlab.com/buildroot.org/buildroot/-/jobs.
==== Creating a test case
The best way to get familiar with how to create a test case is to look at a
few of the basic file system +support/testing/tests/fs/+ and init
+support/testing/tests/init/+ test scripts. Those tests give good examples
of a basic build and build with run type of tests. There are other more
advanced cases that use things like nested +br2-external+ folders to provide
skeletons and additional packages.
The test cases by default use a br-arm-full-* uClibc-ng toolchain and a
prebuilt kernel for an armv5/armv7 CPU. It is recommended to use the default
defconfig test configuration except when glibc/musl or a newer kernel are
necessary. Using the default saves build time, and the test automatically
inherits kernel and standard library upgrades when the default is
updated.
The basic test case definition involves:
* Creation of a new test file
* Defining a unique test class
* Determining if the default defconfig plus test options can be used
* Implementing a +def test_run(self):+ function to optionally start up the
emulator and provide test case conditions.
Beyond creating the test script, there are a couple of additional steps that
should be taken once you have your initial test case script. The first is
to add yourself to the +DEVELOPERS+ file to be the maintainer of that test
case. The second is to update the Gitlab CI yml by executing
+make .gitlab-ci.yml+.
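For example, after writing a hypothetical test in
+support/testing/tests/package/test_foo.py+, the sequence could look
like this (the test module and class names are purely illustrative):
---------------------
$ make .gitlab-ci.yml
$ support/testing/run-tests -d dl -o output tests.package.test_foo.TestFoo  # hypothetical test name
---------------------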
==== Debugging a test case
Within the Buildroot repository, the testing framework is organized at the
top level in +support/testing/+ by folders of +conf+, +infra+ and +tests+.
All the test cases live under the +tests+ folder and are organized in various
folders representing the category of test.
Let's walk through an example.
* Using the Busybox Init system test case with a read/write rootfs
+tests.init.test_busybox.TestInitSystemBusyboxRw+
* A minimal set of command line arguments when debugging a test case would
include '-d' to point to your dl folder, '-o' to point to an output folder,
and '-k' to keep any output on both pass and fail. With those options, the
test retains logging and build artifacts that provide the status of the
build and the execution of the test case.
---------------------
$ support/testing/run-tests -d dl -o output_folder -k tests.init.test_busybox.TestInitSystemBusyboxRw
15:03:26 TestInitSystemBusyboxRw Starting
15:03:28 TestInitSystemBusyboxRw Building
15:08:18 TestInitSystemBusyboxRw Building done
15:08:27 TestInitSystemBusyboxRw Cleaning up
.
Ran 1 test in 301.140s
OK
---------------------
* For the case of a successful build, the +output_folder+ would contain a
<test name> folder with the Buildroot build, build log and run-time log. If
the build failed, the console output would show the stage at which it failed
(setup / build / run). Depending on the failure stage, the build/run logs
and/or Buildroot build artifacts can be inspected and instrumented. If the
QEMU instance needs to be launched for additional testing, the first few
lines of the run-time log capture its command line, which allows some
incremental testing without re-running +support/testing/run-tests+.
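For instance, for the example test used here, the emulator command line
could be recovered by inspecting the start of the run-time log:
---------------------
$ head -n 5 output_folder/TestInitSystemBusyboxRw-run.log
---------------------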
* You can also make modifications to the current sources inside the
+output_folder+ (e.g. for debug purposes) and rerun the standard
Buildroot make targets (in order to regenerate the complete image with
the new modifications) and then rerun the test. Modifying the sources
directly can speed up debugging compared to adding patch files, wiping the
output directory, and starting the test again.
---------------------
$ ls output_folder/
TestInitSystemBusyboxRw/
TestInitSystemBusyboxRw-build.log
TestInitSystemBusyboxRw-run.log
---------------------
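As a sketch of the rebuild-and-retest cycle described above, assuming the
wrapper Makefile that Buildroot normally generates in out-of-tree output
directories is present, and that busybox is the package being modified:
---------------------
$ make -C output_folder/TestInitSystemBusyboxRw busybox-rebuild all
$ support/testing/run-tests -d dl -o output_folder -k tests.init.test_busybox.TestInitSystemBusyboxRw
---------------------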
* The source file used to implement this example test is found under
+support/testing/tests/init/test_busybox.py+. This file outlines the
minimal defconfig that creates the build, QEMU configuration to launch
the built images and the test case assertions.
To test an existing or new test case within Gitlab CI, there is a method of
invoking a specific test by creating a Buildroot fork in Gitlab under your
account. This can be handy when adding/changing a run-time test or fixing a
bug on a use case tested by a run-time test case.
In the examples below, the <name> component of the branch name is a unique
string you choose to identify this specific job being created.
* to trigger all run-test test case jobs:
---------------------
$ git push gitlab HEAD:<name>-runtime-tests
---------------------
* to trigger one test case job, a specific branch naming string is used that
includes the full test case name.
---------------------
$ git push gitlab HEAD:<name>-<test case name>
---------------------

View File

@@ -1,64 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[customize-store-buildroot-config]]
=== Storing the Buildroot configuration
The Buildroot configuration can be stored using the command
+make savedefconfig+.
This strips the Buildroot configuration down by removing configuration
options that are at their default value. The result is stored in a file
called +defconfig+. If you want to save it in another place, change the
+BR2_DEFCONFIG+ option in the Buildroot configuration itself, or call
make with +make savedefconfig BR2_DEFCONFIG=<path-to-defconfig>+.
The recommended place to store this defconfig is
+configs/<boardname>_defconfig+. If you follow this recommendation, the
configuration will be listed in +make help+ and can be set again by
running +make <boardname>_defconfig+.
Alternatively, you can copy the file to any other place and rebuild with
+make defconfig BR2_DEFCONFIG=<path-to-defconfig-file>+.
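Putting both recommendations together, a typical sequence to save the
configuration and load it again later could be (run from the top of the
Buildroot tree; +<boardname>+ is a placeholder):
-----
$ make savedefconfig BR2_DEFCONFIG=$(pwd)/configs/<boardname>_defconfig
$ make <boardname>_defconfig
-----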
[[customize-store-package-config]]
=== Storing the configuration of other components
The configuration files for BusyBox, the Linux kernel, Barebox, U-Boot
and uClibc should be stored as well if changed. For each of these
components, a Buildroot configuration option exists to point to an input
configuration file, e.g. +BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE+. To store
their configuration, set these configuration options to a path where you
want to save the configuration files, and then use the helper targets
described below to actually store the configuration.
As explained in xref:customize-dir-structure[], the recommended path to
store these configuration files is
+board/<company>/<boardname>/foo.config+.
Make sure that you create a configuration file 'before' changing
the +BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE+ etc. options. Otherwise,
Buildroot will try to access this config file, which doesn't exist
yet, and will fail. You can create the configuration file by running
+make linux-menuconfig+ etc.
Buildroot provides a few helper targets to make the saving of
configuration files easier.
* +make linux-update-defconfig+ saves the linux configuration to the
path specified by +BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE+. It
simplifies the config file by removing default values. However,
this only works with kernels starting from 2.6.33. For earlier
kernels, use +make linux-update-config+.
* +make busybox-update-config+ saves the busybox configuration to the
path specified by +BR2_PACKAGE_BUSYBOX_CONFIG+.
* +make uclibc-update-config+ saves the uClibc configuration to the
path specified by +BR2_UCLIBC_CONFIG+.
* +make barebox-update-defconfig+ saves the barebox configuration to the
path specified by +BR2_TARGET_BAREBOX_CUSTOM_CONFIG_FILE+.
* +make uboot-update-defconfig+ saves the U-Boot configuration to the
path specified by +BR2_TARGET_UBOOT_CUSTOM_CONFIG_FILE+.
* For at91bootstrap3, no helper exists so you have to copy the config
file manually to +BR2_TARGET_AT91BOOTSTRAP3_CUSTOM_CONFIG_FILE+.
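For example, a typical round-trip for the Linux kernel configuration,
assuming +BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE+ already points to
+board/<company>/<boardname>/linux.config+, could be:
-----
$ make linux-menuconfig         # adjust the kernel configuration
$ make linux-update-defconfig   # write it back to the configured file
-----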

View File

@@ -1,30 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[customize-device-permission]]
==== Setting file permissions and ownership and adding custom device nodes
Sometimes it is necessary to set specific permissions or ownership on files
or device nodes. For example, certain files may need to be owned by
root. Since the post-build scripts are not run as root, you cannot do
such changes from there unless you use an explicit fakeroot from the
post-build script.
Instead, Buildroot provides support for so-called _permission tables_.
To use this feature, set config option +BR2_ROOTFS_DEVICE_TABLE+ to a
space-separated list of permission tables, regular text files following
the xref:makedev-syntax[makedev syntax].
If you are using a static device table (i.e. not using +devtmpfs+,
+mdev+, or +(e)udev+) then you can add device nodes using the same
syntax, in so-called _device tables_. To use this feature, set config
option +BR2_ROOTFS_STATIC_DEVICE_TABLE+ to a space-separated list of
device tables.
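As an illustration only, here are two hypothetical lines, one as it could
appear in a permission table and one as it could appear in a device table;
the file and device names are made up, and the fields follow the makedev
syntax referenced above:
-----
# <name>      <type> <mode> <uid> <gid> <major> <minor> <start> <inc> <count>
/usr/bin/foo   f      4755   0     0     -       -       -       -     -
/dev/mydevice  c      660    0     0     250     0       0       0     -
-----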
As shown in xref:customize-dir-structure[], the recommended location for
such files is +board/<company>/<boardname>/+.
It should be noted that if the specific permissions or device nodes are
related to a specific application, you should set variables
+FOO_PERMISSIONS+ and +FOO_DEVICES+ in the package's +.mk+ file instead
(see xref:generic-package-reference[]).

View File

@@ -1,111 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[customize-dir-structure]]
=== Recommended directory structure
When customizing Buildroot for your project, you will be creating one or
more project-specific files that need to be stored somewhere. While most
of these files could be placed in _any_ location as their path is to be
specified in the Buildroot configuration, the Buildroot developers
recommend a specific directory structure which is described in this
section.
Orthogonal to this directory structure, you can choose _where_ you place
this structure itself: either inside the Buildroot tree, or outside of
it using a br2-external tree. Both options are valid; the choice is up
to you.
-----
+-- board/
| +-- <company>/
| +-- <boardname>/
| +-- linux.config
| +-- busybox.config
| +-- <other configuration files>
| +-- post_build.sh
| +-- post_image.sh
| +-- rootfs_overlay/
| | +-- etc/
| | +-- <some file>
| +-- patches/
| +-- foo/
| | +-- <some patch>
| +-- libbar/
| +-- <some other patches>
|
+-- configs/
| +-- <boardname>_defconfig
|
+-- package/
| +-- <company>/
| +-- Config.in (if not using a br2-external tree)
| +-- <company>.mk (if not using a br2-external tree)
| +-- package1/
| | +-- Config.in
| | +-- package1.mk
| +-- package2/
| +-- Config.in
| +-- package2.mk
|
+-- Config.in (if using a br2-external tree)
+-- external.mk (if using a br2-external tree)
+-- external.desc (if using a br2-external tree)
-----
Details on the files shown above are given further in this chapter.
Note: if you choose to place this structure outside of the Buildroot
tree but in a br2-external tree, the <company> and possibly <boardname>
components may be superfluous and can be left out.
==== Implementing layered customizations
It is quite common for a user to have several related projects that partly
need the same customizations. Instead of duplicating these
customizations for each project, it is recommended to use a layered
customization approach, as explained in this section.
Almost all of the customization methods available in Buildroot, like
post-build scripts and root filesystem overlays, accept a
space-separated list of items. The specified items are always treated in
order, from left to right. By creating more than one such item, one for
the common customizations and another one for the really
project-specific customizations, you can avoid unnecessary duplication.
Each layer is typically embodied by a separate directory inside
+board/<company>/+. Depending on your projects, you could even introduce
more than two layers.
An example directory structure where a user has two customization
layers, 'common' and 'fooboard', is:
-----
+-- board/
+-- <company>/
+-- common/
| +-- post_build.sh
| +-- rootfs_overlay/
| | +-- ...
| +-- patches/
| +-- ...
|
+-- fooboard/
+-- linux.config
+-- busybox.config
+-- <other configuration files>
+-- post_build.sh
+-- rootfs_overlay/
| +-- ...
+-- patches/
+-- ...
-----
For example, if the user has the +BR2_GLOBAL_PATCH_DIR+ configuration
option set as:
-----
BR2_GLOBAL_PATCH_DIR="board/<company>/common/patches board/<company>/fooboard/patches"
-----
then first the patches from the 'common' layer would be applied,
followed by the patches from the 'fooboard' layer.
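Similarly, since post-build scripts also accept a space-separated list,
both layers can provide one (a sketch reusing the layout above):
-----
BR2_ROOTFS_POST_BUILD_SCRIPT="board/<company>/common/post_build.sh board/<company>/fooboard/post_build.sh"
-----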

View File

@@ -1,415 +0,0 @@
// -*- mode:doc -*- ;
// vim: set syntax=asciidoc:
[[outside-br-custom]]
=== Keeping customizations outside of Buildroot
As already briefly mentioned in xref:customize-dir-structure[], you can
place project-specific customizations in two locations:
* directly within the Buildroot tree, typically maintaining them using
branches in a version control system so that upgrading to a newer
Buildroot release is easy.
* outside of the Buildroot tree, using the _br2-external_ mechanism.
This mechanism makes it possible to keep package recipes, board support and
configuration files outside of the Buildroot tree, while still
having them nicely integrated in the build logic. We call this
location a _br2-external tree_. This section explains how to use
the br2-external mechanism and what to provide in a br2-external
tree.
One can tell Buildroot to use one or more br2-external trees by setting
the +BR2_EXTERNAL+ make variable to the path(s) of the br2-external
tree(s) to use. It can be passed to any Buildroot +make+ invocation. It
is automatically saved in the hidden +.br2-external.mk+ file in the output
directory. Thanks to this, there is no need to pass +BR2_EXTERNAL+ at
every +make+ invocation. It can however be changed at any time by
passing a new value, and can be removed by passing an empty value.
.Note
The path to a br2-external tree can be either absolute or relative.
If it is passed as a relative path, it is important to note that it is
interpreted relative to the main Buildroot source directory, *not* to
the Buildroot output directory.
.Note:
If using a br2-external tree from before Buildroot 2016.11, you need to
convert it before you can use it with Buildroot 2016.11 onward. See
xref:br2-external-converting[] for help on doing so.
Some examples:
-----
buildroot/ $ make BR2_EXTERNAL=/path/to/foo menuconfig
-----
From now on, definitions from the +/path/to/foo+ br2-external tree
will be used:
-----
buildroot/ $ make
buildroot/ $ make legal-info
-----
We can switch to another br2-external tree at any time:
-----
buildroot/ $ make BR2_EXTERNAL=/where/we/have/bar xconfig
-----
We can also use multiple br2-external trees:
----
buildroot/ $ make BR2_EXTERNAL=/path/to/foo:/where/we/have/bar menuconfig
----
Or disable the usage of any br2-external tree:
-----
buildroot/ $ make BR2_EXTERNAL= xconfig
-----
==== Layout of a br2-external tree
A br2-external tree must contain at least those three files, described
in the following chapters:
* +external.desc+
* +external.mk+
* +Config.in+
Apart from those mandatory files, there may be additional and optional
content that may be present in a br2-external tree, like the +configs/+
or +provides/+ directories. They are described in the following chapters
as well.
A complete example br2-external tree layout is also described later.
===== The +external.desc+ file
That file describes the br2-external tree: the _name_ and _description_
for that br2-external tree.
The format for this file is line based, with each line starting with a
keyword, followed by a colon and one or more spaces, followed by the
value assigned to that keyword. There are two keywords currently
recognised:
* +name+, mandatory, defines the name for that br2-external tree. That
name must only use ASCII characters in the set +[A-Za-z0-9_]+; any
other character is forbidden. Buildroot sets the variable
+BR2_EXTERNAL_$(NAME)_PATH+ to the absolute path of the br2-external
tree, so that you can use it to refer to your br2-external tree. This
variable is available both in Kconfig, so you can use it to source your
Kconfig files (see below) and in the Makefile, so that you can use it
to include other Makefiles (see below) or refer to other files (like
data files) from your br2-external tree.
+
.Note:
Since it is possible to use multiple br2-external trees at once, this
name is used by Buildroot to generate variables for each of those trees.
That name is used to identify your br2-external tree, so try to come up
with a name that really describes your br2-external tree, in order for
it to be relatively unique, so that it does not clash with another name
from another br2-external tree, especially if you are planning on
somehow sharing your br2-external tree with third parties or using
br2-external trees from third parties.
* +desc+, optional, provides a short description for that br2-external
tree. It shall fit on a single line, is mostly free-form (see below),
and is used when displaying information about a br2-external tree (e.g.
above the list of defconfig files, or as the prompt in the menuconfig);
as such, it should be relatively brief (40 chars is probably a good upper
limit). The description is available in the +BR2_EXTERNAL_$(NAME)_DESC+
variable.
Examples of names and the corresponding +BR2_EXTERNAL_$(NAME)_PATH+
variables:
* +FOO+ -> +BR2_EXTERNAL_FOO_PATH+
* +BAR_42+ -> +BR2_EXTERNAL_BAR_42_PATH+
In the following examples, it is assumed that the name is set to +BAR_42+.
.Note:
Both +BR2_EXTERNAL_$(NAME)_PATH+ and +BR2_EXTERNAL_$(NAME)_DESC+ are
available in the Kconfig files and the Makefiles. They are also
exported in the environment, so they are available in post-build, post-image
and in-fakeroot scripts.
===== The +Config.in+ and +external.mk+ files
Those files (which may each be empty) can be used to define package
recipes (i.e. +foo/Config.in+ and +foo/foo.mk+ like for packages bundled
in Buildroot itself) or other custom configuration options or make logic.
Buildroot automatically includes the +Config.in+ from each br2-external
tree to make it appear in the top-level configuration menu, and includes
the +external.mk+ from each br2-external tree with the rest of the
makefile logic.
The main usage of this is to store package recipes. The recommended way
to do this is to write a +Config.in+ file that looks like:
------
source "$BR2_EXTERNAL_BAR_42_PATH/package/package1/Config.in"
source "$BR2_EXTERNAL_BAR_42_PATH/package/package2/Config.in"
------
Then, have an +external.mk+ file that looks like:
------
include $(sort $(wildcard $(BR2_EXTERNAL_BAR_42_PATH)/package/*/*.mk))
------
And then in +$(BR2_EXTERNAL_BAR_42_PATH)/package/package1+ and
+$(BR2_EXTERNAL_BAR_42_PATH)/package/package2+ create normal
Buildroot package recipes, as explained in xref:adding-packages[].
If you prefer, you can also group the packages in subdirectories
called <boardname> and adapt the above paths accordingly.
You can also define custom configuration options in +Config.in+ and
custom make logic in +external.mk+.
===== The +configs/+ directory
One can store Buildroot defconfigs in the +configs+ subdirectory of
the br2-external tree. Buildroot will automatically show them in the
output of +make list-defconfigs+, below an +External configs+ label that
contains the name of the br2-external tree they are defined in, and
allow them to be loaded with the normal +make <name>_defconfig+ command.
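For example, assuming a br2-external tree that provides
+configs/my-board_defconfig+ (as in the example layout shown later in
this section), it can be loaded with:
-----
buildroot/ $ make BR2_EXTERNAL=/path/to/br2-ext-tree my-board_defconfig
-----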
.Note:
If a defconfig file is present in more than one br2-external tree, then
the one from the last br2-external tree is used. It is thus possible
to override a defconfig bundled in Buildroot or another br2-external
tree.
===== The +provides/+ directory
For some packages, Buildroot provides a choice between two (or more)
API-compatible implementations. For example, there is a choice between
libjpeg and jpeg-turbo, and another choice between openssl and
libressl. Finally, there is a choice to select one of the known,
pre-configured toolchains.
It is possible for a br2-external tree to extend those choices, by providing
a set of files that define those alternatives:
* +provides/toolchains.in+ defines the pre-configured toolchains, which
will then be listed in the toolchain selection;
* +provides/jpeg.in+ defines the alternative libjpeg implementations;
* +provides/openssl.in+ defines the alternative openssl implementations.
===== Free-form content
One can store all the board-specific configuration files there, such
as the kernel configuration, the root filesystem overlay, or any other
configuration file for which Buildroot allows the location to be set (by
using the +BR2_EXTERNAL_$(NAME)_PATH+ variable). For example, you
could set the paths to a global patch directory, to a rootfs overlay
and to the kernel configuration file as follows (e.g. by running
`make menuconfig` and filling in these options):
----
BR2_GLOBAL_PATCH_DIR=$(BR2_EXTERNAL_BAR_42_PATH)/patches/
BR2_ROOTFS_OVERLAY=$(BR2_EXTERNAL_BAR_42_PATH)/board/<boardname>/overlay/
BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE=$(BR2_EXTERNAL_BAR_42_PATH)/board/<boardname>/kernel.config
----
===== Additional Linux kernel extensions
Additional Linux kernel extensions (see xref:linux-kernel-ext[]) can
be added by storing them in the `linux/` directory at the root of a
br2-external tree.
===== Example layout
Here is an example layout using all features of br2-external (the sample
content is shown for the file above it, when it is relevant to explain
the br2-external tree; this is all entirely made up just for the sake of
illustration, of course):
----
/path/to/br2-ext-tree/
|- external.desc
| |name: BAR_42
| |desc: Example br2-external tree
| `----
|
|- Config.in
| |source "$BR2_EXTERNAL_BAR_42_PATH/toolchain/toolchain-external-mine/Config.in.options"
| |source "$BR2_EXTERNAL_BAR_42_PATH/package/pkg-1/Config.in"
| |source "$BR2_EXTERNAL_BAR_42_PATH/package/pkg-2/Config.in"
| |source "$BR2_EXTERNAL_BAR_42_PATH/package/my-jpeg/Config.in"
| |
| |config BAR_42_FLASH_ADDR
| | hex "my-board flash address"
| | default 0x10AD
| `----
|
|- external.mk
| |include $(sort $(wildcard $(BR2_EXTERNAL_BAR_42_PATH)/package/*/*.mk))
| |include $(sort $(wildcard $(BR2_EXTERNAL_BAR_42_PATH)/toolchain/*/*.mk))
| |
| |flash-my-board:
| | $(BR2_EXTERNAL_BAR_42_PATH)/board/my-board/flash-image \
| | --image $(BINARIES_DIR)/image.bin \
| | --address $(BAR_42_FLASH_ADDR)
| `----
|
|- package/pkg-1/Config.in
| |config BR2_PACKAGE_PKG_1
| | bool "pkg-1"
| | help
| | Some help about pkg-1
| `----
|- package/pkg-1/pkg-1.hash
|- package/pkg-1/pkg-1.mk
| |PKG_1_VERSION = 1.2.3
| |PKG_1_SITE = /some/where/to/get/pkg-1
| |PKG_1_LICENSE = blabla
| |
| |define PKG_1_INSTALL_INIT_SYSV
| | $(INSTALL) -D -m 0755 $(PKG_1_PKGDIR)/S99my-daemon \
| | $(TARGET_DIR)/etc/init.d/S99my-daemon
| |endef
| |
| |$(eval $(autotools-package))
| `----
|- package/pkg-1/S99my-daemon
|
|- package/pkg-2/Config.in
|- package/pkg-2/pkg-2.hash
|- package/pkg-2/pkg-2.mk
|
|- provides/jpeg.in
| |config BR2_PACKAGE_MY_JPEG
| | bool "my-jpeg"
| `----
|- package/my-jpeg/Config.in
| |config BR2_PACKAGE_PROVIDES_JPEG
| | default "my-jpeg" if BR2_PACKAGE_MY_JPEG
| `----
|- package/my-jpeg/my-jpeg.mk
| |# This is a normal package .mk file
| |MY_JPEG_VERSION = 1.2.3
| |MY_JPEG_SITE = https://example.net/some/place
| |MY_JPEG_PROVIDES = jpeg
| |$(eval $(autotools-package))
| `----
|
|- provides/toolchains.in
| |config BR2_TOOLCHAIN_EXTERNAL_MINE
| | bool "my custom toolchain"
| | depends on BR2_some_arch
| | select BR2_INSTALL_LIBSTDCPP
| `----
|- toolchain/toolchain-external-mine/Config.in.options
| |if BR2_TOOLCHAIN_EXTERNAL_MINE
| |config BR2_TOOLCHAIN_EXTERNAL_PREFIX
| | default "arch-mine-linux-gnu"
| |config BR2_PACKAGE_PROVIDES_TOOLCHAIN_EXTERNAL
| | default "toolchain-external-mine"
| |endif
| `----
|- toolchain/toolchain-external-mine/toolchain-external-mine.mk
| |TOOLCHAIN_EXTERNAL_MINE_SITE = https://example.net/some/place
| |TOOLCHAIN_EXTERNAL_MINE_SOURCE = my-toolchain.tar.gz
| |$(eval $(toolchain-external-package))
| `----
|
|- linux/Config.ext.in
| |config BR2_LINUX_KERNEL_EXT_EXAMPLE_DRIVER
| | bool "example-external-driver"
| | help
| | Example external driver
| `----
|- linux/linux-ext-example-driver.mk
|
|- configs/my-board_defconfig
| |BR2_GLOBAL_PATCH_DIR="$(BR2_EXTERNAL_BAR_42_PATH)/patches/"
| |BR2_ROOTFS_OVERLAY="$(BR2_EXTERNAL_BAR_42_PATH)/board/my-board/overlay/"
| |BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL_BAR_42_PATH)/board/my-board/post-image.sh"
| |BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE="$(BR2_EXTERNAL_BAR_42_PATH)/board/my-board/kernel.config"
| `----
|
|- patches/linux/0001-some-change.patch
|- patches/linux/0002-some-other-change.patch
|- patches/busybox/0001-fix-something.patch
|
|- board/my-board/kernel.config
|- board/my-board/overlay/var/www/index.html
|- board/my-board/overlay/var/www/my.css
|- board/my-board/flash-image
`- board/my-board/post-image.sh
|#!/bin/sh
|generate-my-binary-image \
| --root ${BINARIES_DIR}/rootfs.tar \
| --kernel ${BINARIES_DIR}/zImage \
| --dtb ${BINARIES_DIR}/my-board.dtb \
| --output ${BINARIES_DIR}/image.bin
`----
----
The br2-external tree will then be visible in the menuconfig (with
the layout expanded):
----
External options --->
*** Example br2-external tree (in /path/to/br2-ext-tree/)
[ ] pkg-1
[ ] pkg-2
(0x10AD) my-board flash address
----
If you are using more than one br2-external tree, it would look like
(with the layout expanded and the second one with name +FOO_27+ but no
+desc:+ field in +external.desc+):
----
External options --->
Example br2-external tree --->
*** Example br2-external tree (in /path/to/br2-ext-tree)
[ ] pkg-1
[ ] pkg-2
(0x10AD) my-board flash address
FOO_27 --->
*** FOO_27 (in /path/to/another-br2-ext)
[ ] foo
[ ] bar
----
Additionally, the jpeg provider will be visible in the jpeg choice:
----
Target packages --->
Libraries --->
Graphics --->
[*] jpeg support
jpeg variant () --->
( ) jpeg
( ) jpeg-turbo
*** jpeg from: Example br2-external tree ***
(X) my-jpeg
*** jpeg from: FOO_27 ***
( ) another-jpeg
----
And similarly for the toolchains:
----
Toolchain --->
Toolchain () --->
( ) Custom toolchain
*** Toolchains from: Example br2-external tree ***
(X) my custom toolchain
----
.Note
The toolchain options in +toolchain/toolchain-external-mine/Config.in.options+
will not appear in the `Toolchain` menu. They must be explicitly included
from within the br2-external's top-level +Config.in+ and will thus appear
in the `External options` menu.

View File

@@ -1,56 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[customize-packages]]
=== Adding project-specific packages
In general, any new package should be added directly in the +package+
directory and submitted to the Buildroot upstream project. How to add
packages to Buildroot in general is explained in full detail in
xref:adding-packages[] and will not be repeated here. However, your
project may need some proprietary packages that cannot be upstreamed.
This section will explain how you can keep such project-specific
packages in a project-specific directory.
As shown in xref:customize-dir-structure[], the recommended location for
project-specific packages is +package/<company>/+. If you are using the
br2-external tree feature (see xref:outside-br-custom[]) the recommended
location is to put them in a sub-directory named +package/+ in your
br2-external tree.
However, Buildroot will not be aware of the packages in this location,
unless we perform some additional steps. As explained in
xref:adding-packages[], a package in Buildroot basically consists of two
files: a +.mk+ file (describing how to build the package) and a
+Config.in+ file (describing the configuration options for this
package).
Buildroot will automatically include the +.mk+ files in first-level
subdirectories of the +package+ directory (using the pattern
+package/\*/*.mk+). If we want Buildroot to include +.mk+ files from
deeper subdirectories (like +package/<company>/package1/+) then we
simply have to add a +.mk+ file in a first-level subdirectory that
includes these additional +.mk+ files. Therefore, create a file
+package/<company>/<company>.mk+ with the following contents (assuming you
have only one extra directory level below +package/<company>/+):
-----
include $(sort $(wildcard package/<company>/*/*.mk))
-----
For the +Config.in+ files, create a file +package/<company>/Config.in+
that includes the +Config.in+ files of all your packages. An exhaustive
list has to be provided since wildcards are not supported in the source command of kconfig.
For example:
-----
source "package/<company>/package1/Config.in"
source "package/<company>/package2/Config.in"
-----
Include this new file +package/<company>/Config.in+ from
+package/Config.in+, preferably in a company-specific menu to make
merges with future Buildroot versions easier.
If using a br2-external tree, refer to xref:outside-br-custom[] for how
to fill in those files.

View File

@@ -1,59 +0,0 @@
// -*- mode:doc -*- ;
// vim: set syntax=asciidoc:
[[customize-patches]]
=== Adding project-specific patches
It is sometimes useful to apply 'extra' patches to packages - on top of
those provided in Buildroot. This might be used to support custom
features in a project, for example, or when working on a new
architecture.
The +BR2_GLOBAL_PATCH_DIR+ configuration option can be used to specify
a space-separated list of one or more directories containing package
patches.
For a specific version +<packageversion>+ of a specific package
+<packagename>+, patches are applied from +BR2_GLOBAL_PATCH_DIR+ as
follows:
. For every directory - +<global-patch-dir>+ - that exists in
+BR2_GLOBAL_PATCH_DIR+, a +<package-patch-dir>+ will be determined as
follows:
+
* +<global-patch-dir>/<packagename>/<packageversion>/+ if the
directory exists.
+
* Otherwise, +<global-patch-dir>/<packagename>+ if the directory
exists.
. Patches will then be applied from a +<package-patch-dir>+ as
follows:
+
* If a +series+ file exists in the package directory, then patches are
applied according to the +series+ file;
+
* Otherwise, patch files matching +*.patch+ are applied in
alphabetical order. So, to ensure they are applied in the right
order, it is highly recommended to name the patch files like this:
+<number>-<description>.patch+, where +<number>+ refers to the
'apply order'.
For information about how patches are applied for a package, see
xref:patch-apply-order[].
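To illustrate these lookup rules, a purely hypothetical
+BR2_GLOBAL_PATCH_DIR+ layout could look like this (the package and
patch names are made up):
-----
board/<company>/patches/
+-- foo/
|   +-- 0001-fix-build-with-gcc-11.patch
|   +-- 0002-disable-self-tests.patch
+-- libbar/
    +-- 1.2.3/
        +-- series
        +-- 0001-add-missing-include.patch
-----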
The +BR2_GLOBAL_PATCH_DIR+ option is the preferred method for
specifying a custom patch directory for packages. It can be used to
specify a patch directory for any package in Buildroot. It should also
be used in place of the custom patch directory options that are
available for packages such as U-Boot and Barebox. This allows a user
to manage all their patches from one top-level directory.
The exception to +BR2_GLOBAL_PATCH_DIR+ being the preferred method for
specifying custom patches is +BR2_LINUX_KERNEL_PATCH+.
+BR2_LINUX_KERNEL_PATCH+ should be used to specify kernel patches that
are available at a URL. *Note:* +BR2_LINUX_KERNEL_PATCH+ specifies kernel
patches that are applied after patches available in +BR2_GLOBAL_PATCH_DIR+,
as it is done from a post-patch hook of the Linux package.

View File

@@ -1,37 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Customization _after_ the images have been created
While post-build scripts (xref:rootfs-custom[]) are run _before_
building the filesystem image, kernel and bootloader, *post-image
scripts* can be used to perform some specific actions _after_ all images
have been created.
Post-image scripts can for example be used to automatically extract your
root filesystem tarball in a location exported by your NFS server, or
to create a special firmware image that bundles your root filesystem and
kernel image, or any other custom action required for your project.
To enable this feature, specify a space-separated list of post-image
scripts in config option +BR2_ROOTFS_POST_IMAGE_SCRIPT+ (in the +System
configuration+ menu). If you specify a relative path, it will be
relative to the root of the Buildroot tree.
Just like post-build scripts, post-image scripts are run with the main
Buildroot tree as current working directory. The path to the +images+
output directory is passed as the first argument to each script. If the
config option +BR2_ROOTFS_POST_SCRIPT_ARGS+ is not empty, these
arguments will be passed to the script too. All the scripts will be
passed the exact same set of arguments; it is not possible to pass
different sets of arguments to each script.
Again just like for the post-build scripts, the scripts have access to
the environment variables +BR2_CONFIG+, +HOST_DIR+, +STAGING_DIR+,
+TARGET_DIR+, +BUILD_DIR+, +BINARIES_DIR+ and +BASE_DIR+.
The post-image scripts will be executed as the user that executes
Buildroot, which should normally _not_ be the root user. Therefore, any
action requiring root permissions in one of these scripts will require
special handling (usage of fakeroot or sudo), which is left to the
script developer.
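Here is a minimal sketch of such a post-image script, assuming the build
produces +rootfs.tar+ and +zImage+ images (adapt the file names to your
configuration):
-----
#!/bin/sh
# $1 is the path to the images directory (BINARIES_DIR); see above.
set -e
IMAGES_DIR="$1"
# Hypothetical example: bundle the kernel and root filesystem into one archive.
tar -C "${IMAGES_DIR}" -czf "${IMAGES_DIR}/firmware.tar.gz" zImage rootfs.tar
-----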

View File

@@ -1,66 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Quick guide to storing your project-specific customizations
Earlier in this chapter, the different methods for making
project-specific customizations have been described. This section will
now summarize all this by providing step-by-step instructions for storing your
project-specific customizations. Clearly, the steps that are not relevant to
your project can be skipped.
1. +make menuconfig+ to configure toolchain, packages and kernel.
1. +make linux-menuconfig+ to update the kernel config, similar for
other configuration like busybox, uclibc, ...
1. +mkdir -p board/<manufacturer>/<boardname>+
1. Set the following options to +board/<manufacturer>/<boardname>/<package>.config+
(as far as they are relevant):
* +BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE+
* +BR2_PACKAGE_BUSYBOX_CONFIG+
* +BR2_UCLIBC_CONFIG+
* +BR2_TARGET_AT91BOOTSTRAP3_CUSTOM_CONFIG_FILE+
* +BR2_TARGET_BAREBOX_CUSTOM_CONFIG_FILE+
* +BR2_TARGET_UBOOT_CUSTOM_CONFIG_FILE+
1. Write the configuration files:
* +make linux-update-defconfig+
* +make busybox-update-config+
* +make uclibc-update-config+
* +cp <output>/build/at91bootstrap3-*/.config
board/<manufacturer>/<boardname>/at91bootstrap3.config+
* +make barebox-update-defconfig+
* +make uboot-update-defconfig+
1. Create +board/<manufacturer>/<boardname>/rootfs-overlay/+ and fill it
with additional files you need on your rootfs, e.g.
+board/<manufacturer>/<boardname>/rootfs-overlay/etc/inittab+.
Set +BR2_ROOTFS_OVERLAY+
to +board/<manufacturer>/<boardname>/rootfs-overlay+.
1. Create a post-build script
+board/<manufacturer>/<boardname>/post_build.sh+. Set
+BR2_ROOTFS_POST_BUILD_SCRIPT+ to
+board/<manufacturer>/<boardname>/post_build.sh+
1. If additional setuid permissions have to be set or device nodes have
to be created, create +board/<manufacturer>/<boardname>/device_table.txt+
and add that path to +BR2_ROOTFS_DEVICE_TABLE+.
1. If additional user accounts have to be created, create
+board/<manufacturer>/<boardname>/users_table.txt+ and add that path
to +BR2_ROOTFS_USERS_TABLES+.
1. To add custom patches to certain packages, set +BR2_GLOBAL_PATCH_DIR+
to +board/<manufacturer>/<boardname>/patches/+ and add your patches
for each package in a subdirectory named after the package. Each
patch should be called +<packagename>-<num>-<description>.patch+.
1. Specifically for the Linux kernel, there also exists the option
+BR2_LINUX_KERNEL_PATCH+ with as main advantage that it can also
download patches from a URL. If you do not need this,
+BR2_GLOBAL_PATCH_DIR+ is preferred. U-Boot, Barebox, at91bootstrap
and at91bootstrap3 also have separate options, but these do not
provide any advantage over +BR2_GLOBAL_PATCH_DIR+ and will likely be
removed in the future.
1. If you need to add project-specific packages, create
+package/<manufacturer>/+ and place your packages in that
directory. Create an overall +<manufacturer>.mk+ file that
includes the +.mk+ files of all your packages. Create an overall
+Config.in+ file that sources the +Config.in+ files of all your
packages. Include this +Config.in+ file from Buildroot's
+package/Config.in+ file.
1. +make savedefconfig+ to save the buildroot configuration.
1. +cp defconfig configs/<boardname>_defconfig+

View File

@@ -1,153 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[rootfs-custom]]
=== Customizing the generated target filesystem
Besides changing the configuration through +make *config+,
there are a few other ways to customize the resulting target filesystem.
The two recommended methods, which can co-exist, are root filesystem
overlay(s) and post-build script(s).
Root filesystem overlays (+BR2_ROOTFS_OVERLAY+)::
+
A filesystem overlay is a tree of files that is copied directly
over the target filesystem after it has been built. To enable this
feature, set config option +BR2_ROOTFS_OVERLAY+ (in the +System
configuration+ menu) to the root of the overlay. You can even specify
multiple overlays, space-separated. If you specify a relative path,
it will be relative to the root of the Buildroot tree. Hidden
directories of version control systems, like +.git+, +.svn+, +.hg+,
etc., files called +.empty+ and files ending in +~+ are excluded from
the copy.
+
When +BR2_ROOTFS_MERGED_USR+ is enabled, then the overlay must not
contain the '/bin', '/lib' or '/sbin' directories, as Buildroot will
create them as symbolic links to the relevant folders in '/usr'. In
such a situation, should the overlay have any programs or libraries,
they should be placed in '/usr/bin', '/usr/sbin' and '/usr/lib'.
+
As shown in xref:customize-dir-structure[], the recommended path for
this overlay is +board/<company>/<boardname>/rootfs-overlay+.
Post-build scripts (+BR2_ROOTFS_POST_BUILD_SCRIPT+)::
+
Post-build scripts are shell scripts called 'after' Buildroot builds
all the selected software, but 'before' the rootfs images are
assembled. To enable this feature, specify a space-separated list of
post-build scripts in config option +BR2_ROOTFS_POST_BUILD_SCRIPT+ (in
the +System configuration+ menu). If you specify a relative path, it
will be relative to the root of the Buildroot tree.
+
Using post-build scripts, you can remove or modify any file in your
target filesystem. You should, however, use this feature with care.
Whenever you find that a certain package generates wrong or unneeded
files, you should fix that package rather than work around it with some
post-build cleanup scripts.
+
As shown in xref:customize-dir-structure[], the recommended path for
this script is +board/<company>/<boardname>/post_build.sh+.
+
The post-build scripts are run with the main Buildroot tree as current
working directory. The path to the target filesystem is passed as the
first argument to each script. If the config option
+BR2_ROOTFS_POST_SCRIPT_ARGS+ is not empty, these arguments will be
passed to the script too. All the scripts will be passed the exact
same set of arguments; it is not possible to pass different sets of
arguments to each script.
+
In addition, you may also use these environment variables:
- +BR2_CONFIG+: the path to the Buildroot .config file
- +HOST_DIR+, +STAGING_DIR+, +TARGET_DIR+: see
xref:generic-package-reference[]
- +BUILD_DIR+: the directory where packages are extracted and built
- +BINARIES_DIR+: the place where all binary files (aka images) are
stored
- +BASE_DIR+: the base output directory
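As an illustration, a minimal post-build script could look like this
(the file it creates, +/etc/build-info+, is purely an example):
-----
#!/bin/sh
# $1 is the path to the target filesystem (TARGET_DIR); see above.
set -e
TARGET_DIR="$1"
# Hypothetical example: record when and from which configuration the image was built.
{
    echo "built: $(date -u)"
    echo "config: ${BR2_CONFIG}"
} > "${TARGET_DIR}/etc/build-info"
-----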
Below three more methods of customizing the target filesystem are
described, but they are not recommended.
Direct modification of the target filesystem::
+
For temporary modifications, you can modify the target filesystem
directly and rebuild the image. The target filesystem is available
under +output/target/+. After making your changes, run +make+ to
rebuild the target filesystem image.
+
This method allows you to do anything to the target filesystem, but if
you need to clean your Buildroot tree using +make clean+, these
changes will be lost. Such cleaning is necessary in several cases,
refer to xref:full-rebuild[] for details. This solution is therefore
only useful for quick tests: _changes do not survive the +make clean+
command_. Once you have validated your changes, you should make sure
that they will persist after a +make clean+, using a root filesystem
overlay or a post-build script.
Custom target skeleton (+BR2_ROOTFS_SKELETON_CUSTOM+)::
+
The root filesystem image is created from a target skeleton, on top of
which all packages install their files. The skeleton is copied to the
target directory +output/target+ before any package is built and
installed. The default target skeleton provides the standard Unix
filesystem layout and some basic init scripts and configuration files.
+
If the default skeleton (available under +system/skeleton+) does not
match your needs, you would typically use a root filesystem overlay or
post-build script to adapt it. However, if the default skeleton is
entirely different from what you need, using a custom skeleton may be
more suitable.
+
To enable this feature, enable config option
+BR2_ROOTFS_SKELETON_CUSTOM+ and set +BR2_ROOTFS_SKELETON_CUSTOM_PATH+
to the path of your custom skeleton. Both options are available in the
+System configuration+ menu. If you specify a relative path, it will
be relative to the root of the Buildroot tree.
+
Custom skeletons don't need to contain the '/bin', '/lib' or '/sbin'
directories, since they are created automatically during the build.
When +BR2_ROOTFS_MERGED_USR+ is enabled, then the custom skeleton must
not contain the '/bin', '/lib' or '/sbin' directories, as Buildroot
will create them as symbolic links to the relevant folders in '/usr'.
In such a situation, should the skeleton have any programs or
libraries, they should be placed in '/usr/bin', '/usr/sbin' and
'/usr/lib'.
+
This method is not recommended because it duplicates the entire
skeleton, which prevents taking advantage of the fixes or improvements
brought to the default skeleton in later Buildroot releases.
Post-fakeroot scripts (+BR2_ROOTFS_POST_FAKEROOT_SCRIPT+)::
+
When aggregating the final images, some parts of the process require
root rights: creating device nodes in `/dev`, setting permissions or
ownership on files and directories... To avoid requiring actual root
rights, Buildroot uses +fakeroot+ to simulate root rights. This is not
a complete substitute for actually being root, but is enough for what
Buildroot needs.
+
Post-fakeroot scripts are shell scripts that are called at the 'end' of
the fakeroot phase, 'right before' the filesystem image generator is
called. As such, they are called in the fakeroot context.
+
Post-fakeroot scripts can be useful in case you need to tweak the
filesystem to do modifications that are usually only available to the
root user.
+
.Note:
It is recommended to use the existing mechanisms to set file permissions
or create entries in `/dev` (see xref:customize-device-permission[]) or
to create users (see xref:customize-users[]).
+
.Note:
The difference between post-build scripts (above) and fakeroot scripts
is that post-build scripts are not called in the fakeroot context.
+
.Note:
Using `fakeroot` is not an absolute substitute for actually being root.
`fakeroot` only ever fakes the file access rights and types (regular,
block-or-char device...) and uid/gid; these are emulated in-memory.
include::customize-device-permission-tables.txt[]

View File

@@ -1,18 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[customize-users]]
=== Adding custom user accounts
Sometimes it is necessary to add specific users to the target system.
To cover this requirement, Buildroot provides support for so-called
_users tables_. To use this feature, set config option
+BR2_ROOTFS_USERS_TABLES+ to a space-separated list of users tables,
regular text files following the xref:makeuser-syntax[makeusers syntax].
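For illustration only, a users table is a plain text file with one user
per line; a hypothetical entry, with made-up names, could look like the
line below (refer to the makeusers syntax reference for the exact
meaning of each field):
-----
foo -1 foo -1 * - - - Hypothetical system user for the foo daemon
-----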
As shown in xref:customize-dir-structure[], the recommended location for
such files is +board/<company>/<boardname>/+.
It should be noted that if the custom users are related to a specific
application, you should set variable +FOO_USERS+ in the package's +.mk+
file instead (see xref:generic-package-reference[]).

View File

@@ -1,60 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[customize]]
== Project-specific customization
Typical actions you may need to perform for a given project are:
* configuring Buildroot (including build options and toolchain,
bootloader, kernel, package and filesystem image type selection)
* configuring other components, like the Linux kernel and BusyBox
* customizing the generated target filesystem
** adding or overwriting files on the target filesystem (using
+BR2_ROOTFS_OVERLAY+)
** modifying or deleting files on the target filesystem (using
+BR2_ROOTFS_POST_BUILD_SCRIPT+)
** running arbitrary commands prior to generating the filesystem image
(using +BR2_ROOTFS_POST_BUILD_SCRIPT+)
** setting file permissions and ownership (using
+BR2_ROOTFS_DEVICE_TABLE+)
** adding custom device nodes (using
+BR2_ROOTFS_STATIC_DEVICE_TABLE+)
* adding custom user accounts (using +BR2_ROOTFS_USERS_TABLES+)
* running arbitrary commands after generating the filesystem image
(using +BR2_ROOTFS_POST_IMAGE_SCRIPT+)
* adding project-specific patches to some packages (using
+BR2_GLOBAL_PATCH_DIR+)
* adding project-specific packages
An important note regarding such 'project-specific' customizations:
please carefully consider which changes are indeed project-specific and
which changes are also useful to developers outside your project. The
Buildroot community highly recommends and encourages the upstreaming of
improvements, packages and board support to the official Buildroot
project. Of course, it is sometimes not possible or desirable to
upstream because the changes are highly specific or proprietary.
This chapter describes how to make such project-specific customizations
in Buildroot and how to store them in a way that you can build the same
image in a reproducible way, even after running 'make clean'. By
following the recommended strategy, you can even use the same Buildroot
tree to build multiple distinct projects!
include::customize-directory-structure.txt[]
include::customize-outside-br.txt[]
include::customize-configuration.txt[]
include::customize-rootfs.txt[]
include::customize-users-tables.txt[]
include::customize-post-image.txt[]
include::customize-patches.txt[]
include::customize-packages.txt[]
include::customize-quick-guide.txt[]

View File

@@ -1,43 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[debugging-buildroot]]
== Debugging Buildroot
It is possible to instrument the steps +Buildroot+ does when building
packages. Define the variable +BR2_INSTRUMENTATION_SCRIPTS+ to contain
a space-separated list of paths to one or more scripts (or other
executables) that you want to be called before and after each step. The
scripts are called in sequence, with three parameters:
- +start+ or +end+ to denote the start (resp. the end) of a step;
- the name of the step about to be started, or which just ended;
- the name of the package.
For example:
----
make BR2_INSTRUMENTATION_SCRIPTS="/path/to/my/script1 /path/to/my/script2"
----
The list of steps is:
- +extract+
- +patch+
- +configure+
- +build+
- +install-host+, when a host-package is installed in +$(HOST_DIR)+
- +install-target+, when a target-package is installed in +$(TARGET_DIR)+
- +install-staging+, when a target-package is installed in +$(STAGING_DIR)+
- +install-image+, when a target-package installs files in +$(BINARIES_DIR)+
The script has access to the following variables:
- +BR2_CONFIG+: the path to the Buildroot .config file
- +HOST_DIR+, +STAGING_DIR+, +TARGET_DIR+: see
xref:generic-package-reference[]
- +BUILD_DIR+: the directory where packages are extracted and built
- +BINARIES_DIR+: the place where all binary files (aka images) are
stored
- +BASE_DIR+: the base output directory
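A minimal sketch of such an instrumentation script, which merely logs
every step transition (the log path below is arbitrary):
----
#!/bin/sh
# $1 = "start" or "end", $2 = step name, $3 = package name (see above).
# Log each step transition; the log file location is just an example.
printf '%s %-5s %-15s %s\n' "$(date +%H:%M:%S)" "$1" "$2" "$3" >> /tmp/buildroot-steps.log
----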

View File

@@ -1,49 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[DEVELOPERS]]
== DEVELOPERS file and get-developers
The main Buildroot directory contains a file named +DEVELOPERS+ that
lists the developers involved with various areas of Buildroot. Thanks
to this file, the +get-developers+ tool makes it possible to:
- Calculate the list of developers to whom patches should be sent, by
parsing the patches and matching the modified files with the
relevant developers. See xref:submitting-patches[] for details.
- Find which developers are taking care of a given architecture or
package, so that they can be notified when a build failure occurs on
this architecture or package. This is done in interaction with
Buildroot's autobuild infrastructure.
We ask developers adding new packages, new boards, or generally new
functionality in Buildroot, to register themselves in the +DEVELOPERS+
file. As an example, we expect a developer contributing a new package
to include in their patch the appropriate modification to the
+DEVELOPERS+ file.
The +DEVELOPERS+ file format is documented in detail inside the file
itself.
The +get-developers+ tool, located in +utils/+, allows using
the +DEVELOPERS+ file for various tasks:
- When passing one or several patches as command line argument,
+get-developers+ will return the appropriate +git send-email+
command. If the +-e+ option is passed, only the email addresses are
printed in a format suitable for +git send-email --cc-cmd+.
- When using the +-a <arch>+ command line option, +get-developers+ will
return the list of developers in charge of the given architecture.
- When using the +-p <package>+ command line option, +get-developers+
will return the list of developers in charge of the given package.
- When using the +-c+ command line option, +get-developers+ will look
at all files under version control in the Buildroot repository, and
list the ones that are not handled by any developer. The purpose of
this option is to help completing the +DEVELOPERS+ file.
- When used without any arguments, it validates the integrity of the
+DEVELOPERS+ file and prints warnings for items that don't match.
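For example (the patch file name and package name below are hypothetical):
----
$ ./utils/get-developers outgoing/0001-package-foo-bump-to-version-1.2.3.patch
$ ./utils/get-developers -p foo   # hypothetical package name
----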

View File

@@ -1,8 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[download-infra]]
== Download infrastructure
TODO

View File

@@ -1,27 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[download-location]]
==== Location of downloaded packages
The various tarballs that are downloaded by Buildroot are all stored
in +BR2_DL_DIR+, which by default is the +dl+ directory. If you want
to keep a complete version of Buildroot which is known to be working
with the associated tarballs, you can make a copy of this directory.
This will allow you to regenerate the toolchain and the target
filesystem with exactly the same versions.
If you maintain several Buildroot trees, it might be better to have a
shared download location. This can be achieved by pointing the
+BR2_DL_DIR+ environment variable to a directory. If this is
set, then the value of +BR2_DL_DIR+ in the Buildroot configuration is
overridden. The following line should be added to +~/.bashrc+.
-----------------
export BR2_DL_DIR=<shared download location>
-----------------
The download location can also be set in the +.config+ file, with the
+BR2_DL_DIR+ option. Unlike most options in the .config file, this value
is overridden by the +BR2_DL_DIR+ environment variable.

View File

@@ -1,30 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
=== Integration with Eclipse
While some embedded Linux developers prefer classical text editors
such as Vim or Emacs and command-line based interfaces, many others
prefer richer graphical interfaces to do their development work.
Eclipse being one of the most popular Integrated Development
Environments, Buildroot integrates with Eclipse in order to ease the
development work of Eclipse users.
Our integration with Eclipse simplifies the compilation, remote
execution and remote debugging of applications and libraries that are
built on top of a Buildroot system. It does not integrate the
Buildroot configuration and build processes themselves with
Eclipse. Therefore, the typical usage model of our Eclipse integration
would be:
* Configure your Buildroot system with +make menuconfig+, +make
xconfig+ or any other configuration interface provided with
Buildroot.
* Build your Buildroot system by running +make+.
* Start Eclipse to develop, execute and debug your own custom
applications and libraries, that will rely on the libraries built
and installed by Buildroot.
The Buildroot Eclipse integration installation process and usage is
described in detail at
https://github.com/mbats/eclipse-buildroot-bundle/wiki.

View File

@@ -1,244 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
== Frequently Asked Questions & Troubleshooting
[[faq-boot-hang-after-starting]]
=== The boot hangs after 'Starting network...'
If the boot process seems to hang after the following messages
(the exact messages may differ, depending on the list of
packages selected):
------------------------
Freeing init memory: 3972K
Initializing random number generator... done.
Starting network...
Starting dropbear sshd: generating rsa key... generating dsa key... OK
------------------------
then it means that your system is running, but didn't start a shell on
the serial console. In order to have the system start a shell on your
serial console, you have to go into the Buildroot configuration, in
+System configuration+, modify +Run a getty (login prompt) after boot+
and set the appropriate port and baud rate in the +getty options+
submenu. This will automatically tune the +/etc/inittab+ file of the
generated system so that a shell starts on the correct serial port.
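As an illustration, for a system whose console is on +ttyS0+ at 115200 bauds,
the resulting fragment of the Buildroot configuration could look like the
following (the option names are indicative and may vary slightly between
Buildroot versions):

------------------------
BR2_TARGET_GENERIC_GETTY=y
BR2_TARGET_GENERIC_GETTY_PORT="ttyS0"
BR2_TARGET_GENERIC_GETTY_BAUDRATE_115200=y
------------------------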
[[faq-no-compiler-on-target]]
=== Why is there no compiler on the target?
It has been decided that support for the _native compiler on the
target_ would be stopped from the Buildroot-2012.11 release because:
* this feature was neither maintained nor tested, and often broken;
* this feature was only available for Buildroot toolchains;
* Buildroot mostly targets _small_ or _very small_ target hardware
with limited resources on board (CPU, RAM, mass storage), for which
compiling on the target does not make much sense;
* Buildroot aims at easing cross-compilation, making native
compilation on the target unnecessary.
If you need a compiler on your target anyway, then Buildroot is not
suitable for your purpose. In such case, you need a _real
distribution_ and you should opt for something like:
* http://www.openembedded.org[openembedded]
* https://www.yoctoproject.org[yocto]
* http://www.emdebian.org[emdebian]
* https://fedoraproject.org/wiki/Architectures[Fedora]
* http://en.opensuse.org/Portal:ARM[openSUSE ARM]
* http://archlinuxarm.org[Arch Linux ARM]
* ...
[[faq-no-dev-files-on-target]]
=== Why are there no development files on the target?
Since there is no compiler available on the target (see
xref:faq-no-compiler-on-target[]), it does not make sense to waste
space with headers or static libraries.
Therefore, those files are always removed from the target since the
Buildroot-2012.11 release.
[[faq-no-doc-on-target]]
=== Why is there no documentation on the target?
Because Buildroot mostly targets _small_ or _very small_ target
hardware with limited resources on board (CPU, RAM, mass storage), it
does not make sense to waste space with documentation data.
If you need documentation data on your target anyway, then Buildroot
is not suitable for your purpose, and you should look for a _real
distribution_ (see: xref:faq-no-compiler-on-target[]).
[[faq-why-not-visible-package]]
=== Why are some packages not visible in the Buildroot config menu?
If a package exists in the Buildroot tree and does not appear in the
config menu, this most likely means that some of the package's
dependencies are not met.
To know more about the dependencies of a package, search for the
package symbol in the config menu (see xref:make-tips[]).
Then, you may have to recursively enable several options (which
correspond to the unmet dependencies) to finally be able to select
the package.
If the package is not visible due to some unmet toolchain options,
then you should certainly run a full rebuild (see xref:make-tips[] for
more explanations).
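For example, to locate a package symbol and inspect its dependencies (the
package symbol below is only an example):

------------------------
$ make menuconfig
# then press "/" and enter the package symbol, e.g. BR2_PACKAGE_MPD;
# the search result shows where the option is defined and lists its
# dependencies with their current values, so unmet ones are visible
------------------------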
[[faq-why-not-use-target-as-chroot]]
=== Why not use the target directory as a chroot directory?
There are plenty of reasons *not* to use the target directory as a
chroot directory, among them:
* file ownerships, modes and permissions are not correctly set in the
target directory;
* device nodes are not created in the target directory.
For these reasons, commands run through chroot, using the target
directory as the new root, will most likely fail.
If you want to run the target filesystem inside a chroot, or as an NFS
root, then use the tarball image generated in +images/+ and extract it
as root.
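For instance, assuming the tar filesystem image (+BR2_TARGET_ROOTFS_TAR+) is
enabled, and with a purely illustrative destination path:

------------------------
$ sudo tar -C /srv/nfs/rootfs -xf output/images/rootfs.tar
------------------------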
[[faq-no-binary-packages]]
=== Why doesn't Buildroot generate binary packages (.deb, .ipkg...)?
One feature that is often discussed on the Buildroot list is the
general topic of "package management". To summarize, the idea
would be to add some tracking of which Buildroot package installs
what files, with the goals of:
* being able to remove files installed by a package when this package
gets unselected from the menuconfig;
* being able to generate binary packages (ipk or other format) that
can be installed on the target without re-generating a new root
filesystem image.
In general, most people think it is easy to do: just track which package
installed what and remove it when the package is unselected. However, it
is much more complicated than that:
* It is not only about the +target/+ directory, but also the sysroot in
+host/<tuple>/sysroot+ and the +host/+ directory itself. All files
installed in those directories by various packages must be tracked.
* When a package is unselected from the configuration, it is not
sufficient to remove just the files it installed. One must also
remove all its reverse dependencies (i.e. packages relying on it)
and rebuild all those packages. For example, package A depends
optionally on the OpenSSL library. Both are selected, and Buildroot
is built. Package A is built with crypto support using OpenSSL.
Later on, OpenSSL gets unselected from the configuration, but
package A remains (since OpenSSL is an optional dependency, this
is possible.) If only OpenSSL files are removed, then the files
installed by package A are broken: they use a library that is no
longer present on the target. Although this is technically doable,
it adds a lot of complexity to Buildroot, which goes against the
simplicity we try to stick to.
* In addition to the previous problem, there is the case where the
optional dependency is not even known to Buildroot. For example,
package A in version 1.0 never used OpenSSL, but in version 2.0 it
automatically uses OpenSSL if available. If the Buildroot .mk file
hasn't been updated to take this into account, then package A will
not be part of the reverse dependencies of OpenSSL and will not be
removed and rebuilt when OpenSSL is removed. For sure, the .mk file
of package A should be fixed to mention this optional dependency,
but in the meantime, you can have non-reproducible behaviors.
* The request is to also allow changes in the menuconfig to be
applied on the output directory without having to rebuild
everything from scratch. However, this is very difficult to achieve
in a reliable way: what happens when the suboptions of a package
are changed (we would have to detect this, and rebuild the package
from scratch and potentially all its reverse dependencies), what
happens if toolchain options are changed, etc. At the moment, what
Buildroot does is clear and simple so its behaviour is very
reliable and it is easy to support users. If configuration changes
done in menuconfig are applied after the next make, then it has to
work correctly and properly in all situations, and not have some
bizarre corner cases. The risk is to get bug reports like "I have
enabled package A, B and C, then ran make, then disabled package
C and enabled package D and ran make, then re-enabled package C
and enabled package E and then there is a build failure". Or worse
"I did some configuration, then built, then did some changes,
built, some more changes, built, some more changes, built, and now
it fails, but I don't remember all the changes I did and in which
order". This will be impossible to support.
For all these reasons, the conclusion is that adding tracking of
installed files to remove them when the package is unselected, or to
generate a repository of binary packages, is something that is very
hard to achieve reliably and will add a lot of complexity.
On this matter, the Buildroot developers make this position statement:
* Buildroot strives to make it easy to generate a root filesystem (hence
the name, by the way.) That is what we want to make Buildroot good at:
building root filesystems.
* Buildroot is not meant to be a distribution (or rather, a distribution
generator.) It is the opinion of most Buildroot developers that this
is not a goal we should pursue. We believe that there are other tools
better suited to generate a distro than Buildroot is. For example,
http://openembedded.org/[Open Embedded], or https://openwrt.org/[openWRT],
are such tools.
* We prefer to push Buildroot in a direction that makes it easy (or even
easier) to generate complete root filesystems. This is what makes
Buildroot stand out from the crowd (among other things, of course!)
* We believe that for most embedded Linux systems, binary packages are
not necessary, and potentially harmful. When binary packages are
used, it means that the system can be partially upgraded, which
creates an enormous number of possible combinations of package
versions that should be tested before doing the upgrade on the
embedded device. On the other hand, by doing complete system
upgrades by upgrading the entire root filesystem image at once,
the image deployed to the embedded system is guaranteed to really
be the one that has been tested and validated.
[[faq-speeding-up-build]]
=== How to speed-up the build process?
Since Buildroot often involves doing full rebuilds of the entire
system that can be quite long, we provide below a number of tips to
help reduce the build time:
* Use a pre-built external toolchain instead of the default Buildroot
internal toolchain. By using a pre-built Linaro toolchain (on ARM)
or a Sourcery CodeBench toolchain (for ARM, x86, x86-64, MIPS,
etc.), you will save the build time of the toolchain at each
complete rebuild, approximately 15 to 20 minutes. Note that
temporarily using an external toolchain does not prevent you from
switching back to an internal toolchain (that may provide a higher
level of customization) once the rest of your system is working;
* Use the +ccache+ compiler cache (see: xref:ccache[]);
* Learn about rebuilding only the few packages you actually care
about (see xref:rebuild-pkg[] and the example after this list), but
beware that full rebuilds are sometimes necessary anyway (see
xref:full-rebuild[]);
* Make sure you are not using a virtual machine for the Linux system
used to run Buildroot. Most of the virtual machine technologies are
known to cause a significant performance impact on I/O, which is
really important for building source code;
* Make sure that you're using only local files: do not attempt to do
a build over NFS, which significantly slows down the build. Having
the Buildroot download folder available locally also helps a bit.
* Buy new hardware. SSDs and lots of RAM are key to speeding up the
builds.
* Experiment with top-level parallel build, see
xref:top-level-parallel-build[].
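As a quick illustration of the per-package rebuild targets mentioned in the
list above (the package names are just examples):

------------------------
$ make busybox-rebuild       # recompile and reinstall only busybox
$ make linux-rebuild all     # rebuild the kernel, then refresh the images
------------------------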

View File

@@ -1,69 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
== How Buildroot works
As mentioned above, Buildroot is basically a set of Makefiles that
download, configure, and compile software with the correct options. It
also includes patches for various software packages - mainly the ones
involved in the cross-compilation toolchain (+gcc+, +binutils+ and
+uClibc+).
There is basically one Makefile per software package, and they are
named with the +.mk+ extension. Makefiles are split into many different
parts.
* The +toolchain/+ directory contains the Makefiles
and associated files for all software related to the
cross-compilation toolchain: +binutils+, +gcc+, +gdb+,
+kernel-headers+ and +uClibc+.
* The +arch/+ directory contains the definitions for all the processor
architectures that are supported by Buildroot.
* The +package/+ directory contains the Makefiles and
associated files for all user-space tools and libraries that Buildroot
can compile and add to the target root filesystem. There is one
sub-directory per package.
* The +linux/+ directory contains the Makefiles and associated files for
the Linux kernel.
* The +boot/+ directory contains the Makefiles and associated files for
the bootloaders supported by Buildroot.
* The +system/+ directory contains support for system integration, e.g.
the target filesystem skeleton and the selection of an init system.
* The +fs/+ directory contains the Makefiles and
associated files for software related to the generation of the
target root filesystem image.
Each directory contains at least 2 files:
* +something.mk+ is the Makefile that downloads, configures,
compiles and installs the package +something+ (a minimal example is
shown at the end of this section).
* +Config.in+ is a part of the configuration tool
description file. It describes the options related to the
package.
The main Makefile performs the following steps (once the
configuration is done):
* Create all the output directories: +staging+, +target+, +build+,
etc. in the output directory (+output/+ by default,
another value can be specified using +O=+)
* Generate the toolchain target. When an internal toolchain is used, this
means generating the cross-compilation toolchain. When an external
toolchain is used, this means checking the features of the external
toolchain and importing it into the Buildroot environment.
* Generate all the targets listed in the +TARGETS+ variable. This
variable is filled by all the individual components'
Makefiles. Generating these targets will trigger the compilation of
the userspace packages (libraries, programs), the kernel, the
bootloader and the generation of the root filesystem images,
depending on the configuration.
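To make the +something.mk+ files described above more concrete, here is a
minimal, hypothetical +generic-package+ recipe; the package name +foo+, its
download location and its build/install commands are purely illustrative:

----
################################################################################
#
# foo
#
################################################################################

FOO_VERSION = 1.0
FOO_SOURCE = foo-$(FOO_VERSION).tar.gz
FOO_SITE = http://www.example.com/downloads
FOO_LICENSE = GPL-2.0+
FOO_LICENSE_FILES = COPYING

define FOO_BUILD_CMDS
	$(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D)
endef

define FOO_INSTALL_TARGET_CMDS
	$(INSTALL) -D -m 0755 $(@D)/foo $(TARGET_DIR)/usr/bin/foo
endef

$(eval $(generic-package))
----

The matching +Config.in+ file would then declare a +BR2_PACKAGE_FOO+ boolean
option, so that the package can be enabled from the configuration menus.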

View File

@@ -1,15 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
== Known issues
* It is not possible to pass extra linker options via +BR2_TARGET_LDFLAGS+
if such options contain a +$+ sign. For example, the following is known
to break: +BR2_TARGET_LDFLAGS="-Wl,-rpath=\'$ORIGIN/../lib'"+
* The +libffi+ package is not supported on the SuperH 2 and ARC
architectures.
* The +prboom+ package triggers a compiler failure with the SuperH 4
compiler from Sourcery CodeBench, version 2012.09.

View File

@@ -1,122 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[legal-info]]
== Legal notice and licensing
=== Complying with open source licenses
All of the end products of Buildroot (toolchain, root filesystem, kernel,
bootloaders) contain open source software, released under various licenses.
Using open source software gives you the freedom to build rich embedded
systems, choosing from a wide range of packages, but also imposes some
obligations that you must know and honour.
Some licenses require you to publish the license text in the documentation of
your product. Others require you to redistribute the source code of the
software to those that receive your product.
The exact requirements of each license are documented in each package, and
it is your responsibility (or that of your legal office) to comply with those
requirements.
To make this easier for you, Buildroot can collect for you some material you
will probably need. To produce this material, after you have configured
Buildroot with +make menuconfig+, +make xconfig+ or +make gconfig+, run:
--------------------
make legal-info
--------------------
Buildroot will collect legally-relevant material in your output directory,
under the +legal-info/+ subdirectory.
There you will find:
* A +README+ file, that summarizes the produced material and contains warnings
about material that Buildroot could not produce.
* +buildroot.config+: this is the Buildroot configuration file that is usually
produced with +make menuconfig+, and which is necessary to reproduce the
build.
* The source code for all packages; this is saved in the +sources/+ and
+host-sources/+ subdirectories for target and host packages respectively.
The source code for packages that set +<PKG>_REDISTRIBUTE = NO+ will not be
saved.
Patches that were applied are also saved, along with a file named +series+
that lists the patches in the order they were applied. Patches are under the
same license as the files that they modify.
Note: Buildroot applies additional patches to Libtool scripts of
autotools-based packages. These patches can be found under
+support/libtool+ in the Buildroot source and, due to technical
limitations, are not saved with the package sources. You may need to
collect them manually.
* A manifest file (one for host and one for target packages) listing the
configured packages, their version, license and related information.
Some of this information might not be defined in Buildroot; such items are
marked as "unknown".
* The license texts of all packages, in the +licenses/+ and +host-licenses/+
subdirectories for target and host packages respectively.
If the license file(s) are not defined in Buildroot, the file is not produced
and a warning in the +README+ indicates this.
Please note that the aim of the +legal-info+ feature of Buildroot is to
produce all the material that is somehow relevant for legal compliance with the
package licenses. Buildroot does not try to produce the exact material that
you must somehow make public. Certainly, more material is produced than is
needed for a strict legal compliance. For example, it produces the source code
for packages released under BSD-like licenses, that you are not required to
redistribute in source form.
Moreover, due to technical limitations, Buildroot does not produce some
material that you will or may need, such as the toolchain source code for
some of the external toolchains and the Buildroot source code itself.
When you run +make legal-info+, Buildroot produces warnings in the +README+
file to inform you of relevant material that could not be saved.
Finally, keep in mind that the output of +make legal-info+ is based on
declarative statements in each of the package recipes. The Buildroot
developers do their best to keep those declarative statements as
accurate as possible, to the best of their knowledge. However, it is
quite possible that those declarative statements are not fully accurate
or exhaustive. You (or your legal department) _have_ to check the output
of +make legal-info+ before using it as your own compliance delivery. See
the _NO WARRANTY_ clauses (clauses 11 and 12) in the +COPYING+ file at the
root of the Buildroot distribution.
[[legal-info-buildroot]]
=== Complying with the Buildroot license
Buildroot itself is an open source software, released under the
http://www.gnu.org/licenses/old-licenses/gpl-2.0.html[GNU General
Public License, version 2] or (at your option) any later version, with
the exception of the package patches detailed below.
However, being a build system, it is not normally part of the end product:
if you develop the root filesystem, kernel, bootloader or toolchain for a
device, the code of Buildroot is only present on the development machine, not
in the device storage.
Nevertheless, the general view of the Buildroot developers is that you should
release the Buildroot source code along with the source code of other packages
when releasing a product that contains GPL-licensed software.
This is because the
http://www.gnu.org/licenses/old-licenses/gpl-2.0.html[GNU GPL]
defines the "'complete source code'" for an executable work as "'all the
source code for all modules it contains, plus any associated interface
definition files, plus the scripts used to control compilation and installation
of the executable'".
Buildroot is part of the 'scripts used to control compilation and
installation of the executable', and as such it is considered part of the
material that must be redistributed.
Keep in mind that this is only the Buildroot developers' opinion, and you
should consult your legal department or lawyer in case of any doubt.
==== Patches to packages
Buildroot also bundles patch files, which are applied to the sources
of the various packages. Those patches are not covered by the license
of Buildroot. Instead, they are covered by the license of the software
to which the patches are applied. When said software is available
under multiple licenses, the Buildroot patches are only provided under
the publicly accessible licenses.
See xref:patch-policy[] for the technical details.

View File

@@ -1,133 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[make-tips]]
=== 'make' tips
This is a collection of tips that help you make the most of Buildroot.
.Display all commands executed by make:
--------------------
$ make V=1 <target>
--------------------
.Display the list of boards with a defconfig:
--------------------
$ make list-defconfigs
--------------------
.Display all available targets:
--------------------
$ make help
--------------------
Not all targets are always available;
some settings in the +.config+ file may hide some targets:
* +busybox-menuconfig+ only works when +busybox+ is enabled;
* +linux-menuconfig+ and +linux-savedefconfig+ only work when
+linux+ is enabled;
* +uclibc-menuconfig+ is only available when the uClibc C library is
selected in the internal toolchain backend;
* +barebox-menuconfig+ and +barebox-savedefconfig+ only work when the
+barebox+ bootloader is enabled;
* +uboot-menuconfig+ and +uboot-savedefconfig+ only work when the
+U-Boot+ bootloader is enabled.
.Cleaning:
Explicit cleaning is required when any of the architecture or toolchain
configuration options are changed.
To delete all build products (including build directories, host, staging
and target trees, the images and the toolchain):
--------------------
$ make clean
--------------------
.Generating the manual:
The present manual sources are located in the 'docs/manual' directory.
To generate the manual:
---------------------------------
$ make manual-clean
$ make manual
---------------------------------
The manual outputs will be generated in 'output/docs/manual'.
.Notes
- A few tools are required to build the documentation (see:
xref:requirement-optional[]).
.Resetting Buildroot for a new target:
To delete all build products as well as the configuration:
--------------------
$ make distclean
--------------------
.Notes
If +ccache+ is enabled, running +make clean+ or +distclean+ does
not empty the compiler cache used by Buildroot. To delete it, refer
to xref:ccache[].
.Dumping the internal make variables:
One can dump the variables known to make, along with their values:
----
$ make -s printvars VARS='VARIABLE1 VARIABLE2'
VARIABLE1=value_of_variable
VARIABLE2=value_of_variable
----
It is possible to tweak the output using some variables:
- +VARS+ will limit the listing to variables whose names match the
specified make-patterns; this must be set, otherwise nothing is printed
- +QUOTED_VARS+, if set to +YES+, will single-quote the value
- +RAW_VARS+, if set to +YES+, will print the unexpanded value
For example:
----
$ make -s printvars VARS=BUSYBOX_%DEPENDENCIES
BUSYBOX_DEPENDENCIES=skeleton toolchain
BUSYBOX_FINAL_ALL_DEPENDENCIES=skeleton toolchain
BUSYBOX_FINAL_DEPENDENCIES=skeleton toolchain
BUSYBOX_FINAL_PATCH_DEPENDENCIES=
BUSYBOX_RDEPENDENCIES=ncurses util-linux
----
----
$ make -s printvars VARS=BUSYBOX_%DEPENDENCIES QUOTED_VARS=YES
BUSYBOX_DEPENDENCIES='skeleton toolchain'
BUSYBOX_FINAL_ALL_DEPENDENCIES='skeleton toolchain'
BUSYBOX_FINAL_DEPENDENCIES='skeleton toolchain'
BUSYBOX_FINAL_PATCH_DEPENDENCIES=''
BUSYBOX_RDEPENDENCIES='ncurses util-linux'
----
----
$ make -s printvars VARS=BUSYBOX_%DEPENDENCIES RAW_VARS=YES
BUSYBOX_DEPENDENCIES=skeleton toolchain
BUSYBOX_FINAL_ALL_DEPENDENCIES=$(sort $(BUSYBOX_FINAL_DEPENDENCIES) $(BUSYBOX_FINAL_PATCH_DEPENDENCIES))
BUSYBOX_FINAL_DEPENDENCIES=$(sort $(BUSYBOX_DEPENDENCIES))
BUSYBOX_FINAL_PATCH_DEPENDENCIES=$(sort $(BUSYBOX_PATCH_DEPENDENCIES))
BUSYBOX_RDEPENDENCIES=ncurses util-linux
----
The output of quoted variables can be reused in shell scripts, for example:
----
$ eval $(make -s printvars VARS=BUSYBOX_DEPENDENCIES QUOTED_VARS=YES)
$ echo $BUSYBOX_DEPENDENCIES
skeleton toolchain
----

View File

@@ -1,103 +0,0 @@
// -*- mode:doc; -*-
// vim: set syntax=asciidoc:
[[makedev-syntax]]
== Makedev syntax documentation
The makedev syntax is used in several places in Buildroot to
define which permissions to change, or which device files to
create and how to create them, in order to avoid calls to mknod.
This syntax is derived from the makedev utility, and more complete
documentation can be found in the +package/makedevs/README+ file.
It takes the form of a space-separated list of fields, one file per
line; the fields are:
|===========================================================
|name |type |mode |uid |gid |major |minor |start |inc |count
|===========================================================
There are a few non-trivial blocks:
- +name+ is the path to the file you want to create/modify
- +type+ is the type of the file, being one of:
* f: a regular file
* d: a directory
* r: a directory recursively
* c: a character device file
* b: a block device file
* p: a named pipe
- +mode+ are the usual permissions settings (only numerical values
are allowed)
- +uid+ and +gid+ are the UID and GID to set on this file; can be
either numerical values or actual names
- +major+ and +minor+ are here for device files, set to +-+ for other
files
- +start+, +inc+ and +count+ are for when you want to create a batch
of files, and can be reduced to a loop, beginning at +start+,
incrementing its counter by +inc+ until it reaches +count+
Let's say you want to change the permissions of a given file; using
this syntax, you will need to write:
----
/usr/bin/foo f 755 0 0 - - - - -
/usr/bin/bar f 755 root root - - - - -
/data/buz f 644 buz-user buz-group - - - - -
----
Alternatively, if you want to change owner/permission of a directory
recursively, you can write (to set UID to foo, GID to bar and access
rights to rwxr-x--- for the directory /usr/share/myapp and all files
and directories below it):
----
/usr/share/myapp r 750 foo bar - - - - -
----
On the other hand, if you want to create the device file +/dev/hda+
and the corresponding 15 files for the partitions, you will need for
+/dev/hda+:
----
/dev/hda b 640 root root 3 0 0 0 -
----
and then for device files corresponding to the partitions of
+/dev/hda+, +/dev/hdaX+, +X+ ranging from 1 to 15:
----
/dev/hda b 640 root root 3 1 1 1 15
----
Extended attributes are supported if
+BR2_ROOTFS_DEVICE_TABLE_SUPPORTS_EXTENDED_ATTRIBUTES+ is enabled.
This is done by adding a line starting with +|xattr+ after
the line describing the file. Right now, only capabilities
are supported as extended attributes.
|=====================
| \|xattr | capability
|=====================
- +|xattr+ is a "flag" that indicates an extended attribute
- +capability+ is a capability to add to the previous file
If you want to add the capability cap_sys_admin to the binary foo,
you will write:
----
/usr/bin/foo f 755 root root - - - - -
|xattr cap_sys_admin+eip
----
You can add several capabilities to a file by using several +|xattr+ lines.
If you want to add the capability cap_sys_admin and cap_net_admin to the
binary foo, you will write:
----
/usr/bin/foo f 755 root root - - - - -
|xattr cap_sys_admin+eip
|xattr cap_net_admin+eip
----

View File

@@ -1,89 +0,0 @@
// -*- mode:doc -*- ;
// vim: set syntax=asciidoc:
[[makeuser-syntax]]
== Makeusers syntax documentation
The syntax to create users is inspired by the makedev syntax, above, but
is specific to Buildroot.
The syntax for adding a user is a space-separated list of fields, one
user per line; the fields are:
|=================================================================
|username |uid |group |gid |password |home |shell |groups |comment
|=================================================================
Where:
- +username+ is the desired user name (aka login name) for the user.
It cannot be +root+, and must be unique. If set to +-+, then just a
group will be created.
- +uid+ is the desired UID for the user. It must be unique, and not
+0+. If set to +-1+, then a unique UID will be computed by Buildroot
in the range [1000..1999].
- +group+ is the desired name for the user's main group. It cannot
be +root+. If the group does not exist, it will be created.
- +gid+ is the desired GID for the user's main group. It must be unique,
and not +0+. If set to +-1+, and the group does not already exist, then
a unique GID will be computed by Buildroot in the range [1000..1999]
- +password+ is the crypt(3)-encoded password. If prefixed with +!+,
then login is disabled. If prefixed with +=+, then it is interpreted
as clear-text, and will be crypt-encoded (using MD5). If prefixed with
+!=+, then the password will be crypt-encoded (using MD5) and login
will be disabled. If set to +*+, then login is not allowed. If set to
+-+, then no password value will be set.
- +home+ is the desired home directory for the user. If set to +-+, no
home directory will be created, and the user's home will be +/+.
Explicitly setting +home+ to +/+ is not allowed.
- +shell+ is the desired shell for the user. If set to +-+, then
+/bin/false+ is set as the user's shell.
- +groups+ is the comma-separated list of additional groups the user
should be part of. If set to +-+, then the user will be a member of
no additional group. Missing groups will be created with an arbitrary
+gid+.
- +comment+ (aka https://en.wikipedia.org/wiki/Gecos_field[GECOS]
field) is an almost-free-form text.
There are a few restrictions on the content of each field:
* except for +comment+, all fields are mandatory.
* except for +comment+, fields may not contain spaces.
* no field may contain a colon (+:+).
If +home+ is not +-+, then the home directory, and all files below,
will belong to the user and its main group.
Examples:
----
foo -1 bar -1 !=blabla /home/foo /bin/sh alpha,bravo Foo user
----
This will create this user:
- +username+ (aka login name) is: +foo+
- +uid+ is computed by Buildroot
- main +group+ is: +bar+
- main group +gid+ is computed by Buildroot
- clear-text +password+ is: +blabla+, will be crypt(3)-encoded, and login is disabled.
- +home+ is: +/home/foo+
- +shell+ is: +/bin/sh+
- +foo+ is also a member of +groups+: +alpha+ and +bravo+
- +comment+ is: +Foo user+
----
test 8000 wheel -1 = - /bin/sh - Test user
----
This will create this user:
- +username+ (aka login name) is: +test+
- +uid+ is: +8000+
- main +group+ is: +wheel+
- main group +gid+ is computed by Buildroot, and will use the value defined in the rootfs skeleton
- +password+ is empty (aka no password).
- +home+ is +/+ but will not belong to +test+
- +shell+ is: +/bin/sh+
- +test+ is not a member of any additional +groups+
- +comment+ is: +Test user+

Some files were not shown because too many files have changed in this diff.