
lm32: add patch for kernel and elf2flt

Waldemar Brodkorb 6 years ago
parent commit 728149c0fc

+ 190 - 0
target/linux/patches/6a2f2e4198eaff63ee75f6085ce9f966c47b4441/new-gcc.patch

@@ -0,0 +1,190 @@
+diff -Nur linux-6a2f2e4198eaff63ee75f6085ce9f966c47b4441.orig/include/linux/compiler-gcc.h linux-6a2f2e4198eaff63ee75f6085ce9f966c47b4441/include/linux/compiler-gcc.h
+--- linux-6a2f2e4198eaff63ee75f6085ce9f966c47b4441.orig/include/linux/compiler-gcc.h	2017-09-27 06:14:10.000000000 +0200
++++ linux-6a2f2e4198eaff63ee75f6085ce9f966c47b4441/include/linux/compiler-gcc.h	2017-09-27 20:01:29.488190516 +0200
+@@ -6,6 +6,10 @@
+  * Common definitions for all gcc versions go here.
+  */
+ 
++#define GCC_VERSION (__GNUC__ * 10000		\
++		     + __GNUC_MINOR__ * 100	\
++		     + __GNUC_PATCHLEVEL__)
++
+ 
+ /* Optimization barrier */
+ /* The "volatile" is due to gcc bugs */
+@@ -94,14 +98,6 @@
+ #define __maybe_unused			__attribute__((unused))
+ #define __always_unused			__attribute__((unused))
+ 
+-#define __gcc_header(x) #x
+-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+-#define gcc_header(x) _gcc_header(x)
+-#include gcc_header(__GNUC__)
+-
+-#if !defined(__noclone)
+-#define __noclone	/* not needed */
+-#endif
+ 
+ /*
+  * A trick to suppress uninitialized variable warning without generating any
+@@ -110,3 +106,160 @@
+ #define uninitialized_var(x) x = x
+ 
+ #define __always_inline		inline __attribute__((always_inline))
++
++/* gcc version specific checks */
++
++#if GCC_VERSION < 30200
++# error Sorry, your compiler is too old - please upgrade it.
++#endif
++
++#if GCC_VERSION < 30300
++# define __used			__attribute__((__unused__))
++#else
++# define __used			__attribute__((__used__))
++#endif
++
++#ifdef CONFIG_GCOV_KERNEL
++# if GCC_VERSION < 30400
++#   error "GCOV profiling support for gcc versions below 3.4 not included"
++# endif /* __GNUC_MINOR__ */
++#endif /* CONFIG_GCOV_KERNEL */
++
++#if GCC_VERSION >= 30400
++#define __must_check		__attribute__((warn_unused_result))
++#endif
++
++#if GCC_VERSION >= 40000
++
++/* GCC 4.1.[01] miscompiles __weak */
++#ifdef __KERNEL__
++# if GCC_VERSION >= 40100 &&  GCC_VERSION <= 40101
++#  error Your version of gcc miscompiles the __weak directive
++# endif
++#endif
++
++#define __used			__attribute__((__used__))
++#define __compiler_offsetof(a, b)					\
++	__builtin_offsetof(a, b)
++
++#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
++# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
++#endif
++
++#if GCC_VERSION >= 40300
++/* Mark functions as cold. gcc will assume any path leading to a call
++ * to them will be unlikely.  This means a lot of manual unlikely()s
++ * are unnecessary now for any paths leading to the usual suspects
++ * like BUG(), printk(), panic() etc. [but let's keep them for now for
++ * older compilers]
++ *
++ * Early snapshots of gcc 4.3 don't support this and we can't detect this
++ * in the preprocessor, but we can live with this because they're unreleased.
++ * Maketime probing would be overkill here.
++ *
++ * gcc also has a __attribute__((__hot__)) to move hot functions into
++ * a special section, but I don't see any sense in this right now in
++ * the kernel context
++ */
++#define __cold			__attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++#endif /* GCC_VERSION >= 40300 */
++
++#if GCC_VERSION >= 40500
++/*
++ * Mark a position in code as unreachable.  This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased.  Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
++
++#endif /* GCC_VERSION >= 40500 */
++
++#if GCC_VERSION >= 40600
++/*
++ * When used with Link Time Optimization, gcc can optimize away C functions or
++ * variables which are referenced only from assembly code.  __visible tells the
++ * optimizer that something else uses this function or variable, thus preventing
++ * this.
++ */
++#define __visible	__attribute__((externally_visible))
++#endif
++
++
++#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
++/*
++ * __assume_aligned(n, k): Tell the optimizer that the returned
++ * pointer can be assumed to be k modulo n. The second argument is
++ * optional (default 0), so we use a variadic macro to make the
++ * shorthand.
++ *
++ * Beware: Do not apply this to functions which may return
++ * ERR_PTRs. Also, it is probably unwise to apply it to functions
++ * returning extra information in the low bits (but in that case the
++ * compiler should see some alignment anyway, when the return value is
++ * massaged by 'flags = ptr & 3; ptr &= ~3;').
++ */
++#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
++#endif
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#if GCC_VERSION >= 40400
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#endif
++#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
++#define __HAVE_BUILTIN_BSWAP16__
++#endif
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
++
++#if GCC_VERSION >= 70000
++#define KASAN_ABI_VERSION 5
++#elif GCC_VERSION >= 50000
++#define KASAN_ABI_VERSION 4
++#elif GCC_VERSION >= 40902
++#define KASAN_ABI_VERSION 3
++#endif
++
++#if GCC_VERSION >= 40902
++/*
++ * Tell the compiler that address safety instrumentation (KASAN)
++ * should not be applied to that function.
++ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
++ */
++#define __no_sanitize_address __attribute__((no_sanitize_address))
++#endif
++
++#endif	/* gcc version >= 40000 specific checks */
++
++#if !defined(__noclone)
++#define __noclone	/* not needed */
++#endif
++
++#if !defined(__no_sanitize_address)
++#define __no_sanitize_address
++#endif
++

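For context on the patch above: it drops the old per-version compiler-gcc<N>.h includes and instead defines a single GCC_VERSION value that packs major/minor/patchlevel into one integer, which every "#if GCC_VERSION >= ..." gate then compares against. A minimal standalone sketch of that encoding follows; the program is illustrative only and not part of the patch.

/* Illustration only (not part of the patch): the GCC_VERSION encoding
 * introduced in compiler-gcc.h packs the compiler version as
 * major * 10000 + minor * 100 + patchlevel, e.g. gcc 4.9.2 -> 40902. */
#include <stdio.h>

#define GCC_VERSION (__GNUC__ * 10000		\
		     + __GNUC_MINOR__ * 100	\
		     + __GNUC_PATCHLEVEL__)

int main(void)
{
	printf("GCC_VERSION = %d\n", GCC_VERSION);

	/* The same comparisons the patch uses to pick a KASAN ABI version. */
#if GCC_VERSION >= 70000
	puts("KASAN_ABI_VERSION would be 5");
#elif GCC_VERSION >= 50000
	puts("KASAN_ABI_VERSION would be 4");
#elif GCC_VERSION >= 40902
	puts("KASAN_ABI_VERSION would be 3");
#else
	puts("no KASAN_ABI_VERSION for this compiler");
#endif
	return 0;
}
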
+ 12 - 0
target/linux/patches/6a2f2e4198eaff63ee75f6085ce9f966c47b4441/perl-fix.patch

@@ -0,0 +1,12 @@
+diff -Nur linux-1d008423ab5fd12459f53342e4d17585ec63cfe4.orig/kernel/timeconst.pl linux-1d008423ab5fd12459f53342e4d17585ec63cfe4/kernel/timeconst.pl
+--- linux-1d008423ab5fd12459f53342e4d17585ec63cfe4.orig/kernel/timeconst.pl	2016-12-22 18:05:10.000000000 +0100
++++ linux-1d008423ab5fd12459f53342e4d17585ec63cfe4/kernel/timeconst.pl	2016-12-22 18:42:55.573173008 +0100
+@@ -370,7 +370,7 @@
+ 	}
+ 
+ 	@val = @{$canned_values{$hz}};
+-	if (!defined(@val)) {
++	if (!@val) {
+ 		@val = compute_values($hz);
+ 	}
+ 	output($hz, @val);

+ 159 - 0
toolchain/elf2flt/patches/6d80ab6c93409e796f85da404bde84b841231531/0001-lm32.patch

@@ -0,0 +1,159 @@
+diff -Nur elf2flt-6d80ab6c93409e796f85da404bde84b841231531.orig/elf2flt.c elf2flt-6d80ab6c93409e796f85da404bde84b841231531/elf2flt.c
+--- elf2flt-6d80ab6c93409e796f85da404bde84b841231531.orig/elf2flt.c	2017-09-27 06:06:04.000000000 +0200
++++ elf2flt-6d80ab6c93409e796f85da404bde84b841231531/elf2flt.c	2017-09-27 06:07:51.032597579 +0200
+@@ -61,6 +61,8 @@
+ #include <elf/bfin.h>
+ #elif defined(TARGET_h8300)
+ #include <elf/h8.h>
++#elif defined(TARGET_lm32)
++#include <elf/lm32.h>
+ #elif defined(TARGET_m68k)
+ #include <elf/m68k.h>
+ #elif defined(TARGET_microblaze)
+@@ -120,6 +122,11 @@
+ #define ARCH	"nios"
+ #elif defined(TARGET_nios2)
+ #define ARCH	"nios2"
++#elif defined(TARGET_lm32)
++#define ARCH	"lm32"
++#define FLAT_LM32_RELOC_TYPE_32_BIT   0
++#define FLAT_LM32_RELOC_TYPE_HI16_BIT 1
++#define FLAT_LM32_RELOC_TYPE_LO16_BIT 2
+ #elif defined(TARGET_xtensa)
+ #define ARCH	"xtensa"
+ #else
+@@ -357,7 +364,7 @@
+   int			bad_relocs = 0;
+   asymbol		**symb;
+   long			nsymb;
+-#ifdef TARGET_bfin
++#if defined (TARGET_bfin) || defined (TARGET_lm32)
+   unsigned long		persistent_data = 0;
+ #endif
+   
+@@ -682,6 +689,36 @@
+ 					break;
+ 				default:
+ 					goto bad_resolved_reloc;
++#elif defined(TARGET_lm32)
++				case R_LM32_HI16:
++				case R_LM32_LO16:
++					if (q->howto->type == R_LM32_HI16) {
++						pflags = FLAT_LM32_RELOC_TYPE_HI16_BIT << 29;
++					} else {
++						pflags = FLAT_LM32_RELOC_TYPE_LO16_BIT << 29;
++					}
++
++					relocation_needed = 1;
++
++					/* remember the upper 16 bits */
++				    if ((0xffff0000 & sym_addr) != persistent_data) {
++						flat_relocs = (uint32_t *)
++							(realloc (flat_relocs, (flat_reloc_count + 1) * sizeof (uint32_t)));
++						if (verbose)
++							printf ("New persistent data for %08lx\n", sym_addr);
++						persistent_data = 0xffff0000 & sym_addr;
++						flat_relocs[flat_reloc_count++] = (sym_addr >> 16) | (3 << 29);
++					}
++					break;
++				case R_LM32_32:
++					pflags = FLAT_LM32_RELOC_TYPE_32_BIT << 29;
++					relocation_needed = 1;
++					break;
++				case R_LM32_CALL:
++					relocation_needed = 0;
++					break;
++				default:
++					goto bad_resolved_reloc;
+ #elif defined(TARGET_m68k)
+ 				case R_68K_32:
+ 					goto good_32bit_resolved_reloc;
+@@ -1459,6 +1496,63 @@
+ #undef _30BITS_RELOC
+ #undef _28BITS_RELOC
+ #endif
++#ifdef TARGET_lm32
++				case R_LM32_32:
++				{
++					pflags = FLAT_LM32_RELOC_TYPE_32_BIT << 29;
++					sym_vma = bfd_section_vma(abs_bfd, sym_section);
++					sym_addr += sym_vma + q->addend;
++					relocation_needed = 1;
++					break;
++				}
++				case R_LM32_CALL:
++				{
++					sym_vma = 0;
++					sym_addr += sym_vma + q->addend;
++					sym_addr -= q->address;
++					sym_addr = (int32_t)sym_addr >> q->howto->rightshift;
++
++					if ((int32_t)sym_addr < -0x8000000 || (int32_t)sym_addr > 0x7ffffff) {
++						printf("ERROR: Relocation overflow for R_LM32_CALL relocation against %s\n", sym_name);
++						bad_relocs++;
++						continue;
++					}
++
++					r_mem[0] |= (sym_addr >> 24) & 0x03;
++					r_mem[1] = (sym_addr >> 16) & 0xff;
++					r_mem[2] = (sym_addr >> 8) & 0xff;
++					r_mem[3] = sym_addr & 0xff;
++					break;
++				}
++				case R_LM32_HI16:
++				case R_LM32_LO16:
++				{
++					if (q->howto->type == R_LM32_HI16) {
++						pflags = FLAT_LM32_RELOC_TYPE_HI16_BIT << 29;
++					} else {
++						pflags = FLAT_LM32_RELOC_TYPE_LO16_BIT << 29;
++					}
++
++					sym_vma = bfd_section_vma(abs_bfd, sym_section);
++					sym_addr += sym_vma + q->addend;
++
++					relocation_needed = 1;
++
++					/* remember the upper 16 bits */
++				    if ((0xffff0000 & sym_addr) != persistent_data) {
++						flat_relocs = (uint32_t *)
++							(realloc (flat_relocs, (flat_reloc_count + 1) * sizeof (uint32_t)));
++						if (verbose)
++							printf ("New persistent data for %08lx\n", sym_addr);
++						persistent_data = 0xffff0000 & sym_addr;
++						flat_relocs[flat_reloc_count++] = (sym_addr >> 16) | (3 << 29);
++					}
++
++					r_mem[2] = (sym_addr >> 8) & 0xff;
++					r_mem[3] = sym_addr & 0xff;
++					break;
++				}
++#endif /* TARGET_lm32 */
+ 				default:
+ 					/* missing support for other types of relocs */
+ 					printf("ERROR: bad reloc type %d\n", (*p)->howto->type);
+@@ -1596,6 +1690,13 @@
+ 					break;
+ #endif
+ 
++#ifdef TARGET_lm32
++				case R_LM32_HI16:
++				case R_LM32_LO16:
++				case R_LM32_CALL:
++					/* entry has already been written */
++					break;
++#endif
+ 				default:
+ 					/* The alignment of the build host
+ 					   might be stricter than that of the
+diff -Nur elf2flt-6d80ab6c93409e796f85da404bde84b841231531.orig/elf2flt.ld.in elf2flt-6d80ab6c93409e796f85da404bde84b841231531/elf2flt.ld.in
+--- elf2flt-6d80ab6c93409e796f85da404bde84b841231531.orig/elf2flt.ld.in	2017-09-27 06:06:04.000000000 +0200
++++ elf2flt-6d80ab6c93409e796f85da404bde84b841231531/elf2flt.ld.in	2017-09-29 18:11:30.999698955 +0200
+@@ -34,6 +34,7 @@
+ W_RODAT		*(.rodata1)
+ W_RODAT		*(.rodata.*)
+ W_RODAT		*(.gnu.linkonce.r*)
++W_RODAT		*(.rofixup)
+ 
+ 		/* .ARM.extab name sections containing exception unwinding information */
+ 		*(.ARM.extab* .gnu.linkonce.armextab.*)
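
A note on the flat-binary relocation words the elf2flt.c hunks above emit for lm32: the relocation type is stored in the top three bits of each 32-bit entry (FLAT_LM32_RELOC_TYPE_* << 29), and a separate entry tagged with 3 << 29 carries the upper 16 bits of the target address ("persistent data") so that following HI16/LO16 fixups can reuse it. The helper and constant names below are hypothetical, a minimal sketch of that packing rather than code taken from elf2flt.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical names; the patch itself uses FLAT_LM32_RELOC_TYPE_*_BIT
 * and a literal (3 << 29) for the persistent-data entry. */
#define LM32_RELOC_TYPE_32      0u
#define LM32_RELOC_TYPE_HI16    1u
#define LM32_RELOC_TYPE_LO16    2u
#define LM32_RELOC_PERSIST_DATA 3u

/* Pack a relocation type into bits 31..29 above a 29-bit value. */
static uint32_t lm32_flat_reloc(uint32_t type, uint32_t value)
{
	return (type << 29) | (value & 0x1fffffffu);
}

int main(void)
{
	uint32_t sym_addr = 0x12345678u;

	/* As in the patch, a HI16/LO16 fixup is preceded by a persistent-data
	 * entry holding the upper 16 bits of the symbol address... */
	uint32_t persist = lm32_flat_reloc(LM32_RELOC_PERSIST_DATA, sym_addr >> 16);

	/* ...followed by the relocation word for the fixup itself, whose low
	 * bits elf2flt fills in elsewhere; 0x100 is just an example offset. */
	uint32_t hi16 = lm32_flat_reloc(LM32_RELOC_TYPE_HI16, 0x100u);

	printf("persistent entry: 0x%08x\n", persist);
	printf("HI16 entry:       0x%08x\n", hi16);
	return 0;
}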