@@ -0,0 +1,9424 @@
+diff -Nur linux-4.6.2/arch/.gitignore linux-4.6.2.riscv/arch/.gitignore
+--- linux-4.6.2/arch/.gitignore 2016-06-08 03:23:53.000000000 +0200
++++ linux-4.6.2.riscv/arch/.gitignore 2017-03-04 02:48:34.162887952 +0100
+@@ -1,2 +1,3 @@
+-i386
+-x86_64
++# In ../, we ignored everything, so suppress that.
++!riscv/
++
+diff -Nur linux-4.6.2/arch/riscv/configs/riscv64_qemu linux-4.6.2.riscv/arch/riscv/configs/riscv64_qemu
+--- linux-4.6.2/arch/riscv/configs/riscv64_qemu 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/configs/riscv64_qemu 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1,64 @@
++# CONFIG_COMPACTION is not set
++# CONFIG_CROSS_MEMORY_ATTACH is not set
++CONFIG_HZ_100=y
++CONFIG_CROSS_COMPILE="riscv64-unknown-linux-gnu-"
++CONFIG_DEFAULT_HOSTNAME="ucbvax"
++CONFIG_NAMESPACES=y
++CONFIG_EMBEDDED=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_IOSCHED_DEADLINE is not set
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++# CONFIG_INET_DIAG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_WIRELESS is not set
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++# CONFIG_FIRMWARE_IN_KERNEL is not set
++# CONFIG_BLK_DEV is not set
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_SCSI_VIRTIO=y
++CONFIG_NETDEVICES=y
++CONFIG_VIRTIO_NET=y
++# CONFIG_ETHERNET is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_VT is not set
++CONFIG_SERIAL_8250=y
++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=1
++CONFIG_SERIAL_8250_RUNTIME_UARTS=1
++CONFIG_VIRTIO_CONSOLE=y
++# CONFIG_HW_RANDOM is not set
++# CONFIG_HWMON is not set
++CONFIG_FB=y
++# CONFIG_USB_SUPPORT is not set
++CONFIG_VIRTIO_MMIO=y
++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
++# CONFIG_IOMMU_SUPPORT is not set
++CONFIG_EXT4_FS=y
++# CONFIG_FILE_LOCKING is not set
++# CONFIG_DNOTIFY is not set
++# CONFIG_INOTIFY_USER is not set
++# CONFIG_PROC_PAGE_MONITOR is not set
++CONFIG_TMPFS=y
++# CONFIG_MISC_FILESYSTEMS is not set
++# CONFIG_NETWORK_FILESYSTEMS is not set
++CONFIG_CMDLINE_BOOL=y
++CONFIG_CMDLINE="virtio_mmio.device=0x200@0x400:1 virtio_mmio.device=0x200@0x600:2 virtio_mmio.device=0x200@0x800:3 lpj=100000"
++CONFIG_CMDLINE_OVERRIDE=y
++CONFIG_PRINTK_TIME=y
++CONFIG_DEBUG_SECTION_MISMATCH=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++# CONFIG_CRYPTO_HW is not set
+diff -Nur linux-4.6.2/arch/riscv/configs/riscv64_spike linux-4.6.2.riscv/arch/riscv/configs/riscv64_spike
+--- linux-4.6.2/arch/riscv/configs/riscv64_spike 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/configs/riscv64_spike 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1,929 @@
++#
|
|
|
++# Automatically generated file; DO NOT EDIT.
|
|
|
++# Linux/riscv 3.14.29 Kernel Configuration
|
|
|
++#
|
|
|
++CONFIG_RISCV=y
|
|
|
++CONFIG_MMU=y
|
|
|
++CONFIG_PCI=y
|
|
|
++CONFIG_STACKTRACE_SUPPORT=y
|
|
|
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
|
|
|
++CONFIG_GENERIC_BUG=y
|
|
|
++CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
|
|
|
++CONFIG_GENERIC_CALIBRATE_DELAY=y
|
|
|
++CONFIG_GENERIC_CSUM=y
|
|
|
++CONFIG_GENERIC_HWEIGHT=y
|
|
|
++
|
|
|
++#
|
|
|
++# Platform type
|
|
|
++#
|
|
|
++CONFIG_CPU_RV_ROCKET=y
|
|
|
++# CONFIG_CPU_RV_GENERIC is not set
|
|
|
++CONFIG_CPU_SUPPORTS_64BIT_KERNEL=y
|
|
|
++CONFIG_RV_ATOMIC=y
|
|
|
++# CONFIG_RV_SYSRISCV_ATOMIC is not set
|
|
|
++CONFIG_SBI_CONSOLE=y
|
|
|
++
|
|
|
++#
|
|
|
++# Kernel type
|
|
|
++#
|
|
|
++CONFIG_64BIT=y
|
|
|
++CONFIG_FLATMEM=y
|
|
|
++CONFIG_FLAT_NODE_MEM_MAP=y
|
|
|
++CONFIG_HAVE_MEMBLOCK=y
|
|
|
++CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
|
|
|
++# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
|
|
|
++CONFIG_PAGEFLAGS_EXTENDED=y
|
|
|
++CONFIG_SPLIT_PTLOCK_CPUS=4
|
|
|
++# CONFIG_COMPACTION is not set
|
|
|
++CONFIG_PHYS_ADDR_T_64BIT=y
|
|
|
++CONFIG_ZONE_DMA_FLAG=0
|
|
|
++# CONFIG_KSM is not set
|
|
|
++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
|
|
|
++# CONFIG_CROSS_MEMORY_ATTACH is not set
|
|
|
++CONFIG_NEED_PER_CPU_KM=y
|
|
|
++# CONFIG_CLEANCACHE is not set
|
|
|
++# CONFIG_FRONTSWAP is not set
|
|
|
++# CONFIG_CMA is not set
|
|
|
++# CONFIG_ZBUD is not set
|
|
|
++# CONFIG_ZSMALLOC is not set
|
|
|
++CONFIG_PREEMPT_NONE=y
|
|
|
++# CONFIG_PREEMPT_VOLUNTARY is not set
|
|
|
++# CONFIG_PREEMPT is not set
|
|
|
++CONFIG_HZ_100=y
|
|
|
++# CONFIG_HZ_250 is not set
|
|
|
++# CONFIG_HZ_300 is not set
|
|
|
++# CONFIG_HZ_1000 is not set
|
|
|
++CONFIG_HZ=100
|
|
|
++# CONFIG_SCHED_HRTICK is not set
|
|
|
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
|
|
|
++CONFIG_IRQ_WORK=y
|
|
|
++
|
|
|
++#
|
|
|
++# General setup
|
|
|
++#
|
|
|
++CONFIG_BROKEN_ON_SMP=y
|
|
|
++CONFIG_INIT_ENV_ARG_LIMIT=32
|
|
|
++CONFIG_CROSS_COMPILE="riscv64-unknown-linux-gnu-"
|
|
|
++# CONFIG_COMPILE_TEST is not set
|
|
|
++CONFIG_LOCALVERSION=""
|
|
|
++CONFIG_LOCALVERSION_AUTO=y
|
|
|
++CONFIG_DEFAULT_HOSTNAME="ucbvax"
|
|
|
++CONFIG_SWAP=y
|
|
|
++# CONFIG_SYSVIPC is not set
|
|
|
++# CONFIG_POSIX_MQUEUE is not set
|
|
|
++# CONFIG_FHANDLE is not set
|
|
|
++# CONFIG_AUDIT is not set
|
|
|
++
|
|
|
++#
|
|
|
++# IRQ subsystem
|
|
|
++#
|
|
|
++CONFIG_GENERIC_IRQ_SHOW=y
|
|
|
++CONFIG_GENERIC_CLOCKEVENTS=y
|
|
|
++CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
|
|
|
++
|
|
|
++#
|
|
|
++# Timers subsystem
|
|
|
++#
|
|
|
++CONFIG_HZ_PERIODIC=y
|
|
|
++# CONFIG_NO_HZ_IDLE is not set
|
|
|
++# CONFIG_NO_HZ is not set
|
|
|
++# CONFIG_HIGH_RES_TIMERS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# CPU/Task time and stats accounting
|
|
|
++#
|
|
|
++CONFIG_TICK_CPU_ACCOUNTING=y
|
|
|
++# CONFIG_BSD_PROCESS_ACCT is not set
|
|
|
++# CONFIG_TASKSTATS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# RCU Subsystem
|
|
|
++#
|
|
|
++CONFIG_TINY_RCU=y
|
|
|
++# CONFIG_PREEMPT_RCU is not set
|
|
|
++# CONFIG_RCU_STALL_COMMON is not set
|
|
|
++# CONFIG_TREE_RCU_TRACE is not set
|
|
|
++# CONFIG_IKCONFIG is not set
|
|
|
++CONFIG_LOG_BUF_SHIFT=17
|
|
|
++# CONFIG_CGROUPS is not set
|
|
|
++# CONFIG_CHECKPOINT_RESTORE is not set
|
|
|
++CONFIG_NAMESPACES=y
|
|
|
++CONFIG_UTS_NS=y
|
|
|
++# CONFIG_USER_NS is not set
|
|
|
++CONFIG_PID_NS=y
|
|
|
++CONFIG_NET_NS=y
|
|
|
++# CONFIG_SCHED_AUTOGROUP is not set
|
|
|
++# CONFIG_RELAY is not set
|
|
|
++# CONFIG_BLK_DEV_INITRD is not set
|
|
|
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
|
|
++CONFIG_SYSCTL=y
|
|
|
++CONFIG_ANON_INODES=y
|
|
|
++CONFIG_SYSCTL_EXCEPTION_TRACE=y
|
|
|
++CONFIG_EXPERT=y
|
|
|
++# CONFIG_SYSCTL_SYSCALL is not set
|
|
|
++CONFIG_KALLSYMS=y
|
|
|
++# CONFIG_KALLSYMS_ALL is not set
|
|
|
++CONFIG_PRINTK=y
|
|
|
++CONFIG_BUG=y
|
|
|
++CONFIG_ELF_CORE=y
|
|
|
++CONFIG_BASE_FULL=y
|
|
|
++CONFIG_FUTEX=y
|
|
|
++CONFIG_EPOLL=y
|
|
|
++CONFIG_SIGNALFD=y
|
|
|
++CONFIG_TIMERFD=y
|
|
|
++CONFIG_EVENTFD=y
|
|
|
++CONFIG_SHMEM=y
|
|
|
++CONFIG_AIO=y
|
|
|
++CONFIG_EMBEDDED=y
|
|
|
++
|
|
|
++#
|
|
|
++# Kernel Performance Events And Counters
|
|
|
++#
|
|
|
++CONFIG_VM_EVENT_COUNTERS=y
|
|
|
++CONFIG_COMPAT_BRK=y
|
|
|
++# CONFIG_SLAB is not set
|
|
|
++CONFIG_SLUB=y
|
|
|
++# CONFIG_SLOB is not set
|
|
|
++# CONFIG_PROFILING is not set
|
|
|
++CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y
|
|
|
++# CONFIG_CC_STACKPROTECTOR is not set
|
|
|
++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
|
|
|
++CONFIG_CLONE_BACKWARDS=y
|
|
|
++
|
|
|
++#
|
|
|
++# GCOV-based kernel profiling
|
|
|
++#
|
|
|
++# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
|
|
|
++CONFIG_RT_MUTEXES=y
|
|
|
++CONFIG_BASE_SMALL=0
|
|
|
++# CONFIG_MODULES is not set
|
|
|
++CONFIG_BLOCK=y
|
|
|
++# CONFIG_BLK_DEV_BSG is not set
|
|
|
++# CONFIG_BLK_DEV_BSGLIB is not set
|
|
|
++# CONFIG_BLK_DEV_INTEGRITY is not set
|
|
|
++# CONFIG_BLK_CMDLINE_PARSER is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Partition Types
|
|
|
++#
|
|
|
++CONFIG_PARTITION_ADVANCED=y
|
|
|
++# CONFIG_ACORN_PARTITION is not set
|
|
|
++# CONFIG_AIX_PARTITION is not set
|
|
|
++# CONFIG_OSF_PARTITION is not set
|
|
|
++# CONFIG_AMIGA_PARTITION is not set
|
|
|
++# CONFIG_ATARI_PARTITION is not set
|
|
|
++# CONFIG_MAC_PARTITION is not set
|
|
|
++CONFIG_MSDOS_PARTITION=y
|
|
|
++# CONFIG_BSD_DISKLABEL is not set
|
|
|
++# CONFIG_MINIX_SUBPARTITION is not set
|
|
|
++# CONFIG_SOLARIS_X86_PARTITION is not set
|
|
|
++# CONFIG_UNIXWARE_DISKLABEL is not set
|
|
|
++# CONFIG_LDM_PARTITION is not set
|
|
|
++# CONFIG_SGI_PARTITION is not set
|
|
|
++# CONFIG_ULTRIX_PARTITION is not set
|
|
|
++# CONFIG_SUN_PARTITION is not set
|
|
|
++# CONFIG_KARMA_PARTITION is not set
|
|
|
++# CONFIG_EFI_PARTITION is not set
|
|
|
++# CONFIG_SYSV68_PARTITION is not set
|
|
|
++# CONFIG_CMDLINE_PARTITION is not set
|
|
|
++
|
|
|
++#
|
|
|
++# IO Schedulers
|
|
|
++#
|
|
|
++CONFIG_IOSCHED_NOOP=y
|
|
|
++# CONFIG_IOSCHED_DEADLINE is not set
|
|
|
++CONFIG_IOSCHED_CFQ=y
|
|
|
++CONFIG_DEFAULT_CFQ=y
|
|
|
++# CONFIG_DEFAULT_NOOP is not set
|
|
|
++CONFIG_DEFAULT_IOSCHED="cfq"
|
|
|
++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
|
|
|
++CONFIG_INLINE_READ_UNLOCK=y
|
|
|
++CONFIG_INLINE_READ_UNLOCK_IRQ=y
|
|
|
++CONFIG_INLINE_WRITE_UNLOCK=y
|
|
|
++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
|
|
|
++# CONFIG_FREEZER is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Executable file formats
|
|
|
++#
|
|
|
++CONFIG_BINFMT_ELF=y
|
|
|
++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
|
|
|
++CONFIG_BINFMT_SCRIPT=y
|
|
|
++# CONFIG_HAVE_AOUT is not set
|
|
|
++# CONFIG_BINFMT_MISC is not set
|
|
|
++CONFIG_COREDUMP=y
|
|
|
++
|
|
|
++#
|
|
|
++# Power management options
|
|
|
++#
|
|
|
++# CONFIG_PM_RUNTIME is not set
|
|
|
++CONFIG_NET=y
|
|
|
++
|
|
|
++#
|
|
|
++# Networking options
|
|
|
++#
|
|
|
++# CONFIG_PACKET is not set
|
|
|
++CONFIG_UNIX=y
|
|
|
++# CONFIG_UNIX_DIAG is not set
|
|
|
++# CONFIG_XFRM_USER is not set
|
|
|
++# CONFIG_NET_KEY is not set
|
|
|
++CONFIG_INET=y
|
|
|
++# CONFIG_IP_MULTICAST is not set
|
|
|
++# CONFIG_IP_ADVANCED_ROUTER is not set
|
|
|
++# CONFIG_IP_PNP is not set
|
|
|
++# CONFIG_NET_IPIP is not set
|
|
|
++# CONFIG_NET_IPGRE_DEMUX is not set
|
|
|
++# CONFIG_NET_IP_TUNNEL is not set
|
|
|
++# CONFIG_SYN_COOKIES is not set
|
|
|
++# CONFIG_INET_AH is not set
|
|
|
++# CONFIG_INET_ESP is not set
|
|
|
++# CONFIG_INET_IPCOMP is not set
|
|
|
++# CONFIG_INET_XFRM_TUNNEL is not set
|
|
|
++# CONFIG_INET_TUNNEL is not set
|
|
|
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
|
|
|
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
|
|
|
++# CONFIG_INET_XFRM_MODE_BEET is not set
|
|
|
++# CONFIG_INET_LRO is not set
|
|
|
++# CONFIG_INET_DIAG is not set
|
|
|
++# CONFIG_TCP_CONG_ADVANCED is not set
|
|
|
++CONFIG_TCP_CONG_CUBIC=y
|
|
|
++CONFIG_DEFAULT_TCP_CONG="cubic"
|
|
|
++# CONFIG_TCP_MD5SIG is not set
|
|
|
++# CONFIG_IPV6 is not set
|
|
|
++# CONFIG_NETWORK_SECMARK is not set
|
|
|
++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
|
|
|
++# CONFIG_NETFILTER is not set
|
|
|
++# CONFIG_IP_DCCP is not set
|
|
|
++# CONFIG_IP_SCTP is not set
|
|
|
++# CONFIG_RDS is not set
|
|
|
++# CONFIG_TIPC is not set
|
|
|
++# CONFIG_ATM is not set
|
|
|
++# CONFIG_L2TP is not set
|
|
|
++# CONFIG_BRIDGE is not set
|
|
|
++# CONFIG_VLAN_8021Q is not set
|
|
|
++# CONFIG_DECNET is not set
|
|
|
++# CONFIG_LLC2 is not set
|
|
|
++# CONFIG_IPX is not set
|
|
|
++# CONFIG_ATALK is not set
|
|
|
++# CONFIG_X25 is not set
|
|
|
++# CONFIG_LAPB is not set
|
|
|
++# CONFIG_PHONET is not set
|
|
|
++# CONFIG_IEEE802154 is not set
|
|
|
++# CONFIG_NET_SCHED is not set
|
|
|
++# CONFIG_DCB is not set
|
|
|
++# CONFIG_BATMAN_ADV is not set
|
|
|
++# CONFIG_OPENVSWITCH is not set
|
|
|
++# CONFIG_VSOCKETS is not set
|
|
|
++# CONFIG_NETLINK_MMAP is not set
|
|
|
++# CONFIG_NETLINK_DIAG is not set
|
|
|
++# CONFIG_NET_MPLS_GSO is not set
|
|
|
++# CONFIG_HSR is not set
|
|
|
++CONFIG_NET_RX_BUSY_POLL=y
|
|
|
++
|
|
|
++#
|
|
|
++# Network testing
|
|
|
++#
|
|
|
++# CONFIG_NET_PKTGEN is not set
|
|
|
++# CONFIG_HAMRADIO is not set
|
|
|
++# CONFIG_CAN is not set
|
|
|
++# CONFIG_IRDA is not set
|
|
|
++# CONFIG_BT is not set
|
|
|
++# CONFIG_AF_RXRPC is not set
|
|
|
++# CONFIG_WIRELESS is not set
|
|
|
++# CONFIG_WIMAX is not set
|
|
|
++# CONFIG_RFKILL is not set
|
|
|
++# CONFIG_NET_9P is not set
|
|
|
++# CONFIG_CAIF is not set
|
|
|
++# CONFIG_CEPH_LIB is not set
|
|
|
++# CONFIG_NFC is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Device Drivers
|
|
|
++#
|
|
|
++
|
|
|
++#
|
|
|
++# Generic Driver Options
|
|
|
++#
|
|
|
++CONFIG_UEVENT_HELPER_PATH=""
|
|
|
++CONFIG_DEVTMPFS=y
|
|
|
++CONFIG_DEVTMPFS_MOUNT=y
|
|
|
++CONFIG_STANDALONE=y
|
|
|
++CONFIG_PREVENT_FIRMWARE_BUILD=y
|
|
|
++CONFIG_FW_LOADER=y
|
|
|
++# CONFIG_FIRMWARE_IN_KERNEL is not set
|
|
|
++CONFIG_EXTRA_FIRMWARE=""
|
|
|
++CONFIG_FW_LOADER_USER_HELPER=y
|
|
|
++# CONFIG_DEBUG_DRIVER is not set
|
|
|
++# CONFIG_DEBUG_DEVRES is not set
|
|
|
++# CONFIG_SYS_HYPERVISOR is not set
|
|
|
++CONFIG_GENERIC_CPU_DEVICES=y
|
|
|
++# CONFIG_DMA_SHARED_BUFFER is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Bus devices
|
|
|
++#
|
|
|
++# CONFIG_CONNECTOR is not set
|
|
|
++# CONFIG_MTD is not set
|
|
|
++# CONFIG_PARPORT is not set
|
|
|
++# CONFIG_BLK_DEV is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Misc devices
|
|
|
++#
|
|
|
++# CONFIG_SENSORS_LIS3LV02D is not set
|
|
|
++# CONFIG_DUMMY_IRQ is not set
|
|
|
++# CONFIG_ATMEL_SSC is not set
|
|
|
++# CONFIG_ENCLOSURE_SERVICES is not set
|
|
|
++# CONFIG_SRAM is not set
|
|
|
++# CONFIG_C2PORT is not set
|
|
|
++
|
|
|
++#
|
|
|
++# EEPROM support
|
|
|
++#
|
|
|
++# CONFIG_EEPROM_93CX6 is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Texas Instruments shared transport line discipline
|
|
|
++#
|
|
|
++
|
|
|
++#
|
|
|
++# Altera FPGA firmware download module
|
|
|
++#
|
|
|
++
|
|
|
++#
|
|
|
++# Intel MIC Host Driver
|
|
|
++#
|
|
|
++
|
|
|
++#
|
|
|
++# Intel MIC Card Driver
|
|
|
++#
|
|
|
++
|
|
|
++#
|
|
|
++# SCSI device support
|
|
|
++#
|
|
|
++CONFIG_SCSI_MOD=y
|
|
|
++# CONFIG_RAID_ATTRS is not set
|
|
|
++# CONFIG_SCSI is not set
|
|
|
++# CONFIG_SCSI_DMA is not set
|
|
|
++# CONFIG_SCSI_NETLINK is not set
|
|
|
++# CONFIG_ATA is not set
|
|
|
++# CONFIG_MD is not set
|
|
|
++# CONFIG_NETDEVICES is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Input device support
|
|
|
++#
|
|
|
++CONFIG_INPUT=y
|
|
|
++# CONFIG_INPUT_FF_MEMLESS is not set
|
|
|
++# CONFIG_INPUT_POLLDEV is not set
|
|
|
++# CONFIG_INPUT_SPARSEKMAP is not set
|
|
|
++# CONFIG_INPUT_MATRIXKMAP is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Userland interfaces
|
|
|
++#
|
|
|
++# CONFIG_INPUT_MOUSEDEV is not set
|
|
|
++# CONFIG_INPUT_JOYDEV is not set
|
|
|
++# CONFIG_INPUT_EVDEV is not set
|
|
|
++# CONFIG_INPUT_EVBUG is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Input Device Drivers
|
|
|
++#
|
|
|
++# CONFIG_INPUT_KEYBOARD is not set
|
|
|
++# CONFIG_INPUT_MOUSE is not set
|
|
|
++# CONFIG_INPUT_JOYSTICK is not set
|
|
|
++# CONFIG_INPUT_TABLET is not set
|
|
|
++# CONFIG_INPUT_TOUCHSCREEN is not set
|
|
|
++# CONFIG_INPUT_MISC is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Hardware I/O ports
|
|
|
++#
|
|
|
++CONFIG_SERIO=y
|
|
|
++CONFIG_SERIO_SERPORT=y
|
|
|
++# CONFIG_SERIO_LIBPS2 is not set
|
|
|
++# CONFIG_SERIO_RAW is not set
|
|
|
++# CONFIG_SERIO_ALTERA_PS2 is not set
|
|
|
++# CONFIG_SERIO_PS2MULT is not set
|
|
|
++# CONFIG_SERIO_ARC_PS2 is not set
|
|
|
++# CONFIG_GAMEPORT is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Character devices
|
|
|
++#
|
|
|
++CONFIG_TTY=y
|
|
|
++# CONFIG_VT is not set
|
|
|
++CONFIG_UNIX98_PTYS=y
|
|
|
++# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
|
|
|
++CONFIG_LEGACY_PTYS=y
|
|
|
++CONFIG_LEGACY_PTY_COUNT=256
|
|
|
++# CONFIG_SERIAL_NONSTANDARD is not set
|
|
|
++# CONFIG_N_GSM is not set
|
|
|
++# CONFIG_TRACE_SINK is not set
|
|
|
++CONFIG_DEVKMEM=y
|
|
|
++
|
|
|
++#
|
|
|
++# Serial drivers
|
|
|
++#
|
|
|
++# CONFIG_SERIAL_8250 is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Non-8250 serial port support
|
|
|
++#
|
|
|
++# CONFIG_SERIAL_SCCNXP is not set
|
|
|
++# CONFIG_SERIAL_TIMBERDALE is not set
|
|
|
++# CONFIG_SERIAL_ALTERA_JTAGUART is not set
|
|
|
++# CONFIG_SERIAL_ALTERA_UART is not set
|
|
|
++# CONFIG_SERIAL_ARC is not set
|
|
|
++# CONFIG_SERIAL_FSL_LPUART is not set
|
|
|
++# CONFIG_TTY_PRINTK is not set
|
|
|
++# CONFIG_IPMI_HANDLER is not set
|
|
|
++# CONFIG_HW_RANDOM is not set
|
|
|
++# CONFIG_RTC is not set
|
|
|
++# CONFIG_GEN_RTC is not set
|
|
|
++# CONFIG_R3964 is not set
|
|
|
++
|
|
|
++#
|
|
|
++# PCMCIA character devices
|
|
|
++#
|
|
|
++# CONFIG_RAW_DRIVER is not set
|
|
|
++# CONFIG_TCG_TPM is not set
|
|
|
++# CONFIG_I2C is not set
|
|
|
++# CONFIG_SPI is not set
|
|
|
++# CONFIG_HSI is not set
|
|
|
++
|
|
|
++#
|
|
|
++# PPS support
|
|
|
++#
|
|
|
++# CONFIG_PPS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# PPS generators support
|
|
|
++#
|
|
|
++
|
|
|
++#
|
|
|
++# PTP clock support
|
|
|
++#
|
|
|
++# CONFIG_PTP_1588_CLOCK is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
|
|
|
++#
|
|
|
++# CONFIG_W1 is not set
|
|
|
++# CONFIG_POWER_SUPPLY is not set
|
|
|
++# CONFIG_POWER_AVS is not set
|
|
|
++# CONFIG_HWMON is not set
|
|
|
++# CONFIG_THERMAL is not set
|
|
|
++# CONFIG_WATCHDOG is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Multifunction device drivers
|
|
|
++#
|
|
|
++# CONFIG_MFD_CORE is not set
|
|
|
++# CONFIG_MFD_CROS_EC is not set
|
|
|
++# CONFIG_HTC_PASIC3 is not set
|
|
|
++# CONFIG_MFD_KEMPLD is not set
|
|
|
++# CONFIG_MFD_SM501 is not set
|
|
|
++# CONFIG_ABX500_CORE is not set
|
|
|
++# CONFIG_MFD_SYSCON is not set
|
|
|
++# CONFIG_MFD_TI_AM335X_TSCADC is not set
|
|
|
++# CONFIG_MFD_TMIO is not set
|
|
|
++# CONFIG_REGULATOR is not set
|
|
|
++# CONFIG_MEDIA_SUPPORT is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Graphics support
|
|
|
++#
|
|
|
++# CONFIG_VGASTATE is not set
|
|
|
++# CONFIG_VIDEO_OUTPUT_CONTROL is not set
|
|
|
++CONFIG_FB=y
|
|
|
++# CONFIG_FIRMWARE_EDID is not set
|
|
|
++# CONFIG_FB_DDC is not set
|
|
|
++# CONFIG_FB_BOOT_VESA_SUPPORT is not set
|
|
|
++# CONFIG_FB_CFB_FILLRECT is not set
|
|
|
++# CONFIG_FB_CFB_COPYAREA is not set
|
|
|
++# CONFIG_FB_CFB_IMAGEBLIT is not set
|
|
|
++# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
|
|
|
++# CONFIG_FB_SYS_FILLRECT is not set
|
|
|
++# CONFIG_FB_SYS_COPYAREA is not set
|
|
|
++# CONFIG_FB_SYS_IMAGEBLIT is not set
|
|
|
++# CONFIG_FB_FOREIGN_ENDIAN is not set
|
|
|
++# CONFIG_FB_SYS_FOPS is not set
|
|
|
++# CONFIG_FB_SVGALIB is not set
|
|
|
++# CONFIG_FB_MACMODES is not set
|
|
|
++# CONFIG_FB_BACKLIGHT is not set
|
|
|
++# CONFIG_FB_MODE_HELPERS is not set
|
|
|
++# CONFIG_FB_TILEBLITTING is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Frame buffer hardware drivers
|
|
|
++#
|
|
|
++# CONFIG_FB_OPENCORES is not set
|
|
|
++# CONFIG_FB_S1D13XXX is not set
|
|
|
++# CONFIG_FB_VIRTUAL is not set
|
|
|
++# CONFIG_FB_METRONOME is not set
|
|
|
++# CONFIG_FB_BROADSHEET is not set
|
|
|
++# CONFIG_FB_AUO_K190X is not set
|
|
|
++# CONFIG_FB_SIMPLE is not set
|
|
|
++# CONFIG_EXYNOS_VIDEO is not set
|
|
|
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
|
|
|
++# CONFIG_LOGO is not set
|
|
|
++# CONFIG_SOUND is not set
|
|
|
++
|
|
|
++#
|
|
|
++# HID support
|
|
|
++#
|
|
|
++CONFIG_HID=y
|
|
|
++# CONFIG_HIDRAW is not set
|
|
|
++# CONFIG_UHID is not set
|
|
|
++CONFIG_HID_GENERIC=y
|
|
|
++
|
|
|
++#
|
|
|
++# Special HID drivers
|
|
|
++#
|
|
|
++# CONFIG_HID_A4TECH is not set
|
|
|
++# CONFIG_HID_ACRUX is not set
|
|
|
++# CONFIG_HID_APPLE is not set
|
|
|
++# CONFIG_HID_AUREAL is not set
|
|
|
++# CONFIG_HID_BELKIN is not set
|
|
|
++# CONFIG_HID_CHERRY is not set
|
|
|
++# CONFIG_HID_CHICONY is not set
|
|
|
++# CONFIG_HID_CYPRESS is not set
|
|
|
++# CONFIG_HID_DRAGONRISE is not set
|
|
|
++# CONFIG_HID_EMS_FF is not set
|
|
|
++# CONFIG_HID_ELECOM is not set
|
|
|
++# CONFIG_HID_EZKEY is not set
|
|
|
++# CONFIG_HID_KEYTOUCH is not set
|
|
|
++# CONFIG_HID_KYE is not set
|
|
|
++# CONFIG_HID_UCLOGIC is not set
|
|
|
++# CONFIG_HID_WALTOP is not set
|
|
|
++# CONFIG_HID_GYRATION is not set
|
|
|
++# CONFIG_HID_ICADE is not set
|
|
|
++# CONFIG_HID_TWINHAN is not set
|
|
|
++# CONFIG_HID_KENSINGTON is not set
|
|
|
++# CONFIG_HID_LCPOWER is not set
|
|
|
++# CONFIG_HID_LENOVO_TPKBD is not set
|
|
|
++# CONFIG_HID_LOGITECH is not set
|
|
|
++# CONFIG_HID_MAGICMOUSE is not set
|
|
|
++# CONFIG_HID_MICROSOFT is not set
|
|
|
++# CONFIG_HID_MONTEREY is not set
|
|
|
++# CONFIG_HID_MULTITOUCH is not set
|
|
|
++# CONFIG_HID_ORTEK is not set
|
|
|
++# CONFIG_HID_PANTHERLORD is not set
|
|
|
++# CONFIG_HID_PETALYNX is not set
|
|
|
++# CONFIG_HID_PICOLCD is not set
|
|
|
++# CONFIG_HID_PRIMAX is not set
|
|
|
++# CONFIG_HID_SAITEK is not set
|
|
|
++# CONFIG_HID_SAMSUNG is not set
|
|
|
++# CONFIG_HID_SPEEDLINK is not set
|
|
|
++# CONFIG_HID_STEELSERIES is not set
|
|
|
++# CONFIG_HID_SUNPLUS is not set
|
|
|
++# CONFIG_HID_GREENASIA is not set
|
|
|
++# CONFIG_HID_SMARTJOYPLUS is not set
|
|
|
++# CONFIG_HID_TIVO is not set
|
|
|
++# CONFIG_HID_TOPSEED is not set
|
|
|
++# CONFIG_HID_THRUSTMASTER is not set
|
|
|
++# CONFIG_HID_XINMO is not set
|
|
|
++# CONFIG_HID_ZEROPLUS is not set
|
|
|
++# CONFIG_HID_ZYDACRON is not set
|
|
|
++# CONFIG_HID_SENSOR_HUB is not set
|
|
|
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
|
|
|
++# CONFIG_USB_SUPPORT is not set
|
|
|
++# CONFIG_MMC is not set
|
|
|
++# CONFIG_MEMSTICK is not set
|
|
|
++# CONFIG_NEW_LEDS is not set
|
|
|
++# CONFIG_ACCESSIBILITY is not set
|
|
|
++# CONFIG_RTC_CLASS is not set
|
|
|
++# CONFIG_AUXDISPLAY is not set
|
|
|
++# CONFIG_UIO is not set
|
|
|
++# CONFIG_VIRT_DRIVERS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Virtio drivers
|
|
|
++#
|
|
|
++# CONFIG_VIRTIO_MMIO is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Microsoft Hyper-V guest support
|
|
|
++#
|
|
|
++# CONFIG_STAGING is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Hardware Spinlock drivers
|
|
|
++#
|
|
|
++# CONFIG_MAILBOX is not set
|
|
|
++# CONFIG_IOMMU_SUPPORT is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Remoteproc drivers
|
|
|
++#
|
|
|
++
|
|
|
++#
|
|
|
++# Rpmsg drivers
|
|
|
++#
|
|
|
++# CONFIG_PM_DEVFREQ is not set
|
|
|
++# CONFIG_EXTCON is not set
|
|
|
++# CONFIG_MEMORY is not set
|
|
|
++# CONFIG_IIO is not set
|
|
|
++# CONFIG_PWM is not set
|
|
|
++# CONFIG_IPACK_BUS is not set
|
|
|
++# CONFIG_RESET_CONTROLLER is not set
|
|
|
++# CONFIG_FMC is not set
|
|
|
++
|
|
|
++#
|
|
|
++# PHY Subsystem
|
|
|
++#
|
|
|
++# CONFIG_GENERIC_PHY is not set
|
|
|
++# CONFIG_PHY_EXYNOS_MIPI_VIDEO is not set
|
|
|
++# CONFIG_POWERCAP is not set
|
|
|
++
|
|
|
++#
|
|
|
++# File systems
|
|
|
++#
|
|
|
++CONFIG_EXT2_FS=y
|
|
|
++# CONFIG_EXT2_FS_XATTR is not set
|
|
|
++# CONFIG_EXT2_FS_XIP is not set
|
|
|
++# CONFIG_EXT3_FS is not set
|
|
|
++# CONFIG_EXT4_FS is not set
|
|
|
++# CONFIG_REISERFS_FS is not set
|
|
|
++# CONFIG_JFS_FS is not set
|
|
|
++# CONFIG_XFS_FS is not set
|
|
|
++# CONFIG_GFS2_FS is not set
|
|
|
++# CONFIG_BTRFS_FS is not set
|
|
|
++# CONFIG_NILFS2_FS is not set
|
|
|
++# CONFIG_FS_POSIX_ACL is not set
|
|
|
++# CONFIG_FILE_LOCKING is not set
|
|
|
++# CONFIG_FSNOTIFY is not set
|
|
|
++# CONFIG_DNOTIFY is not set
|
|
|
++# CONFIG_INOTIFY_USER is not set
|
|
|
++# CONFIG_FANOTIFY is not set
|
|
|
++# CONFIG_QUOTA is not set
|
|
|
++# CONFIG_QUOTACTL is not set
|
|
|
++# CONFIG_AUTOFS4_FS is not set
|
|
|
++# CONFIG_FUSE_FS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Caches
|
|
|
++#
|
|
|
++# CONFIG_FSCACHE is not set
|
|
|
++
|
|
|
++#
|
|
|
++# CD-ROM/DVD Filesystems
|
|
|
++#
|
|
|
++# CONFIG_ISO9660_FS is not set
|
|
|
++# CONFIG_UDF_FS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# DOS/FAT/NT Filesystems
|
|
|
++#
|
|
|
++# CONFIG_MSDOS_FS is not set
|
|
|
++# CONFIG_VFAT_FS is not set
|
|
|
++# CONFIG_NTFS_FS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Pseudo filesystems
|
|
|
++#
|
|
|
++CONFIG_PROC_FS=y
|
|
|
++# CONFIG_PROC_KCORE is not set
|
|
|
++CONFIG_PROC_SYSCTL=y
|
|
|
++# CONFIG_PROC_PAGE_MONITOR is not set
|
|
|
++# CONFIG_SYSFS is not set
|
|
|
++CONFIG_TMPFS=y
|
|
|
++# CONFIG_TMPFS_POSIX_ACL is not set
|
|
|
++# CONFIG_TMPFS_XATTR is not set
|
|
|
++# CONFIG_HUGETLB_PAGE is not set
|
|
|
++# CONFIG_CONFIGFS_FS is not set
|
|
|
++# CONFIG_MISC_FILESYSTEMS is not set
|
|
|
++# CONFIG_NETWORK_FILESYSTEMS is not set
|
|
|
++# CONFIG_NLS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Kernel hacking
|
|
|
++#
|
|
|
++# CONFIG_CMDLINE_BOOL is not set
|
|
|
++# CONFIG_EARLY_PRINTK is not set
|
|
|
++
|
|
|
++#
|
|
|
++# printk and dmesg options
|
|
|
++#
|
|
|
++CONFIG_PRINTK_TIME=y
|
|
|
++CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
|
|
|
++# CONFIG_BOOT_PRINTK_DELAY is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Compile-time checks and compiler options
|
|
|
++#
|
|
|
++# CONFIG_DEBUG_INFO is not set
|
|
|
++CONFIG_ENABLE_WARN_DEPRECATED=y
|
|
|
++CONFIG_ENABLE_MUST_CHECK=y
|
|
|
++CONFIG_FRAME_WARN=2048
|
|
|
++# CONFIG_STRIP_ASM_SYMS is not set
|
|
|
++# CONFIG_READABLE_ASM is not set
|
|
|
++# CONFIG_UNUSED_SYMBOLS is not set
|
|
|
++# CONFIG_DEBUG_FS is not set
|
|
|
++# CONFIG_HEADERS_CHECK is not set
|
|
|
++CONFIG_DEBUG_SECTION_MISMATCH=y
|
|
|
++CONFIG_ARCH_WANT_FRAME_POINTERS=y
|
|
|
++# CONFIG_FRAME_POINTER is not set
|
|
|
++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
|
|
|
++# CONFIG_MAGIC_SYSRQ is not set
|
|
|
++CONFIG_DEBUG_KERNEL=y
|
|
|
++
|
|
|
++#
|
|
|
++# Memory Debugging
|
|
|
++#
|
|
|
++# CONFIG_DEBUG_PAGEALLOC is not set
|
|
|
++# CONFIG_DEBUG_OBJECTS is not set
|
|
|
++# CONFIG_DEBUG_STACK_USAGE is not set
|
|
|
++# CONFIG_DEBUG_VM is not set
|
|
|
++# CONFIG_DEBUG_MEMORY_INIT is not set
|
|
|
++# CONFIG_DEBUG_SHIRQ is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Debug Lockups and Hangs
|
|
|
++#
|
|
|
++# CONFIG_LOCKUP_DETECTOR is not set
|
|
|
++# CONFIG_DETECT_HUNG_TASK is not set
|
|
|
++# CONFIG_PANIC_ON_OOPS is not set
|
|
|
++CONFIG_PANIC_ON_OOPS_VALUE=0
|
|
|
++CONFIG_PANIC_TIMEOUT=0
|
|
|
++CONFIG_SCHED_DEBUG=y
|
|
|
++# CONFIG_SCHEDSTATS is not set
|
|
|
++# CONFIG_TIMER_STATS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Lock Debugging (spinlocks, mutexes, etc...)
|
|
|
++#
|
|
|
++# CONFIG_DEBUG_RT_MUTEXES is not set
|
|
|
++# CONFIG_RT_MUTEX_TESTER is not set
|
|
|
++# CONFIG_DEBUG_SPINLOCK is not set
|
|
|
++# CONFIG_DEBUG_MUTEXES is not set
|
|
|
++# CONFIG_DEBUG_ATOMIC_SLEEP is not set
|
|
|
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
|
|
|
++# CONFIG_DEBUG_KOBJECT is not set
|
|
|
++CONFIG_DEBUG_BUGVERBOSE=y
|
|
|
++# CONFIG_DEBUG_WRITECOUNT is not set
|
|
|
++# CONFIG_DEBUG_LIST is not set
|
|
|
++# CONFIG_DEBUG_SG is not set
|
|
|
++# CONFIG_DEBUG_NOTIFIERS is not set
|
|
|
++# CONFIG_DEBUG_CREDENTIALS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# RCU Debugging
|
|
|
++#
|
|
|
++# CONFIG_SPARSE_RCU_POINTER is not set
|
|
|
++# CONFIG_RCU_TORTURE_TEST is not set
|
|
|
++# CONFIG_RCU_TRACE is not set
|
|
|
++# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
|
|
|
++# CONFIG_NOTIFIER_ERROR_INJECTION is not set
|
|
|
++# CONFIG_FAULT_INJECTION is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Runtime Testing
|
|
|
++#
|
|
|
++# CONFIG_TEST_LIST_SORT is not set
|
|
|
++# CONFIG_BACKTRACE_SELF_TEST is not set
|
|
|
++# CONFIG_RBTREE_TEST is not set
|
|
|
++# CONFIG_ATOMIC64_SELFTEST is not set
|
|
|
++# CONFIG_TEST_STRING_HELPERS is not set
|
|
|
++# CONFIG_TEST_KSTRTOX is not set
|
|
|
++# CONFIG_SAMPLES is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Security options
|
|
|
++#
|
|
|
++# CONFIG_KEYS is not set
|
|
|
++# CONFIG_SECURITY_DMESG_RESTRICT is not set
|
|
|
++# CONFIG_SECURITYFS is not set
|
|
|
++CONFIG_DEFAULT_SECURITY_DAC=y
|
|
|
++CONFIG_DEFAULT_SECURITY=""
|
|
|
++CONFIG_CRYPTO=y
|
|
|
++
|
|
|
++#
|
|
|
++# Crypto core or helper
|
|
|
++#
|
|
|
++CONFIG_CRYPTO_ALGAPI=y
|
|
|
++CONFIG_CRYPTO_ALGAPI2=y
|
|
|
++# CONFIG_CRYPTO_MANAGER is not set
|
|
|
++# CONFIG_CRYPTO_MANAGER2 is not set
|
|
|
++# CONFIG_CRYPTO_USER is not set
|
|
|
++# CONFIG_CRYPTO_GF128MUL is not set
|
|
|
++# CONFIG_CRYPTO_NULL is not set
|
|
|
++# CONFIG_CRYPTO_CRYPTD is not set
|
|
|
++# CONFIG_CRYPTO_AUTHENC is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Authenticated Encryption with Associated Data
|
|
|
++#
|
|
|
++# CONFIG_CRYPTO_CCM is not set
|
|
|
++# CONFIG_CRYPTO_GCM is not set
|
|
|
++# CONFIG_CRYPTO_SEQIV is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Block modes
|
|
|
++#
|
|
|
++# CONFIG_CRYPTO_CBC is not set
|
|
|
++# CONFIG_CRYPTO_CTR is not set
|
|
|
++# CONFIG_CRYPTO_CTS is not set
|
|
|
++# CONFIG_CRYPTO_ECB is not set
|
|
|
++# CONFIG_CRYPTO_LRW is not set
|
|
|
++# CONFIG_CRYPTO_PCBC is not set
|
|
|
++# CONFIG_CRYPTO_XTS is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Hash modes
|
|
|
++#
|
|
|
++# CONFIG_CRYPTO_CMAC is not set
|
|
|
++# CONFIG_CRYPTO_HMAC is not set
|
|
|
++# CONFIG_CRYPTO_XCBC is not set
|
|
|
++# CONFIG_CRYPTO_VMAC is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Digest
|
|
|
++#
|
|
|
++# CONFIG_CRYPTO_CRC32C is not set
|
|
|
++# CONFIG_CRYPTO_CRC32 is not set
|
|
|
++# CONFIG_CRYPTO_CRCT10DIF is not set
|
|
|
++# CONFIG_CRYPTO_GHASH is not set
|
|
|
++# CONFIG_CRYPTO_MD4 is not set
|
|
|
++# CONFIG_CRYPTO_MD5 is not set
|
|
|
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
|
|
|
++# CONFIG_CRYPTO_RMD128 is not set
|
|
|
++# CONFIG_CRYPTO_RMD160 is not set
|
|
|
++# CONFIG_CRYPTO_RMD256 is not set
|
|
|
++# CONFIG_CRYPTO_RMD320 is not set
|
|
|
++# CONFIG_CRYPTO_SHA1 is not set
|
|
|
++# CONFIG_CRYPTO_SHA256 is not set
|
|
|
++# CONFIG_CRYPTO_SHA512 is not set
|
|
|
++# CONFIG_CRYPTO_TGR192 is not set
|
|
|
++# CONFIG_CRYPTO_WP512 is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Ciphers
|
|
|
++#
|
|
|
++CONFIG_CRYPTO_AES=y
|
|
|
++# CONFIG_CRYPTO_ANUBIS is not set
|
|
|
++# CONFIG_CRYPTO_ARC4 is not set
|
|
|
++# CONFIG_CRYPTO_BLOWFISH is not set
|
|
|
++# CONFIG_CRYPTO_CAMELLIA is not set
|
|
|
++# CONFIG_CRYPTO_CAST5 is not set
|
|
|
++# CONFIG_CRYPTO_CAST6 is not set
|
|
|
++# CONFIG_CRYPTO_DES is not set
|
|
|
++# CONFIG_CRYPTO_FCRYPT is not set
|
|
|
++# CONFIG_CRYPTO_KHAZAD is not set
|
|
|
++# CONFIG_CRYPTO_SALSA20 is not set
|
|
|
++# CONFIG_CRYPTO_SEED is not set
|
|
|
++# CONFIG_CRYPTO_SERPENT is not set
|
|
|
++# CONFIG_CRYPTO_TEA is not set
|
|
|
++# CONFIG_CRYPTO_TWOFISH is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Compression
|
|
|
++#
|
|
|
++# CONFIG_CRYPTO_DEFLATE is not set
|
|
|
++# CONFIG_CRYPTO_ZLIB is not set
|
|
|
++# CONFIG_CRYPTO_LZO is not set
|
|
|
++# CONFIG_CRYPTO_LZ4 is not set
|
|
|
++# CONFIG_CRYPTO_LZ4HC is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Random Number Generation
|
|
|
++#
|
|
|
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
|
|
++# CONFIG_CRYPTO_USER_API_HASH is not set
|
|
|
++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
|
|
|
++# CONFIG_CRYPTO_HW is not set
|
|
|
++# CONFIG_BINARY_PRINTF is not set
|
|
|
++
|
|
|
++#
|
|
|
++# Library routines
|
|
|
++#
|
|
|
++CONFIG_BITREVERSE=y
|
|
|
++CONFIG_GENERIC_STRNCPY_FROM_USER=y
|
|
|
++CONFIG_GENERIC_STRNLEN_USER=y
|
|
|
++CONFIG_GENERIC_NET_UTILS=y
|
|
|
++CONFIG_GENERIC_IO=y
|
|
|
++# CONFIG_CRC_CCITT is not set
|
|
|
++# CONFIG_CRC16 is not set
|
|
|
++# CONFIG_CRC_T10DIF is not set
|
|
|
++# CONFIG_CRC_ITU_T is not set
|
|
|
++CONFIG_CRC32=y
|
|
|
++# CONFIG_CRC32_SELFTEST is not set
|
|
|
++CONFIG_CRC32_SLICEBY8=y
|
|
|
++# CONFIG_CRC32_SLICEBY4 is not set
|
|
|
++# CONFIG_CRC32_SARWATE is not set
|
|
|
++# CONFIG_CRC32_BIT is not set
|
|
|
++# CONFIG_CRC7 is not set
|
|
|
++# CONFIG_LIBCRC32C is not set
|
|
|
++# CONFIG_CRC8 is not set
|
|
|
++# CONFIG_RANDOM32_SELFTEST is not set
|
|
|
++# CONFIG_XZ_DEC is not set
|
|
|
++# CONFIG_XZ_DEC_BCJ is not set
|
|
|
++CONFIG_HAS_IOMEM=y
|
|
|
++CONFIG_HAS_IOPORT=y
|
|
|
++CONFIG_NLATTR=y
|
|
|
++CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
|
|
|
++# CONFIG_AVERAGE is not set
|
|
|
++# CONFIG_CORDIC is not set
|
|
|
++# CONFIG_DDR is not set
+diff -Nur linux-4.6.2/arch/riscv/.gitignore linux-4.6.2.riscv/arch/riscv/.gitignore
+--- linux-4.6.2/arch/riscv/.gitignore 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/.gitignore 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1,35 @@
++# Now un-ignore all files.
|
|
|
++!*
|
|
|
++
|
|
|
++# But then re-ignore the files listed in the Linux .gitignore
|
|
|
++# Normal rules
|
|
|
++#
|
|
|
++.*
|
|
|
++*.o
|
|
|
++*.o.*
|
|
|
++*.a
|
|
|
++*.s
|
|
|
++*.ko
|
|
|
++*.so
|
|
|
++*.so.dbg
|
|
|
++*.mod.c
|
|
|
++*.i
|
|
|
++*.lst
|
|
|
++*.symtypes
|
|
|
++*.order
|
|
|
++modules.builtin
|
|
|
++*.elf
|
|
|
++*.bin
|
|
|
++*.gz
|
|
|
++*.bz2
|
|
|
++*.lzma
|
|
|
++*.xz
|
|
|
++*.lzo
|
|
|
++*.patch
|
|
|
++*.gcno
|
|
|
++
|
|
|
++include/generated
|
|
|
++kernel/vmlinux.lds
|
|
|
++
|
|
|
++# Then reinclude .gitignore.
|
|
|
++!.gitignore
+diff -Nur linux-4.6.2/arch/riscv/include/asm/asm.h linux-4.6.2.riscv/arch/riscv/include/asm/asm.h
+--- linux-4.6.2/arch/riscv/include/asm/asm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/asm.h 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1,51 @@
++#ifndef _ASM_RISCV_ASM_H
++#define _ASM_RISCV_ASM_H
++
++#ifdef __ASSEMBLY__
++#define __ASM_STR(x) x
++#else
++#define __ASM_STR(x) #x
++#endif
++
++#if __riscv_xlen == 64
++#define __REG_SEL(a,b) __ASM_STR(a)
++#elif __riscv_xlen == 32
++#define __REG_SEL(a,b) __ASM_STR(b)
++#else
++#error "Unexpected __riscv_xlen"
++#endif
++
++#define REG_L __REG_SEL(ld, lw)
++#define REG_S __REG_SEL(sd, sw)
++#define SZREG __REG_SEL(8, 4)
++#define LGREG __REG_SEL(3, 2)
++
++#if __SIZEOF_POINTER__ == 8
++#define __PTR_SEL(a,b) __ASM_STR(a)
++#elif __SIZEOF_POINTER__ == 4
++#define __PTR_SEL(a,b) __ASM_STR(b)
++#else
++#error "Unexpected __SIZEOF_POINTER__"
++#endif
++
++#define PTR __PTR_SEL(.dword, .word)
++#define SZPTR __PTR_SEL(8, 4)
++#define LGPTR __PTR_SEL(3, 2)
++
++#if (__SIZEOF_INT__ == 4)
++#define INT __ASM_STR(.word)
++#define SZINT __ASM_STR(4)
++#define LGINT __ASM_STR(2)
++#else
++#error "Unexpected __SIZEOF_INT__"
++#endif
++
++#if (__SIZEOF_SHORT__ == 2)
++#define SHORT __ASM_STR(.half)
++#define SZSHORT __ASM_STR(2)
++#define LGSHORT __ASM_STR(1)
++#else
++#error "Unexpected __SIZEOF_SHORT__"
++#endif
++
++#endif /* _ASM_RISCV_ASM_H */
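
Editorial illustration, not part of the patch: in the non-__ASSEMBLY__ branch the selection macros above expand to string literals, so they can also be spliced into inline asm from C. The helper below is a minimal sketch assuming a kernel translation unit with this <asm/asm.h> on the include path; load_native_word is a hypothetical name.

#include <asm/asm.h>

/* REG_L is the string "ld" on rv64 and "lw" on rv32, so the same
 * inline asm assembles to the natural load width for the build. */
static inline unsigned long load_native_word(const unsigned long *p)
{
        unsigned long val;

        __asm__ __volatile__ (REG_L " %0, 0(%1)"
                              : "=r" (val)
                              : "r" (p)
                              : "memory");
        return val;
}
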
+diff -Nur linux-4.6.2/arch/riscv/include/asm/asm-offsets.h linux-4.6.2.riscv/arch/riscv/include/asm/asm-offsets.h
+--- linux-4.6.2/arch/riscv/include/asm/asm-offsets.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/asm-offsets.h 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1 @@
++#include <generated/asm-offsets.h>
+diff -Nur linux-4.6.2/arch/riscv/include/asm/atomic64.h linux-4.6.2.riscv/arch/riscv/include/asm/atomic64.h
+--- linux-4.6.2/arch/riscv/include/asm/atomic64.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/atomic64.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,287 @@
++#ifndef _ASM_RISCV_ATOMIC64_H
|
|
|
++#define _ASM_RISCV_ATOMIC64_H
|
|
|
++
|
|
|
++#ifdef CONFIG_GENERIC_ATOMIC64
|
|
|
++#include <asm-generic/atomic64.h>
|
|
|
++#else /* !CONFIG_GENERIC_ATOMIC64 */
|
|
|
++
|
|
|
++#include <linux/types.h>
|
|
|
++
|
|
|
++#define ATOMIC64_INIT(i) { (i) }
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_read - read atomic64 variable
|
|
|
++ * @v: pointer of type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically reads the value of @v.
|
|
|
++ */
|
|
|
++static inline s64 atomic64_read(const atomic64_t *v)
|
|
|
++{
|
|
|
++ return *((volatile long *)(&(v->counter)));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_set - set atomic64 variable
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ * @i: required value
|
|
|
++ *
|
|
|
++ * Atomically sets the value of @v to @i.
|
|
|
++ */
|
|
|
++static inline void atomic64_set(atomic64_t *v, s64 i)
|
|
|
++{
|
|
|
++ v->counter = i;
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_add - add integer to atomic64 variable
|
|
|
++ * @i: integer value to add
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically adds @i to @v.
|
|
|
++ */
|
|
|
++static inline void atomic64_add(s64 a, atomic64_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoadd.d zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (a));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_sub - subtract integer from atomic64 variable
|
|
|
++ * @i: integer value to subtract
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically subtracts @i from @v.
|
|
|
++ */
|
|
|
++static inline void atomic64_sub(s64 a, atomic64_t *v)
|
|
|
++{
|
|
|
++ atomic64_add(-a, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_add_return - add and return
|
|
|
++ * @i: integer value to add
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically adds @i to @v and returns @i + @v
|
|
|
++ */
|
|
|
++static inline s64 atomic64_add_return(s64 a, atomic64_t *v)
|
|
|
++{
|
|
|
++ register s64 c;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoadd.d %0, %2, %1"
|
|
|
++ : "=r" (c), "+A" (v->counter)
|
|
|
++ : "r" (a));
|
|
|
++ return (c + a);
|
|
|
++}
|
|
|
++
|
|
|
++static inline s64 atomic64_sub_return(s64 a, atomic64_t *v)
|
|
|
++{
|
|
|
++ return atomic64_add_return(-a, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_inc - increment atomic64 variable
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically increments @v by 1.
|
|
|
++ */
|
|
|
++static inline void atomic64_inc(atomic64_t *v)
|
|
|
++{
|
|
|
++ atomic64_add(1L, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_dec - decrement atomic64 variable
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically decrements @v by 1.
|
|
|
++ */
|
|
|
++static inline void atomic64_dec(atomic64_t *v)
|
|
|
++{
|
|
|
++ atomic64_add(-1L, v);
|
|
|
++}
|
|
|
++
|
|
|
++static inline s64 atomic64_inc_return(atomic64_t *v)
|
|
|
++{
|
|
|
++ return atomic64_add_return(1L, v);
|
|
|
++}
|
|
|
++
|
|
|
++static inline s64 atomic64_dec_return(atomic64_t *v)
|
|
|
++{
|
|
|
++ return atomic64_add_return(-1L, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_inc_and_test - increment and test
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically increments @v by 1
|
|
|
++ * and returns true if the result is zero, or false for all
|
|
|
++ * other cases.
|
|
|
++ */
|
|
|
++static inline int atomic64_inc_and_test(atomic64_t *v)
|
|
|
++{
|
|
|
++ return (atomic64_inc_return(v) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_dec_and_test - decrement and test
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically decrements @v by 1 and
|
|
|
++ * returns true if the result is 0, or false for all other
|
|
|
++ * cases.
|
|
|
++ */
|
|
|
++static inline int atomic64_dec_and_test(atomic64_t *v)
|
|
|
++{
|
|
|
++ return (atomic64_dec_return(v) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_sub_and_test - subtract value from variable and test result
|
|
|
++ * @a: integer value to subtract
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically subtracts @a from @v and returns
|
|
|
++ * true if the result is zero, or false for all
|
|
|
++ * other cases.
|
|
|
++ */
|
|
|
++static inline int atomic64_sub_and_test(s64 a, atomic64_t *v)
|
|
|
++{
|
|
|
++ return (atomic64_sub_return(a, v) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_add_negative - add and test if negative
|
|
|
++ * @a: integer value to add
|
|
|
++ * @v: pointer to type atomic64_t
|
|
|
++ *
|
|
|
++ * Atomically adds @a to @v and returns true
|
|
|
++ * if the result is negative, or false when
|
|
|
++ * result is greater than or equal to zero.
|
|
|
++ */
|
|
|
++static inline int atomic64_add_negative(s64 a, atomic64_t *v)
|
|
|
++{
|
|
|
++ return (atomic64_add_return(a, v) < 0);
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++static inline s64 atomic64_xchg(atomic64_t *v, s64 n)
|
|
|
++{
|
|
|
++ register s64 c;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoswap.d %0, %2, %1"
|
|
|
++ : "=r" (c), "+A" (v->counter)
|
|
|
++ : "r" (n));
|
|
|
++ return c;
|
|
|
++}
|
|
|
++
|
|
|
++static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
|
|
|
++{
|
|
|
++ return cmpxchg(&(v->counter), o, n);
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * atomic64_dec_if_positive - decrement by 1 if old value positive
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * The function returns the old value of *v minus 1, even if
|
|
|
++ * the atomic variable, v, was not decremented.
|
|
|
++ */
|
|
|
++static inline s64 atomic64_dec_if_positive(atomic64_t *v)
|
|
|
++{
|
|
|
++ register s64 prev, rc;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "0:"
|
|
|
++ "lr.d %0, %2\n"
|
|
|
++ "add %0, %0, -1\n"
|
|
|
++ "bltz %0, 1f\n"
|
|
|
++ "sc.w %1, %0, %2\n"
|
|
|
++ "bnez %1, 0b\n"
|
|
|
++ "1:"
|
|
|
++ : "=&r" (prev), "=r" (rc), "+A" (v->counter));
|
|
|
++ return prev;
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_add_unless - add unless the number is a given value
|
|
|
++ * @v: pointer of type atomic64_t
|
|
|
++ * @a: the amount to add to v...
|
|
|
++ * @u: ...unless v is equal to u.
|
|
|
++ *
|
|
|
++ * Atomically adds @a to @v, so long as it was not @u.
|
|
|
++ * Returns true if the addition occurred and false otherwise.
|
|
|
++ */
|
|
|
++static inline int atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
|
|
|
++{
|
|
|
++ register s64 tmp;
|
|
|
++ register int rc = 1;
|
|
|
++
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "0:"
|
|
|
++ "lr.d %0, %2\n"
|
|
|
++ "beq %0, %z4, 1f\n"
|
|
|
++ "add %0, %0, %3\n"
|
|
|
++ "sc.d %1, %0, %2\n"
|
|
|
++ "bnez %1, 0b\n"
|
|
|
++ "1:"
|
|
|
++ : "=&r" (tmp), "=&r" (rc), "+A" (v->counter)
|
|
|
++ : "rI" (a), "rJ" (u));
|
|
|
++ return !rc;
|
|
|
++}
|
|
|
++
|
|
|
++static inline int atomic64_inc_not_zero(atomic64_t *v)
|
|
|
++{
|
|
|
++ return atomic64_add_unless(v, 1, 0);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_and - Atomically clear bits in atomic variable
|
|
|
++ * @mask: Mask of the bits to be retained
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically retains the bits set in @mask from @v
|
|
|
++ */
|
|
|
++static inline void atomic64_and(s64 mask, atomic64_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoand.d zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (mask));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_or - Atomically set bits in atomic variable
|
|
|
++ * @mask: Mask of the bits to be set
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically sets the bits set in @mask in @v
|
|
|
++ */
|
|
|
++static inline void atomic64_or(s64 mask, atomic64_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoor.d zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (mask));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic64_xor - Atomically flips bits in atomic variable
|
|
|
++ * @mask: Mask of the bits to be flipped
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically flips the bits set in @mask in @v
|
|
|
++ */
|
|
|
++static inline void atomic64_xor(s64 mask, atomic64_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoxor.d zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (mask));
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* CONFIG_GENERIC_ATOMIC64 */
|
|
|
++
++#endif /* _ASM_RISCV_ATOMIC64_H */
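
Editorial illustration, not part of the patch: the add-unless primitive defined above is what makes lock-free bounded counters straightforward. The sketch assumes the usual <linux/atomic.h> wrapper pulls in this header; RESOURCE_LIMIT, resource_get and resource_put are hypothetical names.

#include <linux/atomic.h>
#include <linux/types.h>

#define RESOURCE_LIMIT 1024

static atomic64_t resource_count = ATOMIC64_INIT(0);

/* Take one slot; returns true unless the counter already hit the limit. */
static bool resource_get(void)
{
        return atomic64_add_unless(&resource_count, 1, RESOURCE_LIMIT);
}

/* Release a previously taken slot. */
static void resource_put(void)
{
        atomic64_dec(&resource_count);
}
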
+diff -Nur linux-4.6.2/arch/riscv/include/asm/atomic.h linux-4.6.2.riscv/arch/riscv/include/asm/atomic.h
+--- linux-4.6.2/arch/riscv/include/asm/atomic.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/atomic.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,276 @@
++#ifndef _ASM_RISCV_ATOMIC_H
|
|
|
++#define _ASM_RISCV_ATOMIC_H
|
|
|
++
|
|
|
++#ifdef CONFIG_RV_ATOMIC
|
|
|
++
|
|
|
++#include <asm/cmpxchg.h>
|
|
|
++#include <asm/barrier.h>
|
|
|
++
|
|
|
++#define ATOMIC_INIT(i) { (i) }
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_read - read atomic variable
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically reads the value of @v.
|
|
|
++ */
|
|
|
++static inline int atomic_read(const atomic_t *v)
|
|
|
++{
|
|
|
++ return *((volatile int *)(&(v->counter)));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_set - set atomic variable
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ * @i: required value
|
|
|
++ *
|
|
|
++ * Atomically sets the value of @v to @i.
|
|
|
++ */
|
|
|
++static inline void atomic_set(atomic_t *v, int i)
|
|
|
++{
|
|
|
++ v->counter = i;
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_add - add integer to atomic variable
|
|
|
++ * @i: integer value to add
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically adds @i to @v.
|
|
|
++ */
|
|
|
++static inline void atomic_add(int i, atomic_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoadd.w zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (i));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_sub - subtract integer from atomic variable
|
|
|
++ * @i: integer value to subtract
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically subtracts @i from @v.
|
|
|
++ */
|
|
|
++static inline void atomic_sub(int i, atomic_t *v)
|
|
|
++{
|
|
|
++ atomic_add(-i, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_add_return - add integer to atomic variable
|
|
|
++ * @i: integer value to add
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically adds @i to @v and returns the result
|
|
|
++ */
|
|
|
++static inline int atomic_add_return(int i, atomic_t *v)
|
|
|
++{
|
|
|
++ register int c;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoadd.w %0, %2, %1"
|
|
|
++ : "=r" (c), "+A" (v->counter)
|
|
|
++ : "r" (i));
|
|
|
++ return (c + i);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_sub_return - subtract integer from atomic variable
|
|
|
++ * @i: integer value to subtract
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically subtracts @i from @v and returns the result
|
|
|
++ */
|
|
|
++static inline int atomic_sub_return(int i, atomic_t *v)
|
|
|
++{
|
|
|
++ return atomic_add_return(-i, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_inc - increment atomic variable
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically increments @v by 1.
|
|
|
++ */
|
|
|
++static inline void atomic_inc(atomic_t *v)
|
|
|
++{
|
|
|
++ atomic_add(1, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_dec - decrement atomic variable
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically decrements @v by 1.
|
|
|
++ */
|
|
|
++static inline void atomic_dec(atomic_t *v)
|
|
|
++{
|
|
|
++ atomic_add(-1, v);
|
|
|
++}
|
|
|
++
|
|
|
++static inline int atomic_inc_return(atomic_t *v)
|
|
|
++{
|
|
|
++ return atomic_add_return(1, v);
|
|
|
++}
|
|
|
++
|
|
|
++static inline int atomic_dec_return(atomic_t *v)
|
|
|
++{
|
|
|
++ return atomic_sub_return(1, v);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_sub_and_test - subtract value from variable and test result
|
|
|
++ * @i: integer value to subtract
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically subtracts @i from @v and returns
|
|
|
++ * true if the result is zero, or false for all
|
|
|
++ * other cases.
|
|
|
++ */
|
|
|
++static inline int atomic_sub_and_test(int i, atomic_t *v)
|
|
|
++{
|
|
|
++ return (atomic_sub_return(i, v) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_inc_and_test - increment and test
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically increments @v by 1
|
|
|
++ * and returns true if the result is zero, or false for all
|
|
|
++ * other cases.
|
|
|
++ */
|
|
|
++static inline int atomic_inc_and_test(atomic_t *v)
|
|
|
++{
|
|
|
++ return (atomic_inc_return(v) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_dec_and_test - decrement and test
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically decrements @v by 1 and
|
|
|
++ * returns true if the result is 0, or false for all other
|
|
|
++ * cases.
|
|
|
++ */
|
|
|
++static inline int atomic_dec_and_test(atomic_t *v)
|
|
|
++{
|
|
|
++ return (atomic_dec_return(v) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_add_negative - add and test if negative
|
|
|
++ * @i: integer value to add
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically adds @i to @v and returns true
|
|
|
++ * if the result is negative, or false when
|
|
|
++ * result is greater than or equal to zero.
|
|
|
++ */
|
|
|
++static inline int atomic_add_negative(int i, atomic_t *v)
|
|
|
++{
|
|
|
++ return (atomic_add_return(i, v) < 0);
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++static inline int atomic_xchg(atomic_t *v, int n)
|
|
|
++{
|
|
|
++ register int c;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoswap.w %0, %2, %1"
|
|
|
++ : "=r" (c), "+A" (v->counter)
|
|
|
++ : "r" (n));
|
|
|
++ return c;
|
|
|
++}
|
|
|
++
|
|
|
++static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
|
|
|
++{
|
|
|
++ return cmpxchg(&(v->counter), o, n);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * __atomic_add_unless - add unless the number is already a given value
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ * @a: the amount to add to v...
|
|
|
++ * @u: ...unless v is equal to u.
|
|
|
++ *
|
|
|
++ * Atomically adds @a to @v, so long as @v was not already @u.
|
|
|
++ * Returns the old value of @v.
|
|
|
++ */
|
|
|
++static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
|
++{
|
|
|
++ register int prev, rc;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "0:"
|
|
|
++ "lr.w %0, %2\n"
|
|
|
++ "beq %0, %4, 1f\n"
|
|
|
++ "add %1, %0, %3\n"
|
|
|
++ "sc.w %1, %1, %2\n"
|
|
|
++ "bnez %1, 0b\n"
|
|
|
++ "1:"
|
|
|
++ : "=&r" (prev), "=&r" (rc), "+A" (v->counter)
|
|
|
++ : "r" (a), "r" (u));
|
|
|
++ return prev;
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_and - Atomically clear bits in atomic variable
|
|
|
++ * @mask: Mask of the bits to be retained
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically retains the bits set in @mask from @v
|
|
|
++ */
|
|
|
++static inline void atomic_and(unsigned int mask, atomic_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoand.w zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (mask));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_or - Atomically set bits in atomic variable
|
|
|
++ * @mask: Mask of the bits to be set
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically sets the bits set in @mask in @v
|
|
|
++ */
|
|
|
++static inline void atomic_or(unsigned int mask, atomic_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoor.w zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (mask));
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * atomic_xor - Atomically flips bits in atomic variable
|
|
|
++ * @mask: Mask of the bits to be flipped
|
|
|
++ * @v: pointer of type atomic_t
|
|
|
++ *
|
|
|
++ * Atomically flips the bits set in @mask in @v
|
|
|
++ */
|
|
|
++static inline void atomic_xor(unsigned int mask, atomic_t *v)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoxor.w zero, %1, %0"
|
|
|
++ : "+A" (v->counter)
|
|
|
++ : "r" (mask));
|
|
|
++}
|
|
|
++
|
|
|
++/* Assume that atomic operations are already serializing */
|
|
|
++#define smp_mb__before_atomic_dec() barrier()
|
|
|
++#define smp_mb__after_atomic_dec() barrier()
|
|
|
++#define smp_mb__before_atomic_inc() barrier()
|
|
|
++#define smp_mb__after_atomic_inc() barrier()
|
|
|
++
|
|
|
++#else /* !CONFIG_RV_ATOMIC */
|
|
|
++
|
|
|
++#include <asm-generic/atomic.h>
|
|
|
++
|
|
|
++#endif /* CONFIG_RV_ATOMIC */
|
|
|
++
|
|
|
++#include <asm/atomic64.h>
|
|
|
++
++#endif /* _ASM_RISCV_ATOMIC_H */
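
Editorial illustration, not part of the patch: the classic reference-count pattern these primitives are meant to support, built on atomic_inc() and atomic_dec_and_test(). struct session and its helpers are hypothetical.

#include <linux/atomic.h>
#include <linux/slab.h>

struct session {
        atomic_t refcnt;
        /* ... payload ... */
};

static void session_get(struct session *s)
{
        atomic_inc(&s->refcnt);
}

static void session_put(struct session *s)
{
        /* atomic_dec_and_test() returns true only for the final reference. */
        if (atomic_dec_and_test(&s->refcnt))
                kfree(s);
}
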
+diff -Nur linux-4.6.2/arch/riscv/include/asm/barrier.h linux-4.6.2.riscv/arch/riscv/include/asm/barrier.h
+--- linux-4.6.2/arch/riscv/include/asm/barrier.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/barrier.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,14 @@
++#ifndef _ASM_RISCV_BARRIER_H
|
|
|
++#define _ASM_RISCV_BARRIER_H
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++#define nop() __asm__ __volatile__ ("nop")
|
|
|
++
|
|
|
++#define mb() __asm__ __volatile__ ("fence" : : : "memory")
|
|
|
++
|
|
|
++#include <asm-generic/barrier.h>
|
|
|
++
|
|
|
++#endif /* __ASSEMBLY__ */
|
|
|
++
++#endif /* _ASM_RISCV_BARRIER_H */
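
Editorial illustration, not part of the patch: mb() above emits a single RISC-V fence acting as a full barrier. A deliberately simplified producer/consumer sketch (ignoring the READ_ONCE/WRITE_ONCE annotations real kernel code would add) shows the pairing it is meant for; publish and consume are hypothetical names.

#include <asm/barrier.h>

static int shared_data;
static int data_ready;

static void publish(int value)
{
        shared_data = value;
        mb();           /* order the data store before the flag store */
        data_ready = 1;
}

static int consume(int *out)
{
        if (!data_ready)
                return 0;
        mb();           /* order the flag load before the data load */
        *out = shared_data;
        return 1;
}
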
+diff -Nur linux-4.6.2/arch/riscv/include/asm/bitops.h linux-4.6.2.riscv/arch/riscv/include/asm/bitops.h
+--- linux-4.6.2/arch/riscv/include/asm/bitops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/bitops.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,271 @@
++#ifndef _ASM_RISCV_BITOPS_H
|
|
|
++#define _ASM_RISCV_BITOPS_H
|
|
|
++
|
|
|
++#ifndef _LINUX_BITOPS_H
|
|
|
++#error "Only <linux/bitops.h> can be included directly"
|
|
|
++#endif /* _LINUX_BITOPS_H */
|
|
|
++
|
|
|
++#ifdef __KERNEL__
|
|
|
++
|
|
|
++#include <linux/compiler.h>
|
|
|
++#include <linux/irqflags.h>
|
|
|
++#include <asm/barrier.h>
|
|
|
++#include <asm/bitsperlong.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_RV_ATOMIC
|
|
|
++
|
|
|
++#ifndef smp_mb__before_clear_bit
|
|
|
++#define smp_mb__before_clear_bit() smp_mb()
|
|
|
++#define smp_mb__after_clear_bit() smp_mb()
|
|
|
++#endif /* smp_mb__before_clear_bit */
|
|
|
++
|
|
|
++/**
|
|
|
++ * __ffs - find first bit in word.
|
|
|
++ * @word: The word to search
|
|
|
++ *
|
|
|
++ * Undefined if no bit exists, so code should check against 0 first.
|
|
|
++ */
|
|
|
++/*
|
|
|
++static __always_inline unsigned long __ffs(unsigned long word)
|
|
|
++{
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++*/
|
|
|
++#include <asm-generic/bitops/__ffs.h>
|
|
|
++
|
|
|
++#include <asm-generic/bitops/ffz.h>
|
|
|
++
|
|
|
++/**
|
|
|
++ * fls - find last (most-significant) bit set
|
|
|
++ * @x: the word to search
|
|
|
++ *
|
|
|
++ * This is defined the same way as ffs.
|
|
|
++ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
|
|
|
++ */
|
|
|
++/*
|
|
|
++static __always_inline int fls(int x)
|
|
|
++{
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++*/
|
|
|
++#include <asm-generic/bitops/fls.h>
|
|
|
++
|
|
|
++/**
|
|
|
++ * __fls - find last (most-significant) set bit in a long word
|
|
|
++ * @word: the word to search
|
|
|
++ *
|
|
|
++ * Undefined if no set bit exists, so code should check against 0 first.
|
|
|
++ */
|
|
|
++/*
|
|
|
++static __always_inline unsigned long __fls(unsigned long word)
|
|
|
++{
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++*/
|
|
|
++#include <asm-generic/bitops/__fls.h>
|
|
|
++
|
|
|
++#include <asm-generic/bitops/fls64.h>
|
|
|
++#include <asm-generic/bitops/find.h>
|
|
|
++#include <asm-generic/bitops/sched.h>
|
|
|
++
|
|
|
++/**
|
|
|
++ * ffs - find first bit set
|
|
|
++ * @x: the word to search
|
|
|
++ *
|
|
|
++ * This is defined the same way as
|
|
|
++ * the libc and compiler builtin ffs routines, therefore
|
|
|
++ * differs in spirit from the above ffz (man ffs).
|
|
|
++ */
|
|
|
++/*
|
|
|
++static __always_inline int ffs(int x)
|
|
|
++{
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++*/
|
|
|
++#include <asm-generic/bitops/ffs.h>
|
|
|
++
|
|
|
++#include <asm-generic/bitops/hweight.h>
|
|
|
++
|
|
|
++#if (BITS_PER_LONG == 64)
|
|
|
++#define __AMO(op) "amo" #op ".d"
|
|
|
++#elif (BITS_PER_LONG == 32)
|
|
|
++#define __AMO(op) "amo" #op ".w"
|
|
|
++#else
|
|
|
++#error "Unexpected BITS_PER_LONG"
|
|
|
++#endif
|
|
|
++
|
|
|
++#define __test_and_op_bit(op, mod, nr, addr) \
|
|
|
++({ \
|
|
|
++ unsigned long __res, __mask; \
|
|
|
++ __mask = BIT_MASK(nr); \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ __AMO(op) " %0, %2, %1" \
|
|
|
++ : "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \
|
|
|
++ : "r" (mod(__mask))); \
|
|
|
++ ((__res & __mask) != 0); \
|
|
|
++})
|
|
|
++
|
|
|
++#define __op_bit(op, mod, nr, addr) \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ __AMO(op) " zero, %1, %0" \
|
|
|
++ : "+A" (addr[BIT_WORD(nr)]) \
|
|
|
++ : "r" (mod(BIT_MASK(nr))))
|
|
|
++
|
|
|
++/* Bitmask modifiers */
|
|
|
++#define __NOP(x) (x)
|
|
|
++#define __NOT(x) (~(x))
|
|
|
++
|
|
|
++/**
|
|
|
++ * test_and_set_bit - Set a bit and return its old value
|
|
|
++ * @nr: Bit to set
|
|
|
++ * @addr: Address to count from
|
|
|
++ *
|
|
|
++ * This operation is atomic and cannot be reordered.
|
|
|
++ * It may be reordered on architectures other than x86.
|
|
|
++ * It also implies a memory barrier.
|
|
|
++ */
|
|
|
++static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ return __test_and_op_bit(or, __NOP, nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * test_and_clear_bit - Clear a bit and return its old value
|
|
|
++ * @nr: Bit to clear
|
|
|
++ * @addr: Address to count from
|
|
|
++ *
|
|
|
++ * This operation is atomic and cannot be reordered.
|
|
|
++ * It can be reordered on architectures other than x86.
|
|
|
++ * It also implies a memory barrier.
|
|
|
++ */
|
|
|
++static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ return __test_and_op_bit(and, __NOT, nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * test_and_change_bit - Change a bit and return its old value
|
|
|
++ * @nr: Bit to change
|
|
|
++ * @addr: Address to count from
|
|
|
++ *
|
|
|
++ * This operation is atomic and cannot be reordered.
|
|
|
++ * It also implies a memory barrier.
|
|
|
++ */
|
|
|
++static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ return __test_and_op_bit(xor, __NOP, nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * set_bit - Atomically set a bit in memory
|
|
|
++ * @nr: the bit to set
|
|
|
++ * @addr: the address to start counting from
|
|
|
++ *
|
|
|
++ * This function is atomic and may not be reordered. See __set_bit()
|
|
|
++ * if you do not require the atomic guarantees.
|
|
|
++ *
|
|
|
++ * Note: there are no guarantees that this function will not be reordered
|
|
|
++ * on non x86 architectures, so if you are writing portable code,
|
|
|
++ * make sure not to rely on its reordering guarantees.
|
|
|
++ *
|
|
|
++ * Note that @nr may be almost arbitrarily large; this function is not
|
|
|
++ * restricted to acting on a single-word quantity.
|
|
|
++ */
|
|
|
++static inline void set_bit(int nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ __op_bit(or, __NOP, nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * clear_bit - Clears a bit in memory
|
|
|
++ * @nr: Bit to clear
|
|
|
++ * @addr: Address to start counting from
|
|
|
++ *
|
|
|
++ * clear_bit() is atomic and may not be reordered. However, it does
|
|
|
++ * not contain a memory barrier, so if it is used for locking purposes,
|
|
|
++ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
|
|
|
++ * in order to ensure changes are visible on other processors.
|
|
|
++ */
|
|
|
++static inline void clear_bit(int nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ __op_bit(and, __NOT, nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * change_bit - Toggle a bit in memory
|
|
|
++ * @nr: Bit to change
|
|
|
++ * @addr: Address to start counting from
|
|
|
++ *
|
|
|
++ * change_bit() is atomic and may not be reordered. It may be
|
|
|
++ * reordered on other architectures than x86.
|
|
|
++ * Note that @nr may be almost arbitrarily large; this function is not
|
|
|
++ * restricted to acting on a single-word quantity.
|
|
|
++ */
|
|
|
++static inline void change_bit(int nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ __op_bit(xor, __NOP, nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * test_and_set_bit_lock - Set a bit and return its old value, for lock
|
|
|
++ * @nr: Bit to set
|
|
|
++ * @addr: Address to count from
|
|
|
++ *
|
|
|
++ * This operation is atomic and provides acquire barrier semantics.
|
|
|
++ * It can be used to implement bit locks.
|
|
|
++ */
|
|
|
++static inline int test_and_set_bit_lock(
|
|
|
++ unsigned long nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ return test_and_set_bit(nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * clear_bit_unlock - Clear a bit in memory, for unlock
|
|
|
++ * @nr: the bit to set
|
|
|
++ * @addr: the address to start counting from
|
|
|
++ *
|
|
|
++ * This operation is atomic and provides release barrier semantics.
|
|
|
++ */
|
|
|
++static inline void clear_bit_unlock(
|
|
|
++ unsigned long nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ clear_bit(nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * __clear_bit_unlock - Clear a bit in memory, for unlock
|
|
|
++ * @nr: the bit to set
|
|
|
++ * @addr: the address to start counting from
|
|
|
++ *
|
|
|
++ * This operation is like clear_bit_unlock, however it is not atomic.
|
|
|
++ * It does provide release barrier semantics so it can be used to unlock
|
|
|
++ * a bit lock, however it would only be used if no other CPU can modify
|
|
|
++ * any bits in the memory until the lock is released (a good example is
|
|
|
++ * if the bit lock itself protects access to the other bits in the word).
|
|
|
++ */
|
|
|
++static inline void __clear_bit_unlock(
|
|
|
++ unsigned long nr, volatile unsigned long *addr)
|
|
|
++{
|
|
|
++ clear_bit(nr, addr);
|
|
|
++}
|
|
|
++
|
|
|
++#undef __test_and_op_bit
|
|
|
++#undef __op_bit
|
|
|
++#undef __NOP
|
|
|
++#undef __NOT
|
|
|
++#undef __AMO
|
|
|
++
|
|
|
++#include <asm-generic/bitops/non-atomic.h>
|
|
|
++#include <asm-generic/bitops/le.h>
|
|
|
++#include <asm-generic/bitops/ext2-atomic.h>
|
|
|
++
|
|
|
++#else /* !CONFIG_RV_ATOMIC */
|
|
|
++
|
|
|
++#include <asm-generic/bitops.h>
|
|
|
++
|
|
|
++#endif /* CONFIG_RV_ATOMIC */
|
|
|
++
|
|
|
++#endif /* __KERNEL__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_BITOPS_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/bug.h linux-4.6.2.riscv/arch/riscv/include/asm/bug.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/bug.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/bug.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,67 @@
|
|
|
++#ifndef _ASM_RISCV_BUG_H
|
|
|
++#define _ASM_RISCV_BUG_H
|
|
|
++
|
|
|
++#include <linux/compiler.h>
|
|
|
++#include <linux/const.h>
|
|
|
++#include <linux/types.h>
|
|
|
++
|
|
|
++#include <asm/asm.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_GENERIC_BUG
|
|
|
++#define __BUG_INSN _AC(0x00100073,UL) /* sbreak */
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++typedef u32 bug_insn_t;
|
|
|
++
|
|
|
++#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
|
|
|
++#define __BUG_ENTRY_ADDR INT " 1b - 2b"
|
|
|
++#define __BUG_ENTRY_FILE INT " %0 - 2b"
|
|
|
++#else
|
|
|
++#define __BUG_ENTRY_ADDR PTR " 1b"
|
|
|
++#define __BUG_ENTRY_FILE PTR " %0"
|
|
|
++#endif
|
|
|
++
|
|
|
++#ifdef CONFIG_DEBUG_BUGVERBOSE
|
|
|
++#define __BUG_ENTRY \
|
|
|
++ __BUG_ENTRY_ADDR "\n\t" \
|
|
|
++ __BUG_ENTRY_FILE "\n\t" \
|
|
|
++ SHORT " %1"
|
|
|
++#else
|
|
|
++#define __BUG_ENTRY \
|
|
|
++ __BUG_ENTRY_ADDR
|
|
|
++#endif
|
|
|
++
|
|
|
++#define BUG() \
|
|
|
++do { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "1:\n\t" \
|
|
|
++ "sbreak\n" \
|
|
|
++ ".pushsection __bug_table,\"a\"\n\t" \
|
|
|
++ "2:\n\t" \
|
|
|
++ __BUG_ENTRY "\n\t" \
|
|
|
++ ".org 2b + %2\n\t" \
|
|
|
++ ".popsection" \
|
|
|
++ : \
|
|
|
++ : "i" (__FILE__), "i" (__LINE__), \
|
|
|
++ "i" (sizeof(struct bug_entry))); \
|
|
|
++ unreachable(); \
|
|
|
++} while (0)
|
|
|
++
|
|
|
++#define HAVE_ARCH_BUG
|
|
|
++#endif /* !__ASSEMBLY__ */
|
|
|
++#endif /* CONFIG_GENERIC_BUG */
|
|
|
++
|
|
|
++#include <asm-generic/bug.h>
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++struct pt_regs;
|
|
|
++struct task_struct;
|
|
|
++
|
|
|
++extern void die(struct pt_regs *regs, const char *str);
|
|
|
++extern void do_trap(struct pt_regs *regs, int signo, int code,
|
|
|
++ unsigned long addr, struct task_struct *tsk);
|
|
|
++
|
|
|
++#endif /* !__ASSEMBLY__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_BUG_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/cacheflush.h linux-4.6.2.riscv/arch/riscv/include/asm/cacheflush.h
+--- linux-4.6.2/arch/riscv/include/asm/cacheflush.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/cacheflush.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,26 @@
++#ifndef _ASM_RISCV_CACHEFLUSH_H
++#define _ASM_RISCV_CACHEFLUSH_H
++
++#include <asm-generic/cacheflush.h>
++
++#undef flush_icache_range
++#undef flush_icache_user_range
++
++static inline void local_flush_icache_all(void)
++{
++ asm volatile ("fence.i" ::: "memory");
++}
++
++#ifndef CONFIG_SMP
++
++#define flush_icache_range(start, end) local_flush_icache_all()
++#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
++
++#else /* CONFIG_SMP */
++
++#define flush_icache_range(start, end) sbi_remote_fence_i(0)
++#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
++
++#endif /* CONFIG_SMP */
++
++#endif /* _ASM_RISCV_CACHEFLUSH_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/cache.h linux-4.6.2.riscv/arch/riscv/include/asm/cache.h
+--- linux-4.6.2/arch/riscv/include/asm/cache.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/cache.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,12 @@
++#ifndef _ASM_RISCV_CACHE_H
++#define _ASM_RISCV_CACHE_H
++
++#if defined(CONFIG_CPU_RV_ROCKET)
++#define L1_CACHE_SHIFT 6
++#else
++#define L1_CACHE_SHIFT 5
++#endif
++
++#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++
++#endif /* _ASM_RISCV_CACHE_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/cmpxchg.h linux-4.6.2.riscv/arch/riscv/include/asm/cmpxchg.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/cmpxchg.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/cmpxchg.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,111 @@
|
|
|
++#ifndef _ASM_RISCV_CMPXCHG_H
|
|
|
++#define _ASM_RISCV_CMPXCHG_H
|
|
|
++
|
|
|
++#include <linux/bug.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_RV_ATOMIC
|
|
|
++
|
|
|
++#include <asm/barrier.h>
|
|
|
++
|
|
|
++#define __xchg(new, ptr, size) \
|
|
|
++({ \
|
|
|
++ __typeof__(ptr) __ptr = (ptr); \
|
|
|
++ __typeof__(new) __new = (new); \
|
|
|
++ __typeof__(*(ptr)) __ret; \
|
|
|
++ switch (size) { \
|
|
|
++ case 4: \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "amoswap.w %0, %2, %1" \
|
|
|
++ : "=r" (__ret), "+A" (*__ptr) \
|
|
|
++ : "r" (__new)); \
|
|
|
++ break; \
|
|
|
++ case 8: \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "amoswap.d %0, %2, %1" \
|
|
|
++ : "=r" (__ret), "+A" (*__ptr) \
|
|
|
++ : "r" (__new)); \
|
|
|
++ break; \
|
|
|
++ default: \
|
|
|
++ BUILD_BUG(); \
|
|
|
++ } \
|
|
|
++ __ret; \
|
|
|
++})
|
|
|
++
|
|
|
++#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr))))
|
|
|
++
|
|
|
++
|
|
|
++/*
|
|
|
++ * Atomic compare and exchange. Compare OLD with MEM, if identical,
|
|
|
++ * store NEW in MEM. Return the initial value in MEM. Success is
|
|
|
++ * indicated by comparing RETURN with OLD.
|
|
|
++ */
|
|
|
++#define __cmpxchg(ptr, old, new, size) \
|
|
|
++({ \
|
|
|
++ __typeof__(ptr) __ptr = (ptr); \
|
|
|
++ __typeof__(old) __old = (old); \
|
|
|
++ __typeof__(new) __new = (new); \
|
|
|
++ __typeof__(*(ptr)) __ret; \
|
|
|
++ register unsigned int __rc; \
|
|
|
++ switch (size) { \
|
|
|
++ case 4: \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "0:" \
|
|
|
++ "lr.w %0, %2\n" \
|
|
|
++ "bne %0, %z3, 1f\n" \
|
|
|
++ "sc.w %1, %z4, %2\n" \
|
|
|
++ "bnez %1, 0b\n" \
|
|
|
++ "1:" \
|
|
|
++ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
|
|
|
++ : "rJ" (__old), "rJ" (__new)); \
|
|
|
++ break; \
|
|
|
++ case 8: \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "0:" \
|
|
|
++ "lr.d %0, %2\n" \
|
|
|
++ "bne %0, %z3, 1f\n" \
|
|
|
++ "sc.d %1, %z4, %2\n" \
|
|
|
++ "bnez %1, 0b\n" \
|
|
|
++ "1:" \
|
|
|
++ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
|
|
|
++ : "rJ" (__old), "rJ" (__new)); \
|
|
|
++ break; \
|
|
|
++ default: \
|
|
|
++ BUILD_BUG(); \
|
|
|
++ } \
|
|
|
++ __ret; \
|
|
|
++})
|
|
|
++
|
|
|
++#define __cmpxchg_mb(ptr, old, new, size) \
|
|
|
++({ \
|
|
|
++ __typeof__(*(ptr)) __ret; \
|
|
|
++ smp_mb(); \
|
|
|
++ __ret = __cmpxchg((ptr), (old), (new), (size)); \
|
|
|
++ smp_mb(); \
|
|
|
++ __ret; \
|
|
|
++})
|
|
|
++
|
|
|
++#define cmpxchg(ptr, o, n) \
|
|
|
++ (__cmpxchg_mb((ptr), (o), (n), sizeof(*(ptr))))
|
|
|
++
|
|
|
++#define cmpxchg_local(ptr, o, n) \
|
|
|
++ (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
|
|
|
++
|
|
|
++#define cmpxchg64(ptr, o, n) \
|
|
|
++({ \
|
|
|
++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
|
|
|
++ cmpxchg((ptr), (o), (n)); \
|
|
|
++})
|
|
|
++
|
|
|
++#define cmpxchg64_local(ptr, o, n) \
|
|
|
++({ \
|
|
|
++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
|
|
|
++ cmpxchg_local((ptr), (o), (n)); \
|
|
|
++})
|
|
|
++
|
|
|
++#else /* !CONFIG_RV_ATOMIC */
|
|
|
++
|
|
|
++#include <asm-generic/cmpxchg.h>
|
|
|
++
|
|
|
++#endif /* CONFIG_RV_ATOMIC */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_CMPXCHG_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/csr.h linux-4.6.2.riscv/arch/riscv/include/asm/csr.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/csr.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/csr.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,137 @@
|
|
|
++#ifndef _ASM_RISCV_CSR_H
|
|
|
++#define _ASM_RISCV_CSR_H
|
|
|
++
|
|
|
++#include <linux/const.h>
|
|
|
++
|
|
|
++/* Status register flags */
|
|
|
++#define SR_IE _AC(0x00000002,UL) /* Interrupt Enable */
|
|
|
++#define SR_PIE _AC(0x00000020,UL) /* Previous IE */
|
|
|
++#define SR_PS _AC(0x00000100,UL) /* Previously Supervisor */
|
|
|
++#define SR_PUM _AC(0x00040000,UL) /* Protect User Memory */
|
|
|
++
|
|
|
++#define SR_FS _AC(0x00006000,UL) /* Floating-point Status */
|
|
|
++#define SR_FS_OFF _AC(0x00000000,UL)
|
|
|
++#define SR_FS_INITIAL _AC(0x00002000,UL)
|
|
|
++#define SR_FS_CLEAN _AC(0x00004000,UL)
|
|
|
++#define SR_FS_DIRTY _AC(0x00006000,UL)
|
|
|
++
|
|
|
++#define SR_XS _AC(0x00018000,UL) /* Extension Status */
|
|
|
++#define SR_XS_OFF _AC(0x00000000,UL)
|
|
|
++#define SR_XS_INITIAL _AC(0x00008000,UL)
|
|
|
++#define SR_XS_CLEAN _AC(0x00010000,UL)
|
|
|
++#define SR_XS_DIRTY _AC(0x00018000,UL)
|
|
|
++
|
|
|
++#ifndef CONFIG_64BIT
|
|
|
++#define SR_SD _AC(0x80000000,UL) /* FS/XS dirty */
|
|
|
++#else
|
|
|
++#define SR_SD _AC(0x8000000000000000,UL) /* FS/XS dirty */
|
|
|
++#endif
|
|
|
++
|
|
|
++/* Interrupt Enable and Interrupt Pending flags */
|
|
|
++#define SIE_SSIE _AC(0x00000002,UL) /* Software Interrupt Enable */
|
|
|
++#define SIE_STIE _AC(0x00000020,UL) /* Timer Interrupt Enable */
|
|
|
++
|
|
|
++#define EXC_INST_MISALIGNED 0
|
|
|
++#define EXC_INST_ACCESS 1
|
|
|
++#define EXC_BREAKPOINT 3
|
|
|
++#define EXC_LOAD_ACCESS 5
|
|
|
++#define EXC_STORE_ACCESS 7
|
|
|
++#define EXC_SYSCALL 8
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++#define CSR_ZIMM(val) \
|
|
|
++ (__builtin_constant_p(val) && ((unsigned long)(val) < 0x20))
|
|
|
++
|
|
|
++#define csr_swap(csr,val) \
|
|
|
++({ \
|
|
|
++ unsigned long __v = (unsigned long)(val); \
|
|
|
++ if (CSR_ZIMM(__v)) { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrrw %0, " #csr ", %1" \
|
|
|
++ : "=r" (__v) : "i" (__v)); \
|
|
|
++ } else { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrrw %0, " #csr ", %1" \
|
|
|
++ : "=r" (__v) : "r" (__v)); \
|
|
|
++ } \
|
|
|
++ __v; \
|
|
|
++})
|
|
|
++
|
|
|
++#define csr_read(csr) \
|
|
|
++({ \
|
|
|
++ register unsigned long __v; \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrr %0, " #csr : "=r" (__v)); \
|
|
|
++ __v; \
|
|
|
++})
|
|
|
++
|
|
|
++#define csr_write(csr,val) \
|
|
|
++({ \
|
|
|
++ unsigned long __v = (unsigned long)(val); \
|
|
|
++ if (CSR_ZIMM(__v)) { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrw " #csr ", %0" : : "i" (__v)); \
|
|
|
++ } else { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrw " #csr ", %0" : : "r" (__v)); \
|
|
|
++ } \
|
|
|
++})
|
|
|
++
|
|
|
++#define csr_read_set(csr,val) \
|
|
|
++({ \
|
|
|
++ unsigned long __v = (unsigned long)(val); \
|
|
|
++ if (CSR_ZIMM(__v)) { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrrs %0, " #csr ", %1" \
|
|
|
++ : "=r" (__v) : "i" (__v)); \
|
|
|
++ } else { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrrs %0, " #csr ", %1" \
|
|
|
++ : "=r" (__v) : "r" (__v)); \
|
|
|
++ } \
|
|
|
++ __v; \
|
|
|
++})
|
|
|
++
|
|
|
++#define csr_set(csr,val) \
|
|
|
++({ \
|
|
|
++ unsigned long __v = (unsigned long)(val); \
|
|
|
++ if (CSR_ZIMM(__v)) { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrs " #csr ", %0" : : "i" (__v)); \
|
|
|
++ } else { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrs " #csr ", %0" : : "r" (__v)); \
|
|
|
++ } \
|
|
|
++})
|
|
|
++
|
|
|
++#define csr_read_clear(csr,val) \
|
|
|
++({ \
|
|
|
++ unsigned long __v = (unsigned long)(val); \
|
|
|
++ if (CSR_ZIMM(__v)) { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrrc %0, " #csr ", %1" \
|
|
|
++ : "=r" (__v) : "i" (__v)); \
|
|
|
++ } else { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrrc %0, " #csr ", %1" \
|
|
|
++ : "=r" (__v) : "r" (__v)); \
|
|
|
++ } \
|
|
|
++ __v; \
|
|
|
++})
|
|
|
++
|
|
|
++#define csr_clear(csr,val) \
|
|
|
++({ \
|
|
|
++ unsigned long __v = (unsigned long)(val); \
|
|
|
++ if (CSR_ZIMM(__v)) { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrc " #csr ", %0" : : "i" (__v)); \
|
|
|
++ } else { \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "csrc " #csr ", %0" : : "r" (__v)); \
|
|
|
++ } \
|
|
|
++})
|
|
|
++
|
|
|
++#endif /* __ASSEMBLY__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_CSR_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/current.h linux-4.6.2.riscv/arch/riscv/include/asm/current.h
+--- linux-4.6.2/arch/riscv/include/asm/current.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/current.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,16 @@
++#ifndef _ASM_RISCV_CURRENT_H
++#define _ASM_RISCV_CURRENT_H
++
++#include <asm/csr.h>
++
++struct task_struct;
++
++static inline struct task_struct *get_current(void)
++{
++ register struct task_struct * tp asm("tp");
++ return tp;
++}
++
++#define current (get_current())
++
++#endif /* _ASM_RISCV_CURRENT_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/delay.h linux-4.6.2.riscv/arch/riscv/include/asm/delay.h
+--- linux-4.6.2/arch/riscv/include/asm/delay.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/delay.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,14 @@
++#ifndef _ASM_RISCV_DELAY_H
++#define _ASM_RISCV_DELAY_H
++
++extern unsigned long timebase;
++
++#define udelay udelay
++extern void udelay(unsigned long usecs);
++
++#define ndelay ndelay
++extern void ndelay(unsigned long nsecs);
++
++extern void __delay(unsigned long cycles);
++
++#endif /* _ASM_RISCV_DELAY_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/device.h linux-4.6.2.riscv/arch/riscv/include/asm/device.h
+--- linux-4.6.2/arch/riscv/include/asm/device.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/device.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,13 @@
++#ifndef _ASM_RISCV_DEVICE_H
++#define _ASM_RISCV_DEVICE_H
++
++#include <linux/sysfs.h>
++
++struct dev_archdata {
++ struct dma_map_ops *dma_ops;
++};
++
++struct pdev_archdata {
++};
++
++#endif /* _ASM_RISCV_DEVICE_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/dma-mapping.h linux-4.6.2.riscv/arch/riscv/include/asm/dma-mapping.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/dma-mapping.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/dma-mapping.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,63 @@
|
|
|
++/*
|
|
|
++ * Copyright (C) 2003-2004 Hewlett-Packard Co
|
|
|
++ * David Mosberger-Tang <davidm@hpl.hp.com>
|
|
|
++ * Copyright (C) 2012 ARM Ltd.
|
|
|
++ * Copyright (C) 2016 SiFive, Inc.
|
|
|
++ *
|
|
|
++ * This program is free software; you can redistribute it and/or modify
|
|
|
++ * it under the terms of the GNU General Public License version 2 as
|
|
|
++ * published by the Free Software Foundation.
|
|
|
++ *
|
|
|
++ * This program is distributed in the hope that it will be useful,
|
|
|
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
++ * GNU General Public License for more details.
|
|
|
++ *
|
|
|
++ * You should have received a copy of the GNU General Public License
|
|
|
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
++ */
|
|
|
++#ifndef __ASM_RISCV_DMA_MAPPING_H
|
|
|
++#define __ASM_RISCV_DMA_MAPPING_H
|
|
|
++
|
|
|
++#ifdef __KERNEL__
|
|
|
++
|
|
|
++/* Use ops->dma_mapping_error (if it exists) or assume success */
|
|
|
++// #undef DMA_ERROR_CODE
|
|
|
++
|
|
|
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|
|
++{
|
|
|
++ if (unlikely(dev->archdata.dma_ops))
|
|
|
++ return dev->archdata.dma_ops;
|
|
|
++ else
|
|
|
++ return &dma_noop_ops;
|
|
|
++}
|
|
|
++
|
|
|
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
|
|
++{
|
|
|
++ if (!dev->dma_mask)
|
|
|
++ return false;
|
|
|
++
|
|
|
++ return addr + size - 1 <= *dev->dma_mask;
|
|
|
++}
|
|
|
++
|
|
|
++static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
|
|
++{
|
|
|
++ return (dma_addr_t)paddr;
|
|
|
++}
|
|
|
++
|
|
|
++static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
|
|
|
++{
|
|
|
++ return (phys_addr_t)dev_addr;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir)
|
|
|
++{
|
|
|
++ /*
|
|
|
++ * RISC-V is cache-coherent, so this is mostly a no-op. However, we do need to
|
|
|
++ * ensure that dma_cache_sync() enforces order, hence the mb().
|
|
|
++ */
|
|
|
++ mb();
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* __KERNEL__ */
|
|
|
++#endif /* __ASM_RISCV_DMA_MAPPING_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/elf.h linux-4.6.2.riscv/arch/riscv/include/asm/elf.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/elf.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/elf.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,72 @@
|
|
|
++#ifndef _ASM_RISCV_ELF_H
|
|
|
++#define _ASM_RISCV_ELF_H
|
|
|
++
|
|
|
++#include <uapi/asm/elf.h>
|
|
|
++#include <asm/auxvec.h>
|
|
|
++#include <asm/byteorder.h>
|
|
|
++
|
|
|
++/* TODO: Move definition into include/uapi/linux/elf-em.h */
|
|
|
++#define EM_RISCV 0xF3
|
|
|
++
|
|
|
++/*
|
|
|
++ * These are used to set parameters in the core dumps.
|
|
|
++ */
|
|
|
++#define ELF_ARCH EM_RISCV
|
|
|
++
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#define ELF_CLASS ELFCLASS64
|
|
|
++#else
|
|
|
++#define ELF_CLASS ELFCLASS32
|
|
|
++#endif
|
|
|
++
|
|
|
++#if defined(__LITTLE_ENDIAN)
|
|
|
++#define ELF_DATA ELFDATA2LSB
|
|
|
++#elif defined(__BIG_ENDIAN)
|
|
|
++#define ELF_DATA ELFDATA2MSB
|
|
|
++#else
|
|
|
++#error "Unknown endianness"
|
|
|
++#endif
|
|
|
++
|
|
|
++/*
|
|
|
++ * This is used to ensure we don't load something for the wrong architecture.
|
|
|
++ */
|
|
|
++#define elf_check_arch(x) ((x)->e_machine == EM_RISCV)
|
|
|
++
|
|
|
++#define CORE_DUMP_USE_REGSET
|
|
|
++#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
|
|
|
++
|
|
|
++/*
|
|
|
++ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
|
|
|
++ * use of this is to invoke "./ld.so someprog" to test out a new version of
|
|
|
++ * the loader. We need to make sure that it is out of the way of the program
|
|
|
++ * that it will "exec", and that there is sufficient room for the brk.
|
|
|
++ */
|
|
|
++#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)
|
|
|
++
|
|
|
++/*
|
|
|
++ * This yields a mask that user programs can use to figure out what
|
|
|
++ * instruction set this CPU supports. This could be done in user space,
|
|
|
++ * but it's not easy, and we've already done it here.
|
|
|
++ */
|
|
|
++#define ELF_HWCAP (0)
|
|
|
++
|
|
|
++/*
|
|
|
++ * This yields a string that ld.so will use to load implementation
|
|
|
++ * specific libraries for optimization. This is more specific in
|
|
|
++ * intent than poking at uname or /proc/cpuinfo.
|
|
|
++ */
|
|
|
++#define ELF_PLATFORM (NULL)
|
|
|
++
|
|
|
++#define ARCH_DLINFO \
|
|
|
++do { \
|
|
|
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
|
|
|
++ (elf_addr_t)current->mm->context.vdso); \
|
|
|
++} while (0)
|
|
|
++
|
|
|
++
|
|
|
++#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
|
|
|
++struct linux_binprm;
|
|
|
++extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
|
|
++ int uses_interp);
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_ELF_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/io.h linux-4.6.2.riscv/arch/riscv/include/asm/io.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/io.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/io.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,63 @@
|
|
|
++#ifndef _ASM_RISCV_IO_H
|
|
|
++#define _ASM_RISCV_IO_H
|
|
|
++
|
|
|
++#include <asm-generic/io.h>
|
|
|
++
|
|
|
++#ifdef __KERNEL__
|
|
|
++
|
|
|
++#ifdef CONFIG_MMU
|
|
|
++
|
|
|
++extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
|
|
|
++
|
|
|
++/*
|
|
|
++ * ioremap_nocache - map bus memory into CPU space
|
|
|
++ * @offset: bus address of the memory
|
|
|
++ * @size: size of the resource to map
|
|
|
++ *
|
|
|
++ * ioremap_nocache performs a platform specific sequence of operations to
|
|
|
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
|
|
|
++ * writew/writel functions and the other mmio helpers. The returned
|
|
|
++ * address is not guaranteed to be usable directly as a virtual
|
|
|
++ * address.
|
|
|
++ *
|
|
|
++ * This version of ioremap ensures that the memory is marked uncachable
|
|
|
++ * on the CPU as well as honouring existing caching rules from things like
|
|
|
++ * the PCI bus. Note that there are other caches and buffers on many
|
|
|
++ * busses. In particular driver authors should read up on PCI writes.
|
|
|
++ *
|
|
|
++ * It's useful if some control registers are in such an area and
|
|
|
++ * write combining or read caching is not desirable.
|
|
|
++ *
|
|
|
++ * Must be freed with iounmap.
|
|
|
++ */
|
|
|
++static inline void __iomem *ioremap_nocache(
|
|
|
++ phys_addr_t offset, unsigned long size)
|
|
|
++{
|
|
|
++ return ioremap(offset, size);
|
|
|
++}
|
|
|
++
|
|
|
++/**
|
|
|
++ * ioremap_wc - map memory into CPU space write combined
|
|
|
++ * @offset: bus address of the memory
|
|
|
++ * @size: size of the resource to map
|
|
|
++ *
|
|
|
++ * This version of ioremap ensures that the memory is marked write combining.
|
|
|
++ * Write combining allows faster writes to some hardware devices.
|
|
|
++ *
|
|
|
++ * Must be freed with iounmap.
|
|
|
++ */
|
|
|
++static inline void __iomem *ioremap_wc(
|
|
|
++ phys_addr_t offset, unsigned long size)
|
|
|
++{
|
|
|
++ return ioremap(offset, size);
|
|
|
++}
|
|
|
++
|
|
|
++#define ioremap_wt ioremap_nocache
|
|
|
++
|
|
|
++extern void iounmap(void __iomem *addr);
|
|
|
++
|
|
|
++#endif /* CONFIG_MMU */
|
|
|
++
|
|
|
++#endif /* __KERNEL__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_IO_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/irqflags.h linux-4.6.2.riscv/arch/riscv/include/asm/irqflags.h
+--- linux-4.6.2/arch/riscv/include/asm/irqflags.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/irqflags.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,49 @@
++#ifndef _ASM_RISCV_IRQFLAGS_H
++#define _ASM_RISCV_IRQFLAGS_H
++
++#include <asm/processor.h>
++#include <asm/csr.h>
++
++/* read interrupt enabled status */
++static inline unsigned long arch_local_save_flags(void)
++{
++ return csr_read(sstatus);
++}
++
++/* unconditionally enable interrupts */
++static inline void arch_local_irq_enable(void)
++{
++ csr_set(sstatus, SR_IE);
++}
++
++/* unconditionally disable interrupts */
++static inline void arch_local_irq_disable(void)
++{
++ csr_clear(sstatus, SR_IE);
++}
++
++/* get status and disable interrupts */
++static inline unsigned long arch_local_irq_save(void)
++{
++ return csr_read_clear(sstatus, SR_IE);
++}
++
++/* test flags */
++static inline int arch_irqs_disabled_flags(unsigned long flags)
++{
++ return !(flags & SR_IE);
++}
++
++/* test hardware interrupt enable bit */
++static inline int arch_irqs_disabled(void)
++{
++ return arch_irqs_disabled_flags(arch_local_save_flags());
++}
++
++/* set interrupt enabled status */
++static inline void arch_local_irq_restore(unsigned long flags)
++{
++ csr_set(sstatus, flags & SR_IE);
++}
++
++#endif /* _ASM_RISCV_IRQFLAGS_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/irq.h linux-4.6.2.riscv/arch/riscv/include/asm/irq.h
+--- linux-4.6.2/arch/riscv/include/asm/irq.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/irq.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,14 @@
++#ifndef _ASM_RISCV_IRQ_H
++#define _ASM_RISCV_IRQ_H
++
++#define NR_IRQS 0
++
++#define INTERRUPT_CAUSE_SOFTWARE 1
++#define INTERRUPT_CAUSE_TIMER 5
++#define INTERRUPT_CAUSE_EXTERNAL 9
++
++void riscv_timer_interrupt(void);
++
++#include <asm-generic/irq.h>
++
++#endif /* _ASM_RISCV_IRQ_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/Kbuild linux-4.6.2.riscv/arch/riscv/include/asm/Kbuild
+--- linux-4.6.2/arch/riscv/include/asm/Kbuild 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/Kbuild 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1,59 @@
++generic-y += bugs.h
++generic-y += cacheflush.h
++generic-y += checksum.h
++generic-y += clkdev.h
++generic-y += cputime.h
++generic-y += div64.h
++generic-y += dma.h
++generic-y += emergency-restart.h
++generic-y += errno.h
++generic-y += exec.h
++generic-y += fb.h
++generic-y += fcntl.h
++generic-y += ftrace.h
++generic-y += futex.h
++generic-y += hardirq.h
++generic-y += hash.h
++generic-y += hw_irq.h
++generic-y += ioctl.h
++generic-y += ioctls.h
++generic-y += ipcbuf.h
++generic-y += irq_regs.h
++generic-y += irq_work.h
++generic-y += kdebug.h
++generic-y += kmap_types.h
++generic-y += kvm_para.h
++generic-y += local.h
++generic-y += mm-arch-hooks.h
++generic-y += mman.h
++generic-y += module.h
++generic-y += msgbuf.h
++generic-y += mutex.h
++generic-y += param.h
++generic-y += percpu.h
++generic-y += poll.h
++generic-y += posix_types.h
++generic-y += preempt.h
++generic-y += resource.h
++generic-y += scatterlist.h
++generic-y += sections.h
++generic-y += sembuf.h
++generic-y += shmbuf.h
++generic-y += shmparam.h
++generic-y += signal.h
++generic-y += socket.h
++generic-y += sockios.h
++generic-y += stat.h
++generic-y += statfs.h
++generic-y += swab.h
++generic-y += termbits.h
++generic-y += termios.h
++generic-y += topology.h
++generic-y += trace_clock.h
++generic-y += types.h
++generic-y += ucontext.h
++generic-y += unaligned.h
++generic-y += user.h
++generic-y += vga.h
++generic-y += vmlinux.lds.h
++generic-y += xor.h
+diff -Nur linux-4.6.2/arch/riscv/include/asm/linkage.h linux-4.6.2.riscv/arch/riscv/include/asm/linkage.h
+--- linux-4.6.2/arch/riscv/include/asm/linkage.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/linkage.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,7 @@
++#ifndef _ASM_RISCV_LINKAGE_H
++#define _ASM_RISCV_LINKAGE_H
++
++#define __ALIGN .balign 4
++#define __ALIGN_STR ".balign 4"
++
++#endif /* _ASM_RISCV_LINKAGE_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/mmu_context.h linux-4.6.2.riscv/arch/riscv/include/asm/mmu_context.h
+--- linux-4.6.2/arch/riscv/include/asm/mmu_context.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/mmu_context.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,46 @@
++#ifndef _ASM_RISCV_MMU_CONTEXT_H
++#define _ASM_RISCV_MMU_CONTEXT_H
++
++#include <asm-generic/mm_hooks.h>
++
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <asm/tlbflush.h>
++
++static inline void enter_lazy_tlb(struct mm_struct *mm,
++ struct task_struct *task)
++{
++}
++
++/* Initialize context-related info for a new mm_struct */
++static inline int init_new_context(struct task_struct *task,
++ struct mm_struct *mm)
++{
++ return 0;
++}
++
++static inline void destroy_context(struct mm_struct *mm)
++{
++}
++
++static inline void switch_mm(struct mm_struct *prev,
++ struct mm_struct *next, struct task_struct *task)
++{
++ if (likely(prev != next)) {
++ csr_write(sptbr, virt_to_pfn(next->pgd));
++ local_flush_tlb_all();
++ }
++}
++
++static inline void activate_mm(struct mm_struct *prev,
++ struct mm_struct *next)
++{
++ switch_mm(prev, next, NULL);
++}
++
++static inline void deactivate_mm(struct task_struct *task,
++ struct mm_struct *mm)
++{
++}
++
++#endif /* _ASM_RISCV_MMU_CONTEXT_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/mmu.h linux-4.6.2.riscv/arch/riscv/include/asm/mmu.h
+--- linux-4.6.2/arch/riscv/include/asm/mmu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/mmu.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,12 @@
++#ifndef _ASM_RISCV_MMU_H
++#define _ASM_RISCV_MMU_H
++
++#ifndef __ASSEMBLY__
++
++typedef struct {
++ void *vdso;
++} mm_context_t;
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_RISCV_MMU_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/page.h linux-4.6.2.riscv/arch/riscv/include/asm/page.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/page.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/page.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,119 @@
|
|
|
++#ifndef _ASM_RISCV_PAGE_H
|
|
|
++#define _ASM_RISCV_PAGE_H
|
|
|
++
|
|
|
++#include <linux/pfn.h>
|
|
|
++#include <linux/const.h>
|
|
|
++
|
|
|
++#define PAGE_SHIFT (12)
|
|
|
++#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
|
|
|
++#define PAGE_MASK (~(PAGE_SIZE - 1))
|
|
|
++
|
|
|
++#ifdef __KERNEL__
|
|
|
++
|
|
|
++/*
|
|
|
++ * PAGE_OFFSET -- the first address of the first page of memory.
|
|
|
++ * When not using MMU this corresponds to the first free page in
|
|
|
++ * physical memory (aligned on a page boundary).
|
|
|
++ */
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#define PAGE_OFFSET _AC(0xffffffff80000000,UL)
|
|
|
++#else
|
|
|
++#define PAGE_OFFSET _AC(0xc0000000,UL)
|
|
|
++#endif
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
|
|
|
++#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
|
|
|
++
|
|
|
++/* align addr on a size boundary - adjust address up/down if needed */
|
|
|
++#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
|
|
|
++#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
|
|
|
++
|
|
|
++/* align addr on a size boundary - adjust address up if needed */
|
|
|
++#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
|
|
|
++
|
|
|
++#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
|
|
|
++#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
|
|
|
++
|
|
|
++#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
|
|
|
++#define copy_user_page(vto, vfrom, vaddr, topg) \
|
|
|
++ memcpy((vto), (vfrom), PAGE_SIZE)
|
|
|
++
|
|
|
++/*
|
|
|
++ * Use struct definitions to apply C type checking
|
|
|
++ */
|
|
|
++
|
|
|
++/* Page Global Directory entry */
|
|
|
++typedef struct {
|
|
|
++ unsigned long pgd;
|
|
|
++} pgd_t;
|
|
|
++
|
|
|
++/* Page Table entry */
|
|
|
++typedef struct {
|
|
|
++ unsigned long pte;
|
|
|
++} pte_t;
|
|
|
++
|
|
|
++typedef struct {
|
|
|
++ unsigned long pgprot;
|
|
|
++} pgprot_t;
|
|
|
++
|
|
|
++typedef struct page *pgtable_t;
|
|
|
++
|
|
|
++#define pte_val(x) ((x).pte)
|
|
|
++#define pgd_val(x) ((x).pgd)
|
|
|
++#define pgprot_val(x) ((x).pgprot)
|
|
|
++
|
|
|
++#define __pte(x) ((pte_t) { (x) })
|
|
|
++#define __pgd(x) ((pgd_t) { (x) })
|
|
|
++#define __pgprot(x) ((pgprot_t) { (x) })
|
|
|
++
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#define PTE_FMT "%016lx"
|
|
|
++#else
|
|
|
++#define PTE_FMT "%08lx"
|
|
|
++#endif
|
|
|
++
|
|
|
++extern unsigned long va_pa_offset;
|
|
|
++extern unsigned long pfn_base;
|
|
|
++
|
|
|
++extern unsigned long max_low_pfn;
|
|
|
++extern unsigned long min_low_pfn;
|
|
|
++
|
|
|
++#define __pa(x) ((unsigned long)(x) - va_pa_offset)
|
|
|
++#define __va(x) ((void *)((unsigned long) (x) + va_pa_offset))
|
|
|
++
|
|
|
++#define phys_to_pfn(phys) (PFN_DOWN(phys))
|
|
|
++#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
|
|
|
++
|
|
|
++#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
|
|
|
++#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
|
|
|
++
|
|
|
++#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
|
|
|
++#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
|
|
|
++
|
|
|
++#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
|
|
|
++#define page_to_bus(page) (page_to_phys(page))
|
|
|
++#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
|
|
|
++
|
|
|
++#define pfn_valid(pfn) (((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
|
|
|
++
|
|
|
++#define ARCH_PFN_OFFSET (pfn_base)
|
|
|
++
|
|
|
++#endif /* __ASSEMBLY__ */
|
|
|
++
|
|
|
++#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
|
|
|
++
|
|
|
++#endif /* __KERNEL__ */
|
|
|
++
|
|
|
++#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
|
|
|
++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
|
++
|
|
|
++#include <asm-generic/memory_model.h>
|
|
|
++#include <asm-generic/getorder.h>
|
|
|
++
|
|
|
++/* vDSO support */
|
|
|
++/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
|
|
|
++#define __HAVE_ARCH_GATE_AREA
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_PAGE_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/pci.h linux-4.6.2.riscv/arch/riscv/include/asm/pci.h
+--- linux-4.6.2/arch/riscv/include/asm/pci.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/pci.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,37 @@
++#ifndef __ASM_RISCV_PCI_H
++#define __ASM_RISCV_PCI_H
++#ifdef __KERNEL__
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++
++#include <asm/io.h>
++
++#define PCIBIOS_MIN_IO 0x1000
++#define PCIBIOS_MIN_MEM 0
++
++/* RISC-V shim does not initialize PCI bus */
++#define pcibios_assign_all_busses() 1
++
++/* RISC-V TileLink and PCIe share the same address space */
++#define PCI_DMA_BUS_IS_PHYS 1
++
++extern int isa_dma_bridge_buggy;
++
++#ifdef CONFIG_PCI
++static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
++{
++ /* no legacy IRQ on risc-v */
++ return -ENODEV;
++}
++
++static inline int pci_proc_domain(struct pci_bus *bus)
++{
++ /* always show the domain in /proc */
++ return 1;
++}
++#endif /* CONFIG_PCI */
++
++#endif /* __KERNEL__ */
++#endif /* __ASM_RISCV_PCI_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/pgalloc.h linux-4.6.2.riscv/arch/riscv/include/asm/pgalloc.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/pgalloc.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/pgalloc.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,107 @@
|
|
|
++#ifndef _ASM_RISCV_PGALLOC_H
|
|
|
++#define _ASM_RISCV_PGALLOC_H
|
|
|
++
|
|
|
++#include <linux/mm.h>
|
|
|
++#include <asm/tlb.h>
|
|
|
++
|
|
|
++static inline void pmd_populate_kernel(struct mm_struct *mm,
|
|
|
++ pmd_t *pmd, pte_t *pte)
|
|
|
++{
|
|
|
++ unsigned long pfn = virt_to_pfn(pte);
|
|
|
++ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pmd_populate(struct mm_struct *mm,
|
|
|
++ pmd_t *pmd, pgtable_t pte)
|
|
|
++{
|
|
|
++ unsigned long pfn = virt_to_pfn(page_address(pte));
|
|
|
++ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
|
|
|
++}
|
|
|
++
|
|
|
++#ifndef __PAGETABLE_PMD_FOLDED
|
|
|
++static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
|
++{
|
|
|
++ unsigned long pfn = virt_to_pfn(pmd);
|
|
|
++ set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
|
|
|
++}
|
|
|
++#endif /* __PAGETABLE_PMD_FOLDED */
|
|
|
++
|
|
|
++#define pmd_pgtable(pmd) pmd_page(pmd)
|
|
|
++
|
|
|
++static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
|
++{
|
|
|
++ pgd_t *pgd;
|
|
|
++
|
|
|
++ pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
|
|
|
++ if (likely(pgd != NULL)) {
|
|
|
++ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
|
|
|
++ /* Copy kernel mappings */
|
|
|
++ memcpy(pgd + USER_PTRS_PER_PGD,
|
|
|
++ init_mm.pgd + USER_PTRS_PER_PGD,
|
|
|
++ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
|
|
|
++ }
|
|
|
++ return pgd;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
|
|
++{
|
|
|
++ free_page((unsigned long)pgd);
|
|
|
++}
|
|
|
++
|
|
|
++#ifndef __PAGETABLE_PMD_FOLDED
|
|
|
++
|
|
|
++static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
|
++{
|
|
|
++ return (pmd_t *)__get_free_page(
|
|
|
++ GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
|
|
++{
|
|
|
++ free_page((unsigned long)pmd);
|
|
|
++}
|
|
|
++
|
|
|
++#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
|
|
|
++
|
|
|
++#endif /* __PAGETABLE_PMD_FOLDED */
|
|
|
++
|
|
|
++static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
|
|
|
++ unsigned long address)
|
|
|
++{
|
|
|
++ return (pte_t *)__get_free_page(
|
|
|
++ GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
|
|
|
++}
|
|
|
++
|
|
|
++static inline struct page *pte_alloc_one(struct mm_struct *mm,
|
|
|
++ unsigned long address)
|
|
|
++{
|
|
|
++ struct page *pte;
|
|
|
++ pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
|
|
|
++ if (likely(pte != NULL)) {
|
|
|
++ pgtable_page_ctor(pte);
|
|
|
++ }
|
|
|
++ return pte;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
|
|
|
++{
|
|
|
++ free_page((unsigned long)pte);
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
|
|
|
++{
|
|
|
++ pgtable_page_dtor(pte);
|
|
|
++ __free_page(pte);
|
|
|
++}
|
|
|
++
|
|
|
++#define __pte_free_tlb(tlb, pte, buf) \
|
|
|
++do { \
|
|
|
++ pgtable_page_dtor(pte); \
|
|
|
++ tlb_remove_page((tlb), pte); \
|
|
|
++} while (0)
|
|
|
++
|
|
|
++static inline void check_pgt_cache(void)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_PGALLOC_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/pgtable-32.h linux-4.6.2.riscv/arch/riscv/include/asm/pgtable-32.h
+--- linux-4.6.2/arch/riscv/include/asm/pgtable-32.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/pgtable-32.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,12 @@
++#ifndef _ASM_RISCV_PGTABLE_32_H
++#define _ASM_RISCV_PGTABLE_32_H
++
++#include <asm-generic/pgtable-nopmd.h>
++#include <linux/const.h>
++
++/* Size of region mapped by a page global directory */
++#define PGDIR_SHIFT 22
++#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
++#define PGDIR_MASK (~(PGDIR_SIZE - 1))
++
++#endif /* _ASM_RISCV_PGTABLE_32_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/pgtable-64.h linux-4.6.2.riscv/arch/riscv/include/asm/pgtable-64.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/pgtable-64.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/pgtable-64.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,66 @@
|
|
|
++#ifndef _ASM_RISCV_PGTABLE_64_H
|
|
|
++#define _ASM_RISCV_PGTABLE_64_H
|
|
|
++
|
|
|
++#include <linux/const.h>
|
|
|
++
|
|
|
++#define PGDIR_SHIFT 30
|
|
|
++/* Size of region mapped by a page global directory */
|
|
|
++#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
|
|
|
++#define PGDIR_MASK (~(PGDIR_SIZE - 1))
|
|
|
++
|
|
|
++#define PMD_SHIFT 21
|
|
|
++/* Size of region mapped by a page middle directory */
|
|
|
++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
|
|
|
++#define PMD_MASK (~(PMD_SIZE - 1))
|
|
|
++
|
|
|
++/* Page Middle Directory entry */
|
|
|
++typedef struct {
|
|
|
++ unsigned long pmd;
|
|
|
++} pmd_t;
|
|
|
++
|
|
|
++#define pmd_val(x) ((x).pmd)
|
|
|
++#define __pmd(x) ((pmd_t) { (x) })
|
|
|
++
|
|
|
++#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
|
|
|
++
|
|
|
++static inline int pud_present(pud_t pud)
|
|
|
++{
|
|
|
++ return (pud_val(pud) & _PAGE_PRESENT);
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pud_none(pud_t pud)
|
|
|
++{
|
|
|
++ return (pud_val(pud) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pud_bad(pud_t pud)
|
|
|
++{
|
|
|
++ return !pud_present(pud);
|
|
|
++}
|
|
|
++
|
|
|
++static inline void set_pud(pud_t *pudp, pud_t pud)
|
|
|
++{
|
|
|
++ *pudp = pud;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pud_clear(pud_t *pudp)
|
|
|
++{
|
|
|
++ set_pud(pudp, __pud(0));
|
|
|
++}
|
|
|
++
|
|
|
++static inline unsigned long pud_page_vaddr(pud_t pud)
|
|
|
++{
|
|
|
++ return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
|
|
|
++}
|
|
|
++
|
|
|
++#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
|
|
|
++
|
|
|
++static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
|
|
++{
|
|
|
++ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
|
|
|
++}
|
|
|
++
|
|
|
++#define pmd_ERROR(e) \
|
|
|
++ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_PGTABLE_64_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/pgtable-bits.h linux-4.6.2.riscv/arch/riscv/include/asm/pgtable-bits.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/pgtable-bits.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/pgtable-bits.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,37 @@
|
|
|
++#ifndef _ASM_RISCV_PGTABLE_BITS_H
|
|
|
++#define _ASM_RISCV_PGTABLE_BITS_H
|
|
|
++
|
|
|
++/*
|
|
|
++ * RV32Sv32 page table entry:
|
|
|
++ * | 31 10 | 9 7 | 6 | 5 | 4 1 | 0
|
|
|
++ * PFN reserved for SW D R TYPE V
|
|
|
++ *
|
|
|
++ * RV64Sv39 / RV64Sv48 page table entry:
|
|
|
++ * | 63 48 | 47 10 | 9 7 | 6 | 5 | 4 1 | 0
|
|
|
++ * reserved for HW PFN reserved for SW D R TYPE V
|
|
|
++ */
|
|
|
++
|
|
|
++#define _PAGE_PRESENT (1 << 0)
|
|
|
++#define _PAGE_READ (1 << 1) /* Readable */
|
|
|
++#define _PAGE_WRITE (1 << 2) /* Writable */
|
|
|
++#define _PAGE_EXEC (1 << 3) /* Executable */
|
|
|
++#define _PAGE_USER (1 << 4) /* User */
|
|
|
++#define _PAGE_GLOBAL (1 << 5) /* Global */
|
|
|
++#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
|
|
|
++#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
|
|
|
++#define _PAGE_SOFT (1 << 8) /* Reserved for software */
|
|
|
++
|
|
|
++#define _PAGE_SPECIAL _PAGE_SOFT
|
|
|
++#define _PAGE_TABLE _PAGE_PRESENT
|
|
|
++
|
|
|
++#define _PAGE_PFN_SHIFT 10
|
|
|
++
|
|
|
++/* Set of bits to preserve across pte_modify() */
|
|
|
++#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
|
|
|
++ _PAGE_WRITE | _PAGE_EXEC | \
|
|
|
++ _PAGE_USER | _PAGE_GLOBAL))
|
|
|
++
|
|
|
++/* Advertise support for _PAGE_SPECIAL */
|
|
|
++#define __HAVE_ARCH_PTE_SPECIAL
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_PGTABLE_BITS_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/pgtable.h linux-4.6.2.riscv/arch/riscv/include/asm/pgtable.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/pgtable.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/pgtable.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,329 @@
|
|
|
++#ifndef _ASM_RISCV_PGTABLE_H
|
|
|
++#define _ASM_RISCV_PGTABLE_H
|
|
|
++
|
|
|
++#include <linux/mmzone.h>
|
|
|
++
|
|
|
++#include <asm/pgtable-bits.h>
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++#ifdef CONFIG_MMU
|
|
|
++
|
|
|
++/* Page Upper Directory not used in RISC-V */
|
|
|
++#include <asm-generic/pgtable-nopud.h>
|
|
|
++#include <asm/page.h>
|
|
|
++#include <linux/mm_types.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#include <asm/pgtable-64.h>
|
|
|
++#else
|
|
|
++#include <asm/pgtable-32.h>
|
|
|
++#endif /* CONFIG_64BIT */
|
|
|
++
|
|
|
++/* Number of entries in the page global directory */
|
|
|
++#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
|
|
|
++/* Number of entries in the page table */
|
|
|
++#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
|
|
|
++
|
|
|
++/* Number of PGD entries that a user-mode program can use */
|
|
|
++#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
|
|
|
++#define FIRST_USER_ADDRESS 0
|
|
|
++
|
|
|
++/* Page protection bits */
|
|
|
++#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
|
|
|
++
|
|
|
++#define PAGE_NONE __pgprot(0)
|
|
|
++#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
|
|
|
++#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
|
|
|
++#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
|
|
|
++#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
|
|
|
++#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
|
|
|
++ _PAGE_EXEC | _PAGE_WRITE)
|
|
|
++
|
|
|
++#define PAGE_COPY PAGE_READ
|
|
|
++#define PAGE_COPY_EXEC PAGE_EXEC
|
|
|
++#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
|
|
|
++#define PAGE_SHARED PAGE_WRITE
|
|
|
++#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
|
|
|
++
|
|
|
++#define PAGE_KERNEL __pgprot(_PAGE_READ | _PAGE_WRITE | \
|
|
|
++ _PAGE_PRESENT | _PAGE_ACCESSED)
|
|
|
++
|
|
|
++#define swapper_pg_dir NULL
|
|
|
++
|
|
|
++/* MAP_PRIVATE permissions: xwr (copy-on-write) */
|
|
|
++#define __P000 PAGE_NONE
|
|
|
++#define __P001 PAGE_READ
|
|
|
++#define __P010 PAGE_COPY
|
|
|
++#define __P011 PAGE_COPY
|
|
|
++#define __P100 PAGE_EXEC
|
|
|
++#define __P101 PAGE_READ_EXEC
|
|
|
++#define __P110 PAGE_COPY_EXEC
|
|
|
++#define __P111 PAGE_COPY_READ_EXEC
|
|
|
++
|
|
|
++/* MAP_SHARED permissions: xwr */
|
|
|
++#define __S000 PAGE_NONE
|
|
|
++#define __S001 PAGE_READ
|
|
|
++#define __S010 PAGE_SHARED
|
|
|
++#define __S011 PAGE_SHARED
|
|
|
++#define __S100 PAGE_EXEC
|
|
|
++#define __S101 PAGE_READ_EXEC
|
|
|
++#define __S110 PAGE_SHARED_EXEC
|
|
|
++#define __S111 PAGE_SHARED_EXEC
|
|
|
++
|
|
|
++/*
|
|
|
++ * ZERO_PAGE is a global shared page that is always zero,
|
|
|
++ * used for zero-mapped memory areas, etc.
|
|
|
++ */
|
|
|
++extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|
|
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
|
|
|
++
|
|
|
++static inline int pmd_present(pmd_t pmd)
|
|
|
++{
|
|
|
++ return (pmd_val(pmd) & _PAGE_PRESENT);
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pmd_none(pmd_t pmd)
|
|
|
++{
|
|
|
++ return (pmd_val(pmd) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pmd_bad(pmd_t pmd)
|
|
|
++{
|
|
|
++ return !pmd_present(pmd);
|
|
|
++}
|
|
|
++
|
|
|
++static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
|
|
|
++{
|
|
|
++ *pmdp = pmd;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pmd_clear(pmd_t *pmdp)
|
|
|
++{
|
|
|
++ set_pmd(pmdp, __pmd(0));
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
|
|
|
++
|
|
|
++/* Locate an entry in the page global directory */
|
|
|
++static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
|
|
|
++{
|
|
|
++ return mm->pgd + pgd_index(addr);
|
|
|
++}
|
|
|
++/* Locate an entry in the kernel page global directory */
|
|
|
++#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))
|
|
|
++
|
|
|
++static inline struct page *pmd_page(pmd_t pmd)
|
|
|
++{
|
|
|
++ return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
|
|
|
++}
|
|
|
++
|
|
|
++static inline unsigned long pmd_page_vaddr(pmd_t pmd)
|
|
|
++{
|
|
|
++ return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
|
|
|
++}
|
|
|
++
|
|
|
++/* Yields the page frame number (PFN) of a page table entry */
|
|
|
++static inline unsigned long pte_pfn(pte_t pte)
|
|
|
++{
|
|
|
++ return (pte_val(pte) >> _PAGE_PFN_SHIFT);
|
|
|
++}
|
|
|
++
|
|
|
++#define pte_page(x) pfn_to_page(pte_pfn(x))
|
|
|
++
|
|
|
++/* Constructs a page table entry */
|
|
|
++static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
|
|
|
++{
|
|
|
++ return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
|
|
|
++}
|
|
|
++
|
|
|
++static inline pte_t mk_pte(struct page *page, pgprot_t prot)
|
|
|
++{
|
|
|
++ return pfn_pte(page_to_pfn(page), prot);
|
|
|
++}
|
|
|
++
|
|
|
++#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
|
|
|
++
|
|
|
++static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
|
|
|
++{
|
|
|
++ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
|
|
|
++}
|
|
|
++
|
|
|
++#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
|
|
|
++#define pte_unmap(pte) ((void)(pte))
|
|
|
++
|
|
|
++/*
|
|
|
++ * Certain architectures need to do special things when PTEs within
|
|
|
++ * a page table are directly modified. Thus, the following hook is
|
|
|
++ * made available.
|
|
|
++ */
|
|
|
++static inline void set_pte(pte_t *ptep, pte_t pteval)
|
|
|
++{
|
|
|
++ *ptep = pteval;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void set_pte_at(struct mm_struct *mm,
|
|
|
++ unsigned long addr, pte_t *ptep, pte_t pteval)
|
|
|
++{
|
|
|
++ set_pte(ptep, pteval);
|
|
|
++}
|
|
|
++
|
|
|
++static inline void pte_clear(struct mm_struct *mm,
|
|
|
++ unsigned long addr, pte_t *ptep)
|
|
|
++{
|
|
|
++ set_pte_at(mm, addr, ptep, __pte(0));
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pte_present(pte_t pte)
|
|
|
++{
|
|
|
++ return (pte_val(pte) & _PAGE_PRESENT);
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pte_none(pte_t pte)
|
|
|
++{
|
|
|
++ return (pte_val(pte) == 0);
|
|
|
++}
|
|
|
++
|
|
|
++/* static inline int pte_read(pte_t pte) */
|
|
|
++
|
|
|
++static inline int pte_write(pte_t pte)
|
|
|
++{
|
|
|
++ return pte_val(pte) & _PAGE_WRITE;
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pte_huge(pte_t pte)
|
|
|
++{
|
|
|
++ return pte_present(pte)
|
|
|
++ && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
|
|
|
++}
|
|
|
++
|
|
|
++/* static inline int pte_exec(pte_t pte) */
|
|
|
++
|
|
|
++static inline int pte_dirty(pte_t pte)
|
|
|
++{
|
|
|
++ return pte_val(pte) & _PAGE_DIRTY;
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pte_young(pte_t pte)
|
|
|
++{
|
|
|
++ return pte_val(pte) & _PAGE_ACCESSED;
|
|
|
++}
|
|
|
++
|
|
|
++static inline int pte_special(pte_t pte)
|
|
|
++{
|
|
|
++ return pte_val(pte) & _PAGE_SPECIAL;
|
|
|
++}
|
|
|
++
|
|
|
++/* static inline pte_t pte_rdprotect(pte_t pte) */
|
|
|
++
|
|
|
++static inline pte_t pte_wrprotect(pte_t pte)
|
|
|
++{
|
|
|
++ return __pte(pte_val(pte) & ~(_PAGE_WRITE));
|
|
|
++}
|
|
|
++
|
|
|
++/* static inline pte_t pte_mkread(pte_t pte) */
|
|
|
++
|
|
|
++static inline pte_t pte_mkwrite(pte_t pte)
|
|
|
++{
|
|
|
++ return __pte(pte_val(pte) | _PAGE_WRITE);
|
|
|
++}
|
|
|
++
|
|
|
++/* static inline pte_t pte_mkexec(pte_t pte) */
|
|
|
++
|
|
|
++static inline pte_t pte_mkdirty(pte_t pte)
|
|
|
++{
|
|
|
++ return __pte(pte_val(pte) | _PAGE_DIRTY);
|
|
|
++}
|
|
|
++
|
|
|
++static inline pte_t pte_mkclean(pte_t pte)
|
|
|
++{
|
|
|
++ return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
|
|
|
++}
|
|
|
++
|
|
|
++static inline pte_t pte_mkyoung(pte_t pte)
|
|
|
++{
|
|
|
++ return __pte(pte_val(pte) | _PAGE_ACCESSED);
|
|
|
++}
|
|
|
++
|
|
|
++static inline pte_t pte_mkold(pte_t pte)
|
|
|
++{
|
|
|
++ return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
|
|
|
++}
|
|
|
++
|
|
|
++static inline pte_t pte_mkspecial(pte_t pte)
|
|
|
++{
|
|
|
++ return __pte(pte_val(pte) | _PAGE_SPECIAL);
|
|
|
++}
|
|
|
++
|
|
|
++/* Modify page protection bits */
|
|
|
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
|
|
++{
|
|
|
++ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
|
|
|
++}
|
|
|
++
|
|
|
++#define pgd_ERROR(e) \
|
|
|
++ pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
|
|
|
++
|
|
|
++
|
|
|
++/* Commit new configuration to MMU hardware */
|
|
|
++static inline void update_mmu_cache(struct vm_area_struct *vma,
|
|
|
++ unsigned long address, pte_t *ptep)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * Encode and decode a swap entry
|
|
|
++ *
|
|
|
++ * Format of swap PTE:
|
|
|
++ * bit 0: _PAGE_PRESENT (zero)
|
|
|
++ * bit 1: reserved for future use (zero)
|
|
|
++ * bits 2 to 6: swap type
|
|
|
++ * bits 7 to XLEN-1: swap offset
|
|
|
++ */
|
|
|
++#define __SWP_TYPE_SHIFT 2
|
|
|
++#define __SWP_TYPE_BITS 5
|
|
|
++#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
|
|
|
++#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
|
|
|
++
|
|
|
++#define MAX_SWAPFILES_CHECK() \
|
|
|
++ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
|
|
|
++
|
|
|
++#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
|
|
|
++#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
|
|
|
++#define __swp_entry(type, offset) ((swp_entry_t) \
|
|
|
++ { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
|
|
|
++
|
|
|
++#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
|
|
|
++#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
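To make the layout described in the comment above concrete, here is a small stand-alone sketch (not part of the patch; the SWP_* constants simply mirror the __SWP_* macros) showing that a (type, offset) pair survives the round trip:

#include <assert.h>

/* Mirrors the swap-PTE layout documented above: bits 2-6 hold the
 * type, bits 7 and up hold the offset, bit 0 (_PAGE_PRESENT) is zero. */
#define SWP_TYPE_SHIFT   2
#define SWP_TYPE_BITS    5
#define SWP_TYPE_MASK    ((1UL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = (type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);

	assert(((val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);   /* __swp_type   */
	assert((val >> SWP_OFFSET_SHIFT) == offset);                 /* __swp_offset */
	assert((val & 1) == 0);                                      /* not _PAGE_PRESENT */
	return 0;
}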
|
|
|
++
|
|
|
++#ifdef CONFIG_FLATMEM
|
|
|
++#define kern_addr_valid(addr) (1) /* FIXME */
|
|
|
++#endif
|
|
|
++
|
|
|
++extern void paging_init(void);
|
|
|
++
|
|
|
++static inline void pgtable_cache_init(void)
|
|
|
++{
|
|
|
++ /* No page table caches to initialize */
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* CONFIG_MMU */
|
|
|
++
|
|
|
++#define VMALLOC_SIZE _AC(0x8000000,UL)
|
|
|
++#define VMALLOC_END (PAGE_OFFSET - 1)
|
|
|
++#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
|
|
|
++
|
|
|
++/* Task size is 0x40000000000 for RV64 or 0xb800000 for RV32.
|
|
|
++ Note that PGDIR_SIZE must evenly divide TASK_SIZE. */
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
|
|
|
++#else
|
|
|
++#define TASK_SIZE VMALLOC_START
|
|
|
++#endif
|
|
|
++
|
|
|
++#include <asm-generic/pgtable.h>
|
|
|
++
|
|
|
++#endif /* !__ASSEMBLY__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_PGTABLE_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/processor.h linux-4.6.2.riscv/arch/riscv/include/asm/processor.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/processor.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/processor.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,87 @@
|
|
|
++#ifndef _ASM_RISCV_PROCESSOR_H
|
|
|
++#define _ASM_RISCV_PROCESSOR_H
|
|
|
++
|
|
|
++#include <linux/const.h>
|
|
|
++
|
|
|
++#include <asm/ptrace.h>
|
|
|
++
|
|
|
++/*
|
|
|
++ * This decides where the kernel will search for a free chunk of vm
|
|
|
++ * space during mmap's.
|
|
|
++ */
|
|
|
++#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
|
|
|
++
|
|
|
++#ifdef __KERNEL__
|
|
|
++#define STACK_TOP TASK_SIZE
|
|
|
++#define STACK_TOP_MAX STACK_TOP
|
|
|
++#endif /* __KERNEL__ */
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++struct task_struct;
|
|
|
++struct pt_regs;
|
|
|
++
|
|
|
++/*
|
|
|
++ * Default implementation of macro that returns current
|
|
|
++ * instruction pointer ("program counter").
|
|
|
++ */
|
|
|
++#define current_text_addr() ({ __label__ _l; _l: &&_l;})
|
|
|
++
|
|
|
++/* CPU-specific state of a task */
|
|
|
++struct thread_struct {
|
|
|
++ /* Callee-saved registers */
|
|
|
++ unsigned long ra;
|
|
|
++ unsigned long sp; /* Kernel mode stack */
|
|
|
++ unsigned long s[12]; /* s[0]: frame pointer */
|
|
|
++ struct user_fpregs_struct fstate;
|
|
|
++};
|
|
|
++
|
|
|
++#define INIT_THREAD { \
|
|
|
++ .sp = sizeof(init_stack) + (long)&init_stack, \
|
|
|
++}
|
|
|
++
|
|
|
++/* Return saved (kernel) PC of a blocked thread. */
|
|
|
++#define thread_saved_pc(t) ((t)->thread.ra)
|
|
|
++#define thread_saved_sp(t) ((t)->thread.sp)
|
|
|
++#define thread_saved_fp(t) ((t)->thread.s[0])
|
|
|
++
|
|
|
++#define task_pt_regs(tsk) \
|
|
|
++ ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE) - 1)
|
|
|
++
|
|
|
++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc)
|
|
|
++#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
|
|
|
++
|
|
|
++
|
|
|
++/* Do necessary setup to start up a newly executed thread. */
|
|
|
++extern void start_thread(struct pt_regs *regs,
|
|
|
++ unsigned long pc, unsigned long sp);
|
|
|
++
|
|
|
++/* Free all resources held by a thread. */
|
|
|
++static inline void release_thread(struct task_struct *dead_task)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++/* Free current thread data structures, etc. */
|
|
|
++static inline void exit_thread(void)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++extern unsigned long get_wchan(struct task_struct *p);
|
|
|
++
|
|
|
++
|
|
|
++static inline void cpu_relax(void)
|
|
|
++{
|
|
|
++ int dummy;
|
|
|
++ /* In lieu of a halt instruction, induce a long-latency stall. */
|
|
|
++ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
|
|
|
++ barrier();
|
|
|
++}
|
|
|
++
|
|
|
++static inline void wait_for_interrupt(void)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ ("wfi");
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* __ASSEMBLY__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_PROCESSOR_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/ptrace.h linux-4.6.2.riscv/arch/riscv/include/asm/ptrace.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/ptrace.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/ptrace.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,103 @@
|
|
|
++#ifndef _ASM_RISCV_PTRACE_H
|
|
|
++#define _ASM_RISCV_PTRACE_H
|
|
|
++
|
|
|
++#include <uapi/asm/ptrace.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++struct pt_regs {
|
|
|
++ unsigned long sepc;
|
|
|
++ unsigned long ra;
|
|
|
++ unsigned long sp;
|
|
|
++ unsigned long gp;
|
|
|
++ unsigned long tp;
|
|
|
++ unsigned long t0;
|
|
|
++ unsigned long t1;
|
|
|
++ unsigned long t2;
|
|
|
++ unsigned long s0;
|
|
|
++ unsigned long s1;
|
|
|
++ unsigned long a0;
|
|
|
++ unsigned long a1;
|
|
|
++ unsigned long a2;
|
|
|
++ unsigned long a3;
|
|
|
++ unsigned long a4;
|
|
|
++ unsigned long a5;
|
|
|
++ unsigned long a6;
|
|
|
++ unsigned long a7;
|
|
|
++ unsigned long s2;
|
|
|
++ unsigned long s3;
|
|
|
++ unsigned long s4;
|
|
|
++ unsigned long s5;
|
|
|
++ unsigned long s6;
|
|
|
++ unsigned long s7;
|
|
|
++ unsigned long s8;
|
|
|
++ unsigned long s9;
|
|
|
++ unsigned long s10;
|
|
|
++ unsigned long s11;
|
|
|
++ unsigned long t3;
|
|
|
++ unsigned long t4;
|
|
|
++ unsigned long t5;
|
|
|
++ unsigned long t6;
|
|
|
++ /* Supervisor CSRs */
|
|
|
++ unsigned long sstatus;
|
|
|
++ unsigned long sbadaddr;
|
|
|
++ unsigned long scause;
|
|
|
++};
|
|
|
++
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#define REG_FMT "%016lx"
|
|
|
++#else
|
|
|
++#define REG_FMT "%08lx"
|
|
|
++#endif
|
|
|
++
|
|
|
++#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
|
|
|
++
|
|
|
++
|
|
|
++/* Helpers for working with the instruction pointer */
|
|
|
++#define GET_IP(regs) ((regs)->sepc)
|
|
|
++#define SET_IP(regs, val) (GET_IP(regs) = (val))
|
|
|
++
|
|
|
++static inline unsigned long instruction_pointer(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ return GET_IP(regs);
|
|
|
++}
|
|
|
++static inline void instruction_pointer_set(struct pt_regs *regs,
|
|
|
++ unsigned long val)
|
|
|
++{
|
|
|
++ SET_IP(regs, val);
|
|
|
++}
|
|
|
++
|
|
|
++#define profile_pc(regs) instruction_pointer(regs)
|
|
|
++
|
|
|
++/* Helpers for working with the user stack pointer */
|
|
|
++#define GET_USP(regs) ((regs)->sp)
|
|
|
++#define SET_USP(regs, val) (GET_USP(regs) = (val))
|
|
|
++
|
|
|
++static inline unsigned long user_stack_pointer(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ return GET_USP(regs);
|
|
|
++}
|
|
|
++static inline void user_stack_pointer_set(struct pt_regs *regs,
|
|
|
++ unsigned long val)
|
|
|
++{
|
|
|
++ SET_USP(regs, val);
|
|
|
++}
|
|
|
++
|
|
|
++/* Helpers for working with the frame pointer */
|
|
|
++#define GET_FP(regs) ((regs)->s0)
|
|
|
++#define SET_FP(regs, val) (GET_FP(regs) = (val))
|
|
|
++
|
|
|
++static inline unsigned long frame_pointer(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ return GET_FP(regs);
|
|
|
++}
|
|
|
++static inline void frame_pointer_set(struct pt_regs *regs,
|
|
|
++ unsigned long val)
|
|
|
++{
|
|
|
++ SET_FP(regs, val);
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* __ASSEMBLY__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_PTRACE_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/sbi-con.h linux-4.6.2.riscv/arch/riscv/include/asm/sbi-con.h
+--- linux-4.6.2/arch/riscv/include/asm/sbi-con.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/sbi-con.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,8 @@
++#ifndef _ASM_RISCV_SBI_CON_H
++#define _ASM_RISCV_SBI_CON_H
++
++#include <linux/irqreturn.h>
++
++irqreturn_t sbi_console_isr(void);
++
++#endif /* _ASM_RISCV_SBI_CON_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/sbi.h linux-4.6.2.riscv/arch/riscv/include/asm/sbi.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/sbi.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/sbi.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,30 @@
|
|
|
++#ifndef _ASM_RISCV_SBI_H
|
|
|
++#define _ASM_RISCV_SBI_H
|
|
|
++
|
|
|
++typedef struct {
|
|
|
++ unsigned long base;
|
|
|
++ unsigned long size;
|
|
|
++ unsigned long node_id;
|
|
|
++} memory_block_info;
|
|
|
++
|
|
|
++unsigned long sbi_query_memory(unsigned long id, memory_block_info *p);
|
|
|
++
|
|
|
++unsigned long sbi_hart_id(void);
|
|
|
++unsigned long sbi_num_harts(void);
|
|
|
++unsigned long sbi_timebase(void);
|
|
|
++void sbi_set_timer(unsigned long long stime_value);
|
|
|
++void sbi_send_ipi(unsigned long hart_id);
|
|
|
++unsigned long sbi_clear_ipi(void);
|
|
|
++void sbi_shutdown(void);
|
|
|
++
|
|
|
++void sbi_console_putchar(unsigned char ch);
|
|
|
++int sbi_console_getchar(void);
|
|
|
++
|
|
|
++void sbi_remote_sfence_vm(unsigned long hart_mask_ptr, unsigned long asid);
|
|
|
++void sbi_remote_sfence_vm_range(unsigned long hart_mask_ptr, unsigned long asid, unsigned long start, unsigned long size);
|
|
|
++void sbi_remote_fence_i(unsigned long hart_mask_ptr);
|
|
|
++
|
|
|
++unsigned long sbi_mask_interrupt(unsigned long which);
|
|
|
++unsigned long sbi_unmask_interrupt(unsigned long which);
|
|
|
++
|
|
|
++#endif
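As a usage sketch, a kernel-side caller could drive the console interface declared above like this (sbi_puts is a hypothetical helper, not part of the patch):

#include <asm/sbi.h>

/* Write a NUL-terminated string one character at a time through the
 * SBI console calls declared above. */
static void sbi_puts(const char *s)
{
	while (*s)
		sbi_console_putchar(*s++);
}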
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/serial.h linux-4.6.2.riscv/arch/riscv/include/asm/serial.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/serial.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/serial.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,29 @@
|
|
|
++#ifndef _ASM_RISCV_SERIAL_H
|
|
|
++#define _ASM_RISCV_SERIAL_H
|
|
|
++
|
|
|
++/*
|
|
|
++ * FIXME: interim serial support for riscv-qemu
|
|
|
++ *
|
|
|
++ * Currently requires that the emulator itself create a hole at addresses
|
|
|
++ * 0x3f8 - 0x3ff without looking through page tables.
|
|
|
++ *
|
|
|
++ * This assumes you have a 1.8432 MHz clock for your UART.
|
|
|
++ */
|
|
|
++#define BASE_BAUD ( 1843200 / 16 )
|
|
|
++
|
|
|
++/* Standard COM flags */
|
|
|
++#ifdef CONFIG_SERIAL_DETECT_IRQ
|
|
|
++#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
|
|
|
++#else
|
|
|
++#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
|
|
|
++#endif
|
|
|
++
|
|
|
++#define SERIAL_PORT_DFNS \
|
|
|
++ { /* ttyS0 */ \
|
|
|
++ .baud_base = BASE_BAUD, \
|
|
|
++ .port = 0x3F8, \
|
|
|
++ .irq = 4, \
|
|
|
++ .flags = STD_COM_FLAGS, \
|
|
|
++ },
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_SERIAL_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/setup.h linux-4.6.2.riscv/arch/riscv/include/asm/setup.h
+--- linux-4.6.2/arch/riscv/include/asm/setup.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/setup.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,6 @@
++#ifndef _ASM_RISCV_SETUP_H
++#define _ASM_RISCV_SETUP_H
++
++#include <asm-generic/setup.h>
++
++#endif /* _ASM_RISCV_SETUP_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/smp.h linux-4.6.2.riscv/arch/riscv/include/asm/smp.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/smp.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/smp.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,28 @@
|
|
|
++#ifndef _ASM_RISCV_SMP_H
|
|
|
++#define _ASM_RISCV_SMP_H
|
|
|
++
|
|
|
++#include <linux/cpumask.h>
|
|
|
++#include <linux/irqreturn.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_SMP
|
|
|
++
|
|
|
++/* SMP initialization hook for setup_arch */
|
|
|
++void __init init_clockevent(void);
|
|
|
++
|
|
|
++/* SMP initialization hook for setup_arch */
|
|
|
++void __init setup_smp(void);
|
|
|
++
|
|
|
++/* Hook for the generic smp_call_function_many() routine. */
|
|
|
++void arch_send_call_function_ipi_mask(struct cpumask *mask);
|
|
|
++
|
|
|
++/* Hook for the generic smp_call_function_single() routine. */
|
|
|
++void arch_send_call_function_single_ipi(int cpu);
|
|
|
++
|
|
|
++#define raw_smp_processor_id() (current_thread_info()->cpu)
|
|
|
++
|
|
|
++/* Interprocessor interrupt handler */
|
|
|
++irqreturn_t handle_ipi(void);
|
|
|
++
|
|
|
++#endif /* CONFIG_SMP */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_SMP_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/spinlock.h linux-4.6.2.riscv/arch/riscv/include/asm/spinlock.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/spinlock.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/spinlock.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,142 @@
|
|
|
++#ifndef _ASM_RISCV_SPINLOCK_H
|
|
|
++#define _ASM_RISCV_SPINLOCK_H
|
|
|
++
|
|
|
++#include <linux/kernel.h>
|
|
|
++#include <asm/current.h>
|
|
|
++
|
|
|
++/*
|
|
|
++ * Simple spin lock operations. These provide no fairness guarantees.
|
|
|
++ */
|
|
|
++
|
|
|
++#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
|
|
|
++#define arch_spin_is_locked(x) ((x)->lock != 0)
|
|
|
++#define arch_spin_unlock_wait(x) \
|
|
|
++ do { cpu_relax(); } while ((x)->lock)
|
|
|
++
|
|
|
++static inline void arch_spin_unlock(arch_spinlock_t *lock)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoswap.w.rl x0, x0, %0"
|
|
|
++ : "=A" (lock->lock)
|
|
|
++ :: "memory");
|
|
|
++}
|
|
|
++
|
|
|
++static inline int arch_spin_trylock(arch_spinlock_t *lock)
|
|
|
++{
|
|
|
++ int tmp = 1, busy;
|
|
|
++
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoswap.w.aq %0, %2, %1"
|
|
|
++ : "=r" (busy), "+A" (lock->lock)
|
|
|
++ : "r" (tmp)
|
|
|
++ : "memory");
|
|
|
++
|
|
|
++ return !busy;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void arch_spin_lock(arch_spinlock_t *lock)
|
|
|
++{
|
|
|
++ while (1) {
|
|
|
++ if (arch_spin_is_locked(lock))
|
|
|
++ continue;
|
|
|
++
|
|
|
++ if (arch_spin_trylock(lock))
|
|
|
++ break;
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++/***********************************************************/
|
|
|
++
|
|
|
++static inline int arch_read_can_lock(arch_rwlock_t *lock)
|
|
|
++{
|
|
|
++ return lock->lock >= 0;
|
|
|
++}
|
|
|
++
|
|
|
++static inline int arch_write_can_lock(arch_rwlock_t *lock)
|
|
|
++{
|
|
|
++ return lock->lock == 0;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void arch_read_lock(arch_rwlock_t *lock)
|
|
|
++{
|
|
|
++ int tmp;
|
|
|
++
|
|
|
++ __asm__ __volatile__(
|
|
|
++ "1: lr.w %1, %0\n"
|
|
|
++ " bltz %1, 1b\n"
|
|
|
++ " addi %1, %1, 1\n"
|
|
|
++ " sc.w.aq %1, %1, %0\n"
|
|
|
++ " bnez %1, 1b\n"
|
|
|
++ : "+A" (lock->lock), "=&r" (tmp)
|
|
|
++ :: "memory");
|
|
|
++}
|
|
|
++
|
|
|
++static inline void arch_write_lock(arch_rwlock_t *lock)
|
|
|
++{
|
|
|
++ int tmp;
|
|
|
++
|
|
|
++ __asm__ __volatile__(
|
|
|
++ "1: lr.w %1, %0\n"
|
|
|
++ " bnez %1, 1b\n"
|
|
|
++ " li %1, -1\n"
|
|
|
++ " sc.w.aq %1, %1, %0\n"
|
|
|
++ " bnez %1, 1b\n"
|
|
|
++ : "+A" (lock->lock), "=&r" (tmp)
|
|
|
++ :: "memory");
|
|
|
++}
|
|
|
++
|
|
|
++static inline int arch_read_trylock(arch_rwlock_t * lock)
|
|
|
++{
|
|
|
++ int busy;
|
|
|
++
|
|
|
++ __asm__ __volatile__(
|
|
|
++ "1: lr.w %1, %0\n"
|
|
|
++ " bltz %1, 1f\n"
|
|
|
++ " addi %1, %1, 1\n"
|
|
|
++ " sc.w.aq %1, %1, %0\n"
|
|
|
++ " bnez %1, 1b\n"
|
|
|
++ "1:\n"
|
|
|
++ : "+A" (lock->lock), "=&r" (busy)
|
|
|
++ :: "memory");
|
|
|
++
|
|
|
++ return !busy;
|
|
|
++}
|
|
|
++
|
|
|
++static inline int arch_write_trylock(arch_rwlock_t * lock)
|
|
|
++{
|
|
|
++ int busy;
|
|
|
++
|
|
|
++ __asm__ __volatile__(
|
|
|
++ "1: lr.w %1, %0\n"
|
|
|
++ " bnez %1, 1f\n"
|
|
|
++ " li %1, -1\n"
|
|
|
++ " sc.w.aq %1, %1, %0\n"
|
|
|
++ " bnez %1, 1b\n"
|
|
|
++ "1:\n"
|
|
|
++ : "+A" (lock->lock), "=&r" (busy)
|
|
|
++ :: "memory");
|
|
|
++
|
|
|
++ return !busy;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void arch_read_unlock(arch_rwlock_t * lock)
|
|
|
++{
|
|
|
++ __asm__ __volatile__(
|
|
|
++ "amoadd.w.rl x0, %1, %0"
|
|
|
++ : "+A" (lock->lock)
|
|
|
++ : "r" (-1)
|
|
|
++ : "memory");
|
|
|
++}
|
|
|
++
|
|
|
++static inline void arch_write_unlock(arch_rwlock_t * lock)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "amoswap.w.rl x0, x0, %0"
|
|
|
++ : "=A" (lock->lock)
|
|
|
++ :: "memory");
|
|
|
++}
|
|
|
++
|
|
|
++#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
|
|
|
++#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_SPINLOCK_H */
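The lock word here is a plain 0/1 flag: arch_spin_lock() spins read-only via arch_spin_is_locked() and only then tries the amoswap.w.aq exchange, i.e. a test-and-test-and-set. For illustration, the same shape in portable C with GCC/Clang __atomic builtins (a sketch, not the kernel implementation):

/* Illustrative test-and-test-and-set; the kernel code above does the
 * same thing with amoswap.w.aq for acquire and amoswap.w.rl for release. */
typedef struct { volatile unsigned int lock; } demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *l)
{
	for (;;) {
		if (l->lock)                                              /* test */
			continue;
		if (!__atomic_exchange_n(&l->lock, 1, __ATOMIC_ACQUIRE)) /* set  */
			break;
	}
}

static void demo_spin_unlock(demo_spinlock_t *l)
{
	__atomic_store_n(&l->lock, 0, __ATOMIC_RELEASE);
}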
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/spinlock_types.h linux-4.6.2.riscv/arch/riscv/include/asm/spinlock_types.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/spinlock_types.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/spinlock_types.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,20 @@
|
|
|
++#ifndef _ASM_RISCV_SPINLOCK_TYPES_H
|
|
|
++#define _ASM_RISCV_SPINLOCK_TYPES_H
|
|
|
++
|
|
|
++#ifndef __LINUX_SPINLOCK_TYPES_H
|
|
|
++# error "please don't include this file directly"
|
|
|
++#endif
|
|
|
++
|
|
|
++typedef struct {
|
|
|
++ volatile unsigned int lock;
|
|
|
++} arch_spinlock_t;
|
|
|
++
|
|
|
++#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
|
|
|
++
|
|
|
++typedef struct {
|
|
|
++ volatile unsigned int lock;
|
|
|
++} arch_rwlock_t;
|
|
|
++
|
|
|
++#define __ARCH_RW_LOCK_UNLOCKED { 0 }
|
|
|
++
|
|
|
++#endif
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/string.h linux-4.6.2.riscv/arch/riscv/include/asm/string.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/string.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/string.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,17 @@
|
|
|
++#ifndef _ASM_RISCV_STRING_H
|
|
|
++#define _ASM_RISCV_STRING_H
|
|
|
++
|
|
|
++#ifdef __KERNEL__
|
|
|
++
|
|
|
++#include <linux/types.h>
|
|
|
++#include <linux/linkage.h>
|
|
|
++
|
|
|
++#define __HAVE_ARCH_MEMSET
|
|
|
++extern asmlinkage void *memset(void *, int, size_t);
|
|
|
++
|
|
|
++#define __HAVE_ARCH_MEMCPY
|
|
|
++extern asmlinkage void *memcpy(void *, const void *, size_t);
|
|
|
++
|
|
|
++#endif /* __KERNEL__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_STRING_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/switch_to.h linux-4.6.2.riscv/arch/riscv/include/asm/switch_to.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/switch_to.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/switch_to.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,57 @@
|
|
|
++#ifndef _ASM_RISCV_SWITCH_TO_H
|
|
|
++#define _ASM_RISCV_SWITCH_TO_H
|
|
|
++
|
|
|
++#include <asm/processor.h>
|
|
|
++#include <asm/ptrace.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++
|
|
|
++extern void __fstate_save(struct task_struct *);
|
|
|
++extern void __fstate_restore(struct task_struct *);
|
|
|
++
|
|
|
++static inline void __fstate_clean(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ regs->sstatus = (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void fstate_save(struct task_struct *task,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
|
|
|
++ __fstate_save(task);
|
|
|
++ __fstate_clean(regs);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++static inline void fstate_restore(struct task_struct *task,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
|
|
|
++ __fstate_restore(task);
|
|
|
++ __fstate_clean(regs);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++static inline void __switch_to_aux(struct task_struct *prev,
|
|
|
++ struct task_struct *next)
|
|
|
++{
|
|
|
++ struct pt_regs *regs;
|
|
|
++
|
|
|
++ regs = task_pt_regs(prev);
|
|
|
++ if (unlikely(regs->sstatus & SR_SD)) {
|
|
|
++ fstate_save(prev, regs);
|
|
|
++ }
|
|
|
++ fstate_restore(next, task_pt_regs(next));
|
|
|
++}
|
|
|
++
|
|
|
++extern struct task_struct *__switch_to(struct task_struct *,
|
|
|
++ struct task_struct *);
|
|
|
++
|
|
|
++#define switch_to(prev, next, last) \
|
|
|
++do { \
|
|
|
++ struct task_struct *__prev = (prev); \
|
|
|
++ struct task_struct *__next = (next); \
|
|
|
++ __switch_to_aux(__prev, __next); \
|
|
|
++ ((last) = __switch_to(__prev, __next)); \
|
|
|
++} while (0)
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_SWITCH_TO_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/syscall.h linux-4.6.2.riscv/arch/riscv/include/asm/syscall.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/syscall.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/syscall.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,90 @@
|
|
|
++/*
|
|
|
++ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
|
|
|
++ * Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
|
++ * Copyright 2015 Regents of the University of California, Berkeley
|
|
|
++ *
|
|
|
++ * This program is free software; you can redistribute it and/or
|
|
|
++ * modify it under the terms of the GNU General Public License
|
|
|
++ * as published by the Free Software Foundation, version 2.
|
|
|
++ *
|
|
|
++ * This program is distributed in the hope that it will be useful, but
|
|
|
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
++ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
|
++ * NON INFRINGEMENT. See the GNU General Public License for
|
|
|
++ * more details.
|
|
|
++ *
|
|
|
++ * See asm-generic/syscall.h for descriptions of what we must do here.
|
|
|
++ */
|
|
|
++
|
|
|
++#ifndef _ASM_RISCV_SYSCALL_H
|
|
|
++#define _ASM_RISCV_SYSCALL_H
|
|
|
++
|
|
|
++#include <linux/sched.h>
|
|
|
++#include <linux/err.h>
|
|
|
++
|
|
|
++/* The array of function pointers for syscalls. */
|
|
|
++extern void *sys_call_table[];
|
|
|
++
|
|
|
++/*
|
|
|
++ * Only the low 32 bits of orig_r0 are meaningful, so we return int.
|
|
|
++ * This importantly ignores the high bits on 64-bit, so comparisons
|
|
|
++ * sign-extend the low 32 bits.
|
|
|
++ */
|
|
|
++static inline int syscall_get_nr(struct task_struct *task,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ return regs->a7;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void syscall_set_nr(struct task_struct *task,
|
|
|
++ struct pt_regs *regs,
|
|
|
++ int sysno)
|
|
|
++{
|
|
|
++ regs->a7 = sysno;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void syscall_rollback(struct task_struct *task,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ /* FIXME: We can't do this... */
|
|
|
++}
|
|
|
++
|
|
|
++static inline long syscall_get_error(struct task_struct *task,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ unsigned long error = regs->a0;
|
|
|
++ return IS_ERR_VALUE(error) ? error : 0;
|
|
|
++}
|
|
|
++
|
|
|
++static inline long syscall_get_return_value(struct task_struct *task,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ return regs->a0;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void syscall_set_return_value(struct task_struct *task,
|
|
|
++ struct pt_regs *regs,
|
|
|
++ int error, long val)
|
|
|
++{
|
|
|
++ regs->a0 = (long) error ?: val;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void syscall_get_arguments(struct task_struct *task,
|
|
|
++ struct pt_regs *regs,
|
|
|
++ unsigned int i, unsigned int n,
|
|
|
++ unsigned long *args)
|
|
|
++{
|
|
|
++ BUG_ON(i + n > 6);
|
|
|
++ memcpy(args, &regs->a0 + i * sizeof(regs->a0), n * sizeof(args[0]));
|
|
|
++}
|
|
|
++
|
|
|
++static inline void syscall_set_arguments(struct task_struct *task,
|
|
|
++ struct pt_regs *regs,
|
|
|
++ unsigned int i, unsigned int n,
|
|
|
++ const unsigned long *args)
|
|
|
++{
|
|
|
++ BUG_ON(i + n > 6);
|
|
|
++ memcpy(&regs->a0 + i * sizeof(regs->a0), args, n * sizeof(regs->a0));
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_SYSCALL_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/syscalls.h linux-4.6.2.riscv/arch/riscv/include/asm/syscalls.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/syscalls.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/syscalls.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,12 @@
|
|
|
++#ifndef _ASM_RISCV_SYSCALLS_H
|
|
|
++#define _ASM_RISCV_SYSCALLS_H
|
|
|
++
|
|
|
++#include <linux/linkage.h>
|
|
|
++
|
|
|
++#include <asm-generic/syscalls.h>
|
|
|
++
|
|
|
++/* kernel/sys_riscv.c */
|
|
|
++asmlinkage long sys_sysriscv(unsigned long, unsigned long,
|
|
|
++ unsigned long, unsigned long);
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_SYSCALLS_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/thread_info.h linux-4.6.2.riscv/arch/riscv/include/asm/thread_info.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/thread_info.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/thread_info.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,89 @@
|
|
|
++#ifndef _ASM_RISCV_THREAD_INFO_H
|
|
|
++#define _ASM_RISCV_THREAD_INFO_H
|
|
|
++
|
|
|
++#ifdef __KERNEL__
|
|
|
++
|
|
|
++#include <asm/page.h>
|
|
|
++#include <linux/const.h>
|
|
|
++
|
|
|
++/* thread information allocation */
|
|
|
++#define THREAD_SIZE_ORDER (1)
|
|
|
++#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++#include <asm/processor.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++
|
|
|
++typedef unsigned long mm_segment_t;
|
|
|
++
|
|
|
++/*
|
|
|
++ * low level task data that entry.S needs immediate access to
|
|
|
++ * - this struct should fit entirely inside of one cache line
|
|
|
++ * - this struct resides at the bottom of the supervisor stack
|
|
|
++ * - if the members of this struct changes, the assembly constants
|
|
|
++ * in asm-offsets.c must be updated accordingly
|
|
|
++ */
|
|
|
++struct thread_info {
|
|
|
++ struct task_struct *task; /* main task structure */
|
|
|
++ unsigned long flags; /* low level flags */
|
|
|
++ __u32 cpu; /* current CPU */
|
|
|
++ int preempt_count; /* 0 => preemptable, <0 => BUG */
|
|
|
++ mm_segment_t addr_limit;
|
|
|
++};
|
|
|
++
|
|
|
++/*
|
|
|
++ * macros/functions for gaining access to the thread information structure
|
|
|
++ *
|
|
|
++ * preempt_count needs to be 1 initially, until the scheduler is functional.
|
|
|
++ */
|
|
|
++#define INIT_THREAD_INFO(tsk) \
|
|
|
++{ \
|
|
|
++ .task = &tsk, \
|
|
|
++ .flags = 0, \
|
|
|
++ .cpu = 0, \
|
|
|
++ .preempt_count = INIT_PREEMPT_COUNT, \
|
|
|
++ .addr_limit = KERNEL_DS, \
|
|
|
++}
|
|
|
++
|
|
|
++#define init_thread_info (init_thread_union.thread_info)
|
|
|
++#define init_stack (init_thread_union.stack)
|
|
|
++
|
|
|
++/*
|
|
|
++ * Pointer to the thread_info struct of the current process
|
|
|
++ * Assumes that the kernel mode stack (thread_union) is THREAD_SIZE-aligned
|
|
|
++ */
|
|
|
++static inline struct thread_info *current_thread_info(void)
|
|
|
++{
|
|
|
++ register unsigned long sp __asm__ ("sp");
|
|
|
++ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
|
|
|
++}
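current_thread_info() relies on the kernel stack being THREAD_SIZE-aligned, so masking sp with ~(THREAD_SIZE - 1) lands on the base of the stack, where thread_info sits. A quick numeric check of that mask (addresses are made up; THREAD_SIZE is 8 KiB as configured above):

#include <assert.h>

#define PAGE_SIZE   4096UL
#define THREAD_SIZE (PAGE_SIZE << 1)   /* THREAD_SIZE_ORDER == 1 */

int main(void)
{
	unsigned long stack_base = 0xc0204000UL;   /* made-up, 8 KiB aligned */
	unsigned long sp = stack_base + 0x1f30;    /* somewhere inside it    */

	assert((sp & ~(THREAD_SIZE - 1)) == stack_base);
	return 0;
}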
|
|
|
++
|
|
|
++#endif /* !__ASSEMBLY__ */
|
|
|
++
|
|
|
++/*
|
|
|
++ * thread information flags
|
|
|
++ * - these are process state flags that various assembly files may need to
|
|
|
++ * access
|
|
|
++ * - pending work-to-be-done flags are in lowest half-word
|
|
|
++ * - other flags in upper half-word(s)
|
|
|
++ */
|
|
|
++#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
|
|
|
++#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
|
|
|
++#define TIF_SIGPENDING 2 /* signal pending */
|
|
|
++#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
|
|
|
++#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
|
|
|
++#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
|
|
++#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
|
|
|
++
|
|
|
++#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
|
|
++#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
|
|
++#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
|
|
++#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
|
|
|
++
|
|
|
++#define _TIF_WORK_MASK \
|
|
|
++ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
|
|
|
++
|
|
|
++#endif /* __KERNEL__ */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_THREAD_INFO_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/timex.h linux-4.6.2.riscv/arch/riscv/include/asm/timex.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/timex.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/timex.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,39 @@
|
|
|
++#ifndef _ASM_RISCV_TIMEX_H
|
|
|
++#define _ASM_RISCV_TIMEX_H
|
|
|
++
|
|
|
++#include <asm/param.h>
|
|
|
++
|
|
|
++#define CLOCK_TICK_RATE (HZ * 100UL)
|
|
|
++
|
|
|
++typedef unsigned long cycles_t;
|
|
|
++
|
|
|
++static inline cycles_t get_cycles(void)
|
|
|
++{
|
|
|
++#if __riscv_xlen >= 64
|
|
|
++ cycles_t n;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "rdtime %0"
|
|
|
++ : "=r" (n));
|
|
|
++ return n;
|
|
|
++#else
|
|
|
++ u32 lo, hi, tmp;
|
|
|
++ __asm__ __volatile__ (
|
|
|
++ "1:\n"
|
|
|
++ "rdtimeh %0\n"
|
|
|
++ "rdtime %1\n"
|
|
|
++ "rdtimeh %2\n"
|
|
|
++ "bne %0, %2, 1b"
|
|
|
++ : "=&r" (hi), "=&r" (lo), "=&r" (tmp));
|
|
|
++ return ((u64)hi << 32) | lo;
|
|
|
++#endif
|
|
|
++}
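On RV32 the timer is read as two 32-bit halves, so the loop above re-reads the high half and retries if it changed between the reads (i.e. the low half wrapped). The same pattern in plain C, as a sketch where read_hi/read_lo stand in for the rdtimeh/rdtime CSR reads and are assumptions, not real APIs:

/* Generic "hi, lo, hi again" read of a 64-bit counter exposed as two
 * 32-bit halves; retry if the high word rolled over during the read. */
static unsigned long long read_split_counter(unsigned int (*read_hi)(void),
					     unsigned int (*read_lo)(void))
{
	unsigned int hi, lo, again;

	do {
		hi = read_hi();
		lo = read_lo();
		again = read_hi();
	} while (hi != again);

	return ((unsigned long long)hi << 32) | lo;
}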
|
|
|
++
|
|
|
++#define ARCH_HAS_READ_CURRENT_TIMER
|
|
|
++
|
|
|
++static inline int read_current_timer(unsigned long *timer_val)
|
|
|
++{
|
|
|
++ *timer_val = get_cycles();
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_TIMEX_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/tlbflush.h linux-4.6.2.riscv/arch/riscv/include/asm/tlbflush.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/tlbflush.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/tlbflush.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,84 @@
|
|
|
++#ifndef _ASM_RISCV_TLBFLUSH_H
|
|
|
++#define _ASM_RISCV_TLBFLUSH_H
|
|
|
++
|
|
|
++#ifdef CONFIG_MMU
|
|
|
++
|
|
|
++#include <linux/mm.h>
|
|
|
++#include <linux/bug.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++
|
|
|
++/* Flush entire local TLB */
|
|
|
++static inline void local_flush_tlb_all(void)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ ("sfence.vm");
|
|
|
++}
|
|
|
++
|
|
|
++/* Flush one page from local TLB */
|
|
|
++static inline void local_flush_tlb_page(unsigned long addr)
|
|
|
++{
|
|
|
++ __asm__ __volatile__ ("sfence.vm %0" : : "r" (addr));
|
|
|
++}
|
|
|
++
|
|
|
++#ifndef CONFIG_SMP
|
|
|
++
|
|
|
++#define flush_tlb_all() local_flush_tlb_all()
|
|
|
++#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
|
|
|
++#define flush_tlb_range(vma, start, end) local_flush_tlb_all()
|
|
|
++
|
|
|
++#else /* CONFIG_SMP */
|
|
|
++
|
|
|
++#include <asm/sbi.h>
|
|
|
++
|
|
|
++#define flush_tlb_all() sbi_remote_sfence_vm(0, 0)
|
|
|
++#define flush_tlb_page(vma, addr) flush_tlb_range(vma, (addr), (addr) + 1)
|
|
|
++#define flush_tlb_range(vma, start, end) \
|
|
|
++ sbi_remote_sfence_vm_range(0, 0, (start), (end) - (start))
|
|
|
++
|
|
|
++#endif /* CONFIG_SMP */
|
|
|
++
|
|
|
++/* Flush the TLB entries of the specified mm context */
|
|
|
++static inline void flush_tlb_mm(struct mm_struct *mm)
|
|
|
++{
|
|
|
++ flush_tlb_all();
|
|
|
++}
|
|
|
++
|
|
|
++/* Flush a range of kernel pages */
|
|
|
++static inline void flush_tlb_kernel_range(unsigned long start,
|
|
|
++ unsigned long end)
|
|
|
++{
|
|
|
++ flush_tlb_all();
|
|
|
++}
|
|
|
++
|
|
|
++#else /* !CONFIG_MMU */
|
|
|
++
|
|
|
++static inline void flush_tlb_all(void)
|
|
|
++{
|
|
|
++ BUG();
|
|
|
++}
|
|
|
++
|
|
|
++static inline void flush_tlb_mm(struct mm_struct *mm)
|
|
|
++{
|
|
|
++ BUG();
|
|
|
++}
|
|
|
++
|
|
|
++static inline void flush_tlb_page(struct vm_area_struct *vma,
|
|
|
++ unsigned long addr)
|
|
|
++{
|
|
|
++ BUG();
|
|
|
++}
|
|
|
++
|
|
|
++static inline void flush_tlb_range(struct vm_area_struct *vma,
|
|
|
++ unsigned long start, unsigned long end)
|
|
|
++{
|
|
|
++ BUG();
|
|
|
++}
|
|
|
++
|
|
|
++static inline void flush_tlb_kernel_range(unsigned long start,
|
|
|
++ unsigned long end)
|
|
|
++{
|
|
|
++ BUG();
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* CONFIG_MMU */
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_TLBFLUSH_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/tlb.h linux-4.6.2.riscv/arch/riscv/include/asm/tlb.h
+--- linux-4.6.2/arch/riscv/include/asm/tlb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/tlb.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,11 @@
++#ifndef _ASM_RISCV_TLB_H
++#define _ASM_RISCV_TLB_H
++
++#include <asm-generic/tlb.h>
++
++static inline void tlb_flush(struct mmu_gather *tlb)
++{
++ flush_tlb_mm(tlb->mm);
++}
++
++#endif /* _ASM_RISCV_TLB_H */
+diff -Nur linux-4.6.2/arch/riscv/include/asm/uaccess.h linux-4.6.2.riscv/arch/riscv/include/asm/uaccess.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/uaccess.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/uaccess.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,460 @@
|
|
|
++#ifndef _ASM_RISCV_UACCESS_H
|
|
|
++#define _ASM_RISCV_UACCESS_H
|
|
|
++
|
|
|
++/*
|
|
|
++ * User space memory access functions
|
|
|
++ */
|
|
|
++#include <linux/errno.h>
|
|
|
++#include <linux/compiler.h>
|
|
|
++#include <linux/thread_info.h>
|
|
|
++#include <asm/byteorder.h>
|
|
|
++#include <asm/asm.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_RV_PUM
|
|
|
++#define __enable_user_access() \
|
|
|
++ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_PUM))
|
|
|
++#define __disable_user_access() \
|
|
|
++ __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_PUM))
|
|
|
++#else
|
|
|
++#define __enable_user_access()
|
|
|
++#define __disable_user_access()
|
|
|
++#endif
|
|
|
++
|
|
|
++/*
|
|
|
++ * The fs value determines whether argument validity checking should be
|
|
|
++ * performed or not. If get_fs() == USER_DS, checking is performed, with
|
|
|
++ * get_fs() == KERNEL_DS, checking is bypassed.
|
|
|
++ *
|
|
|
++ * For historical reasons, these macros are grossly misnamed.
|
|
|
++ */
|
|
|
++
|
|
|
++#define KERNEL_DS (~0UL)
|
|
|
++#define USER_DS (TASK_SIZE)
|
|
|
++
|
|
|
++#define get_ds() (KERNEL_DS)
|
|
|
++#define get_fs() (current_thread_info()->addr_limit)
|
|
|
++
|
|
|
++static inline void set_fs(mm_segment_t fs)
|
|
|
++{
|
|
|
++ current_thread_info()->addr_limit = fs;
|
|
|
++}
|
|
|
++
|
|
|
++#define segment_eq(a, b) ((a) == (b))
|
|
|
++
|
|
|
++#define user_addr_max() (get_fs())
|
|
|
++
|
|
|
++
|
|
|
++#define VERIFY_READ 0
|
|
|
++#define VERIFY_WRITE 1
|
|
|
++
|
|
|
++/**
|
|
|
++ * access_ok: - Checks if a user space pointer is valid
|
|
|
++ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
|
|
|
++ * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
|
|
|
++ * to write to a block, it is always safe to read from it.
|
|
|
++ * @addr: User space pointer to start of block to check
|
|
|
++ * @size: Size of block to check
|
|
|
++ *
|
|
|
++ * Context: User context only. This function may sleep.
|
|
|
++ *
|
|
|
++ * Checks if a pointer to a block of memory in user space is valid.
|
|
|
++ *
|
|
|
++ * Returns true (nonzero) if the memory block may be valid, false (zero)
|
|
|
++ * if it is definitely invalid.
|
|
|
++ *
|
|
|
++ * Note that, depending on architecture, this function probably just
|
|
|
++ * checks that the pointer is in the user space range - after calling
|
|
|
++ * this function, memory access functions may still return -EFAULT.
|
|
|
++ */
|
|
|
++#define access_ok(type, addr, size) ({ \
|
|
|
++ __chk_user_ptr(addr); \
|
|
|
++ likely(__access_ok((unsigned long __force)(addr), (size))); \
|
|
|
++})
|
|
|
++
|
|
|
++/* Ensure that the range [addr, addr+size) is within the process's
|
|
|
++ * address space
|
|
|
++ */
|
|
|
++static inline int __access_ok(unsigned long addr, unsigned long size)
|
|
|
++{
|
|
|
++ const mm_segment_t fs = get_fs();
|
|
|
++ return (size <= fs) && (addr <= (fs - size));
|
|
|
++}
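__access_ok() is written so that addr + size cannot overflow: size is bounded by the limit first, and addr is then compared against limit - size. A stand-alone check of that property (the limit value is made up for illustration):

#include <assert.h>
#include <limits.h>

/* Same overflow-safe range check as __access_ok above, in isolation. */
static int range_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	return (size <= limit) && (addr <= (limit - size));
}

int main(void)
{
	unsigned long limit = 0x40000000UL;          /* e.g. a user address limit */

	assert(range_ok(0x1000, 0x2000, limit));     /* in range                  */
	assert(!range_ok(limit - 1, 0x10, limit));   /* runs past the limit       */
	assert(!range_ok(ULONG_MAX, 2, limit));      /* addr+size would overflow  */
	return 0;
}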
|
|
|
++
|
|
|
++/*
|
|
|
++ * The exception table consists of pairs of addresses: the first is the
|
|
|
++ * address of an instruction that is allowed to fault, and the second is
|
|
|
++ * the address at which the program should continue. No registers are
|
|
|
++ * modified, so it is entirely up to the continuation code to figure out
|
|
|
++ * what to do.
|
|
|
++ *
|
|
|
++ * All the routines below use bits of fixup code that are out of line
|
|
|
++ * with the main instruction path. This means when everything is well,
|
|
|
++ * we don't even have to jump over them. Further, they do not intrude
|
|
|
++ * on our cache or tlb entries.
|
|
|
++ */
|
|
|
++
|
|
|
++struct exception_table_entry {
|
|
|
++ unsigned long insn, fixup;
|
|
|
++};
|
|
|
++
|
|
|
++extern int fixup_exception(struct pt_regs *);
|
|
|
++
|
|
|
++#if defined(__LITTLE_ENDIAN)
|
|
|
++#define __MSW 1
|
|
|
++#define __LSW 0
|
|
|
++#elif defined(__BIG_ENDIAN)
|
|
|
++#define __MSW 0
|
|
|
++#define __LSW 1
|
|
|
++#else
|
|
|
++#error "Unknown endianness"
|
|
|
++#endif
|
|
|
++
|
|
|
++/*
|
|
|
++ * The "__xxx" versions of the user access functions do not verify the address
|
|
|
++ * space - it must have been done previously with a separate "access_ok()"
|
|
|
++ * call.
|
|
|
++ */
|
|
|
++
|
|
|
++#ifdef CONFIG_MMU
|
|
|
++#define __get_user_asm(insn, x, ptr, err) \
|
|
|
++do { \
|
|
|
++ uintptr_t __tmp; \
|
|
|
++ __enable_user_access(); \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "1:\n" \
|
|
|
++ " " insn " %1, %3\n" \
|
|
|
++ "2:\n" \
|
|
|
++ " .section .fixup,\"ax\"\n" \
|
|
|
++ " .balign 4\n" \
|
|
|
++ "3:\n" \
|
|
|
++ " li %0, %4\n" \
|
|
|
++ " li %1, 0\n" \
|
|
|
++ " jump 2b, %2\n" \
|
|
|
++ " .previous\n" \
|
|
|
++ " .section __ex_table,\"a\"\n" \
|
|
|
++ " .balign " SZPTR "\n" \
|
|
|
++ " " PTR " 1b, 3b\n" \
|
|
|
++ " .previous" \
|
|
|
++ : "+r" (err), "=&r" (x), "=r" (__tmp) \
|
|
|
++ : "m" (*(ptr)), "i" (-EFAULT)); \
|
|
|
++ __disable_user_access(); \
|
|
|
++} while (0)
|
|
|
++#else /* !CONFIG_MMU */
|
|
|
++#define __get_user_asm(insn, x, ptr, err) \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ insn " %0, %1" \
|
|
|
++ : "=r" (x) \
|
|
|
++ : "m" (*(ptr)))
|
|
|
++#endif /* CONFIG_MMU */
|
|
|
++
|
|
|
++
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#define __get_user_8(x, ptr, err) \
|
|
|
++ __get_user_asm("ld", x, ptr, err)
|
|
|
++#else /* !CONFIG_64BIT */
|
|
|
++#ifdef CONFIG_MMU
|
|
|
++#define __get_user_8(x, ptr, err) \
|
|
|
++do { \
|
|
|
++ u32 __user *__ptr = (u32 __user *)(ptr); \
|
|
|
++ u32 __lo, __hi; \
|
|
|
++ uintptr_t __tmp; \
|
|
|
++ __enable_user_access(); \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "1:\n" \
|
|
|
++ " lw %1, %4\n" \
|
|
|
++ "2:\n" \
|
|
|
++ " lw %2, %5\n" \
|
|
|
++ "3:\n" \
|
|
|
++ " .section .fixup,\"ax\"\n" \
|
|
|
++ " .balign 4\n" \
|
|
|
++ "4:\n" \
|
|
|
++ " li %0, %6\n" \
|
|
|
++ " li %1, 0\n" \
|
|
|
++ " li %2, 0\n" \
|
|
|
++ " jump 3b, %3\n" \
|
|
|
++ " .previous\n" \
|
|
|
++ " .section __ex_table,\"a\"\n" \
|
|
|
++ " .balign " SZPTR "\n" \
|
|
|
++ " " PTR " 1b, 4b\n" \
|
|
|
++ " " PTR " 2b, 4b\n" \
|
|
|
++ " .previous" \
|
|
|
++ : "+r" (err), "=&r" (__lo), "=r" (__hi), \
|
|
|
++ "=r" (__tmp) \
|
|
|
++ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
|
|
|
++ "i" (-EFAULT)); \
|
|
|
++ __disable_user_access(); \
|
|
|
++ (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
|
|
|
++ (((u64)__hi << 32) | __lo))); \
|
|
|
++} while (0)
|
|
|
++#else /* !CONFIG_MMU */
|
|
|
++#define __get_user_8(x, ptr, err) \
|
|
|
++ (x) = (__typeof__(x))(*((u64 __user *)(ptr)))
|
|
|
++#endif /* CONFIG_MMU */
|
|
|
++#endif /* CONFIG_64BIT */
|
|
|
++
|
|
|
++
|
|
|
++/**
|
|
|
++ * __get_user: - Get a simple variable from user space, with less checking.
|
|
|
++ * @x: Variable to store result.
|
|
|
++ * @ptr: Source address, in user space.
|
|
|
++ *
|
|
|
++ * Context: User context only. This function may sleep.
|
|
|
++ *
|
|
|
++ * This macro copies a single simple variable from user space to kernel
|
|
|
++ * space. It supports simple types like char and int, but not larger
|
|
|
++ * data types like structures or arrays.
|
|
|
++ *
|
|
|
++ * @ptr must have pointer-to-simple-variable type, and the result of
|
|
|
++ * dereferencing @ptr must be assignable to @x without a cast.
|
|
|
++ *
|
|
|
++ * Caller must check the pointer with access_ok() before calling this
|
|
|
++ * function.
|
|
|
++ *
|
|
|
++ * Returns zero on success, or -EFAULT on error.
|
|
|
++ * On error, the variable @x is set to zero.
|
|
|
++ */
|
|
|
++#define __get_user(x, ptr) \
|
|
|
++({ \
|
|
|
++ register int __gu_err = 0; \
|
|
|
++ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
|
|
|
++ __chk_user_ptr(__gu_ptr); \
|
|
|
++ switch (sizeof(*__gu_ptr)) { \
|
|
|
++ case 1: \
|
|
|
++ __get_user_asm("lb", (x), __gu_ptr, __gu_err); \
|
|
|
++ break; \
|
|
|
++ case 2: \
|
|
|
++ __get_user_asm("lh", (x), __gu_ptr, __gu_err); \
|
|
|
++ break; \
|
|
|
++ case 4: \
|
|
|
++ __get_user_asm("lw", (x), __gu_ptr, __gu_err); \
|
|
|
++ break; \
|
|
|
++ case 8: \
|
|
|
++ __get_user_8((x), __gu_ptr, __gu_err); \
|
|
|
++ break; \
|
|
|
++ default: \
|
|
|
++ BUILD_BUG(); \
|
|
|
++ } \
|
|
|
++ __gu_err; \
|
|
|
++})
|
|
|
++
|
|
|
++/**
|
|
|
++ * get_user: - Get a simple variable from user space.
|
|
|
++ * @x: Variable to store result.
|
|
|
++ * @ptr: Source address, in user space.
|
|
|
++ *
|
|
|
++ * Context: User context only. This function may sleep.
|
|
|
++ *
|
|
|
++ * This macro copies a single simple variable from user space to kernel
|
|
|
++ * space. It supports simple types like char and int, but not larger
|
|
|
++ * data types like structures or arrays.
|
|
|
++ *
|
|
|
++ * @ptr must have pointer-to-simple-variable type, and the result of
|
|
|
++ * dereferencing @ptr must be assignable to @x without a cast.
|
|
|
++ *
|
|
|
++ * Returns zero on success, or -EFAULT on error.
|
|
|
++ * On error, the variable @x is set to zero.
|
|
|
++ */
|
|
|
++#define get_user(x, ptr) \
|
|
|
++({ \
|
|
|
++ const __typeof__(*(ptr)) __user *__p = (ptr); \
|
|
|
++ might_fault(); \
|
|
|
++ access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
|
|
|
++ __get_user((x), __p) : \
|
|
|
++ ((x) = 0, -EFAULT); \
|
|
|
++})
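A typical call site for the macro above, following the documented contract (zero on success, -EFAULT on a bad user pointer); example_read_flag is a hypothetical function, not part of the patch:

/* Hypothetical syscall helper using get_user() as documented above. */
static long example_read_flag(int __user *uptr)
{
	int flag;

	if (get_user(flag, uptr))   /* copies one int from user space */
		return -EFAULT;

	return flag != 0;
}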
|
|
|
++
|
|
|
++
|
|
|
++#ifdef CONFIG_MMU
|
|
|
++#define __put_user_asm(insn, x, ptr, err) \
|
|
|
++do { \
|
|
|
++ uintptr_t __tmp; \
|
|
|
++ __typeof__(*(ptr)) __x = x; \
|
|
|
++ __enable_user_access(); \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "1:\n" \
|
|
|
++ " " insn " %z3, %2\n" \
|
|
|
++ "2:\n" \
|
|
|
++ " .section .fixup,\"ax\"\n" \
|
|
|
++ " .balign 4\n" \
|
|
|
++ "3:\n" \
|
|
|
++ " li %0, %4\n" \
|
|
|
++ " jump 2b, %1\n" \
|
|
|
++ " .previous\n" \
|
|
|
++ " .section __ex_table,\"a\"\n" \
|
|
|
++ " .balign " SZPTR "\n" \
|
|
|
++ " " PTR " 1b, 3b\n" \
|
|
|
++ " .previous" \
|
|
|
++ : "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
|
|
|
++ : "rJ" (__x), "i" (-EFAULT)); \
|
|
|
++ __disable_user_access(); \
|
|
|
++} while (0)
|
|
|
++#else /* !CONFIG_MMU */
|
|
|
++#define __put_user_asm(insn, x, ptr, err) \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ insn " %z1, %0" \
|
|
|
++ : "=m" (*(ptr)) \
|
|
|
++ : "rJ" ((__typeof__(*(ptr))) x))
|
|
|
++#endif /* CONFIG_MMU */
|
|
|
++
|
|
|
++
|
|
|
++#ifdef CONFIG_64BIT
|
|
|
++#define __put_user_8(x, ptr, err) \
|
|
|
++ __put_user_asm("sd", x, ptr, err)
|
|
|
++#else /* !CONFIG_64BIT */
|
|
|
++#ifdef CONFIG_MMU
|
|
|
++#define __put_user_8(x, ptr, err) \
|
|
|
++do { \
|
|
|
++ u32 __user *__ptr = (u32 __user *)(ptr); \
|
|
|
++ u64 __x = (__typeof__((x)-(x)))(x); \
|
|
|
++ uintptr_t __tmp; \
|
|
|
++ __enable_user_access(); \
|
|
|
++ __asm__ __volatile__ ( \
|
|
|
++ "1:\n" \
|
|
|
++ " sw %z4, %2\n" \
|
|
|
++ "2:\n" \
|
|
|
++ " sw %z5, %3\n" \
|
|
|
++ "3:\n" \
|
|
|
++ " .section .fixup,\"ax\"\n" \
|
|
|
++ " .balign 4\n" \
|
|
|
++ "4:\n" \
|
|
|
++ " li %0, %6\n" \
|
|
|
++ " jump 2b, %1\n" \
|
|
|
++ " .previous\n" \
|
|
|
++ " .section __ex_table,\"a\"\n" \
|
|
|
++ " .balign " SZPTR "\n" \
|
|
|
++ " " PTR " 1b, 4b\n" \
|
|
|
++ " " PTR " 2b, 4b\n" \
|
|
|
++ " .previous" \
|
|
|
++ : "+r" (err), "=r" (__tmp), \
|
|
|
++ "=m" (__ptr[__LSW]), \
|
|
|
++ "=m" (__ptr[__MSW]) \
|
|
|
++ : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
|
|
|
++ __disable_user_access(); \
|
|
|
++} while (0)
|
|
|
++#else /* !CONFIG_MMU */
|
|
|
++#define __put_user_8(x, ptr, err) \
|
|
|
++ *((u64 __user *)(ptr)) = (u64)(x)
|
|
|
++#endif /* CONFIG_MMU */
|
|
|
++#endif /* CONFIG_64BIT */
|
|
|
++
|
|
|
++
|
|
|
++/**
|
|
|
++ * __put_user: - Write a simple value into user space, with less checking.
|
|
|
++ * @x: Value to copy to user space.
|
|
|
++ * @ptr: Destination address, in user space.
|
|
|
++ *
|
|
|
++ * Context: User context only. This function may sleep.
|
|
|
++ *
|
|
|
++ * This macro copies a single simple value from kernel space to user
|
|
|
++ * space. It supports simple types like char and int, but not larger
|
|
|
++ * data types like structures or arrays.
|
|
|
++ *
|
|
|
++ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
|
|
|
++ * to the result of dereferencing @ptr.
|
|
|
++ *
|
|
|
++ * Caller must check the pointer with access_ok() before calling this
|
|
|
++ * function.
|
|
|
++ *
|
|
|
++ * Returns zero on success, or -EFAULT on error.
|
|
|
++ */
|
|
|
++#define __put_user(x, ptr) \
|
|
|
++({ \
|
|
|
++ register int __pu_err = 0; \
|
|
|
++ __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
|
|
|
++ __chk_user_ptr(__gu_ptr); \
|
|
|
++ switch (sizeof(*__gu_ptr)) { \
|
|
|
++ case 1: \
|
|
|
++ __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
|
|
|
++ break; \
|
|
|
++ case 2: \
|
|
|
++ __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
|
|
|
++ break; \
|
|
|
++ case 4: \
|
|
|
++ __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
|
|
|
++ break; \
|
|
|
++ case 8: \
|
|
|
++ __put_user_8((x), __gu_ptr, __pu_err); \
|
|
|
++ break; \
|
|
|
++ default: \
|
|
|
++ BUILD_BUG(); \
|
|
|
++ } \
|
|
|
++ __pu_err; \
|
|
|
++})
|
|
|
++
|
|
|
++/**
|
|
|
++ * put_user: - Write a simple value into user space.
|
|
|
++ * @x: Value to copy to user space.
|
|
|
++ * @ptr: Destination address, in user space.
|
|
|
++ *
|
|
|
++ * Context: User context only. This function may sleep.
|
|
|
++ *
|
|
|
++ * This macro copies a single simple value from kernel space to user
|
|
|
++ * space. It supports simple types like char and int, but not larger
|
|
|
++ * data types like structures or arrays.
|
|
|
++ *
|
|
|
++ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
|
|
|
++ * to the result of dereferencing @ptr.
|
|
|
++ *
|
|
|
++ * Returns zero on success, or -EFAULT on error.
|
|
|
++ */
|
|
|
++#define put_user(x, ptr) \
|
|
|
++({ \
|
|
|
++ __typeof__(*(ptr)) __user *__p = (ptr); \
|
|
|
++ might_fault(); \
|
|
|
++ access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
|
|
|
++ __put_user((x), __p) : \
|
|
|
++ -EFAULT; \
|
|
|
++})
|
|
|
++
|
|
|
++
|
|
|
++extern unsigned long __must_check __copy_user(void __user *to,
|
|
|
++ const void __user *from, unsigned long n);
|
|
|
++
|
|
|
++static inline long __must_check __copy_from_user(void *to,
|
|
|
++ const void __user *from, unsigned long n)
|
|
|
++{
|
|
|
++ return __copy_user(to, from, n);
|
|
|
++}
|
|
|
++
|
|
|
++static inline long __must_check __copy_to_user(void __user *to,
|
|
|
++ const void *from, unsigned long n)
|
|
|
++{
|
|
|
++ return __copy_user(to, from, n);
|
|
|
++}
|
|
|
++
|
|
|
++#define __copy_from_user_inatomic(to, from, n) \
|
|
|
++ __copy_from_user((to), (from), (n))
|
|
|
++#define __copy_to_user_inatomic(to, from, n) \
|
|
|
++ __copy_to_user((to), (from), (n))
|
|
|
++
|
|
|
++static inline long copy_from_user(void *to,
|
|
|
++ const void __user * from, unsigned long n)
|
|
|
++{
|
|
|
++ might_fault();
|
|
|
++ return access_ok(VERIFY_READ, from, n) ?
|
|
|
++ __copy_from_user(to, from, n) : n;
|
|
|
++}
|
|
|
++
|
|
|
++static inline long copy_to_user(void __user *to,
|
|
|
++ const void *from, unsigned long n)
|
|
|
++{
|
|
|
++ might_fault();
|
|
|
++ return access_ok(VERIFY_WRITE, to, n) ?
|
|
|
++ __copy_to_user(to, from, n) : n;
|
|
|
++}
|
|
|
++
|
|
|
++extern long strncpy_from_user(char *dest, const char __user *src, long count);
|
|
|
++
|
|
|
++extern long __must_check strlen_user(const char __user *str);
|
|
|
++extern long __must_check strnlen_user(const char __user *str, long n);
|
|
|
++
|
|
|
++extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
|
|
|
++
|
|
|
++static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
|
|
|
++{
|
|
|
++ might_fault();
|
|
|
++ return access_ok(VERIFY_WRITE, to, n) ?
|
|
|
++ __clear_user(to, n) : n;
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_UACCESS_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/unistd.h linux-4.6.2.riscv/arch/riscv/include/asm/unistd.h
+--- linux-4.6.2/arch/riscv/include/asm/unistd.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/include/asm/unistd.h 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,3 @@
++#define __ARCH_HAVE_MMU
++#define __ARCH_WANT_SYS_CLONE
++#include <uapi/asm/unistd.h>
+diff -Nur linux-4.6.2/arch/riscv/include/asm/vdso.h linux-4.6.2.riscv/arch/riscv/include/asm/vdso.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/vdso.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/vdso.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,15 @@
|
|
|
++#ifndef _ASM_RISCV_VDSO_H
|
|
|
++#define _ASM_RISCV_VDSO_H
|
|
|
++
|
|
|
++#include <linux/types.h>
|
|
|
++
|
|
|
++struct vdso_data {
|
|
|
++};
|
|
|
++
|
|
|
++#define VDSO_SYMBOL(base, name) \
|
|
|
++({ \
|
|
|
++ extern const char __vdso_##name[]; \
|
|
|
++ (void __user *)((unsigned long)(base) + __vdso_##name); \
|
|
|
++})
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_VDSO_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/asm/word-at-a-time.h linux-4.6.2.riscv/arch/riscv/include/asm/word-at-a-time.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/asm/word-at-a-time.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/asm/word-at-a-time.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,42 @@
|
|
|
++#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
|
|
|
++#define _ASM_RISCV_WORD_AT_A_TIME_H
|
|
|
++
|
|
|
++/* Derived from arch/x86/include/asm/word-at-a-time.h */
|
|
|
++
|
|
|
++#include <linux/kernel.h>
|
|
|
++
|
|
|
++struct word_at_a_time {
|
|
|
++ const unsigned long one_bits, high_bits;
|
|
|
++};
|
|
|
++
|
|
|
++#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
|
|
|
++
|
|
|
++static inline unsigned long has_zero(unsigned long val,
|
|
|
++ unsigned long *bits, const struct word_at_a_time *c)
|
|
|
++{
|
|
|
++ unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
|
|
|
++ *bits = mask;
|
|
|
++ return mask;
|
|
|
++}
|
|
|
++
|
|
|
++static inline unsigned long prep_zero_mask(unsigned long val,
|
|
|
++ unsigned long bits, const struct word_at_a_time *c)
|
|
|
++{
|
|
|
++ return bits;
|
|
|
++}
|
|
|
++
|
|
|
++static inline unsigned long create_zero_mask(unsigned long bits)
|
|
|
++{
|
|
|
++ bits = (bits - 1) & ~bits;
|
|
|
++ return bits >> 7;
|
|
|
++}
|
|
|
++
|
|
|
++static inline unsigned long find_zero(unsigned long mask)
|
|
|
++{
|
|
|
++ return fls64(mask) >> 3;
|
|
|
++}
|
|
|
++
|
|
|
++/* The mask we created is directly usable as a bytemask */
|
|
|
++#define zero_bytemask(mask) (mask)
|
|
|
++
|
|
|
++#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
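These helpers let string routines scan a word at a time: has_zero() marks any zero byte, and create_zero_mask()/find_zero() turn the mark into a byte index. A little-endian user-space sketch of the same steps (ONES/HIGHS mirror WORD_AT_A_TIME_CONSTANTS; not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ONES  0x0101010101010101ULL   /* REPEAT_BYTE(0x01) */
#define HIGHS 0x8080808080808080ULL   /* REPEAT_BYTE(0x80) */

/* Index of the first zero byte in a little-endian word, using the same
 * has_zero()/create_zero_mask()/find_zero() steps as the header above. */
static unsigned int first_zero_byte(uint64_t val)
{
	uint64_t bits = (val - ONES) & ~val & HIGHS;   /* has_zero()          */
	uint64_t mask = ((bits - 1) & ~bits) >> 7;     /* create_zero_mask()  */
	unsigned int idx = 0;

	while (mask & 0xff) {                          /* find_zero(): count  */
		idx++;                                 /* the 0xff bytes      */
		mask >>= 8;
	}
	return idx;
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abc\0defg", 8);      /* zero byte at index 3 */
	assert(first_zero_byte(w) == 3);
	return 0;
}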
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/auxvec.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/auxvec.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/auxvec.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/auxvec.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,7 @@
|
|
|
++#ifndef _UAPI_ASM_RISCV_AUXVEC_H
|
|
|
++#define _UAPI_ASM_RISCV_AUXVEC_H
|
|
|
++
|
|
|
++/* vDSO location */
|
|
|
++#define AT_SYSINFO_EHDR 33
|
|
|
++
|
|
|
++#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/bitsperlong.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/bitsperlong.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/bitsperlong.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/bitsperlong.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,8 @@
|
|
|
++#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
|
|
|
++#define _UAPI_ASM_RISCV_BITSPERLONG_H
|
|
|
++
|
|
|
++#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
|
|
|
++
|
|
|
++#include <asm-generic/bitsperlong.h>
|
|
|
++
|
|
|
++#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/byteorder.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/byteorder.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/byteorder.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/byteorder.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,6 @@
|
|
|
++#ifndef _UAPI_ASM_RISCV_BYTEORDER_H
|
|
|
++#define _UAPI_ASM_RISCV_BYTEORDER_H
|
|
|
++
|
|
|
++#include <linux/byteorder/little_endian.h>
|
|
|
++
|
|
|
++#endif /* _UAPI_ASM_RISCV_BYTEORDER_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/elf.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/elf.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/elf.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/elf.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,64 @@
|
|
|
++#ifndef _UAPI_ASM_ELF_H
|
|
|
++#define _UAPI_ASM_ELF_H
|
|
|
++
|
|
|
++#include <asm/ptrace.h>
|
|
|
++
|
|
|
++/* ELF register definitions */
|
|
|
++typedef unsigned long elf_greg_t;
|
|
|
++typedef struct user_regs_struct elf_gregset_t;
|
|
|
++#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t))
|
|
|
++
|
|
|
++typedef struct user_fpregs_struct elf_fpregset_t;
|
|
|
++
|
|
|
++
|
|
|
++/*
|
|
|
++ * RISC-V relocation types
|
|
|
++ */
|
|
|
++
|
|
|
++/* Relocation types used by the dynamic linker */
|
|
|
++#define R_RISCV_NONE 0
|
|
|
++#define R_RISCV_32 1
|
|
|
++#define R_RISCV_64 2
|
|
|
++#define R_RISCV_RELATIVE 3
|
|
|
++#define R_RISCV_COPY 4
|
|
|
++#define R_RISCV_JUMP_SLOT 5
|
|
|
++#define R_RISCV_TLS_DTPMOD32 6
|
|
|
++#define R_RISCV_TLS_DTPMOD64 7
|
|
|
++#define R_RISCV_TLS_DTPREL32 8
|
|
|
++#define R_RISCV_TLS_DTPREL64 9
|
|
|
++#define R_RISCV_TLS_TPREL32 10
|
|
|
++#define R_RISCV_TLS_TPREL64 11
|
|
|
++
|
|
|
++/* Relocation types not used by the dynamic linker */
|
|
|
++#define R_RISCV_BRANCH 16
|
|
|
++#define R_RISCV_JAL 17
|
|
|
++#define R_RISCV_CALL 18
|
|
|
++#define R_RISCV_CALL_PLT 19
|
|
|
++#define R_RISCV_GOT_HI20 20
|
|
|
++#define R_RISCV_TLS_GOT_HI20 21
|
|
|
++#define R_RISCV_TLS_GD_HI20 22
|
|
|
++#define R_RISCV_PCREL_HI20 23
|
|
|
++#define R_RISCV_PCREL_LO12_I 24
|
|
|
++#define R_RISCV_PCREL_LO12_S 25
|
|
|
++#define R_RISCV_HI20 26
|
|
|
++#define R_RISCV_LO12_I 27
|
|
|
++#define R_RISCV_LO12_S 28
|
|
|
++#define R_RISCV_TPREL_HI20 29
|
|
|
++#define R_RISCV_TPREL_LO12_I 30
|
|
|
++#define R_RISCV_TPREL_LO12_S 31
|
|
|
++#define R_RISCV_TPREL_ADD 32
|
|
|
++#define R_RISCV_ADD8 33
|
|
|
++#define R_RISCV_ADD16 34
|
|
|
++#define R_RISCV_ADD32 35
|
|
|
++#define R_RISCV_ADD64 36
|
|
|
++#define R_RISCV_SUB8 37
|
|
|
++#define R_RISCV_SUB16 38
|
|
|
++#define R_RISCV_SUB32 39
|
|
|
++#define R_RISCV_SUB64 40
|
|
|
++#define R_RISCV_GNU_VTINHERIT 41
|
|
|
++#define R_RISCV_GNU_VTENTRY 42
|
|
|
++#define R_RISCV_ALIGN 43
|
|
|
++#define R_RISCV_RVC_BRANCH 44
|
|
|
++#define R_RISCV_RVC_JUMP 45
|
|
|
++
|
|
|
++#endif /* _UAPI_ASM_ELF_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/Kbuild linux-4.6.2.riscv/arch/riscv/include/uapi/asm/Kbuild
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/Kbuild 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/Kbuild 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,10 @@
|
|
|
++# UAPI Header export list
|
|
|
++include include/uapi/asm-generic/Kbuild.asm
|
|
|
++
|
|
|
++header-y += auxvec.h
|
|
|
++header-y += bitsperlong.h
|
|
|
++header-y += byteorder.h
|
|
|
++header-y += ptrace.h
|
|
|
++header-y += sigcontext.h
|
|
|
++header-y += siginfo.h
|
|
|
++header-y += unistd.h
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/ptrace.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/ptrace.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/ptrace.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/ptrace.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,55 @@
|
|
|
++#ifndef _UAPI_ASM_RISCV_PTRACE_H
|
|
|
++#define _UAPI_ASM_RISCV_PTRACE_H
|
|
|
++
|
|
|
++#ifndef __ASSEMBLY__
|
|
|
++
|
|
|
++#include <linux/types.h>
|
|
|
++
|
|
|
++/* User-mode register state for core dumps, ptrace, sigcontext
|
|
|
++ *
|
|
|
++ * This decouples struct pt_regs from the userspace ABI.
|
|
|
++ * struct user_regs_struct must form a prefix of struct pt_regs.
|
|
|
++ */
|
|
|
++struct user_regs_struct {
|
|
|
++ unsigned long pc;
|
|
|
++ unsigned long ra;
|
|
|
++ unsigned long sp;
|
|
|
++ unsigned long gp;
|
|
|
++ unsigned long tp;
|
|
|
++ unsigned long t0;
|
|
|
++ unsigned long t1;
|
|
|
++ unsigned long t2;
|
|
|
++ unsigned long s0;
|
|
|
++ unsigned long s1;
|
|
|
++ unsigned long a0;
|
|
|
++ unsigned long a1;
|
|
|
++ unsigned long a2;
|
|
|
++ unsigned long a3;
|
|
|
++ unsigned long a4;
|
|
|
++ unsigned long a5;
|
|
|
++ unsigned long a6;
|
|
|
++ unsigned long a7;
|
|
|
++ unsigned long s2;
|
|
|
++ unsigned long s3;
|
|
|
++ unsigned long s4;
|
|
|
++ unsigned long s5;
|
|
|
++ unsigned long s6;
|
|
|
++ unsigned long s7;
|
|
|
++ unsigned long s8;
|
|
|
++ unsigned long s9;
|
|
|
++ unsigned long s10;
|
|
|
++ unsigned long s11;
|
|
|
++ unsigned long t3;
|
|
|
++ unsigned long t4;
|
|
|
++ unsigned long t5;
|
|
|
++ unsigned long t6;
|
|
|
++};
|
|
|
++
|
|
|
++struct user_fpregs_struct {
|
|
|
++ __u64 f[32];
|
|
|
++ __u32 fcsr;
|
|
|
++};
|
|
|
++
|
|
|
++#endif /* __ASSEMBLY__ */
|
|
|
++
|
|
|
++#endif /* _UAPI_ASM_RISCV_PTRACE_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/sigcontext.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/sigcontext.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/sigcontext.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/sigcontext.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,16 @@
|
|
|
++#ifndef _UAPI_ASM_RISCV_SIGCONTEXT_H
|
|
|
++#define _UAPI_ASM_RISCV_SIGCONTEXT_H
|
|
|
++
|
|
|
++#include <asm/ptrace.h>
|
|
|
++
|
|
|
++/* Signal context structure
|
|
|
++ *
|
|
|
++ * This contains the context saved before a signal handler is invoked;
|
|
|
++ * it is restored by sys_sigreturn / sys_rt_sigreturn.
|
|
|
++ */
|
|
|
++struct sigcontext {
|
|
|
++ struct user_regs_struct sc_regs;
|
|
|
++ struct user_fpregs_struct sc_fpregs;
|
|
|
++};
|
|
|
++
|
|
|
++#endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/siginfo.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/siginfo.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/siginfo.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/siginfo.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,24 @@
|
|
|
++/*
|
|
|
++ * Copyright (C) 2012 ARM Ltd.
|
|
|
++ * Copyright (C) 2016 SiFive, Inc.
|
|
|
++ *
|
|
|
++ * This program is free software; you can redistribute it and/or modify
|
|
|
++ * it under the terms of the GNU General Public License version 2 as
|
|
|
++ * published by the Free Software Foundation.
|
|
|
++ *
|
|
|
++ * This program is distributed in the hope that it will be useful,
|
|
|
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
++ * GNU General Public License for more details.
|
|
|
++ *
|
|
|
++ * You should have received a copy of the GNU General Public License
|
|
|
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
++ */
|
|
|
++#ifndef __ASM_SIGINFO_H
|
|
|
++#define __ASM_SIGINFO_H
|
|
|
++
|
|
|
++#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
|
|
|
++
|
|
|
++#include <asm-generic/siginfo.h>
|
|
|
++
|
|
|
++#endif
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/include/uapi/asm/unistd.h linux-4.6.2.riscv/arch/riscv/include/uapi/asm/unistd.h
|
|
|
+--- linux-4.6.2/arch/riscv/include/uapi/asm/unistd.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/include/uapi/asm/unistd.h 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,9 @@
|
|
|
++#include <asm-generic/unistd.h>
|
|
|
++
|
|
|
++#define __NR_sysriscv __NR_arch_specific_syscall
|
|
|
++#ifndef __riscv_atomic
|
|
|
++__SYSCALL(__NR_sysriscv, sys_sysriscv)
|
|
|
++#endif
|
|
|
++
|
|
|
++#define RISCV_ATOMIC_CMPXCHG 1
|
|
|
++#define RISCV_ATOMIC_CMPXCHG64 2
|
|
|
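The arch-specific syscall wired up here backs atomic operations for userspace binaries built without the A extension (compiled with -mno-atomic); the handler itself, sys_sysriscv, lives in sys_riscv.c elsewhere in this patch and is not shown in this excerpt. A hedged sketch of how a runtime might invoke the cmpxchg command through syscall(2) — the (addr, old, new) argument order and the return value are assumptions to be checked against sys_riscv.c:

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>	/* __NR_sysriscv, RISCV_ATOMIC_CMPXCHG (this patch) */

/* ASSUMPTION: sys_sysriscv(cmd, addr, oldval, newval) performs a
 * compare-and-exchange on *addr and returns the previously stored value.
 * Verify against sys_riscv.c before relying on this sketch. */
long emulated_cmpxchg(int *addr, int oldval, int newval)
{
	return syscall(__NR_sysriscv, RISCV_ATOMIC_CMPXCHG, addr, oldval, newval);
}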
+diff -Nur linux-4.6.2/arch/riscv/initramfs.txt linux-4.6.2.riscv/arch/riscv/initramfs.txt
|
|
|
+--- linux-4.6.2/arch/riscv/initramfs.txt 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/initramfs.txt 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,9 @@
|
|
|
++dir /bin 755 0 0
|
|
|
++file /bin/busybox tmp/bin/busybox 755 0 0
|
|
|
++dir /etc 755 0 0
|
|
|
++file /etc/inittab tmp/etc/inittab 755 0 0
|
|
|
++
|
|
|
++dir /dev 755 0 0
|
|
|
++nod /dev/console 644 0 0 c 5 1
|
|
|
++nod /dev/null 644 0 0 c 1 3
|
|
|
++slink /init /bin/busybox 755 0 0
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/Kconfig linux-4.6.2.riscv/arch/riscv/Kconfig
|
|
|
+--- linux-4.6.2/arch/riscv/Kconfig 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/Kconfig 2017-03-04 02:48:34.162887952 +0100
|
|
|
+@@ -0,0 +1,284 @@
|
|
|
++#
|
|
|
++# For a description of the syntax of this configuration file,
|
|
|
++# see Documentation/kbuild/kconfig-language.txt.
|
|
|
++#
|
|
|
++
|
|
|
++config RISCV
|
|
|
++ def_bool y
|
|
|
++ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
|
|
|
++ select ARCH_WANT_FRAME_POINTERS
|
|
|
++ select CLONE_BACKWARDS
|
|
|
++ select COMMON_CLK
|
|
|
++ select GENERIC_CLOCKEVENTS
|
|
|
++ select GENERIC_CPU_DEVICES
|
|
|
++ select GENERIC_IRQ_SHOW
|
|
|
++ select GENERIC_PCI_IOMAP
|
|
|
++ select GENERIC_STRNCPY_FROM_USER
|
|
|
++ select GENERIC_STRNLEN_USER
|
|
|
++ select GENERIC_SMP_IDLE_THREAD
|
|
|
++ select GENERIC_ATOMIC64 if !64BIT || !RV_ATOMIC
|
|
|
++ select ARCH_WANT_OPTIONAL_GPIOLIB
|
|
|
++ select HAVE_MEMBLOCK
|
|
|
++ select HAVE_MEMBLOCK_NODE_MAP
|
|
|
++ select HAVE_DMA_API_DEBUG
|
|
|
++ select HAVE_DMA_CONTIGUOUS
|
|
|
++ select HAVE_GENERIC_DMA_COHERENT
|
|
|
++ select IRQ_DOMAIN
|
|
|
++ select NO_BOOTMEM
|
|
|
++ select RV_ATOMIC if SMP
|
|
|
++ select RV_SYSRISCV_ATOMIC if !RV_ATOMIC
|
|
|
++ select SPARSE_IRQ
|
|
|
++ select SYSCTL_EXCEPTION_TRACE
|
|
|
++ select HAVE_ARCH_TRACEHOOK
|
|
|
++
|
|
|
++config MMU
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++# even on 32-bit, physical (and DMA) addresses are wider than 32 bits
|
|
|
++config ARCH_PHYS_ADDR_T_64BIT
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++config ARCH_DMA_ADDR_T_64BIT
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++config STACKTRACE_SUPPORT
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++config RWSEM_GENERIC_SPINLOCK
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++config GENERIC_BUG
|
|
|
++ def_bool y
|
|
|
++ depends on BUG
|
|
|
++ select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
|
|
|
++
|
|
|
++config GENERIC_BUG_RELATIVE_POINTERS
|
|
|
++ bool
|
|
|
++
|
|
|
++config GENERIC_CALIBRATE_DELAY
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++config GENERIC_CSUM
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++config GENERIC_HWEIGHT
|
|
|
++ def_bool y
|
|
|
++
|
|
|
++config PGTABLE_LEVELS
|
|
|
++ int
|
|
|
++ default 3 if 64BIT
|
|
|
++ default 2
|
|
|
++
|
|
|
++menu "Platform type"
|
|
|
++
|
|
|
++config SMP
|
|
|
++ bool "Symmetric Multi-Processing"
|
|
|
++ help
|
|
|
++ This enables support for systems with more than one CPU. If
|
|
|
++ you say N here, the kernel will run on single and
|
|
|
++ multiprocessor machines, but will use only one CPU of a
|
|
|
++ multiprocessor machine. If you say Y here, the kernel will run
|
|
|
++ on many, but not all, single processor machines. On a single
|
|
|
++ processor machine, the kernel will run faster if you say N
|
|
|
++ here.
|
|
|
++
|
|
|
++ If you don't know what to do here, say N.
|
|
|
++
|
|
|
++config NR_CPUS
|
|
|
++ int "Maximum number of CPUs (2-32)"
|
|
|
++ range 2 32
|
|
|
++ depends on SMP
|
|
|
++ default "8"
|
|
|
++
|
|
|
++choice
|
|
|
++ prompt "CPU selection"
|
|
|
++ default CPU_RV_ROCKET
|
|
|
++
|
|
|
++config CPU_RV_ROCKET
|
|
|
++ bool "Rocket"
|
|
|
++ select CPU_SUPPORTS_64BIT_KERNEL
|
|
|
++
|
|
|
++config CPU_RV_GENERIC
|
|
|
++ bool "Generic RISC-V"
|
|
|
++ select CPU_SUPPORTS_32BIT_KERNEL
|
|
|
++ select CPU_SUPPORTS_64BIT_KERNEL
|
|
|
++
|
|
|
++endchoice
|
|
|
++
|
|
|
++config CPU_SUPPORTS_32BIT_KERNEL
|
|
|
++ bool
|
|
|
++config CPU_SUPPORTS_64BIT_KERNEL
|
|
|
++ bool
|
|
|
++
|
|
|
++config SBI_CONSOLE
|
|
|
++ tristate "SBI console support"
|
|
|
++ select TTY
|
|
|
++ default y
|
|
|
++
|
|
|
++config RVC
|
|
|
++ bool "Use compressed instructions (RV32C or RV64C)"
|
|
|
++ default n
|
|
|
++
|
|
|
++config RV_ATOMIC
|
|
|
++ bool "Use atomic memory instructions (RV32A or RV64A)"
|
|
|
++ default y
|
|
|
++
|
|
|
++config RV_SYSRISCV_ATOMIC
|
|
|
++ bool "Include support for atomic operation syscalls"
|
|
|
++ default n
|
|
|
++ help
|
|
|
++	  If atomic memory instructions are available (i.e.,
|
|
|
++	  CONFIG_RV_ATOMIC is set), this adds support for the syscall that
|
|
|
++ provides atomic accesses. This is only useful to run
|
|
|
++ binaries that require atomic access but were compiled with
|
|
|
++ -mno-atomic.
|
|
|
++
|
|
|
++ If CONFIG_RV_ATOMIC is unset, this option is mandatory.
|
|
|
++
|
|
|
++config RV_PUM
|
|
|
++ def_bool y
|
|
|
++ prompt "Protect User Memory" if EXPERT
|
|
|
++ ---help---
|
|
|
++ Protect User Memory (PUM) prevents the kernel from inadvertently
|
|
|
++ accessing user-space memory. There is a small performance cost
|
|
|
++ and kernel size increase if this is enabled.
|
|
|
++
|
|
|
++ If unsure, say Y.
|
|
|
++
|
|
|
++endmenu
|
|
|
++
|
|
|
++menu "Kernel type"
|
|
|
++
|
|
|
++choice
|
|
|
++ prompt "Kernel code model"
|
|
|
++ default 64BIT
|
|
|
++
|
|
|
++config 32BIT
|
|
|
++ bool "32-bit kernel"
|
|
|
++ depends on CPU_SUPPORTS_32BIT_KERNEL
|
|
|
++ help
|
|
|
++ Select this option to build a 32-bit kernel.
|
|
|
++
|
|
|
++config 64BIT
|
|
|
++ bool "64-bit kernel"
|
|
|
++ depends on CPU_SUPPORTS_64BIT_KERNEL
|
|
|
++ help
|
|
|
++ Select this option to build a 64-bit kernel.
|
|
|
++
|
|
|
++endchoice
|
|
|
++
|
|
|
++source "mm/Kconfig"
|
|
|
++
|
|
|
++source "kernel/Kconfig.preempt"
|
|
|
++
|
|
|
++source "kernel/Kconfig.hz"
|
|
|
++
|
|
|
++endmenu
|
|
|
++
|
|
|
++menu "Bus support"
|
|
|
++
|
|
|
++config PCI
|
|
|
++ bool "PCI support"
|
|
|
++ select PCI_MSI
|
|
|
++ help
|
|
|
++	  This feature enables support for the PCI bus system. If you say Y
|
|
|
++ here, the kernel will include drivers and infrastructure code
|
|
|
++ to support PCI bus devices.
|
|
|
++
|
|
|
++config PCI_DOMAINS
|
|
|
++ def_bool PCI
|
|
|
++
|
|
|
++config PCI_DOMAINS_GENERIC
|
|
|
++ def_bool PCI
|
|
|
++
|
|
|
++config PCI_SYSCALL
|
|
|
++ def_bool PCI
|
|
|
++
|
|
|
++source "drivers/pci/Kconfig"
|
|
|
++
|
|
|
++endmenu
|
|
|
++
|
|
|
++source "init/Kconfig"
|
|
|
++
|
|
|
++source "kernel/Kconfig.freezer"
|
|
|
++
|
|
|
++menu "Executable file formats"
|
|
|
++
|
|
|
++source "fs/Kconfig.binfmt"
|
|
|
++
|
|
|
++endmenu
|
|
|
++
|
|
|
++menu "Power management options"
|
|
|
++
|
|
|
++source kernel/power/Kconfig
|
|
|
++
|
|
|
++endmenu
|
|
|
++
|
|
|
++source "net/Kconfig"
|
|
|
++
|
|
|
++source "drivers/Kconfig"
|
|
|
++
|
|
|
++source "fs/Kconfig"
|
|
|
++
|
|
|
++menu "Kernel hacking"
|
|
|
++
|
|
|
++config CMDLINE_BOOL
|
|
|
++ bool "Built-in kernel command line"
|
|
|
++ default n
|
|
|
++ help
|
|
|
++	  For most platforms, it is the firmware or second-stage bootloader
|
|
|
++	  that specifies the kernel command line options by default.
|
|
|
++ However, it might be necessary or advantageous to either override
|
|
|
++ the default kernel command line or add a few extra options to it.
|
|
|
++ For such cases, this option allows hardcoding command line options
|
|
|
++ directly into the kernel.
|
|
|
++
|
|
|
++ For that, choose 'Y' here and fill in the extra boot parameters
|
|
|
++ in CONFIG_CMDLINE.
|
|
|
++
|
|
|
++ The built-in options will be concatenated to the default command
|
|
|
++ line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
|
|
|
++ command line will be ignored and replaced by the built-in string.
|
|
|
++
|
|
|
++config CMDLINE
|
|
|
++ string "Built-in kernel command string"
|
|
|
++ depends on CMDLINE_BOOL
|
|
|
++ default ""
|
|
|
++ help
|
|
|
++ Supply command-line options at build time by entering them here.
|
|
|
++
|
|
|
++config CMDLINE_OVERRIDE
|
|
|
++ bool "Built-in command line overrides bootloader arguments"
|
|
|
++ default n
|
|
|
++ depends on CMDLINE_BOOL
|
|
|
++ help
|
|
|
++ Set this option to 'Y' to have the kernel ignore the bootloader
|
|
|
++ or firmware command line. Instead, the built-in command line
|
|
|
++ will be used exclusively.
|
|
|
++
|
|
|
++config EARLY_PRINTK
|
|
|
++ bool "Early printk"
|
|
|
++ default n
|
|
|
++ help
|
|
|
++ This option enables special console drivers which allow the kernel
|
|
|
++ to print messages very early in the bootup process.
|
|
|
++
|
|
|
++ This is useful for kernel debugging when your machine crashes very
|
|
|
++ early before the console code is initialized. For normal operation
|
|
|
++ it is not recommended because it looks ugly and doesn't cooperate
|
|
|
++ with klogd/syslogd or the X server. You should normally N here,
|
|
|
++ unless you want to debug such a crash.
|
|
|
++
|
|
|
++
|
|
|
++source "lib/Kconfig.debug"
|
|
|
++
|
|
|
++config CMDLINE_BOOL
|
|
|
++ bool
|
|
|
++endmenu
|
|
|
++
|
|
|
++source "security/Kconfig"
|
|
|
++
|
|
|
++source "crypto/Kconfig"
|
|
|
++
|
|
|
++source "lib/Kconfig"
|
|
|
++
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/asm-offsets.c linux-4.6.2.riscv/arch/riscv/kernel/asm-offsets.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/asm-offsets.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/asm-offsets.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,99 @@
|
|
|
++#include <linux/kbuild.h>
|
|
|
++#include <linux/sched.h>
|
|
|
++#include <asm/thread_info.h>
|
|
|
++#include <asm/ptrace.h>
|
|
|
++
|
|
|
++void asm_offsets(void)
|
|
|
++{
|
|
|
++ OFFSET(TASK_THREAD_INFO, task_struct, stack);
|
|
|
++ OFFSET(THREAD_RA, task_struct, thread.ra);
|
|
|
++ OFFSET(THREAD_SP, task_struct, thread.sp);
|
|
|
++ OFFSET(THREAD_S0, task_struct, thread.s[0]);
|
|
|
++ OFFSET(THREAD_S1, task_struct, thread.s[1]);
|
|
|
++ OFFSET(THREAD_S2, task_struct, thread.s[2]);
|
|
|
++ OFFSET(THREAD_S3, task_struct, thread.s[3]);
|
|
|
++ OFFSET(THREAD_S4, task_struct, thread.s[4]);
|
|
|
++ OFFSET(THREAD_S5, task_struct, thread.s[5]);
|
|
|
++ OFFSET(THREAD_S6, task_struct, thread.s[6]);
|
|
|
++ OFFSET(THREAD_S7, task_struct, thread.s[7]);
|
|
|
++ OFFSET(THREAD_S8, task_struct, thread.s[8]);
|
|
|
++ OFFSET(THREAD_S9, task_struct, thread.s[9]);
|
|
|
++ OFFSET(THREAD_S10, task_struct, thread.s[10]);
|
|
|
++ OFFSET(THREAD_S11, task_struct, thread.s[11]);
|
|
|
++ OFFSET(THREAD_SP, task_struct, thread.sp);
|
|
|
++ OFFSET(TI_TASK, thread_info, task);
|
|
|
++ OFFSET(TI_FLAGS, thread_info, flags);
|
|
|
++ OFFSET(TI_CPU, thread_info, cpu);
|
|
|
++
|
|
|
++ OFFSET(THREAD_F0, task_struct, thread.fstate.f[0]);
|
|
|
++ OFFSET(THREAD_F1, task_struct, thread.fstate.f[1]);
|
|
|
++ OFFSET(THREAD_F2, task_struct, thread.fstate.f[2]);
|
|
|
++ OFFSET(THREAD_F3, task_struct, thread.fstate.f[3]);
|
|
|
++ OFFSET(THREAD_F4, task_struct, thread.fstate.f[4]);
|
|
|
++ OFFSET(THREAD_F5, task_struct, thread.fstate.f[5]);
|
|
|
++ OFFSET(THREAD_F6, task_struct, thread.fstate.f[6]);
|
|
|
++ OFFSET(THREAD_F7, task_struct, thread.fstate.f[7]);
|
|
|
++ OFFSET(THREAD_F8, task_struct, thread.fstate.f[8]);
|
|
|
++ OFFSET(THREAD_F9, task_struct, thread.fstate.f[9]);
|
|
|
++ OFFSET(THREAD_F10, task_struct, thread.fstate.f[10]);
|
|
|
++ OFFSET(THREAD_F11, task_struct, thread.fstate.f[11]);
|
|
|
++ OFFSET(THREAD_F12, task_struct, thread.fstate.f[12]);
|
|
|
++ OFFSET(THREAD_F13, task_struct, thread.fstate.f[13]);
|
|
|
++ OFFSET(THREAD_F14, task_struct, thread.fstate.f[14]);
|
|
|
++ OFFSET(THREAD_F15, task_struct, thread.fstate.f[15]);
|
|
|
++ OFFSET(THREAD_F16, task_struct, thread.fstate.f[16]);
|
|
|
++ OFFSET(THREAD_F17, task_struct, thread.fstate.f[17]);
|
|
|
++ OFFSET(THREAD_F18, task_struct, thread.fstate.f[18]);
|
|
|
++ OFFSET(THREAD_F19, task_struct, thread.fstate.f[19]);
|
|
|
++ OFFSET(THREAD_F20, task_struct, thread.fstate.f[20]);
|
|
|
++ OFFSET(THREAD_F21, task_struct, thread.fstate.f[21]);
|
|
|
++ OFFSET(THREAD_F22, task_struct, thread.fstate.f[22]);
|
|
|
++ OFFSET(THREAD_F23, task_struct, thread.fstate.f[23]);
|
|
|
++ OFFSET(THREAD_F24, task_struct, thread.fstate.f[24]);
|
|
|
++ OFFSET(THREAD_F25, task_struct, thread.fstate.f[25]);
|
|
|
++ OFFSET(THREAD_F26, task_struct, thread.fstate.f[26]);
|
|
|
++ OFFSET(THREAD_F27, task_struct, thread.fstate.f[27]);
|
|
|
++ OFFSET(THREAD_F28, task_struct, thread.fstate.f[28]);
|
|
|
++ OFFSET(THREAD_F29, task_struct, thread.fstate.f[29]);
|
|
|
++ OFFSET(THREAD_F30, task_struct, thread.fstate.f[30]);
|
|
|
++ OFFSET(THREAD_F31, task_struct, thread.fstate.f[31]);
|
|
|
++ OFFSET(THREAD_FCSR, task_struct, thread.fstate.fcsr);
|
|
|
++
|
|
|
++ DEFINE(PT_SIZE, sizeof(struct pt_regs));
|
|
|
++ OFFSET(PT_SEPC, pt_regs, sepc);
|
|
|
++ OFFSET(PT_RA, pt_regs, ra);
|
|
|
++ OFFSET(PT_FP, pt_regs, s0);
|
|
|
++ OFFSET(PT_S0, pt_regs, s0);
|
|
|
++ OFFSET(PT_S1, pt_regs, s1);
|
|
|
++ OFFSET(PT_S2, pt_regs, s2);
|
|
|
++ OFFSET(PT_S3, pt_regs, s3);
|
|
|
++ OFFSET(PT_S4, pt_regs, s4);
|
|
|
++ OFFSET(PT_S5, pt_regs, s5);
|
|
|
++ OFFSET(PT_S6, pt_regs, s6);
|
|
|
++ OFFSET(PT_S7, pt_regs, s7);
|
|
|
++ OFFSET(PT_S8, pt_regs, s8);
|
|
|
++ OFFSET(PT_S9, pt_regs, s9);
|
|
|
++ OFFSET(PT_S10, pt_regs, s10);
|
|
|
++ OFFSET(PT_S11, pt_regs, s11);
|
|
|
++ OFFSET(PT_SP, pt_regs, sp);
|
|
|
++ OFFSET(PT_TP, pt_regs, tp);
|
|
|
++ OFFSET(PT_A0, pt_regs, a0);
|
|
|
++ OFFSET(PT_A1, pt_regs, a1);
|
|
|
++ OFFSET(PT_A2, pt_regs, a2);
|
|
|
++ OFFSET(PT_A3, pt_regs, a3);
|
|
|
++ OFFSET(PT_A4, pt_regs, a4);
|
|
|
++ OFFSET(PT_A5, pt_regs, a5);
|
|
|
++ OFFSET(PT_A6, pt_regs, a6);
|
|
|
++ OFFSET(PT_A7, pt_regs, a7);
|
|
|
++ OFFSET(PT_T0, pt_regs, t0);
|
|
|
++ OFFSET(PT_T1, pt_regs, t1);
|
|
|
++ OFFSET(PT_T2, pt_regs, t2);
|
|
|
++ OFFSET(PT_T3, pt_regs, t3);
|
|
|
++ OFFSET(PT_T4, pt_regs, t4);
|
|
|
++ OFFSET(PT_T5, pt_regs, t5);
|
|
|
++ OFFSET(PT_T6, pt_regs, t6);
|
|
|
++ OFFSET(PT_GP, pt_regs, gp);
|
|
|
++ OFFSET(PT_SSTATUS, pt_regs, sstatus);
|
|
|
++ OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
|
|
|
++ OFFSET(PT_SCAUSE, pt_regs, scause);
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/cpu.c linux-4.6.2.riscv/arch/riscv/kernel/cpu.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/cpu.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/cpu.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,41 @@
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/seq_file.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_PROC_FS
|
|
|
++
|
|
|
++static void *c_start(struct seq_file *m, loff_t *pos)
|
|
|
++{
|
|
|
++ *pos = cpumask_next(*pos - 1, cpu_online_mask);
|
|
|
++ if ((*pos) < nr_cpu_ids)
|
|
|
++ return (void *)(uintptr_t)(1 + *pos);
|
|
|
++ return NULL;
|
|
|
++}
|
|
|
++
|
|
|
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
++{
|
|
|
++ (*pos)++;
|
|
|
++ return c_start(m, pos);
|
|
|
++}
|
|
|
++
|
|
|
++static void c_stop(struct seq_file *m, void *v)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++static int c_show(struct seq_file *m, void *v)
|
|
|
++{
|
|
|
++ unsigned long hart_id = (unsigned long)v - 1;
|
|
|
++
|
|
|
++ seq_printf(m, "hart\t: %lu\n", hart_id);
|
|
|
++ seq_printf(m, "isa\t: RV%zuG\n", sizeof(void *) * 8);
|
|
|
++ seq_printf(m, "\n");
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++const struct seq_operations cpuinfo_op = {
|
|
|
++ .start = c_start,
|
|
|
++ .next = c_next,
|
|
|
++ .stop = c_stop,
|
|
|
++ .show = c_show
|
|
|
++};
|
|
|
++
|
|
|
++#endif /* CONFIG_PROC_FS */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/entry.S linux-4.6.2.riscv/arch/riscv/kernel/entry.S
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/entry.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/entry.S 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,399 @@
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/linkage.h>
|
|
|
++
|
|
|
++#include <asm/asm.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++#include <asm/unistd.h>
|
|
|
++#include <asm/thread_info.h>
|
|
|
++#include <asm/asm-offsets.h>
|
|
|
++
|
|
|
++ .text
|
|
|
++ .altmacro
|
|
|
++ .macro SAVE_ALL
|
|
|
++ LOCAL _restore_kernel_sp
|
|
|
++ LOCAL _save_context
|
|
|
++
|
|
|
++ /* If coming from userspace, preserve the user stack pointer and load
|
|
|
++ the kernel stack pointer. If we came from the kernel, sscratch
|
|
|
++ will contain 0, and we should continue on the current stack. */
|
|
|
++ csrrw sp, sscratch, sp
|
|
|
++ bnez sp, _save_context
|
|
|
++
|
|
|
++_restore_kernel_sp:
|
|
|
++ csrr sp, sscratch
|
|
|
++_save_context:
|
|
|
++ addi sp, sp, -(PT_SIZE)
|
|
|
++ REG_S x1, PT_RA(sp)
|
|
|
++ REG_S x3, PT_GP(sp)
|
|
|
++ REG_S x4, PT_TP(sp)
|
|
|
++ REG_S x5, PT_T0(sp)
|
|
|
++ REG_S x6, PT_T1(sp)
|
|
|
++ REG_S x7, PT_T2(sp)
|
|
|
++ REG_S x8, PT_S0(sp)
|
|
|
++ REG_S x9, PT_S1(sp)
|
|
|
++ REG_S x10, PT_A0(sp)
|
|
|
++ REG_S x11, PT_A1(sp)
|
|
|
++ REG_S x12, PT_A2(sp)
|
|
|
++ REG_S x13, PT_A3(sp)
|
|
|
++ REG_S x14, PT_A4(sp)
|
|
|
++ REG_S x15, PT_A5(sp)
|
|
|
++ REG_S x16, PT_A6(sp)
|
|
|
++ REG_S x17, PT_A7(sp)
|
|
|
++ REG_S x18, PT_S2(sp)
|
|
|
++ REG_S x19, PT_S3(sp)
|
|
|
++ REG_S x20, PT_S4(sp)
|
|
|
++ REG_S x21, PT_S5(sp)
|
|
|
++ REG_S x22, PT_S6(sp)
|
|
|
++ REG_S x23, PT_S7(sp)
|
|
|
++ REG_S x24, PT_S8(sp)
|
|
|
++ REG_S x25, PT_S9(sp)
|
|
|
++ REG_S x26, PT_S10(sp)
|
|
|
++ REG_S x27, PT_S11(sp)
|
|
|
++ REG_S x28, PT_T3(sp)
|
|
|
++ REG_S x29, PT_T4(sp)
|
|
|
++ REG_S x30, PT_T5(sp)
|
|
|
++ REG_S x31, PT_T6(sp)
|
|
|
++
|
|
|
++ /* Disable FPU to detect illegal usage of
|
|
|
++ floating point in kernel space */
|
|
|
++ li t0, SR_FS
|
|
|
++
|
|
|
++ csrr s0, sscratch
|
|
|
++ csrrc s1, sstatus, t0
|
|
|
++ csrr s2, sepc
|
|
|
++ csrr s3, sbadaddr
|
|
|
++ csrr s4, scause
|
|
|
++ REG_S s0, PT_SP(sp)
|
|
|
++ REG_S s1, PT_SSTATUS(sp)
|
|
|
++ REG_S s2, PT_SEPC(sp)
|
|
|
++ REG_S s3, PT_SBADADDR(sp)
|
|
|
++ REG_S s4, PT_SCAUSE(sp)
|
|
|
++ .endm
|
|
|
++
|
|
|
++ .macro RESTORE_ALL
|
|
|
++ REG_L a0, PT_SSTATUS(sp)
|
|
|
++ REG_L a2, PT_SEPC(sp)
|
|
|
++ csrw sstatus, a0
|
|
|
++ csrw sepc, a2
|
|
|
++
|
|
|
++ REG_L x1, PT_RA(sp)
|
|
|
++ REG_L x3, PT_GP(sp)
|
|
|
++ REG_L x4, PT_TP(sp)
|
|
|
++ REG_L x5, PT_T0(sp)
|
|
|
++ REG_L x6, PT_T1(sp)
|
|
|
++ REG_L x7, PT_T2(sp)
|
|
|
++ REG_L x8, PT_S0(sp)
|
|
|
++ REG_L x9, PT_S1(sp)
|
|
|
++ REG_L x10, PT_A0(sp)
|
|
|
++ REG_L x11, PT_A1(sp)
|
|
|
++ REG_L x12, PT_A2(sp)
|
|
|
++ REG_L x13, PT_A3(sp)
|
|
|
++ REG_L x14, PT_A4(sp)
|
|
|
++ REG_L x15, PT_A5(sp)
|
|
|
++ REG_L x16, PT_A6(sp)
|
|
|
++ REG_L x17, PT_A7(sp)
|
|
|
++ REG_L x18, PT_S2(sp)
|
|
|
++ REG_L x19, PT_S3(sp)
|
|
|
++ REG_L x20, PT_S4(sp)
|
|
|
++ REG_L x21, PT_S5(sp)
|
|
|
++ REG_L x22, PT_S6(sp)
|
|
|
++ REG_L x23, PT_S7(sp)
|
|
|
++ REG_L x24, PT_S8(sp)
|
|
|
++ REG_L x25, PT_S9(sp)
|
|
|
++ REG_L x26, PT_S10(sp)
|
|
|
++ REG_L x27, PT_S11(sp)
|
|
|
++ REG_L x28, PT_T3(sp)
|
|
|
++ REG_L x29, PT_T4(sp)
|
|
|
++ REG_L x30, PT_T5(sp)
|
|
|
++ REG_L x31, PT_T6(sp)
|
|
|
++
|
|
|
++ REG_L x2, PT_SP(sp)
|
|
|
++ .endm
|
|
|
++
|
|
|
++ENTRY(handle_exception)
|
|
|
++ SAVE_ALL
|
|
|
++
|
|
|
++ /* Set sscratch register to 0, so that if a recursive exception
|
|
|
++ occurs, the exception vector knows it came from the kernel */
|
|
|
++ csrw sscratch, x0
|
|
|
++
|
|
|
++ /* Compute address of current thread_info */
|
|
|
++ li tp, ~(THREAD_SIZE-1)
|
|
|
++ and tp, tp, sp
|
|
|
++ /* Set current pointer */
|
|
|
++ REG_L tp, TI_TASK(tp)
|
|
|
++
|
|
|
++1: auipc gp, %pcrel_hi(_gp)
|
|
|
++ addi gp, gp, %pcrel_lo(1b)
|
|
|
++
|
|
|
++ la ra, ret_from_exception
|
|
|
++ /* MSB of cause differentiates between
|
|
|
++ interrupts and exceptions */
|
|
|
++ bge s4, zero, 1f
|
|
|
++
|
|
|
++ /* Handle interrupts */
|
|
|
++ slli a0, s4, 1
|
|
|
++ srli a0, a0, 1
|
|
|
++ move a1, sp /* pt_regs */
|
|
|
++ tail do_IRQ
|
|
|
++1:
|
|
|
++ /* Handle syscalls */
|
|
|
++ li t0, EXC_SYSCALL
|
|
|
++ beq s4, t0, handle_syscall
|
|
|
++
|
|
|
++ /* Handle other exceptions */
|
|
|
++ slli t0, s4, LGPTR
|
|
|
++ la t1, excp_vect_table
|
|
|
++ la t2, excp_vect_table_end
|
|
|
++ move a0, sp /* pt_regs */
|
|
|
++ add t0, t1, t0
|
|
|
++ /* Check if exception code lies within bounds */
|
|
|
++ bgeu t0, t2, 1f
|
|
|
++ REG_L t0, 0(t0)
|
|
|
++ jr t0
|
|
|
++1:
|
|
|
++ tail do_trap_unknown
|
|
|
++
|
|
|
++handle_syscall:
|
|
|
++ /* Advance SEPC to avoid executing the original
|
|
|
++ scall instruction on sret */
|
|
|
++ addi s2, s2, 0x4
|
|
|
++ REG_S s2, PT_SEPC(sp)
|
|
|
++ /* System calls run with interrupts enabled */
|
|
|
++ csrs sstatus, SR_IE
|
|
|
++ /* Trace syscalls, but only if requested by the user. */
|
|
|
++ REG_L t0, TASK_THREAD_INFO(tp)
|
|
|
++ REG_L t0, TI_FLAGS(t0)
|
|
|
++ andi t0, t0, _TIF_SYSCALL_TRACE
|
|
|
++ bnez t0, handle_syscall_trace_enter
|
|
|
++check_syscall_nr:
|
|
|
++ /* Check to make sure we don't jump to a bogus syscall number. */
|
|
|
++ li t0, __NR_syscalls
|
|
|
++ la s0, sys_ni_syscall
|
|
|
++ /* Syscall number held in a7 */
|
|
|
++ bgeu a7, t0, 1f
|
|
|
++ la s0, sys_call_table
|
|
|
++ slli t0, a7, LGPTR
|
|
|
++ add s0, s0, t0
|
|
|
++ REG_L s0, 0(s0)
|
|
|
++1:
|
|
|
++ jalr s0
|
|
|
++
|
|
|
++ret_from_syscall:
|
|
|
++ /* Set user a0 to kernel a0 */
|
|
|
++ REG_S a0, PT_A0(sp)
|
|
|
++ /* Trace syscalls, but only if requested by the user. */
|
|
|
++ REG_L t0, TASK_THREAD_INFO(tp)
|
|
|
++ REG_L t0, TI_FLAGS(t0)
|
|
|
++ andi t0, t0, _TIF_SYSCALL_TRACE
|
|
|
++ bnez t0, handle_syscall_trace_exit
|
|
|
++
|
|
|
++ret_from_exception:
|
|
|
++ REG_L s0, PT_SSTATUS(sp)
|
|
|
++ csrc sstatus, SR_IE
|
|
|
++ andi s0, s0, SR_PS
|
|
|
++ bnez s0, restore_all
|
|
|
++
|
|
|
++resume_userspace:
|
|
|
++ /* Interrupts must be disabled here so flags are checked atomically */
|
|
|
++ REG_L s0, TASK_THREAD_INFO(tp)
|
|
|
++ REG_L s0, TI_FLAGS(s0) /* current_thread_info->flags */
|
|
|
++ andi s1, s0, _TIF_WORK_MASK
|
|
|
++ bnez s1, work_pending
|
|
|
++
|
|
|
++ /* Save unwound kernel stack pointer in sscratch */
|
|
|
++ addi s0, sp, PT_SIZE
|
|
|
++ csrw sscratch, s0
|
|
|
++restore_all:
|
|
|
++ RESTORE_ALL
|
|
|
++ sret
|
|
|
++
|
|
|
++work_pending:
|
|
|
++ /* Enter slow path for supplementary processing */
|
|
|
++ la ra, ret_from_exception
|
|
|
++ andi s1, s0, _TIF_NEED_RESCHED
|
|
|
++ bnez s1, work_resched
|
|
|
++work_notifysig:
|
|
|
++ /* Handle pending signals and notify-resume requests */
|
|
|
++ csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
|
|
|
++ move a0, sp /* pt_regs */
|
|
|
++ move a1, s0 /* current_thread_info->flags */
|
|
|
++ tail do_notify_resume
|
|
|
++work_resched:
|
|
|
++ tail schedule
|
|
|
++
|
|
|
++/* Slow paths for ptrace. */
|
|
|
++handle_syscall_trace_enter:
|
|
|
++ move a0, sp
|
|
|
++ call do_syscall_trace_enter
|
|
|
++ REG_L a0, PT_A0(sp)
|
|
|
++ REG_L a1, PT_A1(sp)
|
|
|
++ REG_L a2, PT_A2(sp)
|
|
|
++ REG_L a3, PT_A3(sp)
|
|
|
++ REG_L a4, PT_A4(sp)
|
|
|
++ REG_L a5, PT_A5(sp)
|
|
|
++ REG_L a6, PT_A6(sp)
|
|
|
++ REG_L a7, PT_A7(sp)
|
|
|
++ j check_syscall_nr
|
|
|
++handle_syscall_trace_exit:
|
|
|
++ move a0, sp
|
|
|
++ call do_syscall_trace_exit
|
|
|
++ j ret_from_exception
|
|
|
++
|
|
|
++END(handle_exception)
|
|
|
++
|
|
|
++ENTRY(ret_from_fork)
|
|
|
++ la ra, ret_from_exception
|
|
|
++ tail schedule_tail
|
|
|
++ENDPROC(ret_from_fork)
|
|
|
++
|
|
|
++ENTRY(ret_from_kernel_thread)
|
|
|
++ call schedule_tail
|
|
|
++ /* Call fn(arg) */
|
|
|
++ la ra, ret_from_exception
|
|
|
++ move a0, s1
|
|
|
++ jr s0
|
|
|
++ENDPROC(ret_from_kernel_thread)
|
|
|
++
|
|
|
++
|
|
|
++/*
|
|
|
++ * Integer register context switch
|
|
|
++ * The callee-saved registers must be saved and restored.
|
|
|
++ *
|
|
|
++ * a0: previous task_struct (must be preserved across the switch)
|
|
|
++ * a1: next task_struct
|
|
|
++ */
|
|
|
++ENTRY(__switch_to)
|
|
|
++ /* Save context into prev->thread */
|
|
|
++ REG_S ra, THREAD_RA(a0)
|
|
|
++ REG_S sp, THREAD_SP(a0)
|
|
|
++ REG_S s0, THREAD_S0(a0)
|
|
|
++ REG_S s1, THREAD_S1(a0)
|
|
|
++ REG_S s2, THREAD_S2(a0)
|
|
|
++ REG_S s3, THREAD_S3(a0)
|
|
|
++ REG_S s4, THREAD_S4(a0)
|
|
|
++ REG_S s5, THREAD_S5(a0)
|
|
|
++ REG_S s6, THREAD_S6(a0)
|
|
|
++ REG_S s7, THREAD_S7(a0)
|
|
|
++ REG_S s8, THREAD_S8(a0)
|
|
|
++ REG_S s9, THREAD_S9(a0)
|
|
|
++ REG_S s10, THREAD_S10(a0)
|
|
|
++ REG_S s11, THREAD_S11(a0)
|
|
|
++ /* Restore context from next->thread */
|
|
|
++ REG_L ra, THREAD_RA(a1)
|
|
|
++ REG_L sp, THREAD_SP(a1)
|
|
|
++ REG_L s0, THREAD_S0(a1)
|
|
|
++ REG_L s1, THREAD_S1(a1)
|
|
|
++ REG_L s2, THREAD_S2(a1)
|
|
|
++ REG_L s3, THREAD_S3(a1)
|
|
|
++ REG_L s4, THREAD_S4(a1)
|
|
|
++ REG_L s5, THREAD_S5(a1)
|
|
|
++ REG_L s6, THREAD_S6(a1)
|
|
|
++ REG_L s7, THREAD_S7(a1)
|
|
|
++ REG_L s8, THREAD_S8(a1)
|
|
|
++ REG_L s9, THREAD_S9(a1)
|
|
|
++ REG_L s10, THREAD_S10(a1)
|
|
|
++ REG_L s11, THREAD_S11(a1)
|
|
|
++ mv tp, a1 /* Next current pointer */
|
|
|
++ ret
|
|
|
++ENDPROC(__switch_to)
|
|
|
++
|
|
|
++ENTRY(__fstate_save)
|
|
|
++ li t1, SR_FS
|
|
|
++ csrs sstatus, t1
|
|
|
++ frcsr t0
|
|
|
++ fsd f0, THREAD_F0(a0)
|
|
|
++ fsd f1, THREAD_F1(a0)
|
|
|
++ fsd f2, THREAD_F2(a0)
|
|
|
++ fsd f3, THREAD_F3(a0)
|
|
|
++ fsd f4, THREAD_F4(a0)
|
|
|
++ fsd f5, THREAD_F5(a0)
|
|
|
++ fsd f6, THREAD_F6(a0)
|
|
|
++ fsd f7, THREAD_F7(a0)
|
|
|
++ fsd f8, THREAD_F8(a0)
|
|
|
++ fsd f9, THREAD_F9(a0)
|
|
|
++ fsd f10, THREAD_F10(a0)
|
|
|
++ fsd f11, THREAD_F11(a0)
|
|
|
++ fsd f12, THREAD_F12(a0)
|
|
|
++ fsd f13, THREAD_F13(a0)
|
|
|
++ fsd f14, THREAD_F14(a0)
|
|
|
++ fsd f15, THREAD_F15(a0)
|
|
|
++ fsd f16, THREAD_F16(a0)
|
|
|
++ fsd f17, THREAD_F17(a0)
|
|
|
++ fsd f18, THREAD_F18(a0)
|
|
|
++ fsd f19, THREAD_F19(a0)
|
|
|
++ fsd f20, THREAD_F20(a0)
|
|
|
++ fsd f21, THREAD_F21(a0)
|
|
|
++ fsd f22, THREAD_F22(a0)
|
|
|
++ fsd f23, THREAD_F23(a0)
|
|
|
++ fsd f24, THREAD_F24(a0)
|
|
|
++ fsd f25, THREAD_F25(a0)
|
|
|
++ fsd f26, THREAD_F26(a0)
|
|
|
++ fsd f27, THREAD_F27(a0)
|
|
|
++ fsd f28, THREAD_F28(a0)
|
|
|
++ fsd f29, THREAD_F29(a0)
|
|
|
++ fsd f30, THREAD_F30(a0)
|
|
|
++ fsd f31, THREAD_F31(a0)
|
|
|
++ sw t0, THREAD_FCSR(a0)
|
|
|
++ csrc sstatus, t1
|
|
|
++ ret
|
|
|
++ENDPROC(__fstate_save)
|
|
|
++
|
|
|
++ENTRY(__fstate_restore)
|
|
|
++ li t1, SR_FS
|
|
|
++ lw t0, THREAD_FCSR(a0)
|
|
|
++ csrs sstatus, t1
|
|
|
++ fld f0, THREAD_F0(a0)
|
|
|
++ fld f1, THREAD_F1(a0)
|
|
|
++ fld f2, THREAD_F2(a0)
|
|
|
++ fld f3, THREAD_F3(a0)
|
|
|
++ fld f4, THREAD_F4(a0)
|
|
|
++ fld f5, THREAD_F5(a0)
|
|
|
++ fld f6, THREAD_F6(a0)
|
|
|
++ fld f7, THREAD_F7(a0)
|
|
|
++ fld f8, THREAD_F8(a0)
|
|
|
++ fld f9, THREAD_F9(a0)
|
|
|
++ fld f10, THREAD_F10(a0)
|
|
|
++ fld f11, THREAD_F11(a0)
|
|
|
++ fld f12, THREAD_F12(a0)
|
|
|
++ fld f13, THREAD_F13(a0)
|
|
|
++ fld f14, THREAD_F14(a0)
|
|
|
++ fld f15, THREAD_F15(a0)
|
|
|
++ fld f16, THREAD_F16(a0)
|
|
|
++ fld f17, THREAD_F17(a0)
|
|
|
++ fld f18, THREAD_F18(a0)
|
|
|
++ fld f19, THREAD_F19(a0)
|
|
|
++ fld f20, THREAD_F20(a0)
|
|
|
++ fld f21, THREAD_F21(a0)
|
|
|
++ fld f22, THREAD_F22(a0)
|
|
|
++ fld f23, THREAD_F23(a0)
|
|
|
++ fld f24, THREAD_F24(a0)
|
|
|
++ fld f25, THREAD_F25(a0)
|
|
|
++ fld f26, THREAD_F26(a0)
|
|
|
++ fld f27, THREAD_F27(a0)
|
|
|
++ fld f28, THREAD_F28(a0)
|
|
|
++ fld f29, THREAD_F29(a0)
|
|
|
++ fld f30, THREAD_F30(a0)
|
|
|
++ fld f31, THREAD_F31(a0)
|
|
|
++ fscsr t0
|
|
|
++ csrc sstatus, t1
|
|
|
++ ret
|
|
|
++ENDPROC(__fstate_restore)
|
|
|
++
|
|
|
++
|
|
|
++ .section ".rodata"
|
|
|
++ /* Exception vector table */
|
|
|
++ENTRY(excp_vect_table)
|
|
|
++ PTR do_trap_insn_misaligned
|
|
|
++ PTR do_page_fault
|
|
|
++ PTR do_trap_insn_illegal
|
|
|
++ PTR do_trap_unknown
|
|
|
++ PTR do_trap_unknown
|
|
|
++ PTR do_page_fault
|
|
|
++ PTR do_trap_amo_misaligned
|
|
|
++ PTR do_page_fault
|
|
|
++ PTR 0 /* handle_syscall */
|
|
|
++ PTR do_trap_break
|
|
|
++excp_vect_table_end:
|
|
|
++END(excp_vect_table)
|
|
|
++
|
|
|
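For readers who don't speak RISC-V assembly, the syscall dispatch above (check_syscall_nr) does three things: default to sys_ni_syscall, bounds-check the number held in a7 against __NR_syscalls, and otherwise index sys_call_table by that number scaled to pointer size. The same control flow as a small standalone C toy (an illustration of the pattern only, with a made-up two-entry table in place of the real sys_call_table):

#include <stdio.h>

typedef long (*syscall_fn)(long a0, long a1);

static long toy_ni_syscall(long a0, long a1) { (void)a0; (void)a1; return -38; /* -ENOSYS */ }
static long toy_getpid(long a0, long a1)     { (void)a0; (void)a1; return 42; }
static long toy_add(long a0, long a1)        { return a0 + a1; }

static const syscall_fn toy_call_table[] = { toy_getpid, toy_add };
#define TOY_NR_SYSCALLS (sizeof(toy_call_table) / sizeof(toy_call_table[0]))

/* Mirrors check_syscall_nr: fall back to ni_syscall, bounds-check,
 * then index the table by the syscall number (a7 in the assembly) */
static long dispatch(unsigned long nr, long a0, long a1)
{
	syscall_fn fn = toy_ni_syscall;

	if (nr < TOY_NR_SYSCALLS)
		fn = toy_call_table[nr];
	return fn(a0, a1);
}

int main(void)
{
	printf("%ld %ld %ld\n", dispatch(0, 0, 0), dispatch(1, 2, 3), dispatch(99, 0, 0));
	return 0;
}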
+diff -Nur linux-4.6.2/arch/riscv/kernel/head.S linux-4.6.2.riscv/arch/riscv/kernel/head.S
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/head.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/head.S 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,77 @@
|
|
|
++#include <asm/thread_info.h>
|
|
|
++#include <asm/asm-offsets.h>
|
|
|
++#include <asm/asm.h>
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/linkage.h>
|
|
|
++#include <asm/thread_info.h>
|
|
|
++#include <asm/page.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++
|
|
|
++__INIT
|
|
|
++ENTRY(_start)
|
|
|
++ /* Load the global pointer (before any use of la) */
|
|
|
++1: auipc gp, %pcrel_hi(_gp)
|
|
|
++ addi gp, gp, %pcrel_lo(1b)
|
|
|
++
|
|
|
++ /* Disable FPU to detect illegal usage of
|
|
|
++ floating point in kernel space */
|
|
|
++ li t0, SR_FS
|
|
|
++ csrc sstatus, t0
|
|
|
++
|
|
|
++#ifdef CONFIG_RV_PUM
|
|
|
++ /* Prevent inadvertent access to user memory */
|
|
|
++ li t0, SR_PUM
|
|
|
++ csrs sstatus, t0
|
|
|
++#endif
|
|
|
++
|
|
|
++ /* See if we're the main hart */
|
|
|
++ call sbi_hart_id
|
|
|
++ bnez a0, .Lsecondary_start
|
|
|
++
|
|
|
++ /* Clear the .bss segment */
|
|
|
++ la a0, __bss_start
|
|
|
++ li a1, 0
|
|
|
++ la a2, __bss_stop
|
|
|
++ sub a2, a2, a0
|
|
|
++ call memset
|
|
|
++
|
|
|
++ /* Initialize stack pointer */
|
|
|
++ la sp, init_thread_union + THREAD_SIZE
|
|
|
++ /* Initialize current task_struct pointer */
|
|
|
++ la tp, init_task
|
|
|
++
|
|
|
++ tail start_kernel
|
|
|
++
|
|
|
++.Lsecondary_start:
|
|
|
++#ifdef CONFIG_SMP
|
|
|
++ li a1, CONFIG_NR_CPUS
|
|
|
++ bgeu a0, a1, .Lsecondary_park
|
|
|
++
|
|
|
++ la a1, __cpu_up_stack_pointer
|
|
|
++ slli a0, a0, LGREG
|
|
|
++ add a0, a0, a1
|
|
|
++
|
|
|
++.Lwait_for_cpu_up:
|
|
|
++ REG_L sp, (a0)
|
|
|
++ beqz sp, .Lwait_for_cpu_up
|
|
|
++
|
|
|
++ /* Initialize task_struct pointer */
|
|
|
++ li tp, -THREAD_SIZE
|
|
|
++ add tp, tp, sp
|
|
|
++ REG_L tp, (tp)
|
|
|
++
|
|
|
++ tail smp_callin
|
|
|
++#endif
|
|
|
++
|
|
|
++.Lsecondary_park:
|
|
|
++ /* We lack SMP support or have too many harts, so park this hart */
|
|
|
++ wfi
|
|
|
++ j .Lsecondary_park
|
|
|
++END(_start)
|
|
|
++
|
|
|
++__PAGE_ALIGNED_BSS
|
|
|
++ /* Empty zero page */
|
|
|
++ .balign PAGE_SIZE
|
|
|
++ENTRY(empty_zero_page)
|
|
|
++ .fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
|
|
|
++END(empty_zero_page)
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/irq.c linux-4.6.2.riscv/arch/riscv/kernel/irq.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/irq.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/irq.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,102 @@
|
|
|
++#include <linux/interrupt.h>
|
|
|
++#include <linux/ftrace.h>
|
|
|
++#include <linux/seq_file.h>
|
|
|
++
|
|
|
++#include <asm/ptrace.h>
|
|
|
++#include <asm/sbi.h>
|
|
|
++#include <asm/sbi-con.h>
|
|
|
++#include <asm/smp.h>
|
|
|
++
|
|
|
++struct plic_context {
|
|
|
++ volatile int priority_threshold;
|
|
|
++ volatile int claim;
|
|
|
++};
|
|
|
++
|
|
|
++static DEFINE_PER_CPU(struct plic_context *, plic_context);
|
|
|
++static DEFINE_PER_CPU(unsigned int, irq_in_progress);
|
|
|
++
|
|
|
++static void riscv_software_interrupt(void)
|
|
|
++{
|
|
|
++ irqreturn_t ret;
|
|
|
++
|
|
|
++#ifdef CONFIG_SMP
|
|
|
++ ret = handle_ipi();
|
|
|
++ if (ret != IRQ_NONE)
|
|
|
++ return;
|
|
|
++#endif
|
|
|
++
|
|
|
++ ret = sbi_console_isr();
|
|
|
++ if (ret != IRQ_NONE)
|
|
|
++ return;
|
|
|
++
|
|
|
++ BUG();
|
|
|
++}
|
|
|
++
|
|
|
++static void plic_interrupt(void)
|
|
|
++{
|
|
|
++ unsigned int cpu = smp_processor_id();
|
|
|
++ unsigned int irq = per_cpu(plic_context, cpu)->claim;
|
|
|
++
|
|
|
++ BUG_ON(per_cpu(irq_in_progress, cpu) != 0);
|
|
|
++
|
|
|
++ if (irq) {
|
|
|
++ per_cpu(irq_in_progress, cpu) = irq;
|
|
|
++ generic_handle_irq(irq);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++asmlinkage void __irq_entry do_IRQ(unsigned int cause, struct pt_regs *regs)
|
|
|
++{
|
|
|
++ struct pt_regs *old_regs = set_irq_regs(regs);
|
|
|
++ irq_enter();
|
|
|
++
|
|
|
++ /* There are three classes of interrupt: timer, software, and
|
|
|
++ external devices. We dispatch between them here. External
|
|
|
++ device interrupts use the generic IRQ mechanisms. */
|
|
|
++ switch (cause) {
|
|
|
++ case INTERRUPT_CAUSE_TIMER:
|
|
|
++ riscv_timer_interrupt();
|
|
|
++ break;
|
|
|
++ case INTERRUPT_CAUSE_SOFTWARE:
|
|
|
++ riscv_software_interrupt();
|
|
|
++ break;
|
|
|
++ case INTERRUPT_CAUSE_EXTERNAL:
|
|
|
++ plic_interrupt();
|
|
|
++ break;
|
|
|
++ default:
|
|
|
++ BUG();
|
|
|
++ }
|
|
|
++
|
|
|
++ irq_exit();
|
|
|
++ set_irq_regs(old_regs);
|
|
|
++}
|
|
|
++
|
|
|
++static void plic_irq_mask(struct irq_data *d)
|
|
|
++{
|
|
|
++ unsigned int cpu = smp_processor_id();
|
|
|
++
|
|
|
++ BUG_ON(d->irq != per_cpu(irq_in_progress, cpu));
|
|
|
++}
|
|
|
++
|
|
|
++static void plic_irq_unmask(struct irq_data *d)
|
|
|
++{
|
|
|
++ unsigned int cpu = smp_processor_id();
|
|
|
++
|
|
|
++ BUG_ON(d->irq != per_cpu(irq_in_progress, cpu));
|
|
|
++
|
|
|
++ per_cpu(plic_context, cpu)->claim = per_cpu(irq_in_progress, cpu);
|
|
|
++ per_cpu(irq_in_progress, cpu) = 0;
|
|
|
++}
|
|
|
++
|
|
|
++struct irq_chip plic_irq_chip = {
|
|
|
++ .name = "riscv",
|
|
|
++ .irq_mask = plic_irq_mask,
|
|
|
++ .irq_mask_ack = plic_irq_mask,
|
|
|
++ .irq_unmask = plic_irq_unmask,
|
|
|
++};
|
|
|
++
|
|
|
++void __init init_IRQ(void)
|
|
|
++{
|
|
|
++ /* Enable software interrupts (and disable the others) */
|
|
|
++ csr_write(sie, SIE_SSIE);
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/Makefile linux-4.6.2.riscv/arch/riscv/kernel/Makefile
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/Makefile 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/Makefile 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,15 @@
|
|
|
++#
|
|
|
++# Makefile for the RISC-V Linux kernel
|
|
|
++#
|
|
|
++
|
|
|
++extra-y := head.o vmlinux.lds
|
|
|
++
|
|
|
++obj-y := cpu.o entry.o irq.o process.o ptrace.o reset.o setup.o \
|
|
|
++ sbi.o signal.o syscall_table.o sys_riscv.o time.o traps.o \
|
|
|
++ stacktrace.o vdso.o vdso/
|
|
|
++
|
|
|
++obj-$(CONFIG_SMP) += smpboot.o smp.o
|
|
|
++obj-$(CONFIG_SBI_CONSOLE) += sbi-con.o
|
|
|
++obj-$(CONFIG_PCI) += pci.o
|
|
|
++
|
|
|
++clean:
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/pci.c linux-4.6.2.riscv/arch/riscv/kernel/pci.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/pci.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/pci.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,23 @@
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/io.h>
|
|
|
++#include <linux/kernel.h>
|
|
|
++#include <linux/mm.h>
|
|
|
++#include <linux/slab.h>
|
|
|
++#include <linux/pci.h>
|
|
|
++
|
|
|
++/*
|
|
|
++ * Called after each bus is probed, but before its children are examined
|
|
|
++ */
|
|
|
++void pcibios_fixup_bus(struct pci_bus *bus)
|
|
|
++{
|
|
|
++ /* nothing to do, expected to be removed in the future */
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * We don't have to worry about legacy ISA devices, so nothing to do here
|
|
|
++ */
|
|
|
++resource_size_t pcibios_align_resource(void *data, const struct resource *res,
|
|
|
++ resource_size_t size, resource_size_t align)
|
|
|
++{
|
|
|
++ return res->start;
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/process.c linux-4.6.2.riscv/arch/riscv/kernel/process.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/process.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/process.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,106 @@
|
|
|
++#include <linux/kernel.h>
|
|
|
++#include <linux/sched.h>
|
|
|
++#include <linux/tick.h>
|
|
|
++#include <linux/ptrace.h>
|
|
|
++
|
|
|
++#include <asm/unistd.h>
|
|
|
++#include <asm/uaccess.h>
|
|
|
++#include <asm/processor.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++#include <asm/string.h>
|
|
|
++#include <asm/switch_to.h>
|
|
|
++
|
|
|
++extern asmlinkage void ret_from_fork(void);
|
|
|
++extern asmlinkage void ret_from_kernel_thread(void);
|
|
|
++
|
|
|
++void arch_cpu_idle(void)
|
|
|
++{
|
|
|
++ wait_for_interrupt();
|
|
|
++ local_irq_enable();
|
|
|
++}
|
|
|
++
|
|
|
++void show_regs(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ show_regs_print_info(KERN_DEFAULT);
|
|
|
++
|
|
|
++ printk("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
|
|
|
++ regs->sepc, regs->ra, regs->sp);
|
|
|
++ printk(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
|
|
|
++ regs->gp, regs->tp, regs->t0);
|
|
|
++ printk(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
|
|
|
++ regs->t1, regs->t2, regs->s0);
|
|
|
++ printk(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
|
|
|
++ regs->s1, regs->a0, regs->a1);
|
|
|
++ printk(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
|
|
|
++ regs->a2, regs->a3, regs->a4);
|
|
|
++ printk(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
|
|
|
++ regs->a5, regs->a6, regs->a7);
|
|
|
++ printk(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
|
|
|
++ regs->s2, regs->s3, regs->s4);
|
|
|
++ printk(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
|
|
|
++ regs->s5, regs->s6, regs->s7);
|
|
|
++ printk(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
|
|
|
++ regs->s8, regs->s9, regs->s10);
|
|
|
++ printk(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
|
|
|
++ regs->s11, regs->t3, regs->t4);
|
|
|
++ printk(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
|
|
|
++ regs->t5, regs->t6);
|
|
|
++
|
|
|
++ printk("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
|
|
|
++ regs->sstatus, regs->sbadaddr, regs->scause);
|
|
|
++}
|
|
|
++
|
|
|
++void start_thread(struct pt_regs *regs, unsigned long pc,
|
|
|
++ unsigned long sp)
|
|
|
++{
|
|
|
++ regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
|
|
|
++ regs->sepc = pc;
|
|
|
++ regs->sp = sp;
|
|
|
++ set_fs(USER_DS);
|
|
|
++}
|
|
|
++
|
|
|
++void flush_thread(void)
|
|
|
++{
|
|
|
++ /* Reset FPU context
|
|
|
++ * frm: round to nearest, ties to even (IEEE default)
|
|
|
++ * fflags: accrued exceptions cleared
|
|
|
++ */
|
|
|
++	memset(&current->thread.fstate, 0,
|
|
|
++ sizeof(struct user_fpregs_struct));
|
|
|
++}
|
|
|
++
|
|
|
++int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
|
|
++{
|
|
|
++ fstate_save(src, task_pt_regs(src));
|
|
|
++ *dst = *src;
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++int copy_thread(unsigned long clone_flags, unsigned long usp,
|
|
|
++ unsigned long arg, struct task_struct *p)
|
|
|
++{
|
|
|
++ struct pt_regs *childregs = task_pt_regs(p);
|
|
|
++
|
|
|
++ /* p->thread holds context to be restored by __switch_to() */
|
|
|
++ if (unlikely(p->flags & PF_KTHREAD)) {
|
|
|
++ /* Kernel thread */
|
|
|
++ const register unsigned long gp __asm__ ("gp");
|
|
|
++ memset(childregs, 0, sizeof(struct pt_regs));
|
|
|
++ childregs->gp = gp;
|
|
|
++ childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
|
|
|
++
|
|
|
++ p->thread.ra = (unsigned long)ret_from_kernel_thread;
|
|
|
++ p->thread.s[0] = usp; /* fn */
|
|
|
++ p->thread.s[1] = arg;
|
|
|
++ } else {
|
|
|
++ *childregs = *(current_pt_regs());
|
|
|
++ if (usp) /* User fork */
|
|
|
++ childregs->sp = usp;
|
|
|
++ if (clone_flags & CLONE_SETTLS)
|
|
|
++ childregs->tp = childregs->a5;
|
|
|
++ childregs->a0 = 0; /* Return value of fork() */
|
|
|
++ p->thread.ra = (unsigned long)ret_from_fork;
|
|
|
++ }
|
|
|
++ p->thread.sp = (unsigned long)childregs; /* kernel sp */
|
|
|
++ return 0;
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/ptrace.c linux-4.6.2.riscv/arch/riscv/kernel/ptrace.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/ptrace.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/ptrace.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,128 @@
|
|
|
++#include <asm/ptrace.h>
|
|
|
++#include <asm/syscall.h>
|
|
|
++#include <asm/thread_info.h>
|
|
|
++#include <linux/ptrace.h>
|
|
|
++#include <linux/elf.h>
|
|
|
++#include <linux/regset.h>
|
|
|
++#include <linux/tracehook.h>
|
|
|
++#include <trace/events/syscalls.h>
|
|
|
++
|
|
|
++enum riscv_regset {
|
|
|
++ REGSET_X,
|
|
|
++};
|
|
|
++
|
|
|
++/*
|
|
|
++ * Get registers from task and ready the result for userspace.
|
|
|
++ */
|
|
|
++static char *getregs(struct task_struct *child, struct pt_regs *uregs)
|
|
|
++{
|
|
|
++ *uregs = *task_pt_regs(child);
|
|
|
++ return (char *)uregs;
|
|
|
++}
|
|
|
++
|
|
|
++/* Put registers back to task. */
|
|
|
++static void putregs(struct task_struct *child, struct pt_regs *uregs)
|
|
|
++{
|
|
|
++ struct pt_regs *regs = task_pt_regs(child);
|
|
|
++ *regs = *uregs;
|
|
|
++}
|
|
|
++
|
|
|
++static int riscv_gpr_get(struct task_struct *target,
|
|
|
++ const struct user_regset *regset,
|
|
|
++ unsigned int pos, unsigned int count,
|
|
|
++ void *kbuf, void __user *ubuf)
|
|
|
++{
|
|
|
++ struct pt_regs regs;
|
|
|
++
|
|
|
++	getregs(target, &regs);
|
|
|
++
|
|
|
++	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs, 0,
|
|
|
++ sizeof(regs));
|
|
|
++}
|
|
|
++
|
|
|
++static int riscv_gpr_set(struct task_struct *target,
|
|
|
++ const struct user_regset *regset,
|
|
|
++ unsigned int pos, unsigned int count,
|
|
|
++ const void *kbuf, const void __user *ubuf)
|
|
|
++{
|
|
|
++ int ret;
|
|
|
++ struct pt_regs regs;
|
|
|
++
|
|
|
++	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
|
|
|
++ sizeof(regs));
|
|
|
++ if (ret)
|
|
|
++ return ret;
|
|
|
++
|
|
|
++	putregs(target, &regs);
|
|
|
++
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++static const struct user_regset riscv_user_regset[] = {
|
|
|
++ [REGSET_X] = {
|
|
|
++ .core_note_type = NT_PRSTATUS,
|
|
|
++ .n = ELF_NGREG,
|
|
|
++ .size = sizeof(elf_greg_t),
|
|
|
++ .align = sizeof(elf_greg_t),
|
|
|
++ .get = &riscv_gpr_get,
|
|
|
++ .set = &riscv_gpr_set,
|
|
|
++ },
|
|
|
++};
|
|
|
++
|
|
|
++static const struct user_regset_view riscv_user_native_view = {
|
|
|
++ .name = "riscv",
|
|
|
++ .e_machine = EM_RISCV,
|
|
|
++ .regsets = riscv_user_regset,
|
|
|
++ .n = ARRAY_SIZE(riscv_user_regset),
|
|
|
++};
|
|
|
++
|
|
|
++const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
|
|
++{
|
|
|
++ return &riscv_user_native_view;
|
|
|
++}
|
|
|
++
|
|
|
++void ptrace_disable(struct task_struct *child)
|
|
|
++{
|
|
|
++ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
|
|
++}
|
|
|
++
|
|
|
++long arch_ptrace(struct task_struct *child, long request,
|
|
|
++ unsigned long addr, unsigned long data)
|
|
|
++{
|
|
|
++ long ret = -EIO;
|
|
|
++
|
|
|
++ switch (request) {
|
|
|
++ default:
|
|
|
++ ret = ptrace_request(child, request, addr, data);
|
|
|
++ break;
|
|
|
++ }
|
|
|
++
|
|
|
++ return ret;
|
|
|
++}
|
|
|
++
|
|
|
++/* Allows PTRACE_SYSCALL to work. These are called from entry.S in
|
|
|
++ * {handle,ret_from}_syscall. */
|
|
|
++void do_syscall_trace_enter(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
|
|
|
++ if (tracehook_report_syscall_entry(regs))
|
|
|
++ syscall_set_nr(current, regs, -1);
|
|
|
++ }
|
|
|
++
|
|
|
++#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
|
|
|
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
|
|
|
++ trace_sys_enter(regs, syscall_get_nr(current, regs));
|
|
|
++#endif
|
|
|
++}
|
|
|
++
|
|
|
++void do_syscall_trace_exit(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ if (test_thread_flag(TIF_SYSCALL_TRACE))
|
|
|
++ tracehook_report_syscall_exit(regs, 0);
|
|
|
++
|
|
|
++#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
|
|
|
++ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
|
|
|
++ trace_sys_exit(regs, regs->regs[0]);
|
|
|
++#endif
|
|
|
++}
|
|
|
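Because the regset above is exported under NT_PRSTATUS, ordinary debuggers can fetch struct user_regs_struct through the generic PTRACE_GETREGSET interface. A minimal userspace sketch (illustration only; it assumes the tracee is already attached and stopped, and that the uapi <asm/ptrace.h> from this patch is installed on the build host):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>
#include <asm/ptrace.h>	/* struct user_regs_struct from this patch */

/* Dump pc/sp/a0 of a stopped tracee; error handling omitted for brevity */
void dump_regs(pid_t pid)
{
	struct user_regs_struct regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
		printf("pc=0x%lx sp=0x%lx a0=0x%lx\n", regs.pc, regs.sp, regs.a0);
}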
+diff -Nur linux-4.6.2/arch/riscv/kernel/reset.c linux-4.6.2.riscv/arch/riscv/kernel/reset.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/reset.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/reset.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,19 @@
|
|
|
++#include <linux/reboot.h>
|
|
|
++#include <linux/export.h>
|
|
|
++#include <asm/sbi.h>
|
|
|
++
|
|
|
++void (*pm_power_off)(void) = machine_power_off;
|
|
|
++EXPORT_SYMBOL(pm_power_off);
|
|
|
++
|
|
|
++void machine_restart(char *cmd)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++void machine_halt(void)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++void machine_power_off(void)
|
|
|
++{
|
|
|
++ sbi_shutdown();
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/sbi-con.c linux-4.6.2.riscv/arch/riscv/kernel/sbi-con.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/sbi-con.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/sbi-con.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,155 @@
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/console.h>
|
|
|
++#include <linux/tty.h>
|
|
|
++#include <linux/tty_flip.h>
|
|
|
++#include <linux/tty_driver.h>
|
|
|
++#include <linux/module.h>
|
|
|
++#include <linux/interrupt.h>
|
|
|
++
|
|
|
++#include <asm/sbi.h>
|
|
|
++
|
|
|
++static DEFINE_SPINLOCK(sbi_tty_port_lock);
|
|
|
++static struct tty_port sbi_tty_port;
|
|
|
++static struct tty_driver *sbi_tty_driver;
|
|
|
++
|
|
|
++irqreturn_t sbi_console_isr(void)
|
|
|
++{
|
|
|
++ int ch = sbi_console_getchar();
|
|
|
++ if (ch < 0)
|
|
|
++ return IRQ_NONE;
|
|
|
++
|
|
|
++ spin_lock(&sbi_tty_port_lock);
|
|
|
++ tty_insert_flip_char(&sbi_tty_port, ch, TTY_NORMAL);
|
|
|
++ tty_flip_buffer_push(&sbi_tty_port);
|
|
|
++ spin_unlock(&sbi_tty_port_lock);
|
|
|
++
|
|
|
++ return IRQ_HANDLED;
|
|
|
++}
|
|
|
++
|
|
|
++static int sbi_tty_open(struct tty_struct *tty, struct file *filp)
|
|
|
++{
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static int sbi_tty_write(struct tty_struct *tty,
|
|
|
++ const unsigned char *buf, int count)
|
|
|
++{
|
|
|
++ const unsigned char *end;
|
|
|
++
|
|
|
++ for (end = buf + count; buf < end; buf++) {
|
|
|
++ sbi_console_putchar(*buf);
|
|
|
++ }
|
|
|
++ return count;
|
|
|
++}
|
|
|
++
|
|
|
++static int sbi_tty_write_room(struct tty_struct *tty)
|
|
|
++{
|
|
|
++ return 1024; /* arbitrary */
|
|
|
++}
|
|
|
++
|
|
|
++static const struct tty_operations sbi_tty_ops = {
|
|
|
++ .open = sbi_tty_open,
|
|
|
++ .write = sbi_tty_write,
|
|
|
++ .write_room = sbi_tty_write_room,
|
|
|
++};
|
|
|
++
|
|
|
++
|
|
|
++static void sbi_console_write(struct console *co, const char *buf, unsigned n)
|
|
|
++{
|
|
|
++ for ( ; n > 0; n--, buf++) {
|
|
|
++ if (*buf == '\n')
|
|
|
++ sbi_console_putchar('\r');
|
|
|
++ sbi_console_putchar(*buf);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++static struct tty_driver *sbi_console_device(struct console *co, int *index)
|
|
|
++{
|
|
|
++ *index = co->index;
|
|
|
++ return sbi_tty_driver;
|
|
|
++}
|
|
|
++
|
|
|
++static int sbi_console_setup(struct console *co, char *options)
|
|
|
++{
|
|
|
++ return co->index != 0 ? -ENODEV : 0;
|
|
|
++}
|
|
|
++
|
|
|
++static struct console sbi_console = {
|
|
|
++ .name = "sbi_console",
|
|
|
++ .write = sbi_console_write,
|
|
|
++ .device = sbi_console_device,
|
|
|
++ .setup = sbi_console_setup,
|
|
|
++ .flags = CON_PRINTBUFFER,
|
|
|
++ .index = -1
|
|
|
++};
|
|
|
++
|
|
|
++static int __init sbi_console_init(void)
|
|
|
++{
|
|
|
++ int ret;
|
|
|
++
|
|
|
++ register_console(&sbi_console);
|
|
|
++
|
|
|
++ sbi_tty_driver = tty_alloc_driver(1,
|
|
|
++ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
|
|
|
++ if (unlikely(IS_ERR(sbi_tty_driver)))
|
|
|
++ return PTR_ERR(sbi_tty_driver);
|
|
|
++
|
|
|
++ sbi_tty_driver->driver_name = "sbi";
|
|
|
++ sbi_tty_driver->name = "ttySBI";
|
|
|
++ sbi_tty_driver->major = TTY_MAJOR;
|
|
|
++ sbi_tty_driver->minor_start = 0;
|
|
|
++ sbi_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
|
|
|
++ sbi_tty_driver->subtype = SERIAL_TYPE_NORMAL;
|
|
|
++ sbi_tty_driver->init_termios = tty_std_termios;
|
|
|
++ tty_set_operations(sbi_tty_driver, &sbi_tty_ops);
|
|
|
++
|
|
|
++ tty_port_init(&sbi_tty_port);
|
|
|
++ tty_port_link_device(&sbi_tty_port, sbi_tty_driver, 0);
|
|
|
++
|
|
|
++ ret = tty_register_driver(sbi_tty_driver);
|
|
|
++ if (unlikely(ret))
|
|
|
++ goto out_tty_put;
|
|
|
++
|
|
|
++ /* Poll the console once, which will trigger future interrupts */
|
|
|
++ sbi_console_isr();
|
|
|
++
|
|
|
++ return ret;
|
|
|
++
|
|
|
++out_tty_put:
|
|
|
++ put_tty_driver(sbi_tty_driver);
|
|
|
++ return ret;
|
|
|
++}
|
|
|
++
|
|
|
++static void __exit sbi_console_exit(void)
|
|
|
++{
|
|
|
++ tty_unregister_driver(sbi_tty_driver);
|
|
|
++ put_tty_driver(sbi_tty_driver);
|
|
|
++}
|
|
|
++
|
|
|
++module_init(sbi_console_init);
|
|
|
++module_exit(sbi_console_exit);
|
|
|
++
|
|
|
++MODULE_DESCRIPTION("RISC-V SBI console driver");
|
|
|
++MODULE_LICENSE("GPL");
|
|
|
++
|
|
|
++#ifdef CONFIG_EARLY_PRINTK
|
|
|
++
|
|
|
++static struct console early_console_dev __initdata = {
|
|
|
++ .name = "early",
|
|
|
++ .write = sbi_console_write,
|
|
|
++ .flags = CON_PRINTBUFFER | CON_BOOT,
|
|
|
++ .index = -1
|
|
|
++};
|
|
|
++
|
|
|
++static int __init setup_early_printk(char *str)
|
|
|
++{
|
|
|
++ if (early_console == NULL) {
|
|
|
++ early_console = &early_console_dev;
|
|
|
++ register_console(early_console);
|
|
|
++ }
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++early_param("earlyprintk", setup_early_printk);
|
|
|
++
|
|
|
++#endif
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/sbi.S linux-4.6.2.riscv/arch/riscv/kernel/sbi.S
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/sbi.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/sbi.S 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,15 @@
++.globl sbi_hart_id; sbi_hart_id = -2048
++.globl sbi_num_harts; sbi_num_harts = -2032
++.globl sbi_query_memory; sbi_query_memory = -2016
++.globl sbi_console_putchar; sbi_console_putchar = -2000
++.globl sbi_console_getchar; sbi_console_getchar = -1984
++.globl sbi_send_ipi; sbi_send_ipi = -1952
++.globl sbi_clear_ipi; sbi_clear_ipi = -1936
++.globl sbi_timebase; sbi_timebase = -1920
++.globl sbi_shutdown; sbi_shutdown = -1904
++.globl sbi_set_timer; sbi_set_timer = -1888
++.globl sbi_mask_interrupt; sbi_mask_interrupt = -1872
++.globl sbi_unmask_interrupt; sbi_unmask_interrupt = -1856
++.globl sbi_remote_sfence_vm; sbi_remote_sfence_vm = -1840
++.globl sbi_remote_sfence_vm_range; sbi_remote_sfence_vm_range = -1824
++.globl sbi_remote_fence_i; sbi_remote_fence_i = -1808
+diff -Nur linux-4.6.2/arch/riscv/kernel/setup.c linux-4.6.2.riscv/arch/riscv/kernel/setup.c
+--- linux-4.6.2/arch/riscv/kernel/setup.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/kernel/setup.c 2017-03-04 02:48:34.166888015 +0100
+@@ -0,0 +1,161 @@
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/memblock.h>
++#include <linux/sched.h>
++#include <linux/initrd.h>
++#include <linux/console.h>
++#include <linux/screen_info.h>
++
++#include <asm/setup.h>
++#include <asm/sections.h>
++#include <asm/pgtable.h>
++#include <asm/smp.h>
++#include <asm/sbi.h>
++
++#ifdef CONFIG_DUMMY_CONSOLE
++struct screen_info screen_info = {
++ .orig_video_lines = 30,
++ .orig_video_cols = 80,
++ .orig_video_mode = 0,
++ .orig_video_ega_bx = 0,
++ .orig_video_isVGA = 1,
++ .orig_video_points = 8
++};
++#endif
++
++static char __initdata command_line[COMMAND_LINE_SIZE];
++#ifdef CONFIG_CMDLINE_BOOL
++static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
++#endif /* CONFIG_CMDLINE_BOOL */
++
++unsigned long va_pa_offset;
++unsigned long pfn_base;
++
++#ifdef CONFIG_BLK_DEV_INITRD
++static void __init setup_initrd(void)
++{
++ extern char __initramfs_start[];
++ extern unsigned long __initramfs_size;
++ unsigned long size;
++
++ if (__initramfs_size > 0) {
++ initrd_start = (unsigned long)(&__initramfs_start);
++ initrd_end = initrd_start + __initramfs_size;
++ }
++
++ if (initrd_start >= initrd_end) {
++ printk(KERN_INFO "initrd not found or empty");
++ goto disable;
++ }
++ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
++ printk(KERN_ERR "initrd extends beyond end of memory");
++ goto disable;
++ }
++
++ size = initrd_end - initrd_start;
++ memblock_reserve(__pa(initrd_start), size);
++ initrd_below_start_ok = 1;
++
++ printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n",
++ (void *)(initrd_start), size);
++ return;
++disable:
++ printk(KERN_CONT " - disabling initrd\n");
++ initrd_start = 0;
++ initrd_end = 0;
++}
++#endif /* CONFIG_BLK_DEV_INITRD */
++
++static resource_size_t __initdata mem_size;
++
++/* Parse "mem=nn[KkMmGg]" */
++static int __init early_mem(char *p)
++{
++ if (!p)
++ return -EINVAL;
++ mem_size = memparse(p, &p) & PMD_MASK;
++ if (mem_size == 0)
++ return -EINVAL;
++ return 0;
++}
++early_param("mem", early_mem);
++
++static void __init reserve_boot_page_table(pte_t *table)
++{
++ unsigned long i;
++
++ memblock_reserve(__pa(table), PAGE_SIZE);
++
++ for (i = 0; i < PTRS_PER_PTE; i++) {
++ if (pte_present(table[i]) && !pte_huge(table[i]))
++ reserve_boot_page_table(pfn_to_virt(pte_pfn(table[i])));
++ }
++}
++
++static void __init setup_bootmem(void)
++{
++ unsigned long ret;
++ memory_block_info info;
++
++ ret = sbi_query_memory(0, &info);
++ BUG_ON(ret != 0);
++ BUG_ON((info.base & ~PMD_MASK) != 0);
++ BUG_ON((info.size & ~PMD_MASK) != 0);
++ pr_info("Available physical memory: %ldMB\n", info.size >> 20);
++
++ /* The kernel image is mapped at VA=PAGE_OFFSET and PA=info.base */
++ va_pa_offset = PAGE_OFFSET - info.base;
++ pfn_base = PFN_DOWN(info.base);
++
++ if ((mem_size != 0) && (mem_size < info.size)) {
++ memblock_enforce_memory_limit(mem_size);
++ info.size = mem_size;
++ pr_notice("Physical memory usage limited to %lluMB\n",
++ (unsigned long long)(mem_size >> 20));
++ }
++ set_max_mapnr(PFN_DOWN(info.size));
++ max_low_pfn = PFN_DOWN(info.base + info.size);
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ setup_initrd();
++#endif /* CONFIG_BLK_DEV_INITRD */
++
++ memblock_reserve(info.base, __pa(_end) - info.base);
++ reserve_boot_page_table(pfn_to_virt(csr_read(sptbr)));
++ memblock_allow_resize();
++}
++
++void __init setup_arch(char **cmdline_p)
++{
++#ifdef CONFIG_CMDLINE_BOOL
++#ifdef CONFIG_CMDLINE_OVERRIDE
++ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
++#else
++ if (builtin_cmdline[0] != '\0') {
++ /* Append bootloader command line to built-in */
++ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
++ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
++ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
++ }
++#endif /* CONFIG_CMDLINE_OVERRIDE */
++#endif /* CONFIG_CMDLINE_BOOL */
++ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
++ *cmdline_p = command_line;
++
++ parse_early_param();
++
++ init_mm.start_code = (unsigned long) _stext;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (unsigned long) _end;
++
++ setup_bootmem();
++#ifdef CONFIG_SMP
++ setup_smp();
++#endif
++ paging_init();
++
++#ifdef CONFIG_DUMMY_CONSOLE
++ conswitchp = &dummy_con;
++#endif
++}
+diff -Nur linux-4.6.2/arch/riscv/kernel/signal.c linux-4.6.2.riscv/arch/riscv/kernel/signal.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/signal.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/signal.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,236 @@
|
|
|
++#include <linux/signal.h>
|
|
|
++#include <linux/uaccess.h>
|
|
|
++#include <linux/syscalls.h>
|
|
|
++#include <linux/tracehook.h>
|
|
|
++#include <linux/linkage.h>
|
|
|
++
|
|
|
++#include <asm/ucontext.h>
|
|
|
++#include <asm/vdso.h>
|
|
|
++#include <asm/switch_to.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++
|
|
|
++#define DEBUG_SIG 0
|
|
|
++
|
|
|
++struct rt_sigframe {
|
|
|
++ struct siginfo info;
|
|
|
++ struct ucontext uc;
|
|
|
++};
|
|
|
++
|
|
|
++static long restore_sigcontext(struct pt_regs *regs,
|
|
|
++ struct sigcontext __user *sc)
|
|
|
++{
|
|
|
++ struct task_struct *task = current;
|
|
|
++ long err;
|
|
|
++ /* sc_regs is structured the same as the start of pt_regs */
|
|
|
++ err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
|
|
|
++ err |= __copy_from_user(&task->thread.fstate, &sc->sc_fpregs,
|
|
|
++ sizeof(sc->sc_fpregs));
|
|
|
++ if (likely(!err))
|
|
|
++ fstate_restore(task, regs);
|
|
|
++ return err;
|
|
|
++}
|
|
|
++
|
|
|
++SYSCALL_DEFINE0(rt_sigreturn)
|
|
|
++{
|
|
|
++ struct pt_regs *regs = current_pt_regs();
|
|
|
++ struct rt_sigframe __user *frame;
|
|
|
++ struct task_struct *task;
|
|
|
++ sigset_t set;
|
|
|
++
|
|
|
++ /* Always make any pending restarted system calls return -EINTR */
|
|
|
++ current->restart_block.fn = do_no_restart_syscall;
|
|
|
++
|
|
|
++ frame = (struct rt_sigframe __user *)regs->sp;
|
|
|
++
|
|
|
++ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
|
|
++ goto badframe;
|
|
|
++
|
|
|
++ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
|
|
|
++ goto badframe;
|
|
|
++
|
|
|
++ set_current_blocked(&set);
|
|
|
++
|
|
|
++ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
|
|
|
++ goto badframe;
|
|
|
++
|
|
|
++ if (restore_altstack(&frame->uc.uc_stack))
|
|
|
++ goto badframe;
|
|
|
++
|
|
|
++ return regs->a0;
|
|
|
++
|
|
|
++badframe:
|
|
|
++ task = current;
|
|
|
++ if (show_unhandled_signals) {
|
|
|
++ pr_info_ratelimited("%s[%d]: bad frame in %s: "
|
|
|
++ "frame=%p pc=%p sp=%p\n",
|
|
|
++ task->comm, task_pid_nr(task), __func__,
|
|
|
++ frame, (void *)regs->sepc, (void *)regs->sp);
|
|
|
++ }
|
|
|
++ force_sig(SIGSEGV, task);
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static long setup_sigcontext(struct sigcontext __user *sc,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ struct task_struct *task = current;
|
|
|
++ long err;
|
|
|
++ /* sc_regs is structured the same as the start of pt_regs */
|
|
|
++ err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
|
|
|
++ fstate_save(task, regs);
|
|
|
++ err |= __copy_to_user(&sc->sc_fpregs, &task->thread.fstate,
|
|
|
++ sizeof(sc->sc_fpregs));
|
|
|
++ return err;
|
|
|
++}
|
|
|
++
|
|
|
++static inline void __user *get_sigframe(struct ksignal *ksig,
|
|
|
++ struct pt_regs *regs, size_t framesize)
|
|
|
++{
|
|
|
++ unsigned long sp;
|
|
|
++ /* Default to using normal stack */
|
|
|
++ sp = regs->sp;
|
|
|
++
|
|
|
++ /*
|
|
|
++ * If we are on the alternate signal stack and would overflow it, don't.
|
|
|
++ * Return an always-bogus address instead so we will die with SIGSEGV.
|
|
|
++ */
|
|
|
++ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
|
|
|
++ return (void __user __force *)(-1UL);
|
|
|
++
|
|
|
++ /* This is the X/Open sanctioned signal stack switching. */
|
|
|
++ sp = sigsp(sp, ksig) - framesize;
|
|
|
++
|
|
|
++ /* Align the stack frame. */
|
|
|
++ sp &= ~0xfUL;
|
|
|
++
|
|
|
++ return (void __user *)sp;
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
|
|
|
++ struct pt_regs *regs)
|
|
|
++{
|
|
|
++ struct rt_sigframe __user *frame;
|
|
|
++ long err = 0;
|
|
|
++
|
|
|
++ frame = get_sigframe(ksig, regs, sizeof(*frame));
|
|
|
++ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
|
|
++ return -EFAULT;
|
|
|
++
|
|
|
++ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
|
|
|
++
|
|
|
++ /* Create the ucontext. */
|
|
|
++ err |= __put_user(0, &frame->uc.uc_flags);
|
|
|
++ err |= __put_user(NULL, &frame->uc.uc_link);
|
|
|
++ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
|
|
|
++ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
|
|
|
++ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
|
|
++ if (err)
|
|
|
++ return -EFAULT;
|
|
|
++
|
|
|
++ /* Set up to return from userspace. */
|
|
|
++ regs->ra = (unsigned long)VDSO_SYMBOL(
|
|
|
++ current->mm->context.vdso, rt_sigreturn);
|
|
|
++
|
|
|
++ /*
|
|
|
++ * Set up registers for signal handler.
|
|
|
++ * Registers that we don't modify keep the value they had from
|
|
|
++ * user-space at the time we took the signal.
|
|
|
++ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
|
|
|
++ * since some things rely on this (e.g. glibc's debug/segfault.c).
|
|
|
++ */
|
|
|
++ regs->sepc = (unsigned long)ksig->ka.sa.sa_handler;
|
|
|
++ regs->sp = (unsigned long)frame;
|
|
|
++ regs->a0 = ksig->sig; /* a0: signal number */
|
|
|
++ regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
|
|
|
++ regs->a2 = (unsigned long)(&frame->uc); /* a2: ucontext pointer */
|
|
|
++
|
|
|
++#if DEBUG_SIG
|
|
|
++ pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
|
|
|
++ current->comm, task_pid_nr(current), ksig->sig,
|
|
|
++ (void *)regs->sepc, (void *)regs->ra, frame);
|
|
|
++#endif
|
|
|
++
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
|
|
|
++{
|
|
|
++ sigset_t *oldset = sigmask_to_save();
|
|
|
++ int ret;
|
|
|
++
|
|
|
++ /* Are we from a system call? */
|
|
|
++ if (regs->scause == EXC_SYSCALL) {
|
|
|
++ /* If so, check system call restarting.. */
|
|
|
++ switch (regs->a0) {
|
|
|
++ case -ERESTART_RESTARTBLOCK:
|
|
|
++ case -ERESTARTNOHAND:
|
|
|
++ regs->a0 = -EINTR;
|
|
|
++ break;
|
|
|
++
|
|
|
++ case -ERESTARTSYS:
|
|
|
++ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
|
|
|
++ regs->a0 = -EINTR;
|
|
|
++ break;
|
|
|
++ }
|
|
|
++ /* fallthrough */
|
|
|
++ case -ERESTARTNOINTR:
|
|
|
++ regs->sepc -= 0x4;
|
|
|
++ break;
|
|
|
++ }
|
|
|
++ }
|
|
|
++
|
|
|
++ /* Set up the stack frame */
|
|
|
++ ret = setup_rt_frame(ksig, oldset, regs);
|
|
|
++
|
|
|
++ signal_setup_done(ret, ksig, 0);
|
|
|
++}
|
|
|
++
|
|
|
++static void do_signal(struct pt_regs *regs)
|
|
|
++{
|
|
|
++ struct ksignal ksig;
|
|
|
++
|
|
|
++ if (get_signal(&ksig)) {
|
|
|
++ /* Actually deliver the signal */
|
|
|
++ handle_signal(&ksig, regs);
|
|
|
++ return;
|
|
|
++ }
|
|
|
++
|
|
|
++ /* Did we come from a system call? */
|
|
|
++ if (regs->scause == EXC_SYSCALL) {
|
|
|
++ /* Restart the system call - no handlers present */
|
|
|
++ switch (regs->a0) {
|
|
|
++ case -ERESTARTNOHAND:
|
|
|
++ case -ERESTARTSYS:
|
|
|
++ case -ERESTARTNOINTR:
|
|
|
++ regs->sepc -= 0x4;
|
|
|
++ break;
|
|
|
++ case -ERESTART_RESTARTBLOCK:
|
|
|
++ regs->a7 = __NR_restart_syscall;
|
|
|
++ regs->sepc -= 0x4;
|
|
|
++ break;
|
|
|
++ }
|
|
|
++ }
|
|
|
++
|
|
|
++ /* If there is no signal to deliver, we just put the saved
|
|
|
++ sigmask back. */
|
|
|
++ restore_saved_sigmask();
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * notification of userspace execution resumption
|
|
|
++ * - triggered by the _TIF_WORK_MASK flags
|
|
|
++ */
|
|
|
++asmlinkage void do_notify_resume(struct pt_regs *regs,
|
|
|
++ unsigned long thread_info_flags)
|
|
|
++{
|
|
|
++ /* Handle pending signal delivery */
|
|
|
++ if (thread_info_flags & _TIF_SIGPENDING) {
|
|
|
++ do_signal(regs);
|
|
|
++ }
|
|
|
++
|
|
|
++ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
|
|
++ clear_thread_flag(TIF_NOTIFY_RESUME);
|
|
|
++ tracehook_notify_resume(regs);
|
|
|
++ }
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/smpboot.c linux-4.6.2.riscv/arch/riscv/kernel/smpboot.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/smpboot.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/smpboot.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,72 @@
|
|
|
++#include <linux/module.h>
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/kernel.h>
|
|
|
++#include <linux/mm.h>
|
|
|
++#include <linux/sched.h>
|
|
|
++#include <linux/kernel_stat.h>
|
|
|
++#include <linux/notifier.h>
|
|
|
++#include <linux/cpu.h>
|
|
|
++#include <linux/percpu.h>
|
|
|
++#include <linux/delay.h>
|
|
|
++#include <linux/err.h>
|
|
|
++#include <linux/irq.h>
|
|
|
++#include <asm/mmu_context.h>
|
|
|
++#include <asm/tlbflush.h>
|
|
|
++#include <asm/sections.h>
|
|
|
++#include <asm/sbi.h>
|
|
|
++
|
|
|
++void *__cpu_up_stack_pointer[NR_CPUS];
|
|
|
++
|
|
|
++void __init smp_prepare_boot_cpu(void)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++void __init smp_prepare_cpus(unsigned int max_cpus)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++void __init setup_smp(void)
|
|
|
++{
|
|
|
++ int i, num_cpus = sbi_num_harts();
|
|
|
++
|
|
|
++ for (i = 0; i < min(num_cpus, NR_CPUS); i++) {
|
|
|
++ set_cpu_possible(i, true);
|
|
|
++ set_cpu_present(i, true);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
|
|
++{
|
|
|
++ /* Signal cpu to start */
|
|
|
++ __cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE;
|
|
|
++
|
|
|
++ while (!cpu_online(cpu))
|
|
|
++ ;
|
|
|
++
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++void __init smp_cpus_done(unsigned int max_cpus)
|
|
|
++{
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * C entry point for a secondary processor.
|
|
|
++ */
|
|
|
++asmlinkage void __init smp_callin(void)
|
|
|
++{
|
|
|
++ struct mm_struct *mm = &init_mm;
|
|
|
++
|
|
|
++ /* All kernel threads share the same mm context. */
|
|
|
++ atomic_inc(&mm->mm_count);
|
|
|
++ current->active_mm = mm;
|
|
|
++
|
|
|
++ trap_init();
|
|
|
++ init_clockevent();
|
|
|
++ notify_cpu_starting(smp_processor_id());
|
|
|
++ set_cpu_online(smp_processor_id(), 1);
|
|
|
++ local_flush_tlb_all();
|
|
|
++ local_irq_enable();
|
|
|
++ preempt_disable();
|
|
|
++ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/smp.c linux-4.6.2.riscv/arch/riscv/kernel/smp.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/smp.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/smp.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,85 @@
|
|
|
++#include <linux/interrupt.h>
|
|
|
++#include <linux/smp.h>
|
|
|
++#include <linux/sched.h>
|
|
|
++
|
|
|
++#include <asm/sbi.h>
|
|
|
++#include <asm/tlbflush.h>
|
|
|
++#include <asm/cacheflush.h>
|
|
|
++
|
|
|
++/* A collection of single bit ipi messages. */
|
|
|
++static struct {
|
|
|
++ unsigned long bits ____cacheline_aligned;
|
|
|
++} ipi_data[NR_CPUS] __cacheline_aligned;
|
|
|
++
|
|
|
++enum ipi_message_type {
|
|
|
++ IPI_RESCHEDULE,
|
|
|
++ IPI_CALL_FUNC,
|
|
|
++ IPI_MAX
|
|
|
++};
|
|
|
++
|
|
|
++irqreturn_t handle_ipi(void)
|
|
|
++{
|
|
|
++ unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
|
|
|
++ unsigned long ops;
|
|
|
++
|
|
|
++ /* Clear pending IPI */
|
|
|
++ if (!sbi_clear_ipi())
|
|
|
++ return IRQ_NONE;
|
|
|
++
|
|
|
++ mb(); /* Order interrupt and bit testing. */
|
|
|
++ while ((ops = xchg(pending_ipis, 0)) != 0) {
|
|
|
++ mb(); /* Order bit clearing and data access. */
|
|
|
++
|
|
|
++ if (ops & (1 << IPI_RESCHEDULE))
|
|
|
++ scheduler_ipi();
|
|
|
++
|
|
|
++ if (ops & (1 << IPI_CALL_FUNC))
|
|
|
++ generic_smp_call_function_interrupt();
|
|
|
++
|
|
|
++ BUG_ON((ops >> IPI_MAX) != 0);
|
|
|
++
|
|
|
++ mb(); /* Order data access and bit testing. */
|
|
|
++ }
|
|
|
++
|
|
|
++ return IRQ_HANDLED;
|
|
|
++}
|
|
|
++
|
|
|
++static void
|
|
|
++send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
|
|
|
++{
|
|
|
++ int i;
|
|
|
++
|
|
|
++ mb();
|
|
|
++ for_each_cpu(i, to_whom)
|
|
|
++ set_bit(operation, &ipi_data[i].bits);
|
|
|
++
|
|
|
++ mb();
|
|
|
++ for_each_cpu(i, to_whom)
|
|
|
++ sbi_send_ipi(i);
|
|
|
++}
|
|
|
++
|
|
|
++void arch_send_call_function_ipi_mask(struct cpumask *mask)
|
|
|
++{
|
|
|
++ send_ipi_message(mask, IPI_CALL_FUNC);
|
|
|
++}
|
|
|
++
|
|
|
++void arch_send_call_function_single_ipi(int cpu)
|
|
|
++{
|
|
|
++ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
|
|
|
++}
|
|
|
++
|
|
|
++static void ipi_stop(void *unused)
|
|
|
++{
|
|
|
++ while (1)
|
|
|
++ wait_for_interrupt();
|
|
|
++}
|
|
|
++
|
|
|
++void smp_send_stop(void)
|
|
|
++{
|
|
|
++ on_each_cpu(ipi_stop, NULL, 1);
|
|
|
++}
|
|
|
++
|
|
|
++void smp_send_reschedule(int cpu)
|
|
|
++{
|
|
|
++ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/stacktrace.c linux-4.6.2.riscv/arch/riscv/kernel/stacktrace.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/stacktrace.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/stacktrace.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,163 @@
|
|
|
++#include <linux/export.h>
|
|
|
++#include <linux/kallsyms.h>
|
|
|
++#include <linux/sched.h>
|
|
|
++#include <linux/stacktrace.h>
|
|
|
++
|
|
|
++#ifdef CONFIG_FRAME_POINTER
|
|
|
++
|
|
|
++struct stackframe {
|
|
|
++ unsigned long fp;
|
|
|
++ unsigned long ra;
|
|
|
++};
|
|
|
++
|
|
|
++static void notrace walk_stackframe(struct task_struct *task,
|
|
|
++ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
|
|
|
++{
|
|
|
++ unsigned long fp, sp, pc;
|
|
|
++
|
|
|
++ if (regs) {
|
|
|
++ fp = GET_FP(regs);
|
|
|
++ sp = GET_USP(regs);
|
|
|
++ pc = GET_IP(regs);
|
|
|
++ } else if (task == NULL || task == current) {
|
|
|
++ const register unsigned long current_sp __asm__ ("sp");
|
|
|
++ fp = (unsigned long)__builtin_frame_address(0);
|
|
|
++ sp = current_sp;
|
|
|
++ pc = (unsigned long)walk_stackframe;
|
|
|
++ } else {
|
|
|
++ /* task blocked in __switch_to */
|
|
|
++ fp = task->thread.s[0];
|
|
|
++ sp = task->thread.sp;
|
|
|
++ pc = task->thread.ra;
|
|
|
++ }
|
|
|
++
|
|
|
++ for (;;) {
|
|
|
++ unsigned long low, high;
|
|
|
++ struct stackframe *frame;
|
|
|
++
|
|
|
++ if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
|
|
|
++ break;
|
|
|
++
|
|
|
++ /* Validate frame pointer */
|
|
|
++ low = sp + sizeof(struct stackframe);
|
|
|
++ high = ALIGN(sp, THREAD_SIZE);
|
|
|
++ if (unlikely(fp < low || fp > high || fp & 0x7))
|
|
|
++ break;
|
|
|
++ /* Unwind stack frame */
|
|
|
++ frame = (struct stackframe *)fp - 1;
|
|
|
++ sp = fp;
|
|
|
++ fp = frame->fp;
|
|
|
++ pc = frame->ra - 0x4;
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++#else /* !CONFIG_FRAME_POINTER */
|
|
|
++
|
|
|
++static void notrace walk_stackframe(struct task_struct *task,
|
|
|
++ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
|
|
|
++{
|
|
|
++ unsigned long sp, pc;
|
|
|
++ unsigned long *ksp;
|
|
|
++
|
|
|
++ if (regs) {
|
|
|
++ sp = GET_USP(regs);
|
|
|
++ pc = GET_IP(regs);
|
|
|
++ } else if (task == NULL || task == current) {
|
|
|
++ const register unsigned long current_sp __asm__ ("sp");
|
|
|
++ sp = current_sp;
|
|
|
++ pc = (unsigned long)walk_stackframe;
|
|
|
++ } else {
|
|
|
++ /* task blocked in __switch_to */
|
|
|
++ sp = task->thread.sp;
|
|
|
++ pc = task->thread.ra;
|
|
|
++ }
|
|
|
++
|
|
|
++ if (unlikely(sp & 0x7))
|
|
|
++ return;
|
|
|
++
|
|
|
++ ksp = (unsigned long *)sp;
|
|
|
++ while (!kstack_end(ksp)) {
|
|
|
++ if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) {
|
|
|
++ break;
|
|
|
++ }
|
|
|
++ pc = (*ksp++) - 0x4;
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++#endif /* CONFIG_FRAME_POINTER */
|
|
|
++
|
|
|
++
|
|
|
++static bool print_trace_address(unsigned long pc, void *arg)
|
|
|
++{
|
|
|
++ print_ip_sym(pc);
|
|
|
++ return false;
|
|
|
++}
|
|
|
++
|
|
|
++void show_stack(struct task_struct *task, unsigned long *sp)
|
|
|
++{
|
|
|
++ printk("Call Trace:\n");
|
|
|
++ walk_stackframe(task, NULL, print_trace_address, NULL);
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++static bool save_wchan(unsigned long pc, void *arg)
|
|
|
++{
|
|
|
++ if (!in_sched_functions(pc)) {
|
|
|
++ unsigned long *p = arg;
|
|
|
++ *p = pc;
|
|
|
++ return true;
|
|
|
++ }
|
|
|
++ return false;
|
|
|
++}
|
|
|
++
|
|
|
++unsigned long get_wchan(struct task_struct *task)
|
|
|
++{
|
|
|
++ unsigned long pc;
|
|
|
++ pc = 0;
|
|
|
++ if (likely(task && task != current && task->state != TASK_RUNNING)) {
|
|
|
++ walk_stackframe(task, NULL, save_wchan, &pc);
|
|
|
++ }
|
|
|
++ return pc;
|
|
|
++}
|
|
|
++
|
|
|
++
|
|
|
++#ifdef CONFIG_STACKTRACE
|
|
|
++
|
|
|
++static bool __save_trace(unsigned long pc, void *arg, bool nosched)
|
|
|
++{
|
|
|
++ struct stack_trace *trace = arg;
|
|
|
++
|
|
|
++ if (unlikely(nosched && in_sched_functions(pc)))
|
|
|
++ return false;
|
|
|
++ if (unlikely(trace->skip > 0)) {
|
|
|
++ trace->skip--;
|
|
|
++ return false;
|
|
|
++ }
|
|
|
++
|
|
|
++ trace->entries[trace->nr_entries++] = pc;
|
|
|
++ return (trace->nr_entries >= trace->max_entries);
|
|
|
++}
|
|
|
++
|
|
|
++static bool save_trace(unsigned long pc, void *arg)
|
|
|
++{
|
|
|
++ return __save_trace(pc, arg, false);
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * Save stack-backtrace addresses into a stack_trace buffer.
|
|
|
++ */
|
|
|
++void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
|
|
|
++{
|
|
|
++ walk_stackframe(tsk, NULL, save_trace, trace);
|
|
|
++ if (trace->nr_entries < trace->max_entries)
|
|
|
++ trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
|
++}
|
|
|
++EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
|
|
|
++
|
|
|
++void save_stack_trace(struct stack_trace *trace)
|
|
|
++{
|
|
|
++ save_stack_trace_tsk(NULL, trace);
|
|
|
++}
|
|
|
++EXPORT_SYMBOL_GPL(save_stack_trace);
|
|
|
++
|
|
|
++#endif /* CONFIG_STACKTRACE */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/syscall_table.c linux-4.6.2.riscv/arch/riscv/kernel/syscall_table.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/syscall_table.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/syscall_table.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,10 @@
|
|
|
++#include <linux/syscalls.h>
|
|
|
++
|
|
|
++#include <asm/syscalls.h>
|
|
|
++
|
|
|
++#undef __SYSCALL
|
|
|
++#define __SYSCALL(nr, call) [nr] = (call),
|
|
|
++
|
|
|
++void *sys_call_table[__NR_syscalls] = {
|
|
|
++#include <asm/unistd.h>
|
|
|
++};
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/sys_riscv.c linux-4.6.2.riscv/arch/riscv/kernel/sys_riscv.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/sys_riscv.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/sys_riscv.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,70 @@
|
|
|
++#include <linux/syscalls.h>
|
|
|
++#include <asm/unistd.h>
|
|
|
++
|
|
|
++SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
|
|
|
++ unsigned long, prot, unsigned long, flags,
|
|
|
++ unsigned long, fd, off_t, offset)
|
|
|
++{
|
|
|
++ if (unlikely(offset & (~PAGE_MASK)))
|
|
|
++ return -EINVAL;
|
|
|
++ return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
|
|
|
++}
|
|
|
++
|
|
|
++#ifndef CONFIG_64BIT
|
|
|
++SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
|
|
|
++ unsigned long, prot, unsigned long, flags,
|
|
|
++ unsigned long, fd, off_t, offset)
|
|
|
++{
|
|
|
++ /* Note that the shift for mmap2 is constant (12),
|
|
|
++ regardless of PAGE_SIZE */
|
|
|
++ if (unlikely(offset & (~PAGE_MASK >> 12)))
|
|
|
++ return -EINVAL;
|
|
|
++ return sys_mmap_pgoff(addr, len, prot, flags, fd,
|
|
|
++ offset >> (PAGE_SHIFT - 12));
|
|
|
++}
|
|
|
++#endif /* !CONFIG_64BIT */
|
|
|
++
|
|
|
++#ifdef CONFIG_RV_SYSRISCV_ATOMIC
|
|
|
++SYSCALL_DEFINE4(sysriscv, unsigned long, cmd, unsigned long, arg1,
|
|
|
++ unsigned long, arg2, unsigned long, arg3)
|
|
|
++{
|
|
|
++ unsigned long flags;
|
|
|
++ unsigned long prev;
|
|
|
++ unsigned int *ptr;
|
|
|
++ unsigned int err;
|
|
|
++
|
|
|
++ switch (cmd) {
|
|
|
++ case RISCV_ATOMIC_CMPXCHG:
|
|
|
++ ptr = (unsigned int *)arg1;
|
|
|
++ if (!access_ok(VERIFY_WRITE, ptr, sizeof(unsigned int)))
|
|
|
++ return -EFAULT;
|
|
|
++
|
|
|
++ preempt_disable();
|
|
|
++ raw_local_irq_save(flags);
|
|
|
++ err = __get_user(prev, ptr);
|
|
|
++ if (likely(!err && prev == arg2))
|
|
|
++ err = __put_user(arg3, ptr);
|
|
|
++ raw_local_irq_restore(flags);
|
|
|
++ preempt_enable();
|
|
|
++
|
|
|
++ return unlikely(err) ? err : prev;
|
|
|
++
|
|
|
++ case RISCV_ATOMIC_CMPXCHG64:
|
|
|
++ ptr = (unsigned int *)arg1;
|
|
|
++ if (!access_ok(VERIFY_WRITE, ptr, sizeof(unsigned long)))
|
|
|
++ return -EFAULT;
|
|
|
++
|
|
|
++ preempt_disable();
|
|
|
++ raw_local_irq_save(flags);
|
|
|
++ err = __get_user(prev, ptr);
|
|
|
++ if (likely(!err && prev == arg2))
|
|
|
++ err = __put_user(arg3, ptr);
|
|
|
++ raw_local_irq_restore(flags);
|
|
|
++ preempt_enable();
|
|
|
++
|
|
|
++ return unlikely(err) ? err : prev;
|
|
|
++ }
|
|
|
++
|
|
|
++ return -EINVAL;
|
|
|
++}
|
|
|
++#endif /* CONFIG_RV_SYSRISCV_ATOMIC */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/time.c linux-4.6.2.riscv/arch/riscv/kernel/time.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/time.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/time.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,88 @@
|
|
|
++#include <linux/clocksource.h>
|
|
|
++#include <linux/clockchips.h>
|
|
|
++#include <linux/interrupt.h>
|
|
|
++#include <linux/irq.h>
|
|
|
++#include <linux/delay.h>
|
|
|
++
|
|
|
++#include <asm/irq.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++#include <asm/sbi.h>
|
|
|
++#include <asm/delay.h>
|
|
|
++
|
|
|
++unsigned long timebase;
|
|
|
++
|
|
|
++static DEFINE_PER_CPU(struct clock_event_device, clock_event);
|
|
|
++
|
|
|
++static int riscv_timer_set_next_event(unsigned long delta,
|
|
|
++ struct clock_event_device *evdev)
|
|
|
++{
|
|
|
++ sbi_set_timer(get_cycles() + delta);
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static int riscv_timer_set_oneshot(struct clock_event_device *evt)
|
|
|
++{
|
|
|
++ /* no-op; only one mode */
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static int riscv_timer_set_shutdown(struct clock_event_device *evt)
|
|
|
++{
|
|
|
++ /* can't stop the clock! */
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++static cycle_t riscv_rdtime(struct clocksource *cs)
|
|
|
++{
|
|
|
++ return get_cycles();
|
|
|
++}
|
|
|
++
|
|
|
++static struct clocksource riscv_clocksource = {
|
|
|
++ .name = "riscv_clocksource",
|
|
|
++ .rating = 300,
|
|
|
++ .read = riscv_rdtime,
|
|
|
++#ifdef CONFIG_64BITS
|
|
|
++ .mask = CLOCKSOURCE_MASK(64),
|
|
|
++#else
|
|
|
++ .mask = CLOCKSOURCE_MASK(32),
|
|
|
++#endif /* CONFIG_64BITS */
|
|
|
++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
|
|
++};
|
|
|
++
|
|
|
++void riscv_timer_interrupt(void)
|
|
|
++{
|
|
|
++ int cpu = smp_processor_id();
|
|
|
++ struct clock_event_device *evdev = &per_cpu(clock_event, cpu);
|
|
|
++ evdev->event_handler(evdev);
|
|
|
++}
|
|
|
++
|
|
|
++void __init init_clockevent(void)
|
|
|
++{
|
|
|
++ int cpu = smp_processor_id();
|
|
|
++ struct clock_event_device *ce = &per_cpu(clock_event, cpu);
|
|
|
++
|
|
|
++ *ce = (struct clock_event_device){
|
|
|
++ .name = "riscv_timer_clockevent",
|
|
|
++ .features = CLOCK_EVT_FEAT_ONESHOT,
|
|
|
++ .rating = 300,
|
|
|
++ .cpumask = cpumask_of(cpu),
|
|
|
++ .set_next_event = riscv_timer_set_next_event,
|
|
|
++ .set_state_oneshot = riscv_timer_set_oneshot,
|
|
|
++ .set_state_shutdown = riscv_timer_set_shutdown,
|
|
|
++ };
|
|
|
++
|
|
|
++ clockevents_config_and_register(ce, sbi_timebase(), 100, 0x7fffffff);
|
|
|
++}
|
|
|
++
|
|
|
++void __init time_init(void)
|
|
|
++{
|
|
|
++ timebase = sbi_timebase();
|
|
|
++ lpj_fine = timebase;
|
|
|
++ do_div(lpj_fine, HZ);
|
|
|
++
|
|
|
++ clocksource_register_hz(&riscv_clocksource, timebase);
|
|
|
++ init_clockevent();
|
|
|
++
|
|
|
++ /* Enable timer interrupts. */
|
|
|
++ csr_set(sie, SIE_STIE);
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/traps.c linux-4.6.2.riscv/arch/riscv/kernel/traps.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/traps.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/traps.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,146 @@
|
|
|
++#include <linux/kernel.h>
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/sched.h>
|
|
|
++#include <linux/signal.h>
|
|
|
++#include <linux/kdebug.h>
|
|
|
++#include <linux/uaccess.h>
|
|
|
++#include <linux/mm.h>
|
|
|
++#include <linux/module.h>
|
|
|
++
|
|
|
++#include <asm/processor.h>
|
|
|
++#include <asm/ptrace.h>
|
|
|
++#include <asm/csr.h>
|
|
|
++
|
|
|
++int show_unhandled_signals = 1;
|
|
|
++
|
|
|
++extern asmlinkage void handle_exception(void);
|
|
|
++
|
|
|
++static DEFINE_SPINLOCK(die_lock);
|
|
|
++
|
|
|
++void die(struct pt_regs *regs, const char *str)
|
|
|
++{
|
|
|
++ static int die_counter;
|
|
|
++ int ret;
|
|
|
++
|
|
|
++ oops_enter();
|
|
|
++
|
|
|
++ spin_lock_irq(&die_lock);
|
|
|
++ console_verbose();
|
|
|
++ bust_spinlocks(1);
|
|
|
++
|
|
|
++ pr_emerg("%s [#%d]\n", str, ++die_counter);
|
|
|
++ print_modules();
|
|
|
++ show_regs(regs);
|
|
|
++
|
|
|
++ ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV);
|
|
|
++
|
|
|
++ bust_spinlocks(0);
|
|
|
++ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
|
|
|
++ spin_unlock_irq(&die_lock);
|
|
|
++ oops_exit();
|
|
|
++
|
|
|
++ if (in_interrupt())
|
|
|
++ panic("Fatal exception in interrupt");
|
|
|
++ if (panic_on_oops)
|
|
|
++ panic("Fatal exception");
|
|
|
++ if (ret != NOTIFY_STOP)
|
|
|
++ do_exit(SIGSEGV);
|
|
|
++}
|
|
|
++
|
|
|
++static inline void do_trap_siginfo(int signo, int code,
|
|
|
++ unsigned long addr, struct task_struct *tsk)
|
|
|
++{
|
|
|
++ siginfo_t info;
|
|
|
++
|
|
|
++ info.si_signo = signo;
|
|
|
++ info.si_errno = 0;
|
|
|
++ info.si_code = code;
|
|
|
++ info.si_addr = (void __user *)addr;
|
|
|
++ force_sig_info(signo, &info, tsk);
|
|
|
++}
|
|
|
++
|
|
|
++void do_trap(struct pt_regs *regs, int signo, int code,
|
|
|
++ unsigned long addr, struct task_struct *tsk)
|
|
|
++{
|
|
|
++ if (show_unhandled_signals && unhandled_signal(tsk, signo)
|
|
|
++ && printk_ratelimit()) {
|
|
|
++ pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
|
|
|
++ tsk->comm, task_pid_nr(tsk), signo, code, addr);
|
|
|
++ print_vma_addr(KERN_CONT " in ", GET_IP(regs));
|
|
|
++ pr_cont("\n");
|
|
|
++ show_regs(regs);
|
|
|
++ }
|
|
|
++
|
|
|
++ do_trap_siginfo(signo, code, addr, tsk);
|
|
|
++}
|
|
|
++
|
|
|
++static void do_trap_error(struct pt_regs *regs, int signo, int code,
|
|
|
++ unsigned long addr, const char *str)
|
|
|
++{
|
|
|
++ if (user_mode(regs)) {
|
|
|
++ do_trap(regs, signo, code, addr, current);
|
|
|
++ } else {
|
|
|
++ if (!fixup_exception(regs))
|
|
|
++ die(regs, str);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++#define DO_ERROR_INFO(name, signo, code, str) \
|
|
|
++asmlinkage void name(struct pt_regs *regs) \
|
|
|
++{ \
|
|
|
++ do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \
|
|
|
++}
|
|
|
++
|
|
|
++DO_ERROR_INFO(do_trap_unknown,
|
|
|
++ SIGILL, ILL_ILLTRP, "unknown exception");
|
|
|
++DO_ERROR_INFO(do_trap_amo_misaligned,
|
|
|
++ SIGBUS, BUS_ADRALN, "AMO address misaligned");
|
|
|
++DO_ERROR_INFO(do_trap_insn_misaligned,
|
|
|
++ SIGBUS, BUS_ADRALN, "instruction address misaligned");
|
|
|
++DO_ERROR_INFO(do_trap_insn_illegal,
|
|
|
++ SIGILL, ILL_ILLOPC, "illegal instruction");
|
|
|
++
|
|
|
++asmlinkage void do_trap_break(struct pt_regs *regs)
|
|
|
++{
|
|
|
++#ifdef CONFIG_GENERIC_BUG
|
|
|
++ if (!user_mode(regs)) {
|
|
|
++ enum bug_trap_type type;
|
|
|
++
|
|
|
++ type = report_bug(regs->sepc, regs);
|
|
|
++ switch (type) {
|
|
|
++ case BUG_TRAP_TYPE_NONE:
|
|
|
++ break;
|
|
|
++ case BUG_TRAP_TYPE_WARN:
|
|
|
++ regs->sepc += sizeof(bug_insn_t);
|
|
|
++ return;
|
|
|
++ case BUG_TRAP_TYPE_BUG:
|
|
|
++ die(regs, "Kernel BUG");
|
|
|
++ }
|
|
|
++ }
|
|
|
++#endif /* CONFIG_GENERIC_BUG */
|
|
|
++
|
|
|
++ do_trap_siginfo(SIGTRAP, TRAP_BRKPT, regs->sepc, current);
|
|
|
++ regs->sepc += 0x4;
|
|
|
++}
|
|
|
++
|
|
|
++#ifdef CONFIG_GENERIC_BUG
|
|
|
++int is_valid_bugaddr(unsigned long pc)
|
|
|
++{
|
|
|
++ bug_insn_t insn;
|
|
|
++
|
|
|
++ if (pc < PAGE_OFFSET)
|
|
|
++ return 0;
|
|
|
++ if (probe_kernel_address((bug_insn_t __user *)pc, insn))
|
|
|
++ return 0;
|
|
|
++ return (insn == __BUG_INSN);
|
|
|
++}
|
|
|
++#endif /* CONFIG_GENERIC_BUG */
|
|
|
++
|
|
|
++void __init trap_init(void)
|
|
|
++{
|
|
|
++ /* Set sup0 scratch register to 0, indicating to exception vector
|
|
|
++ that we are presently executing in the kernel */
|
|
|
++ csr_write(sscratch, 0);
|
|
|
++ /* Set the exception vector address */
|
|
|
++ csr_write(stvec, &handle_exception);
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/vdso/.gitignore linux-4.6.2.riscv/arch/riscv/kernel/vdso/.gitignore
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/vdso/.gitignore 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/vdso/.gitignore 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1 @@
|
|
|
++vdso.lds
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/vdso/Makefile linux-4.6.2.riscv/arch/riscv/kernel/vdso/Makefile
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/vdso/Makefile 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/vdso/Makefile 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,61 @@
|
|
|
++# Derived from arch/{arm64,tile}/kernel/vdso/Makefile
|
|
|
++
|
|
|
++obj-vdso := sigreturn.o
|
|
|
++
|
|
|
++# Build rules
|
|
|
++targets := $(obj-vdso) vdso.so vdso.so.dbg
|
|
|
++obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
|
|
|
++
|
|
|
++#ccflags-y := -shared -fno-common -fno-builtin
|
|
|
++#ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
|
|
|
++ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
|
|
|
++
|
|
|
++CFLAGS_vdso.so = $(c_flags)
|
|
|
++CFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
|
|
|
++ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
|
|
|
++CFLAGS_vdso_syms.o = -r
|
|
|
++
|
|
|
++obj-y += vdso.o
|
|
|
++
|
|
|
++# We also create a special relocatable object that should mirror the symbol
|
|
|
++# table and layout of the linked DSO. With ld -R we can then refer to
|
|
|
++# these symbols in the kernel code rather than hand-coded addresses.
|
|
|
++extra-y += vdso.lds vdso-syms.o
|
|
|
++$(obj)/built-in.o: $(obj)/vdso-syms.o
|
|
|
++$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
|
|
|
++
|
|
|
++CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
|
|
|
++
|
|
|
++# Force dependency
|
|
|
++$(obj)/vdso.o : $(obj)/vdso.so
|
|
|
++
|
|
|
++# Link rule for the *.so file; *.lds must be first
|
|
|
++$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
|
|
|
++ $(call if_changed,vdsold)
|
|
|
++$(obj)/vdso-syms.o: $(src)/vdso.lds $(obj-vdso)
|
|
|
++ $(call if_changed,vdsold)
|
|
|
++
|
|
|
++# Strip rule for the *.so file
|
|
|
++$(obj)/%.so: OBJCOPYFLAGS := -S
|
|
|
++$(obj)/%.so: $(obj)/%.so.dbg FORCE
|
|
|
++ $(call if_changed,objcopy)
|
|
|
++
|
|
|
++# Assembly rules for the *.S files
|
|
|
++$(obj-vdso): %.o: %.S
|
|
|
++ $(call if_changed_dep,vdsoas)
|
|
|
++
|
|
|
++# Actual build commands
|
|
|
++quiet_cmd_vdsold = VDSOLD $@
|
|
|
++ cmd_vdsold = $(CC) $(c_flags) -nostdlib $(CFLAGS_$(@F)) -Wl,-n -Wl,-T $^ -o $@
|
|
|
++quiet_cmd_vdsoas = VDSOAS $@
|
|
|
++ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
|
|
|
++
|
|
|
++# Install commands for the unstripped file
|
|
|
++quiet_cmd_vdso_install = INSTALL $@
|
|
|
++ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
|
|
|
++
|
|
|
++vdso.so: $(obj)/vdso.so.dbg
|
|
|
++ @mkdir -p $(MODLIB)/vdso
|
|
|
++ $(call cmd,vdso_install)
|
|
|
++
|
|
|
++vdso_install: vdso.so
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/vdso/sigreturn.S linux-4.6.2.riscv/arch/riscv/kernel/vdso/sigreturn.S
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/vdso/sigreturn.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/vdso/sigreturn.S 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,11 @@
|
|
|
++#include <linux/linkage.h>
|
|
|
++#include <asm/unistd.h>
|
|
|
++
|
|
|
++ .text
|
|
|
++ENTRY(__vdso_rt_sigreturn)
|
|
|
++ .cfi_startproc
|
|
|
++ .cfi_signal_frame
|
|
|
++ li a7, __NR_rt_sigreturn
|
|
|
++ scall
|
|
|
++ .cfi_endproc
|
|
|
++ENDPROC(__vdso_rt_sigreturn)
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/vdso/vdso.lds.S linux-4.6.2.riscv/arch/riscv/kernel/vdso/vdso.lds.S
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/vdso/vdso.lds.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/vdso/vdso.lds.S 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,63 @@
|
|
|
++OUTPUT_ARCH(riscv)
|
|
|
++
|
|
|
++SECTIONS
|
|
|
++{
|
|
|
++ . = SIZEOF_HEADERS;
|
|
|
++
|
|
|
++ .hash : { *(.hash) } :text
|
|
|
++ .gnu.hash : { *(.gnu.hash) }
|
|
|
++ .dynsym : { *(.dynsym) }
|
|
|
++ .dynstr : { *(.dynstr) }
|
|
|
++ .gnu.version : { *(.gnu.version) }
|
|
|
++ .gnu.version_d : { *(.gnu.version_d) }
|
|
|
++ .gnu.version_r : { *(.gnu.version_r) }
|
|
|
++
|
|
|
++ .note : { *(.note.*) } :text :note
|
|
|
++ .dynamic : { *(.dynamic) } :text :dynamic
|
|
|
++
|
|
|
++ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
|
|
|
++ .eh_frame : { KEEP (*(.eh_frame)) } :text
|
|
|
++
|
|
|
++ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
|
|
|
++
|
|
|
++ /*
|
|
|
++ * This linker script is used both with -r and with -shared.
|
|
|
++ * For the layouts to match, we need to skip more than enough
|
|
|
++ * space for the dynamic symbol table, etc. If this amount is
|
|
|
++ * insufficient, ld -shared will error; simply increase it here.
|
|
|
++ */
|
|
|
++ . = 0x800;
|
|
|
++ .text : { *(.text .text.*) } :text
|
|
|
++
|
|
|
++ .data : {
|
|
|
++ *(.got.plt) *(.got)
|
|
|
++ *(.data .data.* .gnu.linkonce.d.*)
|
|
|
++ *(.dynbss)
|
|
|
++ *(.bss .bss.* .gnu.linkonce.b.*)
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * We must supply the ELF program headers explicitly to get just one
|
|
|
++ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
|
|
|
++ */
|
|
|
++PHDRS
|
|
|
++{
|
|
|
++ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
|
|
|
++ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
|
|
|
++ note PT_NOTE FLAGS(4); /* PF_R */
|
|
|
++ eh_frame_hdr PT_GNU_EH_FRAME;
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * This controls what symbols we export from the DSO.
|
|
|
++ */
|
|
|
++VERSION
|
|
|
++{
|
|
|
++ LINUX_2.6 {
|
|
|
++ global:
|
|
|
++ __vdso_rt_sigreturn;
|
|
|
++ local: *;
|
|
|
++ };
|
|
|
++}
|
|
|
++
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/vdso/vdso.S linux-4.6.2.riscv/arch/riscv/kernel/vdso/vdso.S
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/vdso/vdso.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/vdso/vdso.S 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,14 @@
|
|
|
++#include <linux/init.h>
|
|
|
++#include <linux/linkage.h>
|
|
|
++#include <asm/page.h>
|
|
|
++
|
|
|
++ __PAGE_ALIGNED_DATA
|
|
|
++
|
|
|
++ .globl vdso_start, vdso_end
|
|
|
++ .balign PAGE_SIZE
|
|
|
++vdso_start:
|
|
|
++ .incbin "arch/riscv/kernel/vdso/vdso.so"
|
|
|
++ .balign PAGE_SIZE
|
|
|
++vdso_end:
|
|
|
++
|
|
|
++ .previous
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/vdso.c linux-4.6.2.riscv/arch/riscv/kernel/vdso.c
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/vdso.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/vdso.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,106 @@
|
|
|
++#include <linux/mm.h>
|
|
|
++#include <linux/slab.h>
|
|
|
++#include <linux/binfmts.h>
|
|
|
++#include <linux/err.h>
|
|
|
++
|
|
|
++#include <asm/vdso.h>
|
|
|
++
|
|
|
++extern char vdso_start[], vdso_end[];
|
|
|
++
|
|
|
++static unsigned int vdso_pages;
|
|
|
++static struct page **vdso_pagelist;
|
|
|
++
|
|
|
++/*
|
|
|
++ * The vDSO data page.
|
|
|
++ */
|
|
|
++static union {
|
|
|
++ struct vdso_data data;
|
|
|
++ u8 page[PAGE_SIZE];
|
|
|
++} vdso_data_store __page_aligned_data;
|
|
|
++struct vdso_data *vdso_data = &vdso_data_store.data;
|
|
|
++
|
|
|
++static int __init vdso_init(void)
|
|
|
++{
|
|
|
++ unsigned int i;
|
|
|
++
|
|
|
++ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
|
|
|
++ vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1), GFP_KERNEL);
|
|
|
++ if (unlikely(vdso_pagelist == NULL)) {
|
|
|
++ pr_err("vdso: pagelist allocation failed\n");
|
|
|
++ return -ENOMEM;
|
|
|
++ }
|
|
|
++
|
|
|
++ for (i = 0; i < vdso_pages; i++) {
|
|
|
++ struct page *pg;
|
|
|
++ pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
|
|
|
++ ClearPageReserved(pg);
|
|
|
++ vdso_pagelist[i] = pg;
|
|
|
++ }
|
|
|
++ vdso_pagelist[i] = virt_to_page(vdso_data);
|
|
|
++
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++arch_initcall(vdso_init);
|
|
|
++
|
|
|
++int arch_setup_additional_pages(struct linux_binprm *bprm,
|
|
|
++ int uses_interp)
|
|
|
++{
|
|
|
++ struct mm_struct *mm = current->mm;
|
|
|
++ unsigned long vdso_base, vdso_len;
|
|
|
++ int ret;
|
|
|
++
|
|
|
++ vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
|
|
|
++
|
|
|
++ down_write(&mm->mmap_sem);
|
|
|
++ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
|
|
|
++ if (unlikely(IS_ERR_VALUE(vdso_base))) {
|
|
|
++ ret = vdso_base;
|
|
|
++ goto end;
|
|
|
++ }
|
|
|
++
|
|
|
++ /*
|
|
|
++ * Put vDSO base into mm struct. We need to do this before calling
|
|
|
++ * install_special_mapping or the perf counter mmap tracking code
|
|
|
++ * will fail to recognise it as a vDSO (since arch_vma_name fails).
|
|
|
++ */
|
|
|
++ mm->context.vdso = (void *)vdso_base;
|
|
|
++
|
|
|
++ ret = install_special_mapping(mm, vdso_base, vdso_len,
|
|
|
++ (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
|
|
|
++ vdso_pagelist);
|
|
|
++
|
|
|
++ if (unlikely(ret)) {
|
|
|
++ mm->context.vdso = NULL;
|
|
|
++ }
|
|
|
++
|
|
|
++end:
|
|
|
++ up_write(&mm->mmap_sem);
|
|
|
++ return ret;
|
|
|
++}
|
|
|
++
|
|
|
++const char *arch_vma_name(struct vm_area_struct *vma)
|
|
|
++{
|
|
|
++ if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso)) {
|
|
|
++ return "[vdso]";
|
|
|
++ }
|
|
|
++ return NULL;
|
|
|
++}
|
|
|
++
|
|
|
++/*
|
|
|
++ * Function stubs to prevent linker errors when AT_SYSINFO_EHDR is defined
|
|
|
++ */
|
|
|
++
|
|
|
++int in_gate_area_no_mm(unsigned long addr)
|
|
|
++{
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++int in_gate_area(struct mm_struct *mm, unsigned long addr)
|
|
|
++{
|
|
|
++ return 0;
|
|
|
++}
|
|
|
++
|
|
|
++struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
|
|
|
++{
|
|
|
++ return NULL;
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/kernel/vmlinux.lds.S linux-4.6.2.riscv/arch/riscv/kernel/vmlinux.lds.S
|
|
|
+--- linux-4.6.2/arch/riscv/kernel/vmlinux.lds.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/kernel/vmlinux.lds.S 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,75 @@
|
|
|
++#define LOAD_OFFSET PAGE_OFFSET
|
|
|
++#include <asm/vmlinux.lds.h>
|
|
|
++#include <asm/page.h>
|
|
|
++#include <asm/cache.h>
|
|
|
++#include <asm/thread_info.h>
|
|
|
++
|
|
|
++OUTPUT_ARCH(riscv)
|
|
|
++ENTRY(_start)
|
|
|
++
|
|
|
++jiffies = jiffies_64;
|
|
|
++
|
|
|
++SECTIONS
|
|
|
++{
|
|
|
++ /* Beginning of code and text segment */
|
|
|
++ . = LOAD_OFFSET;
|
|
|
++ _start = .;
|
|
|
++ __init_begin = .;
|
|
|
++ HEAD_TEXT_SECTION
|
|
|
++ INIT_TEXT_SECTION(PAGE_SIZE)
|
|
|
++ INIT_DATA_SECTION(16)
|
|
|
++ /* we have to discard exit text and such at runtime, not link time */
|
|
|
++ .exit.text :
|
|
|
++ {
|
|
|
++ EXIT_TEXT
|
|
|
++ }
|
|
|
++ .exit.data :
|
|
|
++ {
|
|
|
++ EXIT_DATA
|
|
|
++ }
|
|
|
++ PERCPU_SECTION(L1_CACHE_BYTES)
|
|
|
++ __init_end = .;
|
|
|
++
|
|
|
++ .text : {
|
|
|
++ _text = .;
|
|
|
++ _stext = .;
|
|
|
++ TEXT_TEXT
|
|
|
++ SCHED_TEXT
|
|
|
++ LOCK_TEXT
|
|
|
++ KPROBES_TEXT
|
|
|
++ ENTRY_TEXT
|
|
|
++ IRQENTRY_TEXT
|
|
|
++ *(.fixup)
|
|
|
++ _etext = .;
|
|
|
++ }
|
|
|
++
|
|
|
++ /* Start of data section */
|
|
|
++ _sdata = .;
|
|
|
++ RO_DATA_SECTION(PAGE_SIZE)
|
|
|
++ RW_DATA_SECTION(0x40, PAGE_SIZE, THREAD_SIZE)
|
|
|
++ .sdata : {
|
|
|
++ _gp = . + 0x800;
|
|
|
++ *(.sdata*)
|
|
|
++ }
|
|
|
++ .srodata : {
|
|
|
++ *(.srodata*)
|
|
|
++ }
|
|
|
++ /* End of data section */
|
|
|
++ _edata = .;
|
|
|
++
|
|
|
++ BSS_SECTION(0x20, 0, 0x20)
|
|
|
++
|
|
|
++ EXCEPTION_TABLE(0x10)
|
|
|
++ NOTES
|
|
|
++
|
|
|
++ .rel.dyn : {
|
|
|
++ *(.rel.dyn*)
|
|
|
++ }
|
|
|
++
|
|
|
++ _end = .;
|
|
|
++
|
|
|
++ STABS_DEBUG
|
|
|
++ DWARF_DEBUG
|
|
|
++
|
|
|
++ DISCARDS
|
|
|
++}
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/ashldi3.c linux-4.6.2.riscv/arch/riscv/lib/ashldi3.c
|
|
|
+--- linux-4.6.2/arch/riscv/lib/ashldi3.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/lib/ashldi3.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,28 @@
|
|
|
++#include <linux/export.h>
|
|
|
++
|
|
|
++#include "libgcc.h"
|
|
|
++
|
|
|
++long long __ashldi3(long long u, word_type b)
|
|
|
++{
|
|
|
++ DWunion uu, w;
|
|
|
++ word_type bm;
|
|
|
++
|
|
|
++ if (b == 0)
|
|
|
++ return u;
|
|
|
++
|
|
|
++ uu.ll = u;
|
|
|
++ bm = 32 - b;
|
|
|
++
|
|
|
++ if (bm <= 0) {
|
|
|
++ w.s.low = 0;
|
|
|
++ w.s.high = (unsigned int) uu.s.low << -bm;
|
|
|
++ } else {
|
|
|
++ const unsigned int carries = (unsigned int) uu.s.low >> bm;
|
|
|
++
|
|
|
++ w.s.low = (unsigned int) uu.s.low << b;
|
|
|
++ w.s.high = ((unsigned int) uu.s.high << b) | carries;
|
|
|
++ }
|
|
|
++
|
|
|
++ return w.ll;
|
|
|
++}
|
|
|
++EXPORT_SYMBOL(__ashldi3);
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/ashrdi3.c linux-4.6.2.riscv/arch/riscv/lib/ashrdi3.c
|
|
|
+--- linux-4.6.2/arch/riscv/lib/ashrdi3.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/lib/ashrdi3.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,30 @@
|
|
|
++#include <linux/export.h>
|
|
|
++
|
|
|
++#include "libgcc.h"
|
|
|
++
|
|
|
++long long __ashrdi3(long long u, word_type b)
|
|
|
++{
|
|
|
++ DWunion uu, w;
|
|
|
++ word_type bm;
|
|
|
++
|
|
|
++ if (b == 0)
|
|
|
++ return u;
|
|
|
++
|
|
|
++ uu.ll = u;
|
|
|
++ bm = 32 - b;
|
|
|
++
|
|
|
++ if (bm <= 0) {
|
|
|
++ /* w.s.high = 1..1 or 0..0 */
|
|
|
++ w.s.high =
|
|
|
++ uu.s.high >> 31;
|
|
|
++ w.s.low = uu.s.high >> -bm;
|
|
|
++ } else {
|
|
|
++ const unsigned int carries = (unsigned int) uu.s.high << bm;
|
|
|
++
|
|
|
++ w.s.high = uu.s.high >> b;
|
|
|
++ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
|
|
|
++ }
|
|
|
++
|
|
|
++ return w.ll;
|
|
|
++}
|
|
|
++EXPORT_SYMBOL(__ashrdi3);
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/delay.c linux-4.6.2.riscv/arch/riscv/lib/delay.c
|
|
|
+--- linux-4.6.2/arch/riscv/lib/delay.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/lib/delay.c 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,24 @@
|
|
|
++#include <linux/delay.h>
|
|
|
++#include <linux/param.h>
|
|
|
++#include <linux/timex.h>
|
|
|
++#include <linux/export.h>
|
|
|
++
|
|
|
++void __delay(unsigned long cycles)
|
|
|
++{
|
|
|
++ cycle_t t0 = get_cycles();
|
|
|
++ while ((unsigned long)(get_cycles() - t0) < cycles)
|
|
|
++ cpu_relax();
|
|
|
++}
|
|
|
++
|
|
|
++void udelay(unsigned long usecs)
|
|
|
++{
|
|
|
++ __delay((unsigned long)(((u64)usecs * timebase) / 1000000UL));
|
|
|
++
|
|
|
++}
|
|
|
++EXPORT_SYMBOL(udelay);
|
|
|
++
|
|
|
++void ndelay(unsigned long nsecs)
|
|
|
++{
|
|
|
++ __delay((unsigned long)(((u64)nsecs * timebase) / 1000000000UL));
|
|
|
++}
|
|
|
++EXPORT_SYMBOL(ndelay);
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/libgcc.h linux-4.6.2.riscv/arch/riscv/lib/libgcc.h
|
|
|
+--- linux-4.6.2/arch/riscv/lib/libgcc.h 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/lib/libgcc.h 2017-03-04 02:48:34.170888098 +0100
|
|
|
+@@ -0,0 +1,32 @@
|
|
|
++#ifndef __ASM_LIBGCC_H
|
|
|
++#define __ASM_LIBGCC_H
|
|
|
++
|
|
|
++#include <asm/byteorder.h>
|
|
|
++
|
|
|
++typedef int word_type __attribute__ ((mode (__word__)));
|
|
|
++
|
|
|
++#ifdef __BIG_ENDIAN
|
|
|
++struct DWstruct {
|
|
|
++ int high, low;
|
|
|
++};
|
|
|
++#elif defined(__LITTLE_ENDIAN)
|
|
|
++struct DWstruct {
|
|
|
++ int low, high;
|
|
|
++};
|
|
|
++#else
|
|
|
++#error I feel sick.
|
|
|
++#endif
|
|
|
++
|
|
|
++typedef union {
|
|
|
++ struct DWstruct s;
|
|
|
++ long long ll;
|
|
|
++} DWunion;
|
|
|
++
|
|
|
++extern long long __ashldi3(long long u, word_type b);
|
|
|
++extern long long __ashrdi3(long long u, word_type b);
|
|
|
++extern word_type __cmpdi2(long long a, long long b);
|
|
|
++extern long long __lshrdi3(long long u, word_type b);
|
|
|
++extern long long __muldi3(long long u, long long v);
|
|
|
++extern word_type __ucmpdi2(unsigned long long a, unsigned long long b);
|
|
|
++
|
|
|
++#endif /* __ASM_LIBGCC_H */
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/lshrdi3.c linux-4.6.2.riscv/arch/riscv/lib/lshrdi3.c
|
|
|
+--- linux-4.6.2/arch/riscv/lib/lshrdi3.c 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/lib/lshrdi3.c 2017-03-04 02:48:34.170888098 +0100
|
|
|
+@@ -0,0 +1,28 @@
|
|
|
++#include <linux/export.h>
|
|
|
++
|
|
|
++#include "libgcc.h"
|
|
|
++
|
|
|
++long long __lshrdi3(long long u, word_type b)
|
|
|
++{
|
|
|
++ DWunion uu, w;
|
|
|
++ word_type bm;
|
|
|
++
|
|
|
++ if (b == 0)
|
|
|
++ return u;
|
|
|
++
|
|
|
++ uu.ll = u;
|
|
|
++ bm = 32 - b;
|
|
|
++
|
|
|
++ if (bm <= 0) {
|
|
|
++ w.s.high = 0;
|
|
|
++ w.s.low = (unsigned int) uu.s.high >> -bm;
|
|
|
++ } else {
|
|
|
++ const unsigned int carries = (unsigned int) uu.s.high << bm;
|
|
|
++
|
|
|
++ w.s.high = (unsigned int) uu.s.high >> b;
|
|
|
++ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
|
|
|
++ }
|
|
|
++
|
|
|
++ return w.ll;
|
|
|
++}
|
|
|
++EXPORT_SYMBOL(__lshrdi3);
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/Makefile linux-4.6.2.riscv/arch/riscv/lib/Makefile
|
|
|
+--- linux-4.6.2/arch/riscv/lib/Makefile 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/lib/Makefile 2017-03-04 02:48:34.166888015 +0100
|
|
|
+@@ -0,0 +1,5 @@
|
|
|
++lib-y := delay.o memcpy.o memset.o uaccess.o
|
|
|
++
|
|
|
++ifeq ($(CONFIG_64BIT),)
|
|
|
++lib-y += ashldi3.o ashrdi3.o lshrdi3.o
|
|
|
++endif
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/memcpy.S linux-4.6.2.riscv/arch/riscv/lib/memcpy.S
|
|
|
+--- linux-4.6.2/arch/riscv/lib/memcpy.S 1970-01-01 01:00:00.000000000 +0100
|
|
|
++++ linux-4.6.2.riscv/arch/riscv/lib/memcpy.S 2017-03-04 02:48:34.170888098 +0100
|
|
|
+@@ -0,0 +1,85 @@
|
|
|
++#include <linux/linkage.h>
|
|
|
++#include <asm/asm.h>
|
|
|
++
|
|
|
++/* void *memcpy(void *, const void *, size_t) */
|
|
|
++ENTRY(memcpy)
|
|
|
++ move t6, a0 /* Preserve return value */
|
|
|
++
|
|
|
++ /* Defer to byte-oriented copy for small sizes */
|
|
|
++ sltiu a3, a2, 128
|
|
|
++ bnez a3, 4f
|
|
|
++ /* Use word-oriented copy only if low-order bits match */
|
|
|
++ andi a3, t6, SZREG-1
|
|
|
++ andi a4, a1, SZREG-1
|
|
|
++ bne a3, a4, 4f
|
|
|
++
|
|
|
++ beqz a3, 2f /* Skip if already aligned */
|
|
|
++ /* Round to nearest double word-aligned address
|
|
|
++ greater than or equal to start address */
|
|
|
++ andi a3, a1, ~(SZREG-1)
|
|
|
++ addi a3, a3, SZREG
|
|
|
++ /* Handle initial misalignment */
|
|
|
++ sub a4, a3, a1
|
|
|
++1:
|
|
|
++ lb a5, 0(a1)
|
|
|
++ addi a1, a1, 1
|
|
|
++ sb a5, 0(t6)
|
|
|
++ addi t6, t6, 1
|
|
|
++ bltu a1, a3, 1b
|
|
|
++ sub a2, a2, a4 /* Update count */
|
|
|
++
|
|
|
++2:
|
|
|
++ andi a4, a2, ~((16*SZREG)-1)
|
|
|
++ beqz a4, 4f
|
|
|
++ add a3, a1, a4
|
|
|
++3:
|
|
|
++ REG_L a4, 0(a1)
|
|
|
++ REG_L a5, SZREG(a1)
|
|
|
++ REG_L a6, 2*SZREG(a1)
|
|
|
++ REG_L a7, 3*SZREG(a1)
|
|
|
++ REG_L t0, 4*SZREG(a1)
|
|
|
++ REG_L t1, 5*SZREG(a1)
|
|
|
++ REG_L t2, 6*SZREG(a1)
|
|
|
++ REG_L t3, 7*SZREG(a1)
|
|
|
++ REG_L t4, 8*SZREG(a1)
|
|
|
++ REG_L t5, 9*SZREG(a1)
|
|
|
++ REG_S a4, 0(t6)
|
|
|
++ REG_S a5, SZREG(t6)
|
|
|
++ REG_S a6, 2*SZREG(t6)
|
|
|
++ REG_S a7, 3*SZREG(t6)
|
|
|
++ REG_S t0, 4*SZREG(t6)
|
|
|
++ REG_S t1, 5*SZREG(t6)
|
|
|
++ REG_S t2, 6*SZREG(t6)
|
|
|
++ REG_S t3, 7*SZREG(t6)
|
|
|
++ REG_S t4, 8*SZREG(t6)
|
|
|
++ REG_S t5, 9*SZREG(t6)
|
|
|
++ REG_L a4, 10*SZREG(a1)
|
|
|
++ REG_L a5, 11*SZREG(a1)
|
|
|
++ REG_L a6, 12*SZREG(a1)
|
|
|
++ REG_L a7, 13*SZREG(a1)
|
|
|
++ REG_L t0, 14*SZREG(a1)
|
|
|
++ REG_L t1, 15*SZREG(a1)
|
|
|
++ addi a1, a1, 16*SZREG
|
|
|
++ REG_S a4, 10*SZREG(t6)
|
|
|
++ REG_S a5, 11*SZREG(t6)
|
|
|
++ REG_S a6, 12*SZREG(t6)
|
|
|
++ REG_S a7, 13*SZREG(t6)
|
|
|
++ REG_S t0, 14*SZREG(t6)
|
|
|
++ REG_S t1, 15*SZREG(t6)
|
|
|
++ addi t6, t6, 16*SZREG
|
|
|
++ bltu a1, a3, 3b
|
|
|
++ andi a2, a2, (16*SZREG)-1 /* Update count */
|
|
|
++
|
|
|
++4:
|
|
|
++ /* Handle trailing misalignment */
|
|
|
++ beqz a2, 6f
|
|
|
++ add a3, a1, a2
|
|
|
++5:
|
|
|
++ lb a4, 0(a1)
|
|
|
++ addi a1, a1, 1
|
|
|
++ sb a4, 0(t6)
|
|
|
++ addi t6, t6, 1
|
|
|
++ bltu a1, a3, 5b
|
|
|
++6:
|
|
|
++ ret
|
|
|
++END(memcpy)
|
|
|
+diff -Nur linux-4.6.2/arch/riscv/lib/memset.S linux-4.6.2.riscv/arch/riscv/lib/memset.S
+--- linux-4.6.2/arch/riscv/lib/memset.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/lib/memset.S 2017-03-04 02:48:34.170888098 +0100
+@@ -0,0 +1,104 @@
++#include <linux/linkage.h>
++#include <asm/asm.h>
++
++/* void *memset(void *, int, size_t) */
++ENTRY(memset)
++ move t0, a0 /* Preserve return value */
++
++ /* Defer to byte-oriented fill for small sizes */
++ sltiu a3, a2, 16
++ bnez a3, 4f
++
++ /* Round to nearest XLEN-aligned address
++ greater than or equal to start address */
++ addi a3, t0, SZREG-1
++ andi a3, a3, ~(SZREG-1)
++ beq a3, t0, 2f /* Skip if already aligned */
++ /* Handle initial misalignment */
++ sub a4, a3, t0
++1:
++ sb a1, 0(t0)
++ addi t0, t0, 1
++ bltu t0, a3, 1b
++ sub a2, a2, a4 /* Update count */
++
++2: /* Duff's device with 32 XLEN stores per iteration */
++ /* Broadcast value into all bytes */
++ andi a1, a1, 0xff
++ slli a3, a1, 8
++ or a1, a3, a1
++ slli a3, a1, 16
++ or a1, a3, a1
++#ifdef CONFIG_64BIT
++ slli a3, a1, 32
++ or a1, a3, a1
++#endif
++
++ /* Calculate end address */
++ andi a4, a2, ~(SZREG-1)
++ add a3, t0, a4
++
++ andi a4, a4, 31*SZREG /* Calculate remainder */
++ beqz a4, 3f /* Shortcut if no remainder */
++ neg a4, a4
++ addi a4, a4, 32*SZREG /* Calculate initial offset */
++
++ /* Adjust start address with offset */
++ sub t0, t0, a4
++
++ /* Jump into loop body */
++ /* Assumes 32-bit instruction lengths */
++ la a5, 3f
++#ifdef CONFIG_64BIT
++ srli a4, a4, 1
++#endif
++ add a5, a5, a4
++ jr a5
++3:
++ REG_S a1, 0(t0)
++ REG_S a1, SZREG(t0)
++ REG_S a1, 2*SZREG(t0)
++ REG_S a1, 3*SZREG(t0)
++ REG_S a1, 4*SZREG(t0)
++ REG_S a1, 5*SZREG(t0)
++ REG_S a1, 6*SZREG(t0)
++ REG_S a1, 7*SZREG(t0)
++ REG_S a1, 8*SZREG(t0)
++ REG_S a1, 9*SZREG(t0)
++ REG_S a1, 10*SZREG(t0)
++ REG_S a1, 11*SZREG(t0)
++ REG_S a1, 12*SZREG(t0)
++ REG_S a1, 13*SZREG(t0)
++ REG_S a1, 14*SZREG(t0)
++ REG_S a1, 15*SZREG(t0)
++ REG_S a1, 16*SZREG(t0)
++ REG_S a1, 17*SZREG(t0)
++ REG_S a1, 18*SZREG(t0)
++ REG_S a1, 19*SZREG(t0)
++ REG_S a1, 20*SZREG(t0)
++ REG_S a1, 21*SZREG(t0)
++ REG_S a1, 22*SZREG(t0)
++ REG_S a1, 23*SZREG(t0)
++ REG_S a1, 24*SZREG(t0)
++ REG_S a1, 25*SZREG(t0)
++ REG_S a1, 26*SZREG(t0)
++ REG_S a1, 27*SZREG(t0)
++ REG_S a1, 28*SZREG(t0)
++ REG_S a1, 29*SZREG(t0)
++ REG_S a1, 30*SZREG(t0)
++ REG_S a1, 31*SZREG(t0)
++ addi t0, t0, 32*SZREG
++ bltu t0, a3, 3b
++ andi a2, a2, SZREG-1 /* Update count */
++
++4:
++ /* Handle trailing misalignment */
++ beqz a2, 6f
++ add a3, t0, a2
++5:
++ sb a1, 0(t0)
++ addi t0, t0, 1
++ bltu t0, a3, 5b
++6:
++ ret
++END(memset)
+diff -Nur linux-4.6.2/arch/riscv/lib/uaccess.S linux-4.6.2.riscv/arch/riscv/lib/uaccess.S
+--- linux-4.6.2/arch/riscv/lib/uaccess.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/lib/uaccess.S 2017-03-04 02:48:34.170888098 +0100
+@@ -0,0 +1,125 @@
++#include <linux/linkage.h>
++#include <asm/asm.h>
++#include <asm/csr.h>
++
++ .altmacro
++ .macro fixup op reg addr lbl
++ LOCAL _epc
++_epc:
++ \op \reg, \addr
++ .section __ex_table,"a"
++ .balign SZPTR
++ PTR _epc, \lbl
++ .previous
++ .endm
++
++ENTRY(__copy_user)
++
++#ifdef CONFIG_RV_PUM
++ /* Enable access to user memory */
++ li t6, SR_PUM
++ csrc sstatus, t6
++#endif
++
++ add a3, a1, a2
++ /* Use word-oriented copy only if low-order bits match */
++ andi t0, a0, SZREG-1
++ andi t1, a1, SZREG-1
++ bne t0, t1, 2f
++
++ addi t0, a1, SZREG-1
++ andi t1, a3, ~(SZREG-1)
++ andi t0, t0, ~(SZREG-1)
++ /* a3: terminal address of source region
++ * t0: lowest XLEN-aligned address in source
++ * t1: highest XLEN-aligned address in source
++ */
++ bgeu t0, t1, 2f
++ bltu a1, t0, 4f
++1:
++ fixup REG_L, t2, (a1), 10f
++ fixup REG_S, t2, (a0), 10f
++ addi a1, a1, SZREG
++ addi a0, a0, SZREG
++ bltu a1, t1, 1b
++2:
++ bltu a1, a3, 5f
++
++3:
++#ifdef CONFIG_RV_PUM
++ /* Disable access to user memory */
++ csrs sstatus, t6
++#endif
++ li a0, 0
++ ret
++4: /* Edge case: unalignment */
++ fixup lbu, t2, (a1), 10f
++ fixup sb, t2, (a0), 10f
++ addi a1, a1, 1
++ addi a0, a0, 1
++ bltu a1, t0, 4b
++ j 1b
++5: /* Edge case: remainder */
++ fixup lbu, t2, (a1), 10f
++ fixup sb, t2, (a0), 10f
++ addi a1, a1, 1
++ addi a0, a0, 1
++ bltu a1, a3, 5b
++ j 3b
++ENDPROC(__copy_user)
++
++
++ENTRY(__clear_user)
++
++#ifdef CONFIG_RV_PUM
++ /* Enable access to user memory */
++ li t6, SR_PUM
++ csrc sstatus, t6
++#endif
++
++ add a3, a0, a1
++ addi t0, a0, SZREG-1
++ andi t1, a3, ~(SZREG-1)
++ andi t0, t0, ~(SZREG-1)
++ /* a3: terminal address of target region
++ * t0: lowest doubleword-aligned address in target region
++ * t1: highest doubleword-aligned address in target region
++ */
++ bgeu t0, t1, 2f
++ bltu a0, t0, 4f
++1:
++ fixup REG_S, zero, (a0), 10f
++ addi a0, a0, SZREG
++ bltu a0, t1, 1b
++2:
++ bltu a0, a3, 5f
++
++3:
++#ifdef CONFIG_RV_PUM
++ /* Disable access to user memory */
++ csrs sstatus, t6
++#endif
++ li a0, 0
++ ret
++4: /* Edge case: unalignment */
++ fixup sb, zero, (a0), 10f
++ addi a0, a0, 1
++ bltu a0, t0, 4b
++ j 1b
++5: /* Edge case: remainder */
++ fixup sb, zero, (a0), 10f
++ addi a0, a0, 1
++ bltu a0, a3, 5b
++ j 3b
++ENDPROC(__clear_user)
++
++ .section .fixup,"ax"
++ .balign 4
++10:
++#ifdef CONFIG_RV_PUM
++ /* Disable access to user memory */
++ csrs sstatus, t6
++#endif
++ sub a0, a3, a0
++ ret
++ .previous
+diff -Nur linux-4.6.2/arch/riscv/Makefile linux-4.6.2.riscv/arch/riscv/Makefile
+--- linux-4.6.2/arch/riscv/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/Makefile 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1,70 @@
++# BK Id: %F% %I% %G% %U% %#%
++#
++# This file is included by the global makefile so that you can add your own
++# architecture-specific flags and dependencies. Remember to do have actions
++# for "archclean" and "archdep" for cleaning up and making dependencies for
++# this architecture
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License. See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (C) 1994 by Linus Torvalds
++# Modifications for the RISC-V architecture:
++# Quan Nguyen <quannguyen@eecs.berkeley.edu>
++# Albert Ou <a_ou@eecs.berkeley.edu>
++#
++# Based on:
++# arch/score/Makefile
++
++LDFLAGS :=
++OBJCOPYFLAGS := -O binary
++LDFLAGS_vmlinux :=
++
++ifeq ($(ARCH),riscv)
++ KBUILD_DEFCONFIG = riscv64_spike
++else
++ KBUILD_DEFCONFIG = $(ARCH)_spike
++endif
++
++export BITS
++ifeq ($(CONFIG_64BIT),y)
++ BITS := 64
++ UTS_MACHINE := riscv64
++
++ KBUILD_CFLAGS += -mabi=lp64
++ KBUILD_AFLAGS += -mabi=lp64
++ KBUILD_MARCH = rv64im
++ LDFLAGS += -melf64lriscv
++else
++ BITS := 32
++ UTS_MACHINE := riscv32
++
++ KBUILD_CFLAGS += -mabi=ilp32
++ KBUILD_AFLAGS += -mabi=ilp32
++ KBUILD_MARCH = rv32im
++ LDFLAGS += -melf32lriscv
++endif
++
++ifeq ($(CONFIG_RV_ATOMIC),y)
++ KBUILD_RV_ATOMIC = a
++endif
++
++KBUILD_CFLAGS += -Wall
++
++ifeq ($(CONFIG_RVC),y)
++ KBUILD_RVC = c
++endif
++
++KBUILD_AFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_RV_ATOMIC)fd$(KBUILD_RVC)
++
++KBUILD_CFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_RV_ATOMIC)$(KBUILD_RVC)
++KBUILD_CFLAGS += -mno-save-restore
++
++head-y := arch/riscv/kernel/head.o
++
++core-y += arch/riscv/kernel/ arch/riscv/mm/
++
++libs-y += arch/riscv/lib/
++
++all: vmlinux
+diff -Nur linux-4.6.2/arch/riscv/mm/extable.c linux-4.6.2.riscv/arch/riscv/mm/extable.c
+--- linux-4.6.2/arch/riscv/mm/extable.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/mm/extable.c 2017-03-04 02:48:34.170888098 +0100
+@@ -0,0 +1,14 @@
++#include <linux/module.h>
++#include <linux/uaccess.h>
++
++int fixup_exception(struct pt_regs *regs)
++{
++ const struct exception_table_entry *fixup;
++
++ fixup = search_exception_tables(regs->sepc);
++ if (fixup) {
++ regs->sepc = fixup->fixup;
++ return 1;
++ }
++ return 0;
++}
+diff -Nur linux-4.6.2/arch/riscv/mm/fault.c linux-4.6.2.riscv/arch/riscv/mm/fault.c
+--- linux-4.6.2/arch/riscv/mm/fault.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/mm/fault.c 2017-03-04 02:48:34.170888098 +0100
+@@ -0,0 +1,250 @@
++#include <linux/mm.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/perf_event.h>
++#include <linux/signal.h>
++#include <linux/uaccess.h>
++
++#include <asm/pgalloc.h>
++#include <asm/ptrace.h>
++#include <asm/uaccess.h>
++
++/*
++ * This routine handles page faults. It determines the address and the
++ * problem, and then passes it off to one of the appropriate routines.
++ */
++asmlinkage void do_page_fault(struct pt_regs *regs)
++{
++ struct task_struct *tsk;
++ struct vm_area_struct *vma;
++ struct mm_struct *mm;
++ unsigned long addr, cause;
++ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
++ int fault, code = SEGV_MAPERR;
++
++ cause = regs->scause;
++ addr = regs->sbadaddr;
++
++ tsk = current;
++ mm = tsk->mm;
++
++ /*
++ * Fault-in kernel-space virtual memory on-demand.
++ * The 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ */
++ if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
++ goto vmalloc_fault;
++
++ /* Enable interrupts if they were enabled in the parent context. */
++ if (likely(regs->sstatus & SR_PIE))
++ local_irq_enable();
++
++ /*
++ * If we're in an interrupt, have no user context, or are running
++ * in an atomic region, then we must not take the fault.
++ */
++ if (unlikely(faulthandler_disabled() || !mm))
++ goto no_context;
++
++ if (user_mode(regs))
++ flags |= FAULT_FLAG_USER;
++
++ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
++
++retry:
++ down_read(&mm->mmap_sem);
++ vma = find_vma(mm, addr);
++ if (unlikely(!vma))
++ goto bad_area;
++ if (likely(vma->vm_start <= addr))
++ goto good_area;
++ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
++ goto bad_area;
++ if (unlikely(expand_stack(vma, addr)))
++ goto bad_area;
++
++ /*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it.
++ */
++good_area:
++ code = SEGV_ACCERR;
++
++ switch (cause) {
++ case EXC_INST_ACCESS:
++ if (!(vma->vm_flags & VM_EXEC))
++ goto bad_area;
++ break;
++ case EXC_LOAD_ACCESS:
++ if (!(vma->vm_flags & VM_READ))
++ goto bad_area;
++ break;
++ case EXC_STORE_ACCESS:
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ flags |= FAULT_FLAG_WRITE;
++ break;
++ default:
++ panic("%s: unhandled cause %lu", __func__, cause);
++ }
++
++ /*
++ * If for any reason at all we could not handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ fault = handle_mm_fault(mm, vma, addr, flags);
++
++ /*
++ * If we need to retry but a fatal signal is pending, handle the
++ * signal first. We do not need to release the mmap_sem because it
++ * would already be released in __lock_page_or_retry in mm/filemap.c.
++ */
++ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
++ return;
++
++ if (unlikely(fault & VM_FAULT_ERROR)) {
++ if (fault & VM_FAULT_OOM)
++ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGBUS)
++ goto do_sigbus;
++ BUG();
++ }
++
++ /*
++ * Major/minor page fault accounting is only done on the
++ * initial attempt. If we go through a retry, it is extremely
++ * likely that the page will be found in page cache at that point.
++ */
++ if (flags & FAULT_FLAG_ALLOW_RETRY) {
++ if (fault & VM_FAULT_MAJOR) {
++ tsk->maj_flt++;
++ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
++ } else {
++ tsk->min_flt++;
++ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
++ }
++ if (fault & VM_FAULT_RETRY) {
++ /*
++ * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
++ * of starvation.
++ */
++ flags &= ~(FAULT_FLAG_ALLOW_RETRY);
++ flags |= FAULT_FLAG_TRIED;
++
++ /*
++ * No need to up_read(&mm->mmap_sem) as we would
++ * have already released it in __lock_page_or_retry
++ * in mm/filemap.c.
++ */
++ goto retry;
++ }
++ }
++
++ up_read(&mm->mmap_sem);
++ return;
++
++ /*
++ * Something tried to access memory that isn't in our memory map.
++ * Fix it, but check if it's kernel or user first.
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++ /* User mode accesses just cause a SIGSEGV */
++ if (user_mode(regs)) {
++ do_trap(regs, SIGSEGV, code, addr, tsk);
++ return;
++ }
++
++no_context:
++ /* Are we prepared to handle this kernel fault? */
++ if (fixup_exception(regs)) {
++ return;
++ }
++
++ /*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++ bust_spinlocks(1);
++ pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
++ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
++ "paging request", addr);
++ die(regs, "Oops");
++ do_exit(SIGKILL);
++
++ /*
++ * We ran out of memory, call the OOM killer, and return the userspace
++ * (which will retry the fault, or kill us if we got oom-killed).
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (!user_mode(regs))
++ goto no_context;
++ pagefault_out_of_memory();
++ return;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++ /* Kernel mode? Handle exceptions or die */
++ if (!user_mode(regs))
++ goto no_context;
++ do_trap(regs, SIGBUS, BUS_ADRERR, addr, tsk);
++ return;
++
++vmalloc_fault:
++ {
++ pgd_t *pgd, *pgd_k;
++ pud_t *pud, *pud_k;
++ pmd_t *pmd, *pmd_k;
++ pte_t *pte_k;
++ int index;
++
++ if (user_mode(regs))
++ goto bad_area;
++
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "tsk->active_mm->pgd" here.
++ * We might be inside an interrupt in the middle
++ * of a task switch.
++ */
++ index = pgd_index(addr);
++ pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
++ pgd_k = init_mm.pgd + index;
++
++ if (!pgd_present(*pgd_k))
++ goto no_context;
++ set_pgd(pgd, *pgd_k);
++
++ pud = pud_offset(pgd, addr);
++ pud_k = pud_offset(pgd_k, addr);
++ if (!pud_present(*pud_k))
++ goto no_context;
++
++ /* Since the vmalloc area is global, it is unnecessary
++ to copy individual PTEs */
++ pmd = pmd_offset(pud, addr);
++ pmd_k = pmd_offset(pud_k, addr);
++ if (!pmd_present(*pmd_k))
++ goto no_context;
++ set_pmd(pmd, *pmd_k);
++
++ /* Make sure the actual PTE exists as well to
++ * catch kernel vmalloc-area accesses to non-mapped
++ * addresses. If we don't do this, this will just
++ * silently loop forever.
++ */
++ pte_k = pte_offset_kernel(pmd_k, addr);
++ if (!pte_present(*pte_k))
++ goto no_context;
++ return;
++ }
++}
+diff -Nur linux-4.6.2/arch/riscv/mm/init.c linux-4.6.2.riscv/arch/riscv/mm/init.c
+--- linux-4.6.2/arch/riscv/mm/init.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/mm/init.c 2017-03-04 02:48:34.170888098 +0100
+@@ -0,0 +1,84 @@
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/bootmem.h>
++#include <linux/initrd.h>
++#include <linux/memblock.h>
++#include <linux/swap.h>
++
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/pgtable.h>
++#include <asm/io.h>
++
++#ifdef CONFIG_NUMA
++static void __init zone_sizes_init(void)
++{
++ unsigned long zones_size[MAX_NR_ZONES];
++ int nid;
++
++ memset(zones_size, 0, sizeof(zones_size));
++
++ for_each_online_node(nid) {
++ pg_data_t *pgdat;
++ unsigned long start_pfn, end_pfn;
++
++ pgdat = NODE_DATA(nid);
++ start_pfn = pgdat->bdata->node_min_pfn;
++ end_pfn = pgdat->bdata->node_low_pfn;
++ memblock_add_node(start_pfn,
++ PFN_PHYS(end_pfn - start_pfn), nid);
++ }
++
++ zones_size[ZONE_NORMAL] = pfn_base + max_mapnr;
++ free_area_init_nodes(zones_size);
++}
++#else
++static void __init zone_sizes_init(void)
++{
++ unsigned long zones_size[MAX_NR_ZONES];
++
++ memset(zones_size, 0, sizeof(zones_size));
++ memblock_add_node(PFN_PHYS(pfn_base), PFN_PHYS(max_mapnr), 0);
++ zones_size[ZONE_NORMAL] = pfn_base + max_mapnr;
++ free_area_init_nodes(zones_size);
++}
++#endif /* CONFIG_NUMA */
++
++void setup_zero_page(void)
++{
++ memset((void *)empty_zero_page, 0, PAGE_SIZE);
++}
++
++void __init paging_init(void)
++{
++ init_mm.pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr));
++
++ setup_zero_page();
++ local_flush_tlb_all();
++ zone_sizes_init();
++}
++
++void __init mem_init(void)
++{
++#ifdef CONFIG_FLATMEM
++ BUG_ON(!mem_map);
++#endif /* CONFIG_FLATMEM */
++
++ high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
++ free_all_bootmem();
++
++ mem_init_print_info(NULL);
++}
++
++void free_initmem(void)
++{
++ free_initmem_default(0);
++}
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++// free_reserved_area(start, end, 0, "initrd");
++}
++#endif /* CONFIG_BLK_DEV_INITRD */
++
+diff -Nur linux-4.6.2/arch/riscv/mm/ioremap.c linux-4.6.2.riscv/arch/riscv/mm/ioremap.c
+--- linux-4.6.2/arch/riscv/mm/ioremap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/mm/ioremap.c 2017-03-04 02:48:34.170888098 +0100
+@@ -0,0 +1,81 @@
++#include <linux/export.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <linux/io.h>
++
++#include <asm/pgtable.h>
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
++ pgprot_t prot, void *caller)
++{
++ phys_addr_t last_addr;
++ unsigned long offset, vaddr;
++ struct vm_struct *area;
++
++ /* Disallow wrap-around or zero size */
++ last_addr = addr + size - 1;
++ if (!size || last_addr < addr) {
++ return NULL;
++ }
++
++ /* Page-align mappings */
++ offset = addr & (~PAGE_MASK);
++ addr &= PAGE_MASK;
++ size = PAGE_ALIGN(size + offset);
++
++ area = get_vm_area_caller(size, VM_IOREMAP, caller);
++ if (!area) {
++ return NULL;
++ }
++ vaddr = (unsigned long)area->addr;
++
++ if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
++ free_vm_area(area);
++ return NULL;
++ }
++
++ return (void __iomem *)(vaddr + offset);
++}
++
++/*
++ * ioremap - map bus memory into CPU space
++ * @offset: bus address of the memory
++ * @size: size of the resource to map
++ *
++ * ioremap performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ *
++ * Must be freed with iounmap.
++ */
++void __iomem *ioremap(phys_addr_t offset, unsigned long size)
++{
++ return __ioremap_caller(offset, size, PAGE_KERNEL,
++ __builtin_return_address(0));
++}
++EXPORT_SYMBOL(ioremap);
++
++
++/**
++ * iounmap - Free a IO remapping
++ * @addr: virtual address from ioremap_*
++ *
++ * Caller must ensure there is only one unmapping for the same pointer.
++ */
++void iounmap(void __iomem *addr)
++{
++ vunmap((void *)((unsigned long)addr & PAGE_MASK));
++}
++EXPORT_SYMBOL(iounmap);
++
+diff -Nur linux-4.6.2/arch/riscv/mm/Makefile linux-4.6.2.riscv/arch/riscv/mm/Makefile
+--- linux-4.6.2/arch/riscv/mm/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/arch/riscv/mm/Makefile 2017-03-04 02:48:34.170888098 +0100
+@@ -0,0 +1 @@
++obj-y := init.o fault.o extable.o ioremap.o
+diff -Nur linux-4.6.2/.gitignore linux-4.6.2.riscv/.gitignore
+--- linux-4.6.2/.gitignore 2016-06-08 03:23:53.000000000 +0200
++++ linux-4.6.2.riscv/.gitignore 2017-03-04 02:48:34.162887952 +0100
+@@ -1,114 +1,11 @@
+-#
+-# NOTE! Don't add files that are generated in specific
+-# subdirectories here. Add them in the ".gitignore" file
+-# in that subdirectory instead.
+-#
+-# NOTE! Please use 'git ls-files -i --exclude-standard'
+-# command after changing this file, to see if there are
+-# any tracked files which get ignored after the change.
+-#
+-# Normal rules
+-#
+-.*
+-*.o
+-*.o.*
+-*.a
+-*.s
+-*.ko
+-*.so
+-*.so.dbg
+-*.mod.c
+-*.i
+-*.lst
+-*.symtypes
+-*.order
+-*.elf
+-*.bin
+-*.tar
+-*.gz
+-*.bz2
+-*.lzma
+-*.xz
+-*.lz4
+-*.lzo
+-*.patch
+-*.gcno
+-modules.builtin
+-Module.symvers
+-*.dwo
+-*.su
++# Ignore everything...
++*
+
+-#
+-# Top-level generic files
+-#
+-/tags
+-/TAGS
+-/linux
+-/vmlinux
+-/vmlinux.32
+-/vmlinux-gdb.py
+-/vmlinuz
+-/System.map
+-/Module.markers
+-
+-#
+-# Debian directory (make deb-pkg)
+-#
+-/debian/
+-
+-#
+-# tar directory (make tar*-pkg)
+-#
+-/tar-install/
+-
+-#
+-# git files that we don't want to ignore even it they are dot-files
+-#
++# Then include just this.
++!arch/
+ !.gitignore
+-!.mailmap
+-
+-#
+-# Generated include files
+-#
+-include/config
+-include/generated
+-arch/*/include/generated
+-
+-# stgit generated dirs
+-patches-*
+-
+-# quilt's files
+-patches
+-series
+-
+-# cscope files
+-cscope.*
+-ncscope.*
+-
+-# gnu global files
+-GPATH
+-GRTAGS
+-GSYMS
+-GTAGS
+-
+-# id-utils files
+-ID
+-
+-*.orig
+-*~
+-\#*#
+-
+-#
+-# Leavings from module signing
+-#
+-extra_certificates
+-signing_key.pem
+-signing_key.priv
+-signing_key.x509
+-x509.genkey
++!README.md
+
+-# Kconfig presets
+-all.config
++# The arch/.gitignore and arch/riscv/.gitignore have patterns to un-ignore the
++# appropriate files.
+
+-# Kdevelop4
+-*.kdev4
+diff -Nur linux-4.6.2/README.md linux-4.6.2.riscv/README.md
+--- linux-4.6.2/README.md 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.6.2.riscv/README.md 2017-03-04 02:48:34.162887952 +0100
+@@ -0,0 +1,72 @@
++# Linux/RISC-V
++
++This is a port of Linux kernel for the [RISC-V](http://riscv.org/)
++instruction set architecture.
++Development is currently based on the
++[4.6 longterm branch](https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/log/?h=linux-4.6.y).
++
++
++## Obtaining kernel sources
++
++### Master
++
++Overlay the `riscv` architecture-specific subtree onto an upstream release:
++
++    $ curl -L https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.6.2.tar.xz | tar -xJ
++    $ cd linux-4.6.2
++    $ git init
++    $ git remote add -t master origin https://github.com/riscv/riscv-linux.git
++    $ git fetch
++    $ git checkout -f -t origin/master
++
++Note that the `-t <branch>` option minimizes the history fetched.
++To add another branch:
++
++    $ git remote set-branches --add origin <branch>
++    $ git fetch
++
++### Full kernel source trees
++
++For convenience, full kernel source trees are maintained on separate
++branches tracking
++[linux-stable](https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git):
++
++* `linux-4.6.y-riscv`
++* `linux-3.14.y-riscv` (historical)
++
++## Building the kernel image
++
++1. Create kernel configuration based on architecture defaults:
++
++    $ make ARCH=riscv defconfig
++
++1. Optionally edit the configuration via an ncurses interface:
++
++    $ make ARCH=riscv menuconfig
++
++1. Build the uncompressed kernel image:
++
++    $ make -j4 ARCH=riscv vmlinux
++
++1. Boot the kernel in the functional simulator, optionally specifying a
++   raw disk image for the root filesystem:
++
++    $ spike +disk=path/to/root.img bbl vmlinux
++
++   `bbl` (the Berkeley Boot Loader) is available from the
++   [riscv-pk](https://github.com/riscv/riscv-pk) repository.
++
++## Exporting kernel headers
++
++The `riscv-gnu-toolchain` repository includes a copy of the kernel header files.
++If the userspace API has changed, export the updated headers to the
++`riscv-gnu-toolchain` source directory:
++
++    $ make ARCH=riscv headers_check
++    $ make ARCH=riscv INSTALL_HDR_PATH=path/to/riscv-gnu-toolchain/linux-headers headers_install
++
++Rebuild `riscv64-unknown-linux-gnu-gcc` with the `linux` target:
++
++    $ cd path/to/riscv-gnu-toolchain
++    $ make linux
++