startup.patch (33 KB) — OpenADK startup patch against linux-5.4.69: mounts devtmpfs after initramfs unpack (init/initramfs.c), prints a boot banner and changes the default ramdisk init path to /sbin/init (init/main.c). Note: the diff also accidentally adds the backup file init/main.c.orig (picked up by `diff -Nur`); it should be excluded when the patch is regenerated.

[Extraction artifact: the patch viewer's line-number gutter (1–1265) was fused into the text here as two unreadable digit runs; removed. The actual patch content follows.]
  1. diff -Nur linux-5.4.69.orig/init/initramfs.c linux-5.4.69/init/initramfs.c
  2. --- linux-5.4.69.orig/init/initramfs.c 2020-10-01 13:19:27.000000000 +0200
  3. +++ linux-5.4.69/init/initramfs.c 2020-10-31 02:34:40.814412219 +0100
  4. @@ -674,6 +674,9 @@
  5. initrd_start = 0;
  6. initrd_end = 0;
  7. +#ifdef CONFIG_DEVTMPFS_MOUNT
  8. + devtmpfs_mount("dev");
  9. +#endif
  10. flush_delayed_fput();
  11. return 0;
  12. }
  13. diff -Nur linux-5.4.69.orig/init/main.c linux-5.4.69/init/main.c
  14. --- linux-5.4.69.orig/init/main.c 2020-10-01 13:19:27.000000000 +0200
  15. +++ linux-5.4.69/init/main.c 2020-10-31 02:32:35.699678836 +0100
  16. @@ -1198,6 +1198,8 @@
  17. if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
  18. pr_err("Warning: unable to open an initial console.\n");
  19. + printk(KERN_WARNING "Starting Linux (built with OpenADK).\n");
  20. +
  21. (void) ksys_dup(0);
  22. (void) ksys_dup(0);
  23. /*
  24. @@ -1206,7 +1208,7 @@
  25. */
  26. if (!ramdisk_execute_command)
  27. - ramdisk_execute_command = "/init";
  28. + ramdisk_execute_command = "/sbin/init";
  29. if (ksys_access((const char __user *)
  30. ramdisk_execute_command, 0) != 0) {
  31. diff -Nur linux-5.4.69.orig/init/main.c.orig linux-5.4.69/init/main.c.orig
  32. --- linux-5.4.69.orig/init/main.c.orig 1970-01-01 01:00:00.000000000 +0100
  33. +++ linux-5.4.69/init/main.c.orig 2020-10-01 13:19:27.000000000 +0200
  34. @@ -0,0 +1,1227 @@
  35. +// SPDX-License-Identifier: GPL-2.0-only
  36. +/*
  37. + * linux/init/main.c
  38. + *
  39. + * Copyright (C) 1991, 1992 Linus Torvalds
  40. + *
  41. + * GK 2/5/95 - Changed to support mounting root fs via NFS
  42. + * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
  43. + * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
  44. + * Simplified starting of init: Michael A. Griffith <grif@acm.org>
  45. + */
  46. +
  47. +#define DEBUG /* Enable initcall_debug */
  48. +
  49. +#include <linux/types.h>
  50. +#include <linux/extable.h>
  51. +#include <linux/module.h>
  52. +#include <linux/proc_fs.h>
  53. +#include <linux/binfmts.h>
  54. +#include <linux/kernel.h>
  55. +#include <linux/syscalls.h>
  56. +#include <linux/stackprotector.h>
  57. +#include <linux/string.h>
  58. +#include <linux/ctype.h>
  59. +#include <linux/delay.h>
  60. +#include <linux/ioport.h>
  61. +#include <linux/init.h>
  62. +#include <linux/initrd.h>
  63. +#include <linux/memblock.h>
  64. +#include <linux/acpi.h>
  65. +#include <linux/console.h>
  66. +#include <linux/nmi.h>
  67. +#include <linux/percpu.h>
  68. +#include <linux/kmod.h>
  69. +#include <linux/kprobes.h>
  70. +#include <linux/vmalloc.h>
  71. +#include <linux/kernel_stat.h>
  72. +#include <linux/start_kernel.h>
  73. +#include <linux/security.h>
  74. +#include <linux/smp.h>
  75. +#include <linux/profile.h>
  76. +#include <linux/rcupdate.h>
  77. +#include <linux/moduleparam.h>
  78. +#include <linux/kallsyms.h>
  79. +#include <linux/writeback.h>
  80. +#include <linux/cpu.h>
  81. +#include <linux/cpuset.h>
  82. +#include <linux/cgroup.h>
  83. +#include <linux/efi.h>
  84. +#include <linux/tick.h>
  85. +#include <linux/sched/isolation.h>
  86. +#include <linux/interrupt.h>
  87. +#include <linux/taskstats_kern.h>
  88. +#include <linux/delayacct.h>
  89. +#include <linux/unistd.h>
  90. +#include <linux/utsname.h>
  91. +#include <linux/rmap.h>
  92. +#include <linux/mempolicy.h>
  93. +#include <linux/key.h>
  94. +#include <linux/buffer_head.h>
  95. +#include <linux/page_ext.h>
  96. +#include <linux/debug_locks.h>
  97. +#include <linux/debugobjects.h>
  98. +#include <linux/lockdep.h>
  99. +#include <linux/kmemleak.h>
  100. +#include <linux/pid_namespace.h>
  101. +#include <linux/device.h>
  102. +#include <linux/kthread.h>
  103. +#include <linux/sched.h>
  104. +#include <linux/sched/init.h>
  105. +#include <linux/signal.h>
  106. +#include <linux/idr.h>
  107. +#include <linux/kgdb.h>
  108. +#include <linux/ftrace.h>
  109. +#include <linux/async.h>
  110. +#include <linux/sfi.h>
  111. +#include <linux/shmem_fs.h>
  112. +#include <linux/slab.h>
  113. +#include <linux/perf_event.h>
  114. +#include <linux/ptrace.h>
  115. +#include <linux/pti.h>
  116. +#include <linux/blkdev.h>
  117. +#include <linux/elevator.h>
  118. +#include <linux/sched/clock.h>
  119. +#include <linux/sched/task.h>
  120. +#include <linux/sched/task_stack.h>
  121. +#include <linux/context_tracking.h>
  122. +#include <linux/random.h>
  123. +#include <linux/list.h>
  124. +#include <linux/integrity.h>
  125. +#include <linux/proc_ns.h>
  126. +#include <linux/io.h>
  127. +#include <linux/cache.h>
  128. +#include <linux/rodata_test.h>
  129. +#include <linux/jump_label.h>
  130. +#include <linux/mem_encrypt.h>
  131. +
  132. +#include <asm/io.h>
  133. +#include <asm/bugs.h>
  134. +#include <asm/setup.h>
  135. +#include <asm/sections.h>
  136. +#include <asm/cacheflush.h>
  137. +
  138. +#define CREATE_TRACE_POINTS
  139. +#include <trace/events/initcall.h>
  140. +
  141. +static int kernel_init(void *);
  142. +
  143. +extern void init_IRQ(void);
  144. +extern void radix_tree_init(void);
  145. +
  146. +/*
  147. + * Debug helper: via this flag we know that we are in 'early bootup code'
  148. + * where only the boot processor is running with IRQ disabled. This means
  149. + * two things - IRQ must not be enabled before the flag is cleared and some
  150. + * operations which are not allowed with IRQ disabled are allowed while the
  151. + * flag is set.
  152. + */
  153. +bool early_boot_irqs_disabled __read_mostly;
  154. +
  155. +enum system_states system_state __read_mostly;
  156. +EXPORT_SYMBOL(system_state);
  157. +
  158. +/*
  159. + * Boot command-line arguments
  160. + */
  161. +#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
  162. +#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
  163. +
  164. +extern void time_init(void);
  165. +/* Default late time init is NULL. archs can override this later. */
  166. +void (*__initdata late_time_init)(void);
  167. +
  168. +/* Untouched command line saved by arch-specific code. */
  169. +char __initdata boot_command_line[COMMAND_LINE_SIZE];
  170. +/* Untouched saved command line (eg. for /proc) */
  171. +char *saved_command_line;
  172. +/* Command line for parameter parsing */
  173. +static char *static_command_line;
  174. +/* Command line for per-initcall parameter parsing */
  175. +static char *initcall_command_line;
  176. +
  177. +static char *execute_command;
  178. +static char *ramdisk_execute_command;
  179. +
  180. +/*
  181. + * Used to generate warnings if static_key manipulation functions are used
  182. + * before jump_label_init is called.
  183. + */
  184. +bool static_key_initialized __read_mostly;
  185. +EXPORT_SYMBOL_GPL(static_key_initialized);
  186. +
  187. +/*
  188. + * If set, this is an indication to the drivers that reset the underlying
  189. + * device before going ahead with the initialization otherwise driver might
  190. + * rely on the BIOS and skip the reset operation.
  191. + *
  192. + * This is useful if kernel is booting in an unreliable environment.
  193. + * For ex. kdump situation where previous kernel has crashed, BIOS has been
  194. + * skipped and devices will be in unknown state.
  195. + */
  196. +unsigned int reset_devices;
  197. +EXPORT_SYMBOL(reset_devices);
  198. +
  199. +static int __init set_reset_devices(char *str)
  200. +{
  201. + reset_devices = 1;
  202. + return 1;
  203. +}
  204. +
  205. +__setup("reset_devices", set_reset_devices);
  206. +
  207. +static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
  208. +const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
  209. +static const char *panic_later, *panic_param;
  210. +
  211. +extern const struct obs_kernel_param __setup_start[], __setup_end[];
  212. +
  213. +static bool __init obsolete_checksetup(char *line)
  214. +{
  215. + const struct obs_kernel_param *p;
  216. + bool had_early_param = false;
  217. +
  218. + p = __setup_start;
  219. + do {
  220. + int n = strlen(p->str);
  221. + if (parameqn(line, p->str, n)) {
  222. + if (p->early) {
  223. + /* Already done in parse_early_param?
  224. + * (Needs exact match on param part).
  225. + * Keep iterating, as we can have early
  226. + * params and __setups of same names 8( */
  227. + if (line[n] == '\0' || line[n] == '=')
  228. + had_early_param = true;
  229. + } else if (!p->setup_func) {
  230. + pr_warn("Parameter %s is obsolete, ignored\n",
  231. + p->str);
  232. + return true;
  233. + } else if (p->setup_func(line + n))
  234. + return true;
  235. + }
  236. + p++;
  237. + } while (p < __setup_end);
  238. +
  239. + return had_early_param;
  240. +}
  241. +
  242. +/*
  243. + * This should be approx 2 Bo*oMips to start (note initial shift), and will
  244. + * still work even if initially too large, it will just take slightly longer
  245. + */
  246. +unsigned long loops_per_jiffy = (1<<12);
  247. +EXPORT_SYMBOL(loops_per_jiffy);
  248. +
  249. +static int __init debug_kernel(char *str)
  250. +{
  251. + console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
  252. + return 0;
  253. +}
  254. +
  255. +static int __init quiet_kernel(char *str)
  256. +{
  257. + console_loglevel = CONSOLE_LOGLEVEL_QUIET;
  258. + return 0;
  259. +}
  260. +
  261. +early_param("debug", debug_kernel);
  262. +early_param("quiet", quiet_kernel);
  263. +
  264. +static int __init loglevel(char *str)
  265. +{
  266. + int newlevel;
  267. +
  268. + /*
  269. + * Only update loglevel value when a correct setting was passed,
  270. + * to prevent blind crashes (when loglevel being set to 0) that
  271. + * are quite hard to debug
  272. + */
  273. + if (get_option(&str, &newlevel)) {
  274. + console_loglevel = newlevel;
  275. + return 0;
  276. + }
  277. +
  278. + return -EINVAL;
  279. +}
  280. +
  281. +early_param("loglevel", loglevel);
  282. +
  283. +/* Change NUL term back to "=", to make "param" the whole string. */
  284. +static int __init repair_env_string(char *param, char *val,
  285. + const char *unused, void *arg)
  286. +{
  287. + if (val) {
  288. + /* param=val or param="val"? */
  289. + if (val == param+strlen(param)+1)
  290. + val[-1] = '=';
  291. + else if (val == param+strlen(param)+2) {
  292. + val[-2] = '=';
  293. + memmove(val-1, val, strlen(val)+1);
  294. + val--;
  295. + } else
  296. + BUG();
  297. + }
  298. + return 0;
  299. +}
  300. +
  301. +/* Anything after -- gets handed straight to init. */
  302. +static int __init set_init_arg(char *param, char *val,
  303. + const char *unused, void *arg)
  304. +{
  305. + unsigned int i;
  306. +
  307. + if (panic_later)
  308. + return 0;
  309. +
  310. + repair_env_string(param, val, unused, NULL);
  311. +
  312. + for (i = 0; argv_init[i]; i++) {
  313. + if (i == MAX_INIT_ARGS) {
  314. + panic_later = "init";
  315. + panic_param = param;
  316. + return 0;
  317. + }
  318. + }
  319. + argv_init[i] = param;
  320. + return 0;
  321. +}
  322. +
  323. +/*
  324. + * Unknown boot options get handed to init, unless they look like
  325. + * unused parameters (modprobe will find them in /proc/cmdline).
  326. + */
  327. +static int __init unknown_bootoption(char *param, char *val,
  328. + const char *unused, void *arg)
  329. +{
  330. + repair_env_string(param, val, unused, NULL);
  331. +
  332. + /* Handle obsolete-style parameters */
  333. + if (obsolete_checksetup(param))
  334. + return 0;
  335. +
  336. + /* Unused module parameter. */
  337. + if (strchr(param, '.') && (!val || strchr(param, '.') < val))
  338. + return 0;
  339. +
  340. + if (panic_later)
  341. + return 0;
  342. +
  343. + if (val) {
  344. + /* Environment option */
  345. + unsigned int i;
  346. + for (i = 0; envp_init[i]; i++) {
  347. + if (i == MAX_INIT_ENVS) {
  348. + panic_later = "env";
  349. + panic_param = param;
  350. + }
  351. + if (!strncmp(param, envp_init[i], val - param))
  352. + break;
  353. + }
  354. + envp_init[i] = param;
  355. + } else {
  356. + /* Command line option */
  357. + unsigned int i;
  358. + for (i = 0; argv_init[i]; i++) {
  359. + if (i == MAX_INIT_ARGS) {
  360. + panic_later = "init";
  361. + panic_param = param;
  362. + }
  363. + }
  364. + argv_init[i] = param;
  365. + }
  366. + return 0;
  367. +}
  368. +
  369. +static int __init init_setup(char *str)
  370. +{
  371. + unsigned int i;
  372. +
  373. + execute_command = str;
  374. + /*
  375. + * In case LILO is going to boot us with default command line,
  376. + * it prepends "auto" before the whole cmdline which makes
  377. + * the shell think it should execute a script with such name.
  378. + * So we ignore all arguments entered _before_ init=... [MJ]
  379. + */
  380. + for (i = 1; i < MAX_INIT_ARGS; i++)
  381. + argv_init[i] = NULL;
  382. + return 1;
  383. +}
  384. +__setup("init=", init_setup);
  385. +
  386. +static int __init rdinit_setup(char *str)
  387. +{
  388. + unsigned int i;
  389. +
  390. + ramdisk_execute_command = str;
  391. + /* See "auto" comment in init_setup */
  392. + for (i = 1; i < MAX_INIT_ARGS; i++)
  393. + argv_init[i] = NULL;
  394. + return 1;
  395. +}
  396. +__setup("rdinit=", rdinit_setup);
  397. +
  398. +#ifndef CONFIG_SMP
  399. +static const unsigned int setup_max_cpus = NR_CPUS;
  400. +static inline void setup_nr_cpu_ids(void) { }
  401. +static inline void smp_prepare_cpus(unsigned int maxcpus) { }
  402. +#endif
  403. +
  404. +/*
  405. + * We need to store the untouched command line for future reference.
  406. + * We also need to store the touched command line since the parameter
  407. + * parsing is performed in place, and we should allow a component to
  408. + * store reference of name/value for future reference.
  409. + */
  410. +static void __init setup_command_line(char *command_line)
  411. +{
  412. + size_t len = strlen(boot_command_line) + 1;
  413. +
  414. + saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
  415. + if (!saved_command_line)
  416. + panic("%s: Failed to allocate %zu bytes\n", __func__, len);
  417. +
  418. + initcall_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
  419. + if (!initcall_command_line)
  420. + panic("%s: Failed to allocate %zu bytes\n", __func__, len);
  421. +
  422. + static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
  423. + if (!static_command_line)
  424. + panic("%s: Failed to allocate %zu bytes\n", __func__, len);
  425. +
  426. + strcpy(saved_command_line, boot_command_line);
  427. + strcpy(static_command_line, command_line);
  428. +}
  429. +
  430. +/*
  431. + * We need to finalize in a non-__init function or else race conditions
  432. + * between the root thread and the init thread may cause start_kernel to
  433. + * be reaped by free_initmem before the root thread has proceeded to
  434. + * cpu_idle.
  435. + *
  436. + * gcc-3.4 accidentally inlines this function, so use noinline.
  437. + */
  438. +
  439. +static __initdata DECLARE_COMPLETION(kthreadd_done);
  440. +
  441. +noinline void __ref rest_init(void)
  442. +{
  443. + struct task_struct *tsk;
  444. + int pid;
  445. +
  446. + rcu_scheduler_starting();
  447. + /*
  448. + * We need to spawn init first so that it obtains pid 1, however
  449. + * the init task will end up wanting to create kthreads, which, if
  450. + * we schedule it before we create kthreadd, will OOPS.
  451. + */
  452. + pid = kernel_thread(kernel_init, NULL, CLONE_FS);
  453. + /*
  454. + * Pin init on the boot CPU. Task migration is not properly working
  455. + * until sched_init_smp() has been run. It will set the allowed
  456. + * CPUs for init to the non isolated CPUs.
  457. + */
  458. + rcu_read_lock();
  459. + tsk = find_task_by_pid_ns(pid, &init_pid_ns);
  460. + set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
  461. + rcu_read_unlock();
  462. +
  463. + numa_default_policy();
  464. + pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
  465. + rcu_read_lock();
  466. + kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
  467. + rcu_read_unlock();
  468. +
  469. + /*
  470. + * Enable might_sleep() and smp_processor_id() checks.
  471. + * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
  472. + * kernel_thread() would trigger might_sleep() splats. With
  473. + * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
  474. + * already, but it's stuck on the kthreadd_done completion.
  475. + */
  476. + system_state = SYSTEM_SCHEDULING;
  477. +
  478. + complete(&kthreadd_done);
  479. +
  480. + /*
  481. + * The boot idle thread must execute schedule()
  482. + * at least once to get things moving:
  483. + */
  484. + schedule_preempt_disabled();
  485. + /* Call into cpu_idle with preempt disabled */
  486. + cpu_startup_entry(CPUHP_ONLINE);
  487. +}
  488. +
  489. +/* Check for early params. */
  490. +static int __init do_early_param(char *param, char *val,
  491. + const char *unused, void *arg)
  492. +{
  493. + const struct obs_kernel_param *p;
  494. +
  495. + for (p = __setup_start; p < __setup_end; p++) {
  496. + if ((p->early && parameq(param, p->str)) ||
  497. + (strcmp(param, "console") == 0 &&
  498. + strcmp(p->str, "earlycon") == 0)
  499. + ) {
  500. + if (p->setup_func(val) != 0)
  501. + pr_warn("Malformed early option '%s'\n", param);
  502. + }
  503. + }
  504. + /* We accept everything at this stage. */
  505. + return 0;
  506. +}
  507. +
  508. +void __init parse_early_options(char *cmdline)
  509. +{
  510. + parse_args("early options", cmdline, NULL, 0, 0, 0, NULL,
  511. + do_early_param);
  512. +}
  513. +
  514. +/* Arch code calls this early on, or if not, just before other parsing. */
  515. +void __init parse_early_param(void)
  516. +{
  517. + static int done __initdata;
  518. + static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
  519. +
  520. + if (done)
  521. + return;
  522. +
  523. + /* All fall through to do_early_param. */
  524. + strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
  525. + parse_early_options(tmp_cmdline);
  526. + done = 1;
  527. +}
  528. +
  529. +void __init __weak arch_post_acpi_subsys_init(void) { }
  530. +
  531. +void __init __weak smp_setup_processor_id(void)
  532. +{
  533. +}
  534. +
  535. +# if THREAD_SIZE >= PAGE_SIZE
  536. +void __init __weak thread_stack_cache_init(void)
  537. +{
  538. +}
  539. +#endif
  540. +
  541. +void __init __weak mem_encrypt_init(void) { }
  542. +
  543. +void __init __weak poking_init(void) { }
  544. +
  545. +void __init __weak pgtable_cache_init(void) { }
  546. +
  547. +bool initcall_debug;
  548. +core_param(initcall_debug, initcall_debug, bool, 0644);
  549. +
  550. +#ifdef TRACEPOINTS_ENABLED
  551. +static void __init initcall_debug_enable(void);
  552. +#else
  553. +static inline void initcall_debug_enable(void)
  554. +{
  555. +}
  556. +#endif
  557. +
  558. +/* Report memory auto-initialization states for this boot. */
  559. +static void __init report_meminit(void)
  560. +{
  561. + const char *stack;
  562. +
  563. + if (IS_ENABLED(CONFIG_INIT_STACK_ALL))
  564. + stack = "all";
  565. + else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
  566. + stack = "byref_all";
  567. + else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
  568. + stack = "byref";
  569. + else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
  570. + stack = "__user";
  571. + else
  572. + stack = "off";
  573. +
  574. + pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
  575. + stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
  576. + want_init_on_free() ? "on" : "off");
  577. + if (want_init_on_free())
  578. + pr_info("mem auto-init: clearing system memory may take some time...\n");
  579. +}
  580. +
  581. +/*
  582. + * Set up kernel memory allocators
  583. + */
  584. +static void __init mm_init(void)
  585. +{
  586. + /*
  587. + * page_ext requires contiguous pages,
  588. + * bigger than MAX_ORDER unless SPARSEMEM.
  589. + */
  590. + page_ext_init_flatmem();
  591. + init_debug_pagealloc();
  592. + report_meminit();
  593. + mem_init();
  594. + kmem_cache_init();
  595. + kmemleak_init();
  596. + pgtable_init();
  597. + debug_objects_mem_init();
  598. + vmalloc_init();
  599. + ioremap_huge_init();
  600. + /* Should be run before the first non-init thread is created */
  601. + init_espfix_bsp();
  602. + /* Should be run after espfix64 is set up. */
  603. + pti_init();
  604. +}
  605. +
  606. +void __init __weak arch_call_rest_init(void)
  607. +{
  608. + rest_init();
  609. +}
  610. +
  611. +asmlinkage __visible void __init start_kernel(void)
  612. +{
  613. + char *command_line;
  614. + char *after_dashes;
  615. +
  616. + set_task_stack_end_magic(&init_task);
  617. + smp_setup_processor_id();
  618. + debug_objects_early_init();
  619. +
  620. + cgroup_init_early();
  621. +
  622. + local_irq_disable();
  623. + early_boot_irqs_disabled = true;
  624. +
  625. + /*
  626. + * Interrupts are still disabled. Do necessary setups, then
  627. + * enable them.
  628. + */
  629. + boot_cpu_init();
  630. + page_address_init();
  631. + pr_notice("%s", linux_banner);
  632. + early_security_init();
  633. + setup_arch(&command_line);
  634. + setup_command_line(command_line);
  635. + setup_nr_cpu_ids();
  636. + setup_per_cpu_areas();
  637. + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
  638. + boot_cpu_hotplug_init();
  639. +
  640. + build_all_zonelists(NULL);
  641. + page_alloc_init();
  642. +
  643. + pr_notice("Kernel command line: %s\n", boot_command_line);
  644. + /* parameters may set static keys */
  645. + jump_label_init();
  646. + parse_early_param();
  647. + after_dashes = parse_args("Booting kernel",
  648. + static_command_line, __start___param,
  649. + __stop___param - __start___param,
  650. + -1, -1, NULL, &unknown_bootoption);
  651. + if (!IS_ERR_OR_NULL(after_dashes))
  652. + parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
  653. + NULL, set_init_arg);
  654. +
  655. + /*
  656. + * These use large bootmem allocations and must precede
  657. + * kmem_cache_init()
  658. + */
  659. + setup_log_buf(0);
  660. + vfs_caches_init_early();
  661. + sort_main_extable();
  662. + trap_init();
  663. + mm_init();
  664. +
  665. + ftrace_init();
  666. +
  667. + /* trace_printk can be enabled here */
  668. + early_trace_init();
  669. +
  670. + /*
  671. + * Set up the scheduler prior starting any interrupts (such as the
  672. + * timer interrupt). Full topology setup happens at smp_init()
  673. + * time - but meanwhile we still have a functioning scheduler.
  674. + */
  675. + sched_init();
  676. + /*
  677. + * Disable preemption - early bootup scheduling is extremely
  678. + * fragile until we cpu_idle() for the first time.
  679. + */
  680. + preempt_disable();
  681. + if (WARN(!irqs_disabled(),
  682. + "Interrupts were enabled *very* early, fixing it\n"))
  683. + local_irq_disable();
  684. + radix_tree_init();
  685. +
  686. + /*
  687. + * Set up housekeeping before setting up workqueues to allow the unbound
  688. + * workqueue to take non-housekeeping into account.
  689. + */
  690. + housekeeping_init();
  691. +
  692. + /*
  693. + * Allow workqueue creation and work item queueing/cancelling
  694. + * early. Work item execution depends on kthreads and starts after
  695. + * workqueue_init().
  696. + */
  697. + workqueue_init_early();
  698. +
  699. + rcu_init();
  700. +
  701. + /* Trace events are available after this */
  702. + trace_init();
  703. +
  704. + if (initcall_debug)
  705. + initcall_debug_enable();
  706. +
  707. + context_tracking_init();
  708. + /* init some links before init_ISA_irqs() */
  709. + early_irq_init();
  710. + init_IRQ();
  711. + tick_init();
  712. + rcu_init_nohz();
  713. + init_timers();
  714. + hrtimers_init();
  715. + softirq_init();
  716. + timekeeping_init();
  717. +
  718. + /*
  719. + * For best initial stack canary entropy, prepare it after:
  720. + * - setup_arch() for any UEFI RNG entropy and boot cmdline access
  721. + * - timekeeping_init() for ktime entropy used in rand_initialize()
  722. + * - rand_initialize() to get any arch-specific entropy like RDRAND
  723. + * - add_latent_entropy() to get any latent entropy
  724. + * - adding command line entropy
  725. + */
  726. + rand_initialize();
  727. + add_latent_entropy();
  728. + add_device_randomness(command_line, strlen(command_line));
  729. + boot_init_stack_canary();
  730. +
  731. + time_init();
  732. + perf_event_init();
  733. + profile_init();
  734. + call_function_init();
  735. + WARN(!irqs_disabled(), "Interrupts were enabled early\n");
  736. +
  737. + early_boot_irqs_disabled = false;
  738. + local_irq_enable();
  739. +
  740. + kmem_cache_init_late();
  741. +
  742. + /*
  743. + * HACK ALERT! This is early. We're enabling the console before
  744. + * we've done PCI setups etc, and console_init() must be aware of
  745. + * this. But we do want output early, in case something goes wrong.
  746. + */
  747. + console_init();
  748. + if (panic_later)
  749. + panic("Too many boot %s vars at `%s'", panic_later,
  750. + panic_param);
  751. +
  752. + lockdep_init();
  753. +
  754. + /*
  755. + * Need to run this when irqs are enabled, because it wants
  756. + * to self-test [hard/soft]-irqs on/off lock inversion bugs
  757. + * too:
  758. + */
  759. + locking_selftest();
  760. +
  761. + /*
  762. + * This needs to be called before any devices perform DMA
  763. + * operations that might use the SWIOTLB bounce buffers. It will
  764. + * mark the bounce buffers as decrypted so that their usage will
  765. + * not cause "plain-text" data to be decrypted when accessed.
  766. + */
  767. + mem_encrypt_init();
  768. +
  769. +#ifdef CONFIG_BLK_DEV_INITRD
  770. + if (initrd_start && !initrd_below_start_ok &&
  771. + page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
  772. + pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
  773. + page_to_pfn(virt_to_page((void *)initrd_start)),
  774. + min_low_pfn);
  775. + initrd_start = 0;
  776. + }
  777. +#endif
  778. + setup_per_cpu_pageset();
  779. + numa_policy_init();
  780. + acpi_early_init();
  781. + if (late_time_init)
  782. + late_time_init();
  783. + sched_clock_init();
  784. + calibrate_delay();
  785. + pid_idr_init();
  786. + anon_vma_init();
  787. +#ifdef CONFIG_X86
  788. + if (efi_enabled(EFI_RUNTIME_SERVICES))
  789. + efi_enter_virtual_mode();
  790. +#endif
  791. + thread_stack_cache_init();
  792. + cred_init();
  793. + fork_init();
  794. + proc_caches_init();
  795. + uts_ns_init();
  796. + buffer_init();
  797. + key_init();
  798. + security_init();
  799. + dbg_late_init();
  800. + vfs_caches_init();
  801. + pagecache_init();
  802. + signals_init();
  803. + seq_file_init();
  804. + proc_root_init();
  805. + nsfs_init();
  806. + cpuset_init();
  807. + cgroup_init();
  808. + taskstats_init_early();
  809. + delayacct_init();
  810. +
  811. + poking_init();
  812. + check_bugs();
  813. +
  814. + acpi_subsystem_init();
  815. + arch_post_acpi_subsys_init();
  816. + sfi_init_late();
  817. +
  818. + /* Do the rest non-__init'ed, we're now alive */
  819. + arch_call_rest_init();
  820. +
  821. + prevent_tail_call_optimization();
  822. +}
  823. +
  824. +/* Call all constructor functions linked into the kernel. */
  825. +static void __init do_ctors(void)
  826. +{
  827. +#ifdef CONFIG_CONSTRUCTORS
  828. + ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
  829. +
  830. + for (; fn < (ctor_fn_t *) __ctors_end; fn++)
  831. + (*fn)();
  832. +#endif
  833. +}
  834. +
#ifdef CONFIG_KALLSYMS
/*
 * One node per function name given on the "initcall_blacklist=" boot
 * parameter; both the node and its string are carved out of memblock
 * (boot-time allocator) memory.
 */
struct blacklist_entry {
	struct list_head next;
	char *buf;		/* NUL-terminated initcall function name */
};

static __initdata_or_module LIST_HEAD(blacklisted_initcalls);

/*
 * Handler for the "initcall_blacklist=fn1,fn2,..." boot parameter:
 * record each comma-separated function name so do_one_initcall() can
 * refuse to run it later.
 */
static int __init initcall_blacklist(char *str)
{
	char *str_entry;
	struct blacklist_entry *entry;

	/* str argument is a comma-separated list of functions */
	do {
		str_entry = strsep(&str, ",");
		if (str_entry) {
			pr_debug("blacklisting initcall %s\n", str_entry);
			entry = memblock_alloc(sizeof(*entry),
					       SMP_CACHE_BYTES);
			if (!entry)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*entry));
			entry->buf = memblock_alloc(strlen(str_entry) + 1,
						    SMP_CACHE_BYTES);
			if (!entry->buf)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, strlen(str_entry) + 1);
			strcpy(entry->buf, str_entry);
			list_add(&entry->next, &blacklisted_initcalls);
		}
	} while (str_entry);

	return 0;
}

/*
 * Return true if @fn was named on "initcall_blacklist=". The symbol
 * name is resolved through kallsyms, hence the CONFIG_KALLSYMS guard
 * (see the warning stub below).
 */
static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
	struct blacklist_entry *entry;
	char fn_name[KSYM_SYMBOL_LEN];
	unsigned long addr;

	/* fast path: no blacklist configured */
	if (list_empty(&blacklisted_initcalls))
		return false;

	addr = (unsigned long) dereference_function_descriptor(fn);
	sprint_symbol_no_offset(fn_name, addr);

	/*
	 * fn will be "function_name [module_name]" where [module_name] is not
	 * displayed for built-in init functions. Strip off the [module_name].
	 */
	strreplace(fn_name, ' ', '\0');

	list_for_each_entry(entry, &blacklisted_initcalls, next) {
		if (!strcmp(fn_name, entry->buf)) {
			pr_debug("initcall %s blacklisted\n", fn_name);
			return true;
		}
	}

	return false;
}
#else
/* Without kallsyms we cannot match symbol names: warn and ignore. */
static int __init initcall_blacklist(char *str)
{
	pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n");
	return 0;
}

static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
	return false;
}
#endif
__setup("initcall_blacklist=", initcall_blacklist);
  911. +
/*
 * initcall_debug instrumentation: announce the initcall and stash its
 * start time in *data (a ktime_t supplied at registration time).
 */
static __init_or_module void
trace_initcall_start_cb(void *data, initcall_t fn)
{
	ktime_t *calltime = (ktime_t *)data;

	printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
	*calltime = ktime_get();
}

/*
 * Companion to trace_initcall_start_cb(): report the initcall's return
 * value and approximate duration.
 */
static __init_or_module void
trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
{
	ktime_t *calltime = (ktime_t *)data;
	ktime_t delta, rettime;
	unsigned long long duration;

	rettime = ktime_get();
	delta = ktime_sub(rettime, *calltime);
	/* >>10 divides by 1024: a cheap approximation of ns -> usecs */
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
	       fn, ret, duration);
}

/* Start-timestamp storage shared by the two callbacks above. */
static ktime_t initcall_calltime;
  936. +
#ifdef TRACEPOINTS_ENABLED
/*
 * With tracepoints available, initcall debug output is produced by
 * attaching the callbacks above to the initcall_start/initcall_finish
 * tracepoints; do_trace_* then map straight onto the tracepoints.
 */
static void __init initcall_debug_enable(void)
{
	int ret;

	ret = register_trace_initcall_start(trace_initcall_start_cb,
					    &initcall_calltime);
	ret |= register_trace_initcall_finish(trace_initcall_finish_cb,
					      &initcall_calltime);
	WARN(ret, "Failed to register initcall tracepoints\n");
}
# define do_trace_initcall_start	trace_initcall_start
# define do_trace_initcall_finish	trace_initcall_finish
#else
/*
 * Without tracepoints, invoke the debug callbacks directly, gated on
 * the initcall_debug flag.
 */
static inline void do_trace_initcall_start(initcall_t fn)
{
	if (!initcall_debug)
		return;
	trace_initcall_start_cb(&initcall_calltime, fn);
}
static inline void do_trace_initcall_finish(initcall_t fn, int ret)
{
	if (!initcall_debug)
		return;
	trace_initcall_finish_cb(&initcall_calltime, fn, ret);
}
#endif /* !TRACEPOINTS_ENABLED */
  964. +
/*
 * Run a single initcall, with optional debug tracing, and check that it
 * did not leave preemption disabled or interrupts off behind it.
 *
 * Returns the initcall's return value, or -EPERM if it was blacklisted
 * via "initcall_blacklist=".
 */
int __init_or_module do_one_initcall(initcall_t fn)
{
	int count = preempt_count();	/* snapshot to detect imbalance */
	char msgbuf[64];
	int ret;

	if (initcall_blacklisted(fn))
		return -EPERM;

	do_trace_initcall_start(fn);
	ret = fn();
	do_trace_initcall_finish(fn, ret);

	msgbuf[0] = 0;

	if (preempt_count() != count) {
		sprintf(msgbuf, "preemption imbalance ");
		/* repair the damage so boot can continue */
		preempt_count_set(count);
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	/* warn once, naming every misbehavior collected in msgbuf */
	WARN(msgbuf[0], "initcall %pS returned with %s\n", fn, msgbuf);

	add_latent_entropy();
	return ret;
}
  993. +
  994. +
/*
 * Boundaries of the initcall sections (presumably laid out by the
 * linker script -- TODO confirm). Level N's entries occupy
 * [__initcallN_start, next start); __initcall_end closes the last one.
 */
extern initcall_entry_t __initcall_start[];
extern initcall_entry_t __initcall0_start[];
extern initcall_entry_t __initcall1_start[];
extern initcall_entry_t __initcall2_start[];
extern initcall_entry_t __initcall3_start[];
extern initcall_entry_t __initcall4_start[];
extern initcall_entry_t __initcall5_start[];
extern initcall_entry_t __initcall6_start[];
extern initcall_entry_t __initcall7_start[];
extern initcall_entry_t __initcall_end[];

/*
 * Start marker of each level plus the final end marker, so that
 * initcall_levels[level + 1] always bounds a level's entries
 * (see do_initcall_level()).
 */
static initcall_entry_t *initcall_levels[] __initdata = {
	__initcall0_start,
	__initcall1_start,
	__initcall2_start,
	__initcall3_start,
	__initcall4_start,
	__initcall5_start,
	__initcall6_start,
	__initcall7_start,
	__initcall_end,
};

/* Keep these in sync with initcalls in include/linux/init.h */
static const char *initcall_level_names[] __initdata = {
	"pure",
	"core",
	"postcore",
	"arch",
	"subsys",
	"fs",
	"device",
	"late",
};
  1029. +
/*
 * Run every initcall registered at @level. The command line is
 * re-parsed first with min/max level set to @level so that parameters
 * scoped to this level take effect before its initcalls execute.
 */
static void __init do_initcall_level(int level)
{
	initcall_entry_t *fn;

	/* work on a copy -- presumably parse_args() modifies it in place */
	strcpy(initcall_command_line, saved_command_line);
	parse_args(initcall_level_names[level],
		   initcall_command_line, __start___param,
		   __stop___param - __start___param,
		   level, level,
		   NULL, &repair_env_string);

	trace_initcall_level(initcall_level_names[level]);
	for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
		do_one_initcall(initcall_from_entry(fn));
}
  1045. +
  1046. +static void __init do_initcalls(void)
  1047. +{
  1048. + int level;
  1049. +
  1050. + for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
  1051. + do_initcall_level(level);
  1052. +}
  1053. +
/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
	cpuset_init_smp();
	driver_init();
	init_irq_proc();
	do_ctors();		/* linked-in constructors before any initcall */
	/* initcalls may spawn usermode helpers: enable them first */
	usermodehelper_enable();
	do_initcalls();
}
  1070. +
  1071. +static void __init do_pre_smp_initcalls(void)
  1072. +{
  1073. + initcall_entry_t *fn;
  1074. +
  1075. + trace_initcall_level("early");
  1076. + for (fn = __initcall_start; fn < __initcall0_start; fn++)
  1077. + do_one_initcall(initcall_from_entry(fn));
  1078. +}
  1079. +
  1080. +static int run_init_process(const char *init_filename)
  1081. +{
  1082. + argv_init[0] = init_filename;
  1083. + pr_info("Run %s as init process\n", init_filename);
  1084. + return do_execve(getname_kernel(init_filename),
  1085. + (const char __user *const __user *)argv_init,
  1086. + (const char __user *const __user *)envp_init);
  1087. +}
  1088. +
  1089. +static int try_to_run_init_process(const char *init_filename)
  1090. +{
  1091. + int ret;
  1092. +
  1093. + ret = run_init_process(init_filename);
  1094. +
  1095. + if (ret && ret != -ENOENT) {
  1096. + pr_err("Starting init: %s exists but couldn't execute it (error %d)\n",
  1097. + init_filename, ret);
  1098. + }
  1099. +
  1100. + return ret;
  1101. +}
  1102. +
static noinline void __init kernel_init_freeable(void);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
/* Read-only data protection defaults to on; "rodata=" can disable it. */
bool rodata_enabled __ro_after_init = true;

/* Parse the "rodata=" boot parameter into rodata_enabled. */
static int __init set_debug_rodata(char *str)
{
	return strtobool(str, &rodata_enabled);
}
__setup("rodata=", set_debug_rodata);
#endif
  1113. +
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Seal kernel read-only data (called from kernel_init() after
 * free_initmem()), unless disabled via "rodata=off".
 */
static void mark_readonly(void)
{
	if (rodata_enabled) {
		/*
		 * load_module() results in W+X mappings, which are cleaned
		 * up with call_rcu(). Let's make sure that queued work is
		 * flushed so that we don't hit false positives looking for
		 * insecure pages which are W+X.
		 */
		rcu_barrier();
		mark_rodata_ro();
		rodata_test();
	} else
		pr_info("Kernel memory protection disabled.\n");
}
#else
/* Architecture cannot enforce kernel memory protection at all. */
static inline void mark_readonly(void)
{
	pr_warn("This architecture does not have kernel memory protection.\n");
}
#endif
  1136. +
/*
 * Release the __init sections; __weak so an architecture can override
 * this generic default.
 */
void __weak free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}
  1141. +
/*
 * The init task: finish in-kernel initialization, free __init memory,
 * seal kernel mappings, then exec the first userspace process. Only
 * "returns" to the caller by panicking when no init can be started.
 */
static int __ref kernel_init(void *unused)
{
	int ret;

	kernel_init_freeable();
	/* need to finish all async __init code before freeing the memory */
	async_synchronize_full();
	kprobe_free_init_mem();
	ftrace_free_init_mem();
	free_initmem();
	mark_readonly();

	/*
	 * Kernel mappings are now finalized - update the userspace page-table
	 * to finalize PTI.
	 */
	pti_finalize();

	system_state = SYSTEM_RUNNING;
	numa_default_policy();

	rcu_end_inkernel_boot();

	/* An initramfs /init (or rdinit=) takes precedence over init=. */
	if (ramdisk_execute_command) {
		ret = run_init_process(ramdisk_execute_command);
		if (!ret)
			return 0;
		pr_err("Failed to execute %s (error %d)\n",
		       ramdisk_execute_command, ret);
	}

	/*
	 * We try each of these until one succeeds.
	 *
	 * The Bourne shell can be used instead of init if we are
	 * trying to recover a really broken machine.
	 */
	if (execute_command) {
		ret = run_init_process(execute_command);
		if (!ret)
			return 0;
		/* an explicitly requested init that fails is fatal */
		panic("Requested init %s failed (error %d).",
		      execute_command, ret);
	}
	if (!try_to_run_init_process("/sbin/init") ||
	    !try_to_run_init_process("/etc/init") ||
	    !try_to_run_init_process("/bin/init") ||
	    !try_to_run_init_process("/bin/sh"))
		return 0;

	panic("No working init found. Try passing init= option to kernel. "
	      "See Linux Documentation/admin-guide/init.rst for guidance.");
}
  1195. +
/*
 * The part of PID 1's setup that may live in __init memory: bring up
 * the remaining CPUs, run all initcalls, open the initial console, and
 * decide where init will come from -- all before kernel_init() frees
 * the __init sections.
 */
static noinline void __init kernel_init_freeable(void)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);

	/* Now the scheduler is fully set up and can do blocking allocations */
	gfp_allowed_mask = __GFP_BITS_MASK;

	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_states[N_MEMORY]);

	cad_pid = task_pid(current);	/* cad = Ctrl-Alt-Del recipient */

	smp_prepare_cpus(setup_max_cpus);

	workqueue_init();

	init_mm_internals();

	do_pre_smp_initcalls();
	lockup_detector_init();

	smp_init();
	sched_init_smp();

	page_alloc_init_late();
	/* Initialize page ext after all struct pages are initialized. */
	page_ext_init();

	do_basic_setup();

	/* Open the /dev/console on the rootfs, this should never fail */
	if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		pr_err("Warning: unable to open an initial console.\n");

	/* duplicate fd 0 twice, giving init its stdout and stderr */
	(void) ksys_dup(0);
	(void) ksys_dup(0);
	/*
	 * check if there is an early userspace init. If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	/* no early-userspace init present: mount the real root instead */
	if (ksys_access((const char __user *)
			ramdisk_execute_command, 0) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 *
	 * rootfs is available now, try loading the public keys
	 * and default modules
	 */

	integrity_load_keys();
}