natt.patch 76 KB
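This patch exports two hooks from net/ipv4/udp.c (declared in the new <net/xfrmudp.h>) so an out-of-tree IPsec stack can take over ESP-in-UDP decapsulation for NAT-Traversal. For context only, here is a minimal, hypothetical sketch of a module using those hooks; it is not part of the patch, and everything except udp4_register_esp_rcvencap(), udp4_unregister_esp_rcvencap(), the xfrm4_rcv_encap_t type and <net/xfrmudp.h> is illustrative naming.

/*
 * Hypothetical example module (not part of natt.patch): registers an
 * ESP-in-UDP receive handler via the API this patch adds to udp.c.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/xfrmudp.h>                /* header added by this patch */

static xfrm4_rcv_encap_t old_handler;   /* previously registered hook, if any */

/*
 * Illustrative callback: by the time udp.c calls it, the skb has already
 * been trimmed to the ESP header and iph->protocol set to IPPROTO_ESP.
 */
static int my_esp_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	/* ... hand the ESP packet to the IPsec stack here ... */
	kfree_skb(skb);
	return 0;
}

static int __init my_natt_init(void)
{
	/* Take over ESP-in-UDP decapsulation, remembering any previous hook. */
	return udp4_register_esp_rcvencap(my_esp_rcv_encap, &old_handler);
}

static void __exit my_natt_exit(void)
{
	/* Only succeeds while we are still the registered handler. */
	udp4_unregister_esp_rcvencap(my_esp_rcv_encap);
}

module_init(my_natt_init);
module_exit(my_natt_exit);
MODULE_LICENSE("GPL");

The patch itself follows.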

  1. diff -Nur linux-2.6.30.1.orig/include/net/xfrmudp.h linux-2.6.30.1/include/net/xfrmudp.h
  2. --- linux-2.6.30.1.orig/include/net/xfrmudp.h 1970-01-01 01:00:00.000000000 +0100
  3. +++ linux-2.6.30.1/include/net/xfrmudp.h 2009-07-24 22:00:56.771280384 +0200
  4. @@ -0,0 +1,10 @@
  5. +/*
  6. + * pointer to function for type that xfrm4_input wants, to permit
  7. + * decoupling of XFRM from udp.c
  8. + */
  9. +#define HAVE_XFRM4_UDP_REGISTER
  10. +
  11. +typedef int (*xfrm4_rcv_encap_t)(struct sk_buff *skb, __u16 encap_type);
  12. +extern int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
  13. + , xfrm4_rcv_encap_t *oldfunc);
  14. +extern int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func);
  15. diff -Nur linux-2.6.30.1.orig/net/ipv4/Kconfig linux-2.6.30.1/net/ipv4/Kconfig
  16. --- linux-2.6.30.1.orig/net/ipv4/Kconfig 2009-07-03 01:52:38.000000000 +0200
  17. +++ linux-2.6.30.1/net/ipv4/Kconfig 2009-07-24 22:00:56.751278392 +0200
  18. @@ -379,6 +379,12 @@
  19. tristate
  20. default n
  21. +config IPSEC_NAT_TRAVERSAL
  22. + bool "IPSEC NAT-Traversal (KLIPS compatible)"
  23. + depends on INET
  24. + ---help---
  25. + Includes support for RFC3947/RFC3948 NAT-Traversal of ESP over UDP.
  26. +
  27. config INET_XFRM_MODE_TRANSPORT
  28. tristate "IP: IPsec transport mode"
  29. default y
  30. diff -Nur linux-2.6.30.1.orig/net/ipv4/Kconfig.orig linux-2.6.30.1/net/ipv4/Kconfig.orig
  31. --- linux-2.6.30.1.orig/net/ipv4/Kconfig.orig 1970-01-01 01:00:00.000000000 +0100
  32. +++ linux-2.6.30.1/net/ipv4/Kconfig.orig 2009-07-03 01:52:38.000000000 +0200
  33. @@ -0,0 +1,638 @@
  34. +#
  35. +# IP configuration
  36. +#
  37. +config IP_MULTICAST
  38. + bool "IP: multicasting"
  39. + help
  40. + This is code for addressing several networked computers at once,
  41. + enlarging your kernel by about 2 KB. You need multicasting if you
  42. + intend to participate in the MBONE, a high bandwidth network on top
  43. + of the Internet which carries audio and video broadcasts. More
  44. + information about the MBONE is on the WWW at
  45. + <http://www.savetz.com/mbone/>. Information about the multicast
  46. + capabilities of the various network cards is contained in
  47. + <file:Documentation/networking/multicast.txt>. For most people, it's
  48. + safe to say N.
  49. +
  50. +config IP_ADVANCED_ROUTER
  51. + bool "IP: advanced router"
  52. + ---help---
  53. + If you intend to run your Linux box mostly as a router, i.e. as a
  54. + computer that forwards and redistributes network packets, say Y; you
  55. + will then be presented with several options that allow more precise
  56. + control about the routing process.
  57. +
  58. + The answer to this question won't directly affect the kernel:
  59. + answering N will just cause the configurator to skip all the
  60. + questions about advanced routing.
  61. +
  62. + Note that your box can only act as a router if you enable IP
  63. + forwarding in your kernel; you can do that by saying Y to "/proc
  64. + file system support" and "Sysctl support" below and executing the
  65. + line
  66. +
  67. + echo "1" > /proc/sys/net/ipv4/ip_forward
  68. +
  69. + at boot time after the /proc file system has been mounted.
  70. +
  71. + If you turn on IP forwarding, you should consider the rp_filter, which
  72. + automatically rejects incoming packets if the routing table entry
  73. + for their source address doesn't match the network interface they're
  74. + arriving on. This has security advantages because it prevents the
  75. + so-called IP spoofing, however it can pose problems if you use
  76. + asymmetric routing (packets from you to a host take a different path
  77. + than packets from that host to you) or if you operate a non-routing
  78. + host which has several IP addresses on different interfaces. To turn
  79. + rp_filter on use:
  80. +
  81. + echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
  82. + and
  83. + echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
  84. +
  85. + Note that some distributions enable it in startup scripts.
  86. + For details about rp_filter strict and loose mode read
  87. + <file:Documentation/networking/ip-sysctl.txt>.
  88. +
  89. + If unsure, say N here.
  90. +
  91. +choice
  92. + prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
  93. + depends on IP_ADVANCED_ROUTER
  94. + default ASK_IP_FIB_HASH
  95. +
  96. +config ASK_IP_FIB_HASH
  97. + bool "FIB_HASH"
  98. + ---help---
  99. + Current FIB is very proven and good enough for most users.
  100. +
  101. +config IP_FIB_TRIE
  102. + bool "FIB_TRIE"
  103. + ---help---
  104. + Use new experimental LC-trie as FIB lookup algorithm.
  105. + This improves lookup performance if you have a large
  106. + number of routes.
  107. +
  108. + LC-trie is a longest matching prefix lookup algorithm which
  109. + performs better than FIB_HASH for large routing tables.
  110. + But, it consumes more memory and is more complex.
  111. +
  112. + LC-trie is described in:
  113. +
  114. + IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
  115. + IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
  116. + June 1999
  117. +
  118. + An experimental study of compression methods for dynamic tries
  119. + Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
  120. + http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
  121. +
  122. +endchoice
  123. +
  124. +config IP_FIB_HASH
  125. + def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
  126. +
  127. +config IP_FIB_TRIE_STATS
  128. + bool "FIB TRIE statistics"
  129. + depends on IP_FIB_TRIE
  130. + ---help---
  131. + Keep track of statistics on structure of FIB TRIE table.
  132. + Useful for testing and measuring TRIE performance.
  133. +
  134. +config IP_MULTIPLE_TABLES
  135. + bool "IP: policy routing"
  136. + depends on IP_ADVANCED_ROUTER
  137. + select FIB_RULES
  138. + ---help---
  139. + Normally, a router decides what to do with a received packet based
  140. + solely on the packet's final destination address. If you say Y here,
  141. + the Linux router will also be able to take the packet's source
  142. + address into account. Furthermore, the TOS (Type-Of-Service) field
  143. + of the packet can be used for routing decisions as well.
  144. +
  145. + If you are interested in this, please see the preliminary
  146. + documentation at <http://www.compendium.com.ar/policy-routing.txt>
  147. + and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>.
  148. + You will need supporting software from
  149. + <ftp://ftp.tux.org/pub/net/ip-routing/>.
  150. +
  151. + If unsure, say N.
  152. +
  153. +config IP_ROUTE_MULTIPATH
  154. + bool "IP: equal cost multipath"
  155. + depends on IP_ADVANCED_ROUTER
  156. + help
  157. + Normally, the routing tables specify a single action to be taken in
  158. + a deterministic manner for a given packet. If you say Y here
  159. + however, it becomes possible to attach several actions to a packet
  160. + pattern, in effect specifying several alternative paths to travel
  161. + for those packets. The router considers all these paths to be of
  162. + equal "cost" and chooses one of them in a non-deterministic fashion
  163. + if a matching packet arrives.
  164. +
  165. +config IP_ROUTE_VERBOSE
  166. + bool "IP: verbose route monitoring"
  167. + depends on IP_ADVANCED_ROUTER
  168. + help
  169. + If you say Y here, which is recommended, then the kernel will print
  170. + verbose messages regarding the routing, for example warnings about
  171. + received packets which look strange and could be evidence of an
  172. + attack or a misconfigured system somewhere. The information is
  173. + handled by the klogd daemon which is responsible for kernel messages
  174. + ("man klogd").
  175. +
  176. +config IP_PNP
  177. + bool "IP: kernel level autoconfiguration"
  178. + help
  179. + This enables automatic configuration of IP addresses of devices and
  180. + of the routing table during kernel boot, based on either information
  181. + supplied on the kernel command line or by BOOTP or RARP protocols.
  182. + You need to say Y only for diskless machines requiring network
  183. + access to boot (in which case you want to say Y to "Root file system
  184. + on NFS" as well), because all other machines configure the network
  185. + in their startup scripts.
  186. +
  187. +config IP_PNP_DHCP
  188. + bool "IP: DHCP support"
  189. + depends on IP_PNP
  190. + ---help---
  191. + If you want your Linux box to mount its whole root file system (the
  192. + one containing the directory /) from some other computer over the
  193. + net via NFS and you want the IP address of your computer to be
  194. + discovered automatically at boot time using the DHCP protocol (a
  195. + special protocol designed for doing this job), say Y here. In case
  196. + the boot ROM of your network card was designed for booting Linux and
  197. + does DHCP itself, providing all necessary information on the kernel
  198. + command line, you can say N here.
  199. +
  200. + If unsure, say Y. Note that if you want to use DHCP, a DHCP server
  201. + must be operating on your network. Read
  202. + <file:Documentation/filesystems/nfsroot.txt> for details.
  203. +
  204. +config IP_PNP_BOOTP
  205. + bool "IP: BOOTP support"
  206. + depends on IP_PNP
  207. + ---help---
  208. + If you want your Linux box to mount its whole root file system (the
  209. + one containing the directory /) from some other computer over the
  210. + net via NFS and you want the IP address of your computer to be
  211. + discovered automatically at boot time using the BOOTP protocol (a
  212. + special protocol designed for doing this job), say Y here. In case
  213. + the boot ROM of your network card was designed for booting Linux and
  214. + does BOOTP itself, providing all necessary information on the kernel
  215. + command line, you can say N here. If unsure, say Y. Note that if you
  216. + want to use BOOTP, a BOOTP server must be operating on your network.
  217. + Read <file:Documentation/filesystems/nfsroot.txt> for details.
  218. +
  219. +config IP_PNP_RARP
  220. + bool "IP: RARP support"
  221. + depends on IP_PNP
  222. + help
  223. + If you want your Linux box to mount its whole root file system (the
  224. + one containing the directory /) from some other computer over the
  225. + net via NFS and you want the IP address of your computer to be
  226. + discovered automatically at boot time using the RARP protocol (an
  227. + older protocol which is being obsoleted by BOOTP and DHCP), say Y
  228. + here. Note that if you want to use RARP, a RARP server must be
  229. + operating on your network. Read
  230. + <file:Documentation/filesystems/nfsroot.txt> for details.
  231. +
  232. +# not yet ready..
  233. +# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
  234. +config NET_IPIP
  235. + tristate "IP: tunneling"
  236. + select INET_TUNNEL
  237. + ---help---
  238. + Tunneling means encapsulating data of one protocol type within
  239. + another protocol and sending it over a channel that understands the
  240. + encapsulating protocol. This particular tunneling driver implements
  241. + encapsulation of IP within IP, which sounds kind of pointless, but
  242. + can be useful if you want to make your (or some other) machine
  243. + appear on a different network than it physically is, or to use
  244. + mobile-IP facilities (allowing laptops to seamlessly move between
  245. + networks without changing their IP addresses).
  246. +
  247. + Saying Y to this option will produce two modules ( = code which can
  248. + be inserted in and removed from the running kernel whenever you
  249. + want). Most people won't need this and can say N.
  250. +
  251. +config NET_IPGRE
  252. + tristate "IP: GRE tunnels over IP"
  253. + help
  254. + Tunneling means encapsulating data of one protocol type within
  255. + another protocol and sending it over a channel that understands the
  256. + encapsulating protocol. This particular tunneling driver implements
  257. + GRE (Generic Routing Encapsulation) and at this time allows
  258. + encapsulating of IPv4 or IPv6 over existing IPv4 infrastructure.
  259. + This driver is useful if the other endpoint is a Cisco router: Cisco
  260. + likes GRE much better than the other Linux tunneling driver ("IP
  261. + tunneling" above). In addition, GRE allows multicast redistribution
  262. + through the tunnel.
  263. +
  264. +config NET_IPGRE_BROADCAST
  265. + bool "IP: broadcast GRE over IP"
  266. + depends on IP_MULTICAST && NET_IPGRE
  267. + help
  268. + One application of GRE/IP is to construct a broadcast WAN (Wide Area
  269. + Network), which looks like a normal Ethernet LAN (Local Area
  270. + Network), but can be distributed all over the Internet. If you want
  271. + to do that, say Y here and to "IP multicast routing" below.
  272. +
  273. +config IP_MROUTE
  274. + bool "IP: multicast routing"
  275. + depends on IP_MULTICAST
  276. + help
  277. + This is used if you want your machine to act as a router for IP
  278. + packets that have several destination addresses. It is needed on the
  279. + MBONE, a high bandwidth network on top of the Internet which carries
  280. + audio and video broadcasts. In order to do that, you would most
  281. + likely run the program mrouted. Information about the multicast
  282. + capabilities of the various network cards is contained in
  283. + <file:Documentation/networking/multicast.txt>. If you haven't heard
  284. + about it, you don't need it.
  285. +
  286. +config IP_PIMSM_V1
  287. + bool "IP: PIM-SM version 1 support"
  288. + depends on IP_MROUTE
  289. + help
  290. + Kernel side support for Sparse Mode PIM (Protocol Independent
  291. + Multicast) version 1. This multicast routing protocol is used widely
  292. + because Cisco supports it. You need special software to use it
  293. + (pimd-v1). Please see <http://netweb.usc.edu/pim/> for more
  294. + information about PIM.
  295. +
  296. + Say Y if you want to use PIM-SM v1. Note that you can say N here if
  297. + you just want to use Dense Mode PIM.
  298. +
  299. +config IP_PIMSM_V2
  300. + bool "IP: PIM-SM version 2 support"
  301. + depends on IP_MROUTE
  302. + help
  303. + Kernel side support for Sparse Mode PIM version 2. In order to use
  304. + this, you need an experimental routing daemon supporting it (pimd or
  305. + gated-5). This routing protocol is not used widely, so say N unless
  306. + you want to play with it.
  307. +
  308. +config ARPD
  309. + bool "IP: ARP daemon support (EXPERIMENTAL)"
  310. + depends on EXPERIMENTAL
  311. + ---help---
  312. + Normally, the kernel maintains an internal cache which maps IP
  313. + addresses to hardware addresses on the local network, so that
  314. + Ethernet/Token Ring/ etc. frames are sent to the proper address on
  315. + the physical networking layer. For small networks having a few
  316. + hundred directly connected hosts or less, keeping this address
  317. + resolution (ARP) cache inside the kernel works well. However,
  318. + maintaining an internal ARP cache does not work well for very large
  319. + switched networks, and will use a lot of kernel memory if TCP/IP
  320. + connections are made to many machines on the network.
  321. +
  322. + If you say Y here, the kernel's internal ARP cache will never grow
  323. + to more than 256 entries (the oldest entries are expired in a LIFO
  324. + manner) and communication will be attempted with the user space ARP
  325. + daemon arpd. Arpd then answers the address resolution request either
  326. + from its own cache or by asking the net.
  327. +
  328. + This code is experimental and also obsolete. If you want to use it,
  329. + you need to find a version of the daemon arpd on the net somewhere,
  330. + and you should also say Y to "Kernel/User network link driver",
  331. + below. If unsure, say N.
  332. +
  333. +config SYN_COOKIES
  334. + bool "IP: TCP syncookie support (disabled per default)"
  335. + ---help---
  336. + Normal TCP/IP networking is open to an attack known as "SYN
  337. + flooding". This denial-of-service attack prevents legitimate remote
  338. + users from being able to connect to your computer during an ongoing
  339. + attack and requires very little work from the attacker, who can
  340. + operate from anywhere on the Internet.
  341. +
  342. + SYN cookies provide protection against this type of attack. If you
  343. + say Y here, the TCP/IP stack will use a cryptographic challenge
  344. + protocol known as "SYN cookies" to enable legitimate users to
  345. + continue to connect, even when your machine is under attack. There
  346. + is no need for the legitimate users to change their TCP/IP software;
  347. + SYN cookies work transparently to them. For technical information
  348. + about SYN cookies, check out <http://cr.yp.to/syncookies.html>.
  349. +
  350. + If you are SYN flooded, the source address reported by the kernel is
  351. + likely to have been forged by the attacker; it is only reported as
  352. + an aid in tracing the packets to their actual source and should not
  353. + be taken as absolute truth.
  354. +
  355. + SYN cookies may prevent correct error reporting on clients when the
  356. + server is really overloaded. If this happens frequently better turn
  357. + them off.
  358. +
  359. + If you say Y here, note that SYN cookies aren't enabled by default;
  360. + you can enable them by saying Y to "/proc file system support" and
  361. + "Sysctl support" below and executing the command
  362. +
  363. + echo 1 >/proc/sys/net/ipv4/tcp_syncookies
  364. +
  365. + at boot time after the /proc file system has been mounted.
  366. +
  367. + If unsure, say N.
  368. +
  369. +config INET_AH
  370. + tristate "IP: AH transformation"
  371. + select XFRM
  372. + select CRYPTO
  373. + select CRYPTO_HMAC
  374. + select CRYPTO_MD5
  375. + select CRYPTO_SHA1
  376. + ---help---
  377. + Support for IPsec AH.
  378. +
  379. + If unsure, say Y.
  380. +
  381. +config INET_ESP
  382. + tristate "IP: ESP transformation"
  383. + select XFRM
  384. + select CRYPTO
  385. + select CRYPTO_AUTHENC
  386. + select CRYPTO_HMAC
  387. + select CRYPTO_MD5
  388. + select CRYPTO_CBC
  389. + select CRYPTO_SHA1
  390. + select CRYPTO_DES
  391. + ---help---
  392. + Support for IPsec ESP.
  393. +
  394. + If unsure, say Y.
  395. +
  396. +config INET_IPCOMP
  397. + tristate "IP: IPComp transformation"
  398. + select INET_XFRM_TUNNEL
  399. + select XFRM_IPCOMP
  400. + ---help---
  401. + Support for IP Payload Compression Protocol (IPComp) (RFC3173),
  402. + typically needed for IPsec.
  403. +
  404. + If unsure, say Y.
  405. +
  406. +config INET_XFRM_TUNNEL
  407. + tristate
  408. + select INET_TUNNEL
  409. + default n
  410. +
  411. +config INET_TUNNEL
  412. + tristate
  413. + default n
  414. +
  415. +config INET_XFRM_MODE_TRANSPORT
  416. + tristate "IP: IPsec transport mode"
  417. + default y
  418. + select XFRM
  419. + ---help---
  420. + Support for IPsec transport mode.
  421. +
  422. + If unsure, say Y.
  423. +
  424. +config INET_XFRM_MODE_TUNNEL
  425. + tristate "IP: IPsec tunnel mode"
  426. + default y
  427. + select XFRM
  428. + ---help---
  429. + Support for IPsec tunnel mode.
  430. +
  431. + If unsure, say Y.
  432. +
  433. +config INET_XFRM_MODE_BEET
  434. + tristate "IP: IPsec BEET mode"
  435. + default y
  436. + select XFRM
  437. + ---help---
  438. + Support for IPsec BEET mode.
  439. +
  440. + If unsure, say Y.
  441. +
  442. +config INET_LRO
  443. + bool "Large Receive Offload (ipv4/tcp)"
  444. + default y
  445. + ---help---
  446. + Support for Large Receive Offload (ipv4/tcp).
  447. +
  448. + If unsure, say Y.
  449. +
  450. +config INET_DIAG
  451. + tristate "INET: socket monitoring interface"
  452. + default y
  453. + ---help---
  454. + Support for INET (TCP, DCCP, etc) socket monitoring interface used by
  455. + native Linux tools such as ss. ss is included in iproute2, currently
  456. + downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
  457. +
  458. + If unsure, say Y.
  459. +
  460. +config INET_TCP_DIAG
  461. + depends on INET_DIAG
  462. + def_tristate INET_DIAG
  463. +
  464. +menuconfig TCP_CONG_ADVANCED
  465. + bool "TCP: advanced congestion control"
  466. + ---help---
  467. + Support for selection of various TCP congestion control
  468. + modules.
  469. +
  470. + Nearly all users can safely say no here, and a safe default
  471. + selection will be made (CUBIC with new Reno as a fallback).
  472. +
  473. + If unsure, say N.
  474. +
  475. +if TCP_CONG_ADVANCED
  476. +
  477. +config TCP_CONG_BIC
  478. + tristate "Binary Increase Congestion (BIC) control"
  479. + default m
  480. + ---help---
  481. + BIC-TCP is a sender-side only change that ensures a linear RTT
  482. + fairness under large windows while offering both scalability and
  483. + bounded TCP-friendliness. The protocol combines two schemes
  484. + called additive increase and binary search increase. When the
  485. + congestion window is large, additive increase with a large
  486. + increment ensures linear RTT fairness as well as good
  487. + scalability. Under small congestion windows, binary search
  488. + increase provides TCP friendliness.
  489. + See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/
  490. +
  491. +config TCP_CONG_CUBIC
  492. + tristate "CUBIC TCP"
  493. + default y
  494. + ---help---
  495. + This is version 2.0 of BIC-TCP which uses a cubic growth function
  496. + among other techniques.
  497. + See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf
  498. +
  499. +config TCP_CONG_WESTWOOD
  500. + tristate "TCP Westwood+"
  501. + default m
  502. + ---help---
  503. + TCP Westwood+ is a sender-side only modification of the TCP Reno
  504. + protocol stack that optimizes the performance of TCP congestion
  505. + control. It is based on end-to-end bandwidth estimation to set
  506. + congestion window and slow start threshold after a congestion
  507. + episode. Using this estimation, TCP Westwood+ adaptively sets a
  508. + slow start threshold and a congestion window which takes into
  509. + account the bandwidth used at the time congestion is experienced.
  510. + TCP Westwood+ significantly increases fairness wrt TCP Reno in
  511. + wired networks and throughput over wireless links.
  512. +
  513. +config TCP_CONG_HTCP
  514. + tristate "H-TCP"
  515. + default m
  516. + ---help---
  517. + H-TCP is a send-side only modifications of the TCP Reno
  518. + protocol stack that optimizes the performance of TCP
  519. + congestion control for high speed network links. It uses a
  520. + modeswitch to change the alpha and beta parameters of TCP Reno
  521. + based on network conditions and in a way so as to be fair with
  522. + other Reno and H-TCP flows.
  523. +
  524. +config TCP_CONG_HSTCP
  525. + tristate "High Speed TCP"
  526. + depends on EXPERIMENTAL
  527. + default n
  528. + ---help---
  529. + Sally Floyd's High Speed TCP (RFC 3649) congestion control.
  530. + A modification to TCP's congestion control mechanism for use
  531. + with large congestion windows. A table indicates how much to
  532. + increase the congestion window by when an ACK is received.
  533. + For more detail see http://www.icir.org/floyd/hstcp.html
  534. +
  535. +config TCP_CONG_HYBLA
  536. + tristate "TCP-Hybla congestion control algorithm"
  537. + depends on EXPERIMENTAL
  538. + default n
  539. + ---help---
  540. + TCP-Hybla is a sender-side only change that eliminates penalization of
  541. + long-RTT, large-bandwidth connections, like when satellite legs are
  542. + involved, especially when sharing a common bottleneck with normal
  543. + terrestrial connections.
  544. +
  545. +config TCP_CONG_VEGAS
  546. + tristate "TCP Vegas"
  547. + depends on EXPERIMENTAL
  548. + default n
  549. + ---help---
  550. + TCP Vegas is a sender-side only change to TCP that anticipates
  551. + the onset of congestion by estimating the bandwidth. TCP Vegas
  552. + adjusts the sending rate by modifying the congestion
  553. + window. TCP Vegas should provide less packet loss, but it is
  554. + not as aggressive as TCP Reno.
  555. +
  556. +config TCP_CONG_SCALABLE
  557. + tristate "Scalable TCP"
  558. + depends on EXPERIMENTAL
  559. + default n
  560. + ---help---
  561. + Scalable TCP is a sender-side only change to TCP which uses a
  562. + MIMD congestion control algorithm which has some nice scaling
  563. + properties, though is known to have fairness issues.
  564. + See http://www.deneholme.net/tom/scalable/
  565. +
  566. +config TCP_CONG_LP
  567. + tristate "TCP Low Priority"
  568. + depends on EXPERIMENTAL
  569. + default n
  570. + ---help---
  571. + TCP Low Priority (TCP-LP), a distributed algorithm whose goal is
  572. + to utilize only the excess network bandwidth as compared to the
  573. + ``fair share`` of bandwidth as targeted by TCP.
  574. + See http://www-ece.rice.edu/networks/TCP-LP/
  575. +
  576. +config TCP_CONG_VENO
  577. + tristate "TCP Veno"
  578. + depends on EXPERIMENTAL
  579. + default n
  580. + ---help---
  581. + TCP Veno is a sender-side only enhancement of TCP to obtain better
  582. + throughput over wireless networks. TCP Veno makes use of state
  583. + distinguishing to circumvent the difficult judgment of the packet loss
  584. + type. TCP Veno cuts down less congestion window in response to random
  585. + loss packets.
  586. + See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
  587. +
  588. +config TCP_CONG_YEAH
  589. + tristate "YeAH TCP"
  590. + depends on EXPERIMENTAL
  591. + select TCP_CONG_VEGAS
  592. + default n
  593. + ---help---
  594. + YeAH-TCP is a sender-side high-speed enabled TCP congestion control
  595. + algorithm, which uses a mixed loss/delay approach to compute the
  596. + congestion window. It's design goals target high efficiency,
  597. + internal, RTT and Reno fairness, resilience to link loss while
  598. + keeping network elements load as low as possible.
  599. +
  600. + For further details look here:
  601. + http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
  602. +
  603. +config TCP_CONG_ILLINOIS
  604. + tristate "TCP Illinois"
  605. + depends on EXPERIMENTAL
  606. + default n
  607. + ---help---
  608. + TCP-Illinois is a sender-side modification of TCP Reno for
  609. + high speed long delay links. It uses round-trip-time to
  610. + adjust the alpha and beta parameters to achieve a higher average
  611. + throughput and maintain fairness.
  612. +
  613. + For further details see:
  614. + http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
  615. +
  616. +choice
  617. + prompt "Default TCP congestion control"
  618. + default DEFAULT_CUBIC
  619. + help
  620. + Select the TCP congestion control that will be used by default
  621. + for all connections.
  622. +
  623. + config DEFAULT_BIC
  624. + bool "Bic" if TCP_CONG_BIC=y
  625. +
  626. + config DEFAULT_CUBIC
  627. + bool "Cubic" if TCP_CONG_CUBIC=y
  628. +
  629. + config DEFAULT_HTCP
  630. + bool "Htcp" if TCP_CONG_HTCP=y
  631. +
  632. + config DEFAULT_VEGAS
  633. + bool "Vegas" if TCP_CONG_VEGAS=y
  634. +
  635. + config DEFAULT_WESTWOOD
  636. + bool "Westwood" if TCP_CONG_WESTWOOD=y
  637. +
  638. + config DEFAULT_RENO
  639. + bool "Reno"
  640. +
  641. +endchoice
  642. +
  643. +endif
  644. +
  645. +config TCP_CONG_CUBIC
  646. + tristate
  647. + depends on !TCP_CONG_ADVANCED
  648. + default y
  649. +
  650. +config DEFAULT_TCP_CONG
  651. + string
  652. + default "bic" if DEFAULT_BIC
  653. + default "cubic" if DEFAULT_CUBIC
  654. + default "htcp" if DEFAULT_HTCP
  655. + default "vegas" if DEFAULT_VEGAS
  656. + default "westwood" if DEFAULT_WESTWOOD
  657. + default "reno" if DEFAULT_RENO
  658. + default "cubic"
  659. +
  660. +config TCP_MD5SIG
  661. + bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)"
  662. + depends on EXPERIMENTAL
  663. + select CRYPTO
  664. + select CRYPTO_MD5
  665. + ---help---
  666. + RFC2385 specifies a method of giving MD5 protection to TCP sessions.
  667. + Its main (only?) use is to protect BGP sessions between core routers
  668. + on the Internet.
  669. +
  670. + If unsure, say N.
  671. +
  672. diff -Nur linux-2.6.30.1.orig/net/ipv4/udp.c linux-2.6.30.1/net/ipv4/udp.c
  673. --- linux-2.6.30.1.orig/net/ipv4/udp.c 2009-07-03 01:52:38.000000000 +0200
  674. +++ linux-2.6.30.1/net/ipv4/udp.c 2009-07-24 22:00:56.755270521 +0200
  675. @@ -104,6 +104,7 @@
  676. #include <net/route.h>
  677. #include <net/checksum.h>
  678. #include <net/xfrm.h>
  679. +#include <net/xfrmudp.h>
  680. #include "udp_impl.h"
  681. struct udp_table udp_table;
  682. @@ -1035,6 +1036,128 @@
  683. return -1;
  684. }
  685. +#if defined(CONFIG_XFRM) || defined(CONFIG_IPSEC_NAT_TRAVERSAL)
  686. +
  687. +static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = NULL;
  688. +
  689. +/*
  690. + * de-encapsulate and pass to the registered xfrm4_rcv_encap_func function.
  691. + * Most of this code stolen from net/ipv4/xfrm4_input.c
  692. + * which is attributed to YOSHIFUJI Hideaki @USAGI, and
  693. + * Derek Atkins <derek@ihtfp.com>
  694. + */
  695. +
  696. +static int xfrm4_udp_encap_rcv_wrapper(struct sock *sk, struct sk_buff *skb)
  697. +{
  698. + struct udp_sock *up = udp_sk(sk);
  699. + struct udphdr *uh;
  700. + struct iphdr *iph;
  701. + int iphlen, len;
  702. + int ret;
  703. +
  704. + __u8 *udpdata;
  705. + __be32 *udpdata32;
  706. + __u16 encap_type = up->encap_type;
  707. +
  708. + /* if this is not encapsulated socket, then just return now */
  709. + if (!encap_type && !xfrm4_rcv_encap_func)
  710. + return 1;
  711. +
  712. + /* If this is a paged skb, make sure we pull up
  713. + * whatever data we need to look at. */
  714. + len = skb->len - sizeof(struct udphdr);
  715. + if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
  716. + return 1;
  717. +
  718. + /* Now we can get the pointers */
  719. + uh = udp_hdr(skb);
  720. + udpdata = (__u8 *)uh + sizeof(struct udphdr);
  721. + udpdata32 = (__be32 *)udpdata;
  722. +
  723. + switch (encap_type) {
  724. + default:
  725. + case UDP_ENCAP_ESPINUDP:
  726. + /* Check if this is a keepalive packet. If so, eat it. */
  727. + if (len == 1 && udpdata[0] == 0xff) {
  728. + goto drop;
  729. + } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
  730. + /* ESP Packet without Non-ESP header */
  731. + len = sizeof(struct udphdr);
  732. + } else
  733. + /* Must be an IKE packet.. pass it through */
  734. + return 1;
  735. + break;
  736. + case UDP_ENCAP_ESPINUDP_NON_IKE:
  737. + /* Check if this is a keepalive packet. If so, eat it. */
  738. + if (len == 1 && udpdata[0] == 0xff) {
  739. + goto drop;
  740. + } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
  741. + udpdata32[0] == 0 && udpdata32[1] == 0) {
  742. +
  743. + /* ESP Packet with Non-IKE marker */
  744. + len = sizeof(struct udphdr) + 2 * sizeof(u32);
  745. + } else
  746. + /* Must be an IKE packet.. pass it through */
  747. + return 1;
  748. + break;
  749. + }
  750. +
  751. + /* At this point we are sure that this is an ESPinUDP packet,
  752. + * so we need to remove 'len' bytes from the packet (the UDP
  753. + * header and optional ESP marker bytes) and then modify the
  754. + * protocol to ESP, and then call into the transform receiver.
  755. + */
  756. + if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
  757. + goto drop;
  758. +
  759. + /* Now we can update and verify the packet length... */
  760. + iph = ip_hdr(skb);
  761. + iphlen = iph->ihl << 2;
  762. + iph->tot_len = htons(ntohs(iph->tot_len) - len);
  763. + if (skb->len < iphlen + len) {
  764. + /* packet is too small!?! */
  765. + goto drop;
  766. + }
  767. +
  768. + /* pull the data buffer up to the ESP header and set the
  769. + * transport header to point to ESP. Keep UDP on the stack
  770. + * for later.
  771. + */
  772. + __skb_pull(skb, len);
  773. + skb_reset_transport_header(skb);
  774. +
  775. + /* modify the protocol (it's ESP!) */
  776. + iph->protocol = IPPROTO_ESP;
  777. +
  778. + /* process ESP */
  779. + ret = (*xfrm4_rcv_encap_func)(skb, encap_type);
  780. + return ret;
  781. +
  782. +drop:
  783. + kfree_skb(skb);
  784. + return 0;
  785. +}
  786. +
  787. +int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func,
  788. + xfrm4_rcv_encap_t *oldfunc)
  789. +{
  790. + if (oldfunc != NULL)
  791. + *oldfunc = xfrm4_rcv_encap_func;
  792. + xfrm4_rcv_encap_func = func;
  793. + return 0;
  794. +}
  795. +
  796. +int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func)
  797. +{
  798. + if (xfrm4_rcv_encap_func != func)
  799. + return -1;
  800. +
  801. + xfrm4_rcv_encap_func = NULL;
  802. + return 0;
  803. +}
  804. +
  805. +#endif /* CONFIG_XFRM || CONFIG_IPSEC_NAT_TRAVERSAL */
  806. +
  807. /* returns:
  808. * -1: error
  809. * 0: success
  810. @@ -1377,6 +1500,11 @@
  811. case 0:
  812. case UDP_ENCAP_ESPINUDP:
  813. case UDP_ENCAP_ESPINUDP_NON_IKE:
  814. +#if defined(CONFIG_XFRM) || defined(CONFIG_IPSEC_NAT_TRAVERSAL)
  815. + if (xfrm4_rcv_encap_func)
  816. + up->encap_rcv = xfrm4_udp_encap_rcv_wrapper;
  817. + else
  818. +#endif
  819. up->encap_rcv = xfrm4_udp_encap_rcv;
  820. /* FALLTHROUGH */
  821. case UDP_ENCAP_L2TPINUDP:
  822. @@ -1828,3 +1956,9 @@
  823. EXPORT_SYMBOL(udp_proc_register);
  824. EXPORT_SYMBOL(udp_proc_unregister);
  825. #endif
  826. +
  827. +#if defined(CONFIG_IPSEC_NAT_TRAVERSAL)
  828. +EXPORT_SYMBOL(udp4_register_esp_rcvencap);
  829. +EXPORT_SYMBOL(udp4_unregister_esp_rcvencap);
  830. +#endif
  831. +
  832. diff -Nur linux-2.6.30.1.orig/net/ipv4/udp.c.orig linux-2.6.30.1/net/ipv4/udp.c.orig
  833. --- linux-2.6.30.1.orig/net/ipv4/udp.c.orig 1970-01-01 01:00:00.000000000 +0100
  834. +++ linux-2.6.30.1/net/ipv4/udp.c.orig 2009-07-03 01:52:38.000000000 +0200
  835. @@ -0,0 +1,1830 @@
  836. +/*
  837. + * INET An implementation of the TCP/IP protocol suite for the LINUX
  838. + * operating system. INET is implemented using the BSD Socket
  839. + * interface as the means of communication with the user level.
  840. + *
  841. + * The User Datagram Protocol (UDP).
  842. + *
  843. + * Authors: Ross Biro
  844. + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  845. + * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  846. + * Alan Cox, <alan@lxorguk.ukuu.org.uk>
  847. + * Hirokazu Takahashi, <taka@valinux.co.jp>
  848. + *
  849. + * Fixes:
  850. + * Alan Cox : verify_area() calls
  851. + * Alan Cox : stopped close while in use off icmp
  852. + * messages. Not a fix but a botch that
  853. + * for udp at least is 'valid'.
  854. + * Alan Cox : Fixed icmp handling properly
  855. + * Alan Cox : Correct error for oversized datagrams
  856. + * Alan Cox : Tidied select() semantics.
  857. + * Alan Cox : udp_err() fixed properly, also now
  858. + * select and read wake correctly on errors
  859. + * Alan Cox : udp_send verify_area moved to avoid mem leak
  860. + * Alan Cox : UDP can count its memory
  861. + * Alan Cox : send to an unknown connection causes
  862. + * an ECONNREFUSED off the icmp, but
  863. + * does NOT close.
  864. + * Alan Cox : Switched to new sk_buff handlers. No more backlog!
  865. + * Alan Cox : Using generic datagram code. Even smaller and the PEEK
  866. + * bug no longer crashes it.
  867. + * Fred Van Kempen : Net2e support for sk->broadcast.
  868. + * Alan Cox : Uses skb_free_datagram
  869. + * Alan Cox : Added get/set sockopt support.
  870. + * Alan Cox : Broadcasting without option set returns EACCES.
  871. + * Alan Cox : No wakeup calls. Instead we now use the callbacks.
  872. + * Alan Cox : Use ip_tos and ip_ttl
  873. + * Alan Cox : SNMP Mibs
  874. + * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
  875. + * Matt Dillon : UDP length checks.
  876. + * Alan Cox : Smarter af_inet used properly.
  877. + * Alan Cox : Use new kernel side addressing.
  878. + * Alan Cox : Incorrect return on truncated datagram receive.
  879. + * Arnt Gulbrandsen : New udp_send and stuff
  880. + * Alan Cox : Cache last socket
  881. + * Alan Cox : Route cache
  882. + * Jon Peatfield : Minor efficiency fix to sendto().
  883. + * Mike Shaver : RFC1122 checks.
  884. + * Alan Cox : Nonblocking error fix.
  885. + * Willy Konynenberg : Transparent proxying support.
  886. + * Mike McLagan : Routing by source
  887. + * David S. Miller : New socket lookup architecture.
  888. + * Last socket cache retained as it
  889. + * does have a high hit rate.
  890. + * Olaf Kirch : Don't linearise iovec on sendmsg.
  891. + * Andi Kleen : Some cleanups, cache destination entry
  892. + * for connect.
  893. + * Vitaly E. Lavrov : Transparent proxy revived after year coma.
  894. + * Melvin Smith : Check msg_name not msg_namelen in sendto(),
  895. + * return ENOTCONN for unconnected sockets (POSIX)
  896. + * Janos Farkas : don't deliver multi/broadcasts to a different
  897. + * bound-to-device socket
  898. + * Hirokazu Takahashi : HW checksumming for outgoing UDP
  899. + * datagrams.
  900. + * Hirokazu Takahashi : sendfile() on UDP works now.
  901. + * Arnaldo C. Melo : convert /proc/net/udp to seq_file
  902. + * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
  903. + * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
  904. + * a single port at the same time.
  905. + * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
  906. + * James Chapman : Add L2TP encapsulation type.
  907. + *
  908. + *
  909. + * This program is free software; you can redistribute it and/or
  910. + * modify it under the terms of the GNU General Public License
  911. + * as published by the Free Software Foundation; either version
  912. + * 2 of the License, or (at your option) any later version.
  913. + */
  914. +
  915. +#include <asm/system.h>
  916. +#include <asm/uaccess.h>
  917. +#include <asm/ioctls.h>
  918. +#include <linux/bootmem.h>
  919. +#include <linux/highmem.h>
  920. +#include <linux/swap.h>
  921. +#include <linux/types.h>
  922. +#include <linux/fcntl.h>
  923. +#include <linux/module.h>
  924. +#include <linux/socket.h>
  925. +#include <linux/sockios.h>
  926. +#include <linux/igmp.h>
  927. +#include <linux/in.h>
  928. +#include <linux/errno.h>
  929. +#include <linux/timer.h>
  930. +#include <linux/mm.h>
  931. +#include <linux/inet.h>
  932. +#include <linux/netdevice.h>
  933. +#include <net/tcp_states.h>
  934. +#include <linux/skbuff.h>
  935. +#include <linux/proc_fs.h>
  936. +#include <linux/seq_file.h>
  937. +#include <net/net_namespace.h>
  938. +#include <net/icmp.h>
  939. +#include <net/route.h>
  940. +#include <net/checksum.h>
  941. +#include <net/xfrm.h>
  942. +#include "udp_impl.h"
  943. +
  944. +struct udp_table udp_table;
  945. +EXPORT_SYMBOL(udp_table);
  946. +
  947. +int sysctl_udp_mem[3] __read_mostly;
  948. +int sysctl_udp_rmem_min __read_mostly;
  949. +int sysctl_udp_wmem_min __read_mostly;
  950. +
  951. +EXPORT_SYMBOL(sysctl_udp_mem);
  952. +EXPORT_SYMBOL(sysctl_udp_rmem_min);
  953. +EXPORT_SYMBOL(sysctl_udp_wmem_min);
  954. +
  955. +atomic_t udp_memory_allocated;
  956. +EXPORT_SYMBOL(udp_memory_allocated);
  957. +
  958. +#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
  959. +
  960. +static int udp_lib_lport_inuse(struct net *net, __u16 num,
  961. + const struct udp_hslot *hslot,
  962. + unsigned long *bitmap,
  963. + struct sock *sk,
  964. + int (*saddr_comp)(const struct sock *sk1,
  965. + const struct sock *sk2))
  966. +{
  967. + struct sock *sk2;
  968. + struct hlist_nulls_node *node;
  969. +
  970. + sk_nulls_for_each(sk2, node, &hslot->head)
  971. + if (net_eq(sock_net(sk2), net) &&
  972. + sk2 != sk &&
  973. + (bitmap || sk2->sk_hash == num) &&
  974. + (!sk2->sk_reuse || !sk->sk_reuse) &&
  975. + (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
  976. + || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
  977. + (*saddr_comp)(sk, sk2)) {
  978. + if (bitmap)
  979. + __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
  980. + bitmap);
  981. + else
  982. + return 1;
  983. + }
  984. + return 0;
  985. +}
  986. +
  987. +/**
  988. + * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
  989. + *
  990. + * @sk: socket struct in question
  991. + * @snum: port number to look up
  992. + * @saddr_comp: AF-dependent comparison of bound local IP addresses
  993. + */
  994. +int udp_lib_get_port(struct sock *sk, unsigned short snum,
  995. + int (*saddr_comp)(const struct sock *sk1,
  996. + const struct sock *sk2 ) )
  997. +{
  998. + struct udp_hslot *hslot;
  999. + struct udp_table *udptable = sk->sk_prot->h.udp_table;
  1000. + int error = 1;
  1001. + struct net *net = sock_net(sk);
  1002. +
  1003. + if (!snum) {
  1004. + int low, high, remaining;
  1005. + unsigned rand;
  1006. + unsigned short first, last;
  1007. + DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
  1008. +
  1009. + inet_get_local_port_range(&low, &high);
  1010. + remaining = (high - low) + 1;
  1011. +
  1012. + rand = net_random();
  1013. + first = (((u64)rand * remaining) >> 32) + low;
  1014. + /*
  1015. + * force rand to be an odd multiple of UDP_HTABLE_SIZE
  1016. + */
  1017. + rand = (rand | 1) * UDP_HTABLE_SIZE;
  1018. + for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
  1019. + hslot = &udptable->hash[udp_hashfn(net, first)];
  1020. + bitmap_zero(bitmap, PORTS_PER_CHAIN);
  1021. + spin_lock_bh(&hslot->lock);
  1022. + udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
  1023. + saddr_comp);
  1024. +
  1025. + snum = first;
  1026. + /*
  1027. + * Iterate on all possible values of snum for this hash.
  1028. + * Using steps of an odd multiple of UDP_HTABLE_SIZE
  1029. + * give us randomization and full range coverage.
  1030. + */
  1031. + do {
  1032. + if (low <= snum && snum <= high &&
  1033. + !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
  1034. + goto found;
  1035. + snum += rand;
  1036. + } while (snum != first);
  1037. + spin_unlock_bh(&hslot->lock);
  1038. + }
  1039. + goto fail;
  1040. + } else {
  1041. + hslot = &udptable->hash[udp_hashfn(net, snum)];
  1042. + spin_lock_bh(&hslot->lock);
  1043. + if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
  1044. + goto fail_unlock;
  1045. + }
  1046. +found:
  1047. + inet_sk(sk)->num = snum;
  1048. + sk->sk_hash = snum;
  1049. + if (sk_unhashed(sk)) {
  1050. + sk_nulls_add_node_rcu(sk, &hslot->head);
  1051. + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
  1052. + }
  1053. + error = 0;
  1054. +fail_unlock:
  1055. + spin_unlock_bh(&hslot->lock);
  1056. +fail:
  1057. + return error;
  1058. +}
  1059. +
  1060. +static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
  1061. +{
  1062. + struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
  1063. +
  1064. + return ( !ipv6_only_sock(sk2) &&
  1065. + (!inet1->rcv_saddr || !inet2->rcv_saddr ||
  1066. + inet1->rcv_saddr == inet2->rcv_saddr ));
  1067. +}
  1068. +
  1069. +int udp_v4_get_port(struct sock *sk, unsigned short snum)
  1070. +{
  1071. + return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
  1072. +}
  1073. +
  1074. +static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
  1075. + unsigned short hnum,
  1076. + __be16 sport, __be32 daddr, __be16 dport, int dif)
  1077. +{
  1078. + int score = -1;
  1079. +
  1080. + if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
  1081. + !ipv6_only_sock(sk)) {
  1082. + struct inet_sock *inet = inet_sk(sk);
  1083. +
  1084. + score = (sk->sk_family == PF_INET ? 1 : 0);
  1085. + if (inet->rcv_saddr) {
  1086. + if (inet->rcv_saddr != daddr)
  1087. + return -1;
  1088. + score += 2;
  1089. + }
  1090. + if (inet->daddr) {
  1091. + if (inet->daddr != saddr)
  1092. + return -1;
  1093. + score += 2;
  1094. + }
  1095. + if (inet->dport) {
  1096. + if (inet->dport != sport)
  1097. + return -1;
  1098. + score += 2;
  1099. + }
  1100. + if (sk->sk_bound_dev_if) {
  1101. + if (sk->sk_bound_dev_if != dif)
  1102. + return -1;
  1103. + score += 2;
  1104. + }
  1105. + }
  1106. + return score;
  1107. +}
  1108. +
  1109. +/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  1110. + * harder than this. -DaveM
  1111. + */
  1112. +static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
  1113. + __be16 sport, __be32 daddr, __be16 dport,
  1114. + int dif, struct udp_table *udptable)
  1115. +{
  1116. + struct sock *sk, *result;
  1117. + struct hlist_nulls_node *node;
  1118. + unsigned short hnum = ntohs(dport);
  1119. + unsigned int hash = udp_hashfn(net, hnum);
  1120. + struct udp_hslot *hslot = &udptable->hash[hash];
  1121. + int score, badness;
  1122. +
  1123. + rcu_read_lock();
  1124. +begin:
  1125. + result = NULL;
  1126. + badness = -1;
  1127. + sk_nulls_for_each_rcu(sk, node, &hslot->head) {
  1128. + score = compute_score(sk, net, saddr, hnum, sport,
  1129. + daddr, dport, dif);
  1130. + if (score > badness) {
  1131. + result = sk;
  1132. + badness = score;
  1133. + }
  1134. + }
  1135. + /*
  1136. + * if the nulls value we got at the end of this lookup is
  1137. + * not the expected one, we must restart lookup.
  1138. + * We probably met an item that was moved to another chain.
  1139. + */
  1140. + if (get_nulls_value(node) != hash)
  1141. + goto begin;
  1142. +
  1143. + if (result) {
  1144. + if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
  1145. + result = NULL;
  1146. + else if (unlikely(compute_score(result, net, saddr, hnum, sport,
  1147. + daddr, dport, dif) < badness)) {
  1148. + sock_put(result);
  1149. + goto begin;
  1150. + }
  1151. + }
  1152. + rcu_read_unlock();
  1153. + return result;
  1154. +}
  1155. +
  1156. +static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
  1157. + __be16 sport, __be16 dport,
  1158. + struct udp_table *udptable)
  1159. +{
  1160. + struct sock *sk;
  1161. + const struct iphdr *iph = ip_hdr(skb);
  1162. +
  1163. + if (unlikely(sk = skb_steal_sock(skb)))
  1164. + return sk;
  1165. + else
  1166. + return __udp4_lib_lookup(dev_net(skb->dst->dev), iph->saddr, sport,
  1167. + iph->daddr, dport, inet_iif(skb),
  1168. + udptable);
  1169. +}
  1170. +
  1171. +struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
  1172. + __be32 daddr, __be16 dport, int dif)
  1173. +{
  1174. + return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
  1175. +}
  1176. +EXPORT_SYMBOL_GPL(udp4_lib_lookup);
  1177. +
  1178. +static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
  1179. + __be16 loc_port, __be32 loc_addr,
  1180. + __be16 rmt_port, __be32 rmt_addr,
  1181. + int dif)
  1182. +{
  1183. + struct hlist_nulls_node *node;
  1184. + struct sock *s = sk;
  1185. + unsigned short hnum = ntohs(loc_port);
  1186. +
  1187. + sk_nulls_for_each_from(s, node) {
  1188. + struct inet_sock *inet = inet_sk(s);
  1189. +
  1190. + if (!net_eq(sock_net(s), net) ||
  1191. + s->sk_hash != hnum ||
  1192. + (inet->daddr && inet->daddr != rmt_addr) ||
  1193. + (inet->dport != rmt_port && inet->dport) ||
  1194. + (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
  1195. + ipv6_only_sock(s) ||
  1196. + (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
  1197. + continue;
  1198. + if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
  1199. + continue;
  1200. + goto found;
  1201. + }
  1202. + s = NULL;
  1203. +found:
  1204. + return s;
  1205. +}
  1206. +
  1207. +/*
  1208. + * This routine is called by the ICMP module when it gets some
  1209. + * sort of error condition. If err < 0 then the socket should
  1210. + * be closed and the error returned to the user. If err > 0
  1211. + * it's just the icmp type << 8 | icmp code.
  1212. + * Header points to the ip header of the error packet. We move
  1213. + * on past this. Then (as it used to claim before adjustment)
  1214. + * header points to the first 8 bytes of the udp header. We need
  1215. + * to find the appropriate port.
  1216. + */
  1217. +
  1218. +void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
  1219. +{
  1220. + struct inet_sock *inet;
  1221. + struct iphdr *iph = (struct iphdr*)skb->data;
  1222. + struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
  1223. + const int type = icmp_hdr(skb)->type;
  1224. + const int code = icmp_hdr(skb)->code;
  1225. + struct sock *sk;
  1226. + int harderr;
  1227. + int err;
  1228. + struct net *net = dev_net(skb->dev);
  1229. +
  1230. + sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
  1231. + iph->saddr, uh->source, skb->dev->ifindex, udptable);
  1232. + if (sk == NULL) {
  1233. + ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
  1234. + return; /* No socket for error */
  1235. + }
  1236. +
  1237. + err = 0;
  1238. + harderr = 0;
  1239. + inet = inet_sk(sk);
  1240. +
  1241. + switch (type) {
  1242. + default:
  1243. + case ICMP_TIME_EXCEEDED:
  1244. + err = EHOSTUNREACH;
  1245. + break;
  1246. + case ICMP_SOURCE_QUENCH:
  1247. + goto out;
  1248. + case ICMP_PARAMETERPROB:
  1249. + err = EPROTO;
  1250. + harderr = 1;
  1251. + break;
  1252. + case ICMP_DEST_UNREACH:
  1253. + if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
  1254. + if (inet->pmtudisc != IP_PMTUDISC_DONT) {
  1255. + err = EMSGSIZE;
  1256. + harderr = 1;
  1257. + break;
  1258. + }
  1259. + goto out;
  1260. + }
  1261. + err = EHOSTUNREACH;
  1262. + if (code <= NR_ICMP_UNREACH) {
  1263. + harderr = icmp_err_convert[code].fatal;
  1264. + err = icmp_err_convert[code].errno;
  1265. + }
  1266. + break;
  1267. + }
  1268. +
  1269. + /*
  1270. + * RFC1122: OK. Passes ICMP errors back to application, as per
  1271. + * 4.1.3.3.
  1272. + */
  1273. + if (!inet->recverr) {
  1274. + if (!harderr || sk->sk_state != TCP_ESTABLISHED)
  1275. + goto out;
  1276. + } else {
  1277. + ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
  1278. + }
  1279. + sk->sk_err = err;
  1280. + sk->sk_error_report(sk);
  1281. +out:
  1282. + sock_put(sk);
  1283. +}
  1284. +
  1285. +void udp_err(struct sk_buff *skb, u32 info)
  1286. +{
  1287. + __udp4_lib_err(skb, info, &udp_table);
  1288. +}
  1289. +
  1290. +/*
  1291. + * Throw away all pending data and cancel the corking. Socket is locked.
  1292. + */
  1293. +void udp_flush_pending_frames(struct sock *sk)
  1294. +{
  1295. + struct udp_sock *up = udp_sk(sk);
  1296. +
  1297. + if (up->pending) {
  1298. + up->len = 0;
  1299. + up->pending = 0;
  1300. + ip_flush_pending_frames(sk);
  1301. + }
  1302. +}
  1303. +EXPORT_SYMBOL(udp_flush_pending_frames);
  1304. +
  1305. +/**
  1306. + * udp4_hwcsum_outgoing - handle outgoing HW checksumming
  1307. + * @sk: socket we are sending on
  1308. + * @skb: sk_buff containing the filled-in UDP header
  1309. + * (checksum field must be zeroed out)
  1310. + */
  1311. +static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
  1312. + __be32 src, __be32 dst, int len )
  1313. +{
  1314. + unsigned int offset;
  1315. + struct udphdr *uh = udp_hdr(skb);
  1316. + __wsum csum = 0;
  1317. +
  1318. + if (skb_queue_len(&sk->sk_write_queue) == 1) {
  1319. + /*
  1320. + * Only one fragment on the socket.
  1321. + */
  1322. + skb->csum_start = skb_transport_header(skb) - skb->head;
  1323. + skb->csum_offset = offsetof(struct udphdr, check);
  1324. + uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
  1325. + } else {
  1326. + /*
  1327. + * HW-checksum won't work as there are two or more
  1328. + * fragments on the socket, so the csums of all the
  1329. + * sk_buffs have to be combined in software
  1330. + */
  1331. + offset = skb_transport_offset(skb);
  1332. + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
  1333. +
  1334. + skb->ip_summed = CHECKSUM_NONE;
  1335. +
  1336. + skb_queue_walk(&sk->sk_write_queue, skb) {
  1337. + csum = csum_add(csum, skb->csum);
  1338. + }
  1339. +
  1340. + uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
  1341. + if (uh->check == 0)
  1342. + uh->check = CSUM_MANGLED_0;
  1343. + }
  1344. +}
  1345. +
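For reference, the folding performed by csum_tcpudp_magic() and the CSUM_MANGLED_0 substitution can be reproduced in plain C. A stand-alone sketch (helper names and the sample addresses/ports are made up; this is not the kernel API):

        #include <stdio.h>
        #include <stdint.h>
        #include <stddef.h>
        #include <string.h>

        static uint32_t sum16(const void *data, size_t len, uint32_t sum)
        {
                const uint8_t *p = data;

                while (len > 1) {
                        sum += (p[0] << 8) | p[1];
                        p += 2;
                        len -= 2;
                }
                if (len)
                        sum += p[0] << 8;       /* pad the odd trailing byte */
                return sum;
        }

        static uint16_t udp_checksum(uint32_t saddr, uint32_t daddr,
                                     const void *udp, uint16_t udp_len)
        {
                uint32_t sum = 0;

                /* pseudo-header: saddr, daddr, protocol 17, UDP length */
                sum += (saddr >> 16) + (saddr & 0xffff);
                sum += (daddr >> 16) + (daddr & 0xffff);
                sum += 17;
                sum += udp_len;
                sum = sum16(udp, udp_len, sum);

                while (sum >> 16)               /* fold the carries          */
                        sum = (sum & 0xffff) + (sum >> 16);

                sum = ~sum & 0xffff;
                return sum ? sum : 0xffff;      /* CSUM_MANGLED_0 equivalent */
        }

        int main(void)
        {
                uint8_t dgram[8 + 4] = { 0 };   /* UDP header + 4-byte payload */

                dgram[0] = 0x30; dgram[1] = 0x39;   /* sport 12345 */
                dgram[2] = 0x00; dgram[3] = 0x35;   /* dport 53    */
                dgram[4] = 0x00; dgram[5] = 12;     /* length      */
                memcpy(dgram + 8, "ping", 4);

                printf("checksum: 0x%04x\n",
                       udp_checksum(0xc0a80001, 0xc0a80002, dgram, sizeof(dgram)));
                return 0;
        }

A computed checksum of zero must be transmitted as 0xFFFF because an on-the-wire value of 0 means "no checksum" for UDP over IPv4.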
  1346. +/*
  1347. + * Push out all pending data as one UDP datagram. Socket is locked.
  1348. + */
  1349. +static int udp_push_pending_frames(struct sock *sk)
  1350. +{
  1351. + struct udp_sock *up = udp_sk(sk);
  1352. + struct inet_sock *inet = inet_sk(sk);
  1353. + struct flowi *fl = &inet->cork.fl;
  1354. + struct sk_buff *skb;
  1355. + struct udphdr *uh;
  1356. + int err = 0;
  1357. + int is_udplite = IS_UDPLITE(sk);
  1358. + __wsum csum = 0;
  1359. +
  1360. + /* Grab the skbuff where UDP header space exists. */
  1361. + if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
  1362. + goto out;
  1363. +
  1364. + /*
  1365. + * Create a UDP header
  1366. + */
  1367. + uh = udp_hdr(skb);
  1368. + uh->source = fl->fl_ip_sport;
  1369. + uh->dest = fl->fl_ip_dport;
  1370. + uh->len = htons(up->len);
  1371. + uh->check = 0;
  1372. +
  1373. + if (is_udplite) /* UDP-Lite */
  1374. + csum = udplite_csum_outgoing(sk, skb);
  1375. +
  1376. + else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
  1377. +
  1378. + skb->ip_summed = CHECKSUM_NONE;
  1379. + goto send;
  1380. +
  1381. + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
  1382. +
  1383. + udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len);
  1384. + goto send;
  1385. +
  1386. + } else /* `normal' UDP */
  1387. + csum = udp_csum_outgoing(sk, skb);
  1388. +
  1389. + /* add protocol-dependent pseudo-header */
  1390. + uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
  1391. + sk->sk_protocol, csum );
  1392. + if (uh->check == 0)
  1393. + uh->check = CSUM_MANGLED_0;
  1394. +
  1395. +send:
  1396. + err = ip_push_pending_frames(sk);
  1397. +out:
  1398. + up->len = 0;
  1399. + up->pending = 0;
  1400. + if (!err)
  1401. + UDP_INC_STATS_USER(sock_net(sk),
  1402. + UDP_MIB_OUTDATAGRAMS, is_udplite);
  1403. + return err;
  1404. +}
  1405. +
  1406. +int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  1407. + size_t len)
  1408. +{
  1409. + struct inet_sock *inet = inet_sk(sk);
  1410. + struct udp_sock *up = udp_sk(sk);
  1411. + int ulen = len;
  1412. + struct ipcm_cookie ipc;
  1413. + struct rtable *rt = NULL;
  1414. + int free = 0;
  1415. + int connected = 0;
  1416. + __be32 daddr, faddr, saddr;
  1417. + __be16 dport;
  1418. + u8 tos;
  1419. + int err, is_udplite = IS_UDPLITE(sk);
  1420. + int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
  1421. + int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
  1422. +
  1423. + if (len > 0xFFFF)
  1424. + return -EMSGSIZE;
  1425. +
  1426. + /*
  1427. + * Check the flags.
  1428. + */
  1429. +
  1430. + if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */
  1431. + return -EOPNOTSUPP;
  1432. +
  1433. + ipc.opt = NULL;
  1434. + ipc.shtx.flags = 0;
  1435. +
  1436. + if (up->pending) {
  1437. + /*
  1438. + * There are pending frames.
  1439. + * The socket lock must be held while it's corked.
  1440. + */
  1441. + lock_sock(sk);
  1442. + if (likely(up->pending)) {
  1443. + if (unlikely(up->pending != AF_INET)) {
  1444. + release_sock(sk);
  1445. + return -EINVAL;
  1446. + }
  1447. + goto do_append_data;
  1448. + }
  1449. + release_sock(sk);
  1450. + }
  1451. + ulen += sizeof(struct udphdr);
  1452. +
  1453. + /*
  1454. + * Get and verify the address.
  1455. + */
  1456. + if (msg->msg_name) {
  1457. + struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
  1458. + if (msg->msg_namelen < sizeof(*usin))
  1459. + return -EINVAL;
  1460. + if (usin->sin_family != AF_INET) {
  1461. + if (usin->sin_family != AF_UNSPEC)
  1462. + return -EAFNOSUPPORT;
  1463. + }
  1464. +
  1465. + daddr = usin->sin_addr.s_addr;
  1466. + dport = usin->sin_port;
  1467. + if (dport == 0)
  1468. + return -EINVAL;
  1469. + } else {
  1470. + if (sk->sk_state != TCP_ESTABLISHED)
  1471. + return -EDESTADDRREQ;
  1472. + daddr = inet->daddr;
  1473. + dport = inet->dport;
  1474. + /* Open fast path for connected socket.
  1475. + Route will not be used, if at least one option is set.
  1476. + */
  1477. + connected = 1;
  1478. + }
  1479. + ipc.addr = inet->saddr;
  1480. +
  1481. + ipc.oif = sk->sk_bound_dev_if;
  1482. + err = sock_tx_timestamp(msg, sk, &ipc.shtx);
  1483. + if (err)
  1484. + return err;
  1485. + if (msg->msg_controllen) {
  1486. + err = ip_cmsg_send(sock_net(sk), msg, &ipc);
  1487. + if (err)
  1488. + return err;
  1489. + if (ipc.opt)
  1490. + free = 1;
  1491. + connected = 0;
  1492. + }
  1493. + if (!ipc.opt)
  1494. + ipc.opt = inet->opt;
  1495. +
  1496. + saddr = ipc.addr;
  1497. + ipc.addr = faddr = daddr;
  1498. +
  1499. + if (ipc.opt && ipc.opt->srr) {
  1500. + if (!daddr)
  1501. + return -EINVAL;
  1502. + faddr = ipc.opt->faddr;
  1503. + connected = 0;
  1504. + }
  1505. + tos = RT_TOS(inet->tos);
  1506. + if (sock_flag(sk, SOCK_LOCALROUTE) ||
  1507. + (msg->msg_flags & MSG_DONTROUTE) ||
  1508. + (ipc.opt && ipc.opt->is_strictroute)) {
  1509. + tos |= RTO_ONLINK;
  1510. + connected = 0;
  1511. + }
  1512. +
  1513. + if (ipv4_is_multicast(daddr)) {
  1514. + if (!ipc.oif)
  1515. + ipc.oif = inet->mc_index;
  1516. + if (!saddr)
  1517. + saddr = inet->mc_addr;
  1518. + connected = 0;
  1519. + }
  1520. +
  1521. + if (connected)
  1522. + rt = (struct rtable*)sk_dst_check(sk, 0);
  1523. +
  1524. + if (rt == NULL) {
  1525. + struct flowi fl = { .oif = ipc.oif,
  1526. + .nl_u = { .ip4_u =
  1527. + { .daddr = faddr,
  1528. + .saddr = saddr,
  1529. + .tos = tos } },
  1530. + .proto = sk->sk_protocol,
  1531. + .flags = inet_sk_flowi_flags(sk),
  1532. + .uli_u = { .ports =
  1533. + { .sport = inet->sport,
  1534. + .dport = dport } } };
  1535. + struct net *net = sock_net(sk);
  1536. +
  1537. + security_sk_classify_flow(sk, &fl);
  1538. + err = ip_route_output_flow(net, &rt, &fl, sk, 1);
  1539. + if (err) {
  1540. + if (err == -ENETUNREACH)
  1541. + IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
  1542. + goto out;
  1543. + }
  1544. +
  1545. + err = -EACCES;
  1546. + if ((rt->rt_flags & RTCF_BROADCAST) &&
  1547. + !sock_flag(sk, SOCK_BROADCAST))
  1548. + goto out;
  1549. + if (connected)
  1550. + sk_dst_set(sk, dst_clone(&rt->u.dst));
  1551. + }
  1552. +
  1553. + if (msg->msg_flags&MSG_CONFIRM)
  1554. + goto do_confirm;
  1555. +back_from_confirm:
  1556. +
  1557. + saddr = rt->rt_src;
  1558. + if (!ipc.addr)
  1559. + daddr = ipc.addr = rt->rt_dst;
  1560. +
  1561. + lock_sock(sk);
  1562. + if (unlikely(up->pending)) {
  1563. + /* The socket is already corked while preparing it. */
  1564. + /* ... which is an evident application bug. --ANK */
  1565. + release_sock(sk);
  1566. +
  1567. + LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
  1568. + err = -EINVAL;
  1569. + goto out;
  1570. + }
  1571. + /*
  1572. + * Now cork the socket to pend data.
  1573. + */
  1574. + inet->cork.fl.fl4_dst = daddr;
  1575. + inet->cork.fl.fl_ip_dport = dport;
  1576. + inet->cork.fl.fl4_src = saddr;
  1577. + inet->cork.fl.fl_ip_sport = inet->sport;
  1578. + up->pending = AF_INET;
  1579. +
  1580. +do_append_data:
  1581. + up->len += ulen;
  1582. + getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
  1583. + err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
  1584. + sizeof(struct udphdr), &ipc, &rt,
  1585. + corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
  1586. + if (err)
  1587. + udp_flush_pending_frames(sk);
  1588. + else if (!corkreq)
  1589. + err = udp_push_pending_frames(sk);
  1590. + else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
  1591. + up->pending = 0;
  1592. + release_sock(sk);
  1593. +
  1594. +out:
  1595. + ip_rt_put(rt);
  1596. + if (free)
  1597. + kfree(ipc.opt);
  1598. + if (!err)
  1599. + return len;
  1600. + /*
  1601. + * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
  1602. + * ENOBUFS might not be good (it's not tunable per se), but otherwise
  1603. + * we don't have a good statistic (IpOutDiscards but it can be too many
  1604. + * things). We could add another new stat but at least for now that
  1605. + * seems like overkill.
  1606. + */
  1607. + if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
  1608. + UDP_INC_STATS_USER(sock_net(sk),
  1609. + UDP_MIB_SNDBUFERRORS, is_udplite);
  1610. + }
  1611. + return err;
  1612. +
  1613. +do_confirm:
  1614. + dst_confirm(&rt->u.dst);
  1615. + if (!(msg->msg_flags&MSG_PROBE) || len)
  1616. + goto back_from_confirm;
  1617. + err = 0;
  1618. + goto out;
  1619. +}
  1620. +
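The corking logic above is what the UDP_CORK socket option and MSG_MORE drive from user space: while the cork is in place, successive sends are appended to one pending frame and pushed out as a single datagram when the cork is removed. A minimal usage sketch (the loopback address and port are arbitrary; error handling omitted):

        #include <netinet/in.h>
        #include <sys/socket.h>
        #include <unistd.h>

        #ifndef UDP_CORK
        #define UDP_CORK 1              /* kernel ABI value */
        #endif

        int main(void)
        {
                int one = 1, zero = 0;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);
                struct sockaddr_in dst = {
                        .sin_family = AF_INET,
                        .sin_port   = htons(9999),
                        .sin_addr   = { htonl(INADDR_LOOPBACK) },
                };

                connect(fd, (struct sockaddr *)&dst, sizeof(dst));

                setsockopt(fd, IPPROTO_UDP, UDP_CORK, &one, sizeof(one));
                send(fd, "hello ", 6, 0);   /* appended to the pending frame */
                send(fd, "world", 5, 0);    /* still the same datagram       */
                setsockopt(fd, IPPROTO_UDP, UDP_CORK, &zero, sizeof(zero));  /* push */

                close(fd);
                return 0;
        }

Removing the cork lands in udp_push_pending_frames() via the setsockopt path, which is why that function is passed as a callback to udp_lib_setsockopt() further down.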
  1621. +int udp_sendpage(struct sock *sk, struct page *page, int offset,
  1622. + size_t size, int flags)
  1623. +{
  1624. + struct udp_sock *up = udp_sk(sk);
  1625. + int ret;
  1626. +
  1627. + if (!up->pending) {
  1628. + struct msghdr msg = { .msg_flags = flags|MSG_MORE };
  1629. +
  1630. + /* Call udp_sendmsg to specify destination address which
  1631. + * sendpage interface can't pass.
  1632. + * This will succeed only when the socket is connected.
  1633. + */
  1634. + ret = udp_sendmsg(NULL, sk, &msg, 0);
  1635. + if (ret < 0)
  1636. + return ret;
  1637. + }
  1638. +
  1639. + lock_sock(sk);
  1640. +
  1641. + if (unlikely(!up->pending)) {
  1642. + release_sock(sk);
  1643. +
  1644. + LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
  1645. + return -EINVAL;
  1646. + }
  1647. +
  1648. + ret = ip_append_page(sk, page, offset, size, flags);
  1649. + if (ret == -EOPNOTSUPP) {
  1650. + release_sock(sk);
  1651. + return sock_no_sendpage(sk->sk_socket, page, offset,
  1652. + size, flags);
  1653. + }
  1654. + if (ret < 0) {
  1655. + udp_flush_pending_frames(sk);
  1656. + goto out;
  1657. + }
  1658. +
  1659. + up->len += size;
  1660. + if (!(up->corkflag || (flags&MSG_MORE)))
  1661. + ret = udp_push_pending_frames(sk);
  1662. + if (!ret)
  1663. + ret = size;
  1664. +out:
  1665. + release_sock(sk);
  1666. + return ret;
  1667. +}
  1668. +
  1669. +/*
  1670. + * IOCTL requests applicable to the UDP protocol
  1671. + */
  1672. +
  1673. +int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
  1674. +{
  1675. + switch (cmd) {
  1676. + case SIOCOUTQ:
  1677. + {
  1678. + int amount = atomic_read(&sk->sk_wmem_alloc);
  1679. + return put_user(amount, (int __user *)arg);
  1680. + }
  1681. +
  1682. + case SIOCINQ:
  1683. + {
  1684. + struct sk_buff *skb;
  1685. + unsigned long amount;
  1686. +
  1687. + amount = 0;
  1688. + spin_lock_bh(&sk->sk_receive_queue.lock);
  1689. + skb = skb_peek(&sk->sk_receive_queue);
  1690. + if (skb != NULL) {
  1691. + /*
  1692. + * We will only return the amount
  1693. + * of this packet since that is all
  1694. + * that will be read.
  1695. + */
  1696. + amount = skb->len - sizeof(struct udphdr);
  1697. + }
  1698. + spin_unlock_bh(&sk->sk_receive_queue.lock);
  1699. + return put_user(amount, (int __user *)arg);
  1700. + }
  1701. +
  1702. + default:
  1703. + return -ENOIOCTLCMD;
  1704. + }
  1705. +
  1706. + return 0;
  1707. +}
  1708. +
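From user space these are the SIOCOUTQ and SIOCINQ ioctls: bytes still queued for transmit, and the payload length of the next queued datagram (UDP header excluded). A minimal sketch (error handling omitted):

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <linux/sockios.h>   /* SIOCINQ / SIOCOUTQ */

        int main(void)
        {
                int fd = socket(AF_INET, SOCK_DGRAM, 0);
                int inq = 0, outq = 0;

                ioctl(fd, SIOCINQ, &inq);    /* next datagram's payload size */
                ioctl(fd, SIOCOUTQ, &outq);  /* unsent bytes in sk_wmem      */
                printf("pending rx payload: %d bytes, unsent tx: %d bytes\n",
                       inq, outq);
                return 0;
        }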
  1709. +/*
  1710. + * This should be easy: if there is something there, we
  1711. + * return it; otherwise we block.
  1712. + */
  1713. +
  1714. +int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  1715. + size_t len, int noblock, int flags, int *addr_len)
  1716. +{
  1717. + struct inet_sock *inet = inet_sk(sk);
  1718. + struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
  1719. + struct sk_buff *skb;
  1720. + unsigned int ulen, copied;
  1721. + int peeked;
  1722. + int err;
  1723. + int is_udplite = IS_UDPLITE(sk);
  1724. +
  1725. + /*
  1726. + * Check any passed addresses
  1727. + */
  1728. + if (addr_len)
  1729. + *addr_len=sizeof(*sin);
  1730. +
  1731. + if (flags & MSG_ERRQUEUE)
  1732. + return ip_recv_error(sk, msg, len);
  1733. +
  1734. +try_again:
  1735. + skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
  1736. + &peeked, &err);
  1737. + if (!skb)
  1738. + goto out;
  1739. +
  1740. + ulen = skb->len - sizeof(struct udphdr);
  1741. + copied = len;
  1742. + if (copied > ulen)
  1743. + copied = ulen;
  1744. + else if (copied < ulen)
  1745. + msg->msg_flags |= MSG_TRUNC;
  1746. +
  1747. + /*
  1748. + * If checksum is needed at all, try to do it while copying the
  1749. + * data. If the data is truncated, or if we only want a partial
  1750. + * coverage checksum (UDP-Lite), do it before the copy.
  1751. + */
  1752. +
  1753. + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
  1754. + if (udp_lib_checksum_complete(skb))
  1755. + goto csum_copy_err;
  1756. + }
  1757. +
  1758. + if (skb_csum_unnecessary(skb))
  1759. + err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
  1760. + msg->msg_iov, copied );
  1761. + else {
  1762. + err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
  1763. +
  1764. + if (err == -EINVAL)
  1765. + goto csum_copy_err;
  1766. + }
  1767. +
  1768. + if (err)
  1769. + goto out_free;
  1770. +
  1771. + if (!peeked)
  1772. + UDP_INC_STATS_USER(sock_net(sk),
  1773. + UDP_MIB_INDATAGRAMS, is_udplite);
  1774. +
  1775. + sock_recv_timestamp(msg, sk, skb);
  1776. +
  1777. + /* Copy the address. */
  1778. + if (sin)
  1779. + {
  1780. + sin->sin_family = AF_INET;
  1781. + sin->sin_port = udp_hdr(skb)->source;
  1782. + sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
  1783. + memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
  1784. + }
  1785. + if (inet->cmsg_flags)
  1786. + ip_cmsg_recv(msg, skb);
  1787. +
  1788. + err = copied;
  1789. + if (flags & MSG_TRUNC)
  1790. + err = ulen;
  1791. +
  1792. +out_free:
  1793. + lock_sock(sk);
  1794. + skb_free_datagram(sk, skb);
  1795. + release_sock(sk);
  1796. +out:
  1797. + return err;
  1798. +
  1799. +csum_copy_err:
  1800. + lock_sock(sk);
  1801. + if (!skb_kill_datagram(sk, skb, flags))
  1802. + UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  1803. + release_sock(sk);
  1804. +
  1805. + if (noblock)
  1806. + return -EAGAIN;
  1807. + goto try_again;
  1808. +}
  1809. +
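The MSG_TRUNC handling above is what lets a receiver learn the real datagram length even when its buffer was too small. A minimal user-space sketch (the helper name is made up; it assumes a datagram is already queued on fd):

        #include <stdio.h>
        #include <sys/types.h>
        #include <sys/socket.h>

        ssize_t read_datagram(int fd, char *buf, size_t buflen)
        {
                /* With MSG_TRUNC, recv() returns the full datagram length
                 * even if only buflen bytes were copied. */
                ssize_t n = recv(fd, buf, buflen, MSG_TRUNC);

                if (n > (ssize_t)buflen)
                        fprintf(stderr, "datagram truncated: %zd > %zu\n", n, buflen);
                return n;
        }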
  1810. +
  1811. +int udp_disconnect(struct sock *sk, int flags)
  1812. +{
  1813. + struct inet_sock *inet = inet_sk(sk);
  1814. + /*
  1815. + * 1003.1g - break association.
  1816. + */
  1817. +
  1818. + sk->sk_state = TCP_CLOSE;
  1819. + inet->daddr = 0;
  1820. + inet->dport = 0;
  1821. + sk->sk_bound_dev_if = 0;
  1822. + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
  1823. + inet_reset_saddr(sk);
  1824. +
  1825. + if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
  1826. + sk->sk_prot->unhash(sk);
  1827. + inet->sport = 0;
  1828. + }
  1829. + sk_dst_reset(sk);
  1830. + return 0;
  1831. +}
  1832. +
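udp_disconnect() is reached from user space by connect()ing the socket with an AF_UNSPEC address, which breaks the existing association as 1003.1g requires. A minimal sketch (helper name made up; error handling omitted):

        #include <string.h>
        #include <sys/socket.h>

        void udp_unconnect(int fd)
        {
                struct sockaddr sa;

                memset(&sa, 0, sizeof(sa));
                sa.sa_family = AF_UNSPEC;
                connect(fd, &sa, sizeof(sa));   /* back to an unconnected socket */
        }

After this call the socket can again send to arbitrary destinations and receives from any source on its bound port.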
  1833. +void udp_lib_unhash(struct sock *sk)
  1834. +{
  1835. + if (sk_hashed(sk)) {
  1836. + struct udp_table *udptable = sk->sk_prot->h.udp_table;
  1837. + unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash);
  1838. + struct udp_hslot *hslot = &udptable->hash[hash];
  1839. +
  1840. + spin_lock_bh(&hslot->lock);
  1841. + if (sk_nulls_del_node_init_rcu(sk)) {
  1842. + inet_sk(sk)->num = 0;
  1843. + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
  1844. + }
  1845. + spin_unlock_bh(&hslot->lock);
  1846. + }
  1847. +}
  1848. +EXPORT_SYMBOL(udp_lib_unhash);
  1849. +
  1850. +static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
  1851. +{
  1852. + int is_udplite = IS_UDPLITE(sk);
  1853. + int rc;
  1854. +
  1855. + if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
  1856. + /* Note that an ENOMEM error is charged twice */
  1857. + if (rc == -ENOMEM) {
  1858. + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
  1859. + is_udplite);
  1860. + atomic_inc(&sk->sk_drops);
  1861. + }
  1862. + goto drop;
  1863. + }
  1864. +
  1865. + return 0;
  1866. +
  1867. +drop:
  1868. + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  1869. + kfree_skb(skb);
  1870. + return -1;
  1871. +}
  1872. +
  1873. +/* returns:
  1874. + * -1: error
  1875. + * 0: success
  1876. + * >0: "udp encap" protocol resubmission
  1877. + *
  1878. + * Note that in the success and error cases, the skb is assumed to
  1879. + * have either been requeued or freed.
  1880. + */
  1881. +int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
  1882. +{
  1883. + struct udp_sock *up = udp_sk(sk);
  1884. + int rc;
  1885. + int is_udplite = IS_UDPLITE(sk);
  1886. +
  1887. + /*
  1888. + * Charge it to the socket, dropping if the queue is full.
  1889. + */
  1890. + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
  1891. + goto drop;
  1892. + nf_reset(skb);
  1893. +
  1894. + if (up->encap_type) {
  1895. + /*
  1896. + * This is an encapsulation socket so pass the skb to
  1897. + * the socket's udp_encap_rcv() hook. Otherwise, just
  1898. + * fall through and pass this up the UDP socket.
  1899. + * up->encap_rcv() returns the following value:
  1900. + * =0 if skb was successfully passed to the encap
  1901. + * handler or was discarded by it.
  1902. + * >0 if skb should be passed on to UDP.
  1903. + * <0 if skb should be resubmitted as proto -N
  1904. + */
  1905. +
  1906. + /* if we're overly short, let UDP handle it */
  1907. + if (skb->len > sizeof(struct udphdr) &&
  1908. + up->encap_rcv != NULL) {
  1909. + int ret;
  1910. +
  1911. + ret = (*up->encap_rcv)(sk, skb);
  1912. + if (ret <= 0) {
  1913. + UDP_INC_STATS_BH(sock_net(sk),
  1914. + UDP_MIB_INDATAGRAMS,
  1915. + is_udplite);
  1916. + return -ret;
  1917. + }
  1918. + }
  1919. +
  1920. + /* FALLTHROUGH -- it's a UDP Packet */
  1921. + }
  1922. +
  1923. + /*
  1924. + * UDP-Lite specific tests, ignored on UDP sockets
  1925. + */
  1926. + if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
  1927. +
  1928. + /*
  1929. + * MIB statistics other than incrementing the error count are
  1930. + * disabled for the following two types of errors: these depend
  1931. + * on the application settings, not on the functioning of the
  1932. + * protocol stack as such.
  1933. + *
  1934. + * RFC 3828 here recommends (sec 3.3): "There should also be a
  1935. + * way ... to ... at least let the receiving application block
  1936. + * delivery of packets with coverage values less than a value
  1937. + * provided by the application."
  1938. + */
  1939. + if (up->pcrlen == 0) { /* full coverage was set */
  1940. + LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
  1941. + "%d while full coverage %d requested\n",
  1942. + UDP_SKB_CB(skb)->cscov, skb->len);
  1943. + goto drop;
  1944. + }
  1945. + /* The next case involves violating the min. coverage requested
  1946. + * by the receiver. This is subtle: if receiver wants x and x is
  1947. + * greater than the buffersize/MTU then receiver will complain
  1948. + * that it wants x while sender emits packets of smaller size y.
  1949. + * Therefore the above ...()->partial_cov statement is essential.
  1950. + */
  1951. + if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
  1952. + LIMIT_NETDEBUG(KERN_WARNING
  1953. + "UDPLITE: coverage %d too small, need min %d\n",
  1954. + UDP_SKB_CB(skb)->cscov, up->pcrlen);
  1955. + goto drop;
  1956. + }
  1957. + }
  1958. +
  1959. + if (sk->sk_filter) {
  1960. + if (udp_lib_checksum_complete(skb))
  1961. + goto drop;
  1962. + }
  1963. +
  1964. + rc = 0;
  1965. +
  1966. + bh_lock_sock(sk);
  1967. + if (!sock_owned_by_user(sk))
  1968. + rc = __udp_queue_rcv_skb(sk, skb);
  1969. + else
  1970. + sk_add_backlog(sk, skb);
  1971. + bh_unlock_sock(sk);
  1972. +
  1973. + return rc;
  1974. +
  1975. +drop:
  1976. + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  1977. + kfree_skb(skb);
  1978. + return -1;
  1979. +}
  1980. +
  1981. +/*
  1982. + * Multicasts and broadcasts go to each listener.
  1983. + *
  1984. + * Note: called only from the BH handler context,
  1985. + * so we don't need to lock the hashes.
  1986. + */
  1987. +static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
  1988. + struct udphdr *uh,
  1989. + __be32 saddr, __be32 daddr,
  1990. + struct udp_table *udptable)
  1991. +{
  1992. + struct sock *sk;
  1993. + struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))];
  1994. + int dif;
  1995. +
  1996. + spin_lock(&hslot->lock);
  1997. + sk = sk_nulls_head(&hslot->head);
  1998. + dif = skb->dev->ifindex;
  1999. + sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
  2000. + if (sk) {
  2001. + struct sock *sknext = NULL;
  2002. +
  2003. + do {
  2004. + struct sk_buff *skb1 = skb;
  2005. +
  2006. + sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
  2007. + daddr, uh->source, saddr,
  2008. + dif);
  2009. + if (sknext)
  2010. + skb1 = skb_clone(skb, GFP_ATOMIC);
  2011. +
  2012. + if (skb1) {
  2013. + int ret = udp_queue_rcv_skb(sk, skb1);
  2014. + if (ret > 0)
  2015. + /* we should probably re-process instead
  2016. + * of dropping packets here. */
  2017. + kfree_skb(skb1);
  2018. + }
  2019. + sk = sknext;
  2020. + } while (sknext);
  2021. + } else
  2022. + consume_skb(skb);
  2023. + spin_unlock(&hslot->lock);
  2024. + return 0;
  2025. +}
  2026. +
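On the receiving side, every socket that bound the destination port and joined the group gets its own copy of the datagram cloned above. A minimal user-space sketch of such a listener (the group address and port are arbitrary examples; error handling omitted):

        #include <arpa/inet.h>
        #include <netinet/in.h>
        #include <string.h>
        #include <sys/socket.h>

        int open_mcast_listener(void)
        {
                int fd = socket(AF_INET, SOCK_DGRAM, 0);
                int one = 1;
                struct sockaddr_in local;
                struct ip_mreq mreq;

                setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

                memset(&local, 0, sizeof(local));
                local.sin_family      = AF_INET;
                local.sin_port        = htons(5000);                 /* example port  */
                local.sin_addr.s_addr = htonl(INADDR_ANY);
                bind(fd, (struct sockaddr *)&local, sizeof(local));

                memset(&mreq, 0, sizeof(mreq));
                mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3");  /* example group */
                mreq.imr_interface.s_addr = htonl(INADDR_ANY);
                setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));

                return fd;
        }

The ip_mc_sf_allow() check in udp_v4_mcast_next() is where per-socket source filters (IGMPv3 / MSFilter) are applied before a copy is queued.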
  2027. +/* Initialize UDP checksum. If it returns zero (success) and ip_summed
  2028. + * is CHECKSUM_UNNECESSARY, no more checks are required.
  2029. + * Otherwise, csum completion requires checksumming the packet body,
  2030. + * including the udp header, and folding it into skb->csum.
  2031. + */
  2032. +static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
  2033. + int proto)
  2034. +{
  2035. + const struct iphdr *iph;
  2036. + int err;
  2037. +
  2038. + UDP_SKB_CB(skb)->partial_cov = 0;
  2039. + UDP_SKB_CB(skb)->cscov = skb->len;
  2040. +
  2041. + if (proto == IPPROTO_UDPLITE) {
  2042. + err = udplite_checksum_init(skb, uh);
  2043. + if (err)
  2044. + return err;
  2045. + }
  2046. +
  2047. + iph = ip_hdr(skb);
  2048. + if (uh->check == 0) {
  2049. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  2050. + } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
  2051. + if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
  2052. + proto, skb->csum))
  2053. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  2054. + }
  2055. + if (!skb_csum_unnecessary(skb))
  2056. + skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
  2057. + skb->len, proto, 0);
  2058. + /* Probably, we should checksum udp header (it should be in cache
  2059. + * in any case) and data in tiny packets (< rx copybreak).
  2060. + */
  2061. +
  2062. + return 0;
  2063. +}
  2064. +
  2065. +/*
  2066. + * All we need to do is get the socket, and then do a checksum.
  2067. + */
  2068. +
  2069. +int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
  2070. + int proto)
  2071. +{
  2072. + struct sock *sk;
  2073. + struct udphdr *uh;
  2074. + unsigned short ulen;
  2075. + struct rtable *rt = (struct rtable*)skb->dst;
  2076. + __be32 saddr, daddr;
  2077. + struct net *net = dev_net(skb->dev);
  2078. +
  2079. + /*
  2080. + * Validate the packet.
  2081. + */
  2082. + if (!pskb_may_pull(skb, sizeof(struct udphdr)))
  2083. + goto drop; /* No space for header. */
  2084. +
  2085. + uh = udp_hdr(skb);
  2086. + ulen = ntohs(uh->len);
  2087. + if (ulen > skb->len)
  2088. + goto short_packet;
  2089. +
  2090. + if (proto == IPPROTO_UDP) {
  2091. + /* UDP validates ulen. */
  2092. + if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
  2093. + goto short_packet;
  2094. + uh = udp_hdr(skb);
  2095. + }
  2096. +
  2097. + if (udp4_csum_init(skb, uh, proto))
  2098. + goto csum_error;
  2099. +
  2100. + saddr = ip_hdr(skb)->saddr;
  2101. + daddr = ip_hdr(skb)->daddr;
  2102. +
  2103. + if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
  2104. + return __udp4_lib_mcast_deliver(net, skb, uh,
  2105. + saddr, daddr, udptable);
  2106. +
  2107. + sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
  2108. +
  2109. + if (sk != NULL) {
  2110. + int ret = udp_queue_rcv_skb(sk, skb);
  2111. + sock_put(sk);
  2112. +
  2113. + /* a return value > 0 means to resubmit the input, but
  2114. + * it wants the return to be -protocol, or 0
  2115. + */
  2116. + if (ret > 0)
  2117. + return -ret;
  2118. + return 0;
  2119. + }
  2120. +
  2121. + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
  2122. + goto drop;
  2123. + nf_reset(skb);
  2124. +
  2125. + /* No socket. Drop packet silently, if checksum is wrong */
  2126. + if (udp_lib_checksum_complete(skb))
  2127. + goto csum_error;
  2128. +
  2129. + UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
  2130. + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
  2131. +
  2132. + /*
  2133. + * Hmm. We got a UDP packet to a port on which we
  2134. + * don't want to listen. Ignore it.
  2135. + */
  2136. + kfree_skb(skb);
  2137. + return 0;
  2138. +
  2139. +short_packet:
  2140. + LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
  2141. + proto == IPPROTO_UDPLITE ? "-Lite" : "",
  2142. + &saddr,
  2143. + ntohs(uh->source),
  2144. + ulen,
  2145. + skb->len,
  2146. + &daddr,
  2147. + ntohs(uh->dest));
  2148. + goto drop;
  2149. +
  2150. +csum_error:
  2151. + /*
  2152. + * RFC1122: OK. Discards the bad packet silently (as far as
  2153. + * the network is concerned, anyway) as per 4.1.3.4 (MUST).
  2154. + */
  2155. + LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
  2156. + proto == IPPROTO_UDPLITE ? "-Lite" : "",
  2157. + &saddr,
  2158. + ntohs(uh->source),
  2159. + &daddr,
  2160. + ntohs(uh->dest),
  2161. + ulen);
  2162. +drop:
  2163. + UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
  2164. + kfree_skb(skb);
  2165. + return 0;
  2166. +}
  2167. +
  2168. +int udp_rcv(struct sk_buff *skb)
  2169. +{
  2170. + return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
  2171. +}
  2172. +
  2173. +void udp_destroy_sock(struct sock *sk)
  2174. +{
  2175. + lock_sock(sk);
  2176. + udp_flush_pending_frames(sk);
  2177. + release_sock(sk);
  2178. +}
  2179. +
  2180. +/*
  2181. + * Socket option code for UDP
  2182. + */
  2183. +int udp_lib_setsockopt(struct sock *sk, int level, int optname,
  2184. + char __user *optval, int optlen,
  2185. + int (*push_pending_frames)(struct sock *))
  2186. +{
  2187. + struct udp_sock *up = udp_sk(sk);
  2188. + int val;
  2189. + int err = 0;
  2190. + int is_udplite = IS_UDPLITE(sk);
  2191. +
  2192. + if (optlen<sizeof(int))
  2193. + return -EINVAL;
  2194. +
  2195. + if (get_user(val, (int __user *)optval))
  2196. + return -EFAULT;
  2197. +
  2198. + switch (optname) {
  2199. + case UDP_CORK:
  2200. + if (val != 0) {
  2201. + up->corkflag = 1;
  2202. + } else {
  2203. + up->corkflag = 0;
  2204. + lock_sock(sk);
  2205. + (*push_pending_frames)(sk);
  2206. + release_sock(sk);
  2207. + }
  2208. + break;
  2209. +
  2210. + case UDP_ENCAP:
  2211. + switch (val) {
  2212. + case 0:
  2213. + case UDP_ENCAP_ESPINUDP:
  2214. + case UDP_ENCAP_ESPINUDP_NON_IKE:
  2215. + up->encap_rcv = xfrm4_udp_encap_rcv;
  2216. + /* FALLTHROUGH */
  2217. + case UDP_ENCAP_L2TPINUDP:
  2218. + up->encap_type = val;
  2219. + break;
  2220. + default:
  2221. + err = -ENOPROTOOPT;
  2222. + break;
  2223. + }
  2224. + break;
  2225. +
  2226. + /*
  2227. + * UDP-Lite's partial checksum coverage (RFC 3828).
  2228. + */
  2229. + /* The sender sets actual checksum coverage length via this option.
  2230. + * The case coverage > packet length is handled by send module. */
  2231. + case UDPLITE_SEND_CSCOV:
  2232. + if (!is_udplite) /* Disable the option on UDP sockets */
  2233. + return -ENOPROTOOPT;
  2234. + if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
  2235. + val = 8;
  2236. + else if (val > USHORT_MAX)
  2237. + val = USHORT_MAX;
  2238. + up->pcslen = val;
  2239. + up->pcflag |= UDPLITE_SEND_CC;
  2240. + break;
  2241. +
  2242. + /* The receiver specifies a minimum checksum coverage value. To make
  2243. + * sense, this should be set to at least 8 (as done below). If zero is
  2244. + * used, this again means full checksum coverage. */
  2245. + case UDPLITE_RECV_CSCOV:
  2246. + if (!is_udplite) /* Disable the option on UDP sockets */
  2247. + return -ENOPROTOOPT;
  2248. + if (val != 0 && val < 8) /* Avoid silly minimal values. */
  2249. + val = 8;
  2250. + else if (val > USHORT_MAX)
  2251. + val = USHORT_MAX;
  2252. + up->pcrlen = val;
  2253. + up->pcflag |= UDPLITE_RECV_CC;
  2254. + break;
  2255. +
  2256. + default:
  2257. + err = -ENOPROTOOPT;
  2258. + break;
  2259. + }
  2260. +
  2261. + return err;
  2262. +}
  2263. +
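The UDP-Lite coverage options handled above are driven from user space as follows; a minimal sketch (the fallback #defines mirror the kernel ABI values and are only present in case the libc headers lack them; error handling omitted):

        #include <netinet/in.h>
        #include <sys/socket.h>

        #ifndef IPPROTO_UDPLITE
        #define IPPROTO_UDPLITE    136
        #endif
        #ifndef SOL_UDPLITE
        #define SOL_UDPLITE        136
        #endif
        #ifndef UDPLITE_SEND_CSCOV
        #define UDPLITE_SEND_CSCOV 10
        #endif
        #ifndef UDPLITE_RECV_CSCOV
        #define UDPLITE_RECV_CSCOV 11
        #endif

        int open_udplite(void)
        {
                int cov = 20;   /* checksum only the first 20 bytes */
                int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

                setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
                setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
                return fd;
        }

As the code above shows, non-zero values below 8 are rounded up to 8 by the kernel, and both options are rejected with ENOPROTOOPT on plain UDP sockets.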
  2264. +int udp_setsockopt(struct sock *sk, int level, int optname,
  2265. + char __user *optval, int optlen)
  2266. +{
  2267. + if (level == SOL_UDP || level == SOL_UDPLITE)
  2268. + return udp_lib_setsockopt(sk, level, optname, optval, optlen,
  2269. + udp_push_pending_frames);
  2270. + return ip_setsockopt(sk, level, optname, optval, optlen);
  2271. +}
  2272. +
  2273. +#ifdef CONFIG_COMPAT
  2274. +int compat_udp_setsockopt(struct sock *sk, int level, int optname,
  2275. + char __user *optval, int optlen)
  2276. +{
  2277. + if (level == SOL_UDP || level == SOL_UDPLITE)
  2278. + return udp_lib_setsockopt(sk, level, optname, optval, optlen,
  2279. + udp_push_pending_frames);
  2280. + return compat_ip_setsockopt(sk, level, optname, optval, optlen);
  2281. +}
  2282. +#endif
  2283. +
  2284. +int udp_lib_getsockopt(struct sock *sk, int level, int optname,
  2285. + char __user *optval, int __user *optlen)
  2286. +{
  2287. + struct udp_sock *up = udp_sk(sk);
  2288. + int val, len;
  2289. +
  2290. + if (get_user(len,optlen))
  2291. + return -EFAULT;
  2292. +
  2293. + len = min_t(unsigned int, len, sizeof(int));
  2294. +
  2295. + if (len < 0)
  2296. + return -EINVAL;
  2297. +
  2298. + switch (optname) {
  2299. + case UDP_CORK:
  2300. + val = up->corkflag;
  2301. + break;
  2302. +
  2303. + case UDP_ENCAP:
  2304. + val = up->encap_type;
  2305. + break;
  2306. +
  2307. + /* The following two cannot be changed on UDP sockets; the return is
  2308. + * always 0 (which corresponds to the full checksum coverage of UDP). */
  2309. + case UDPLITE_SEND_CSCOV:
  2310. + val = up->pcslen;
  2311. + break;
  2312. +
  2313. + case UDPLITE_RECV_CSCOV:
  2314. + val = up->pcrlen;
  2315. + break;
  2316. +
  2317. + default:
  2318. + return -ENOPROTOOPT;
  2319. + }
  2320. +
  2321. + if (put_user(len, optlen))
  2322. + return -EFAULT;
  2323. + if (copy_to_user(optval, &val,len))
  2324. + return -EFAULT;
  2325. + return 0;
  2326. +}
  2327. +
  2328. +int udp_getsockopt(struct sock *sk, int level, int optname,
  2329. + char __user *optval, int __user *optlen)
  2330. +{
  2331. + if (level == SOL_UDP || level == SOL_UDPLITE)
  2332. + return udp_lib_getsockopt(sk, level, optname, optval, optlen);
  2333. + return ip_getsockopt(sk, level, optname, optval, optlen);
  2334. +}
  2335. +
  2336. +#ifdef CONFIG_COMPAT
  2337. +int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  2338. + char __user *optval, int __user *optlen)
  2339. +{
  2340. + if (level == SOL_UDP || level == SOL_UDPLITE)
  2341. + return udp_lib_getsockopt(sk, level, optname, optval, optlen);
  2342. + return compat_ip_getsockopt(sk, level, optname, optval, optlen);
  2343. +}
  2344. +#endif
  2345. +/**
  2346. + * udp_poll - wait for a UDP event.
  2347. + * @file - file struct
  2348. + * @sock - socket
  2349. + * @wait - poll table
  2350. + *
  2351. + * This is the same as datagram poll, except for the special case of
  2352. + * blocking sockets. If an application is using a blocking fd
  2353. + * and a packet with a checksum error is in the queue,
  2354. + * select can indicate that data is available, but the subsequent
  2355. + * read will block. Add special case code
  2356. + * to work around these arguably broken applications.
  2357. + */
  2358. +unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
  2359. +{
  2360. + unsigned int mask = datagram_poll(file, sock, wait);
  2361. + struct sock *sk = sock->sk;
  2362. + int is_lite = IS_UDPLITE(sk);
  2363. +
  2364. + /* Check for false positives due to checksum errors */
  2365. + if ( (mask & POLLRDNORM) &&
  2366. + !(file->f_flags & O_NONBLOCK) &&
  2367. + !(sk->sk_shutdown & RCV_SHUTDOWN)){
  2368. + struct sk_buff_head *rcvq = &sk->sk_receive_queue;
  2369. + struct sk_buff *skb;
  2370. +
  2371. + spin_lock_bh(&rcvq->lock);
  2372. + while ((skb = skb_peek(rcvq)) != NULL &&
  2373. + udp_lib_checksum_complete(skb)) {
  2374. + UDP_INC_STATS_BH(sock_net(sk),
  2375. + UDP_MIB_INERRORS, is_lite);
  2376. + __skb_unlink(skb, rcvq);
  2377. + kfree_skb(skb);
  2378. + }
  2379. + spin_unlock_bh(&rcvq->lock);
  2380. +
  2381. + /* nothing to see, move along */
  2382. + if (skb == NULL)
  2383. + mask &= ~(POLLIN | POLLRDNORM);
  2384. + }
  2385. +
  2386. + return mask;
  2387. +
  2388. +}
  2389. +
  2390. +struct proto udp_prot = {
  2391. + .name = "UDP",
  2392. + .owner = THIS_MODULE,
  2393. + .close = udp_lib_close,
  2394. + .connect = ip4_datagram_connect,
  2395. + .disconnect = udp_disconnect,
  2396. + .ioctl = udp_ioctl,
  2397. + .destroy = udp_destroy_sock,
  2398. + .setsockopt = udp_setsockopt,
  2399. + .getsockopt = udp_getsockopt,
  2400. + .sendmsg = udp_sendmsg,
  2401. + .recvmsg = udp_recvmsg,
  2402. + .sendpage = udp_sendpage,
  2403. + .backlog_rcv = __udp_queue_rcv_skb,
  2404. + .hash = udp_lib_hash,
  2405. + .unhash = udp_lib_unhash,
  2406. + .get_port = udp_v4_get_port,
  2407. + .memory_allocated = &udp_memory_allocated,
  2408. + .sysctl_mem = sysctl_udp_mem,
  2409. + .sysctl_wmem = &sysctl_udp_wmem_min,
  2410. + .sysctl_rmem = &sysctl_udp_rmem_min,
  2411. + .obj_size = sizeof(struct udp_sock),
  2412. + .slab_flags = SLAB_DESTROY_BY_RCU,
  2413. + .h.udp_table = &udp_table,
  2414. +#ifdef CONFIG_COMPAT
  2415. + .compat_setsockopt = compat_udp_setsockopt,
  2416. + .compat_getsockopt = compat_udp_getsockopt,
  2417. +#endif
  2418. +};
  2419. +
  2420. +/* ------------------------------------------------------------------------ */
  2421. +#ifdef CONFIG_PROC_FS
  2422. +
  2423. +static struct sock *udp_get_first(struct seq_file *seq, int start)
  2424. +{
  2425. + struct sock *sk;
  2426. + struct udp_iter_state *state = seq->private;
  2427. + struct net *net = seq_file_net(seq);
  2428. +
  2429. + for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
  2430. + struct hlist_nulls_node *node;
  2431. + struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
  2432. + spin_lock_bh(&hslot->lock);
  2433. + sk_nulls_for_each(sk, node, &hslot->head) {
  2434. + if (!net_eq(sock_net(sk), net))
  2435. + continue;
  2436. + if (sk->sk_family == state->family)
  2437. + goto found;
  2438. + }
  2439. + spin_unlock_bh(&hslot->lock);
  2440. + }
  2441. + sk = NULL;
  2442. +found:
  2443. + return sk;
  2444. +}
  2445. +
  2446. +static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
  2447. +{
  2448. + struct udp_iter_state *state = seq->private;
  2449. + struct net *net = seq_file_net(seq);
  2450. +
  2451. + do {
  2452. + sk = sk_nulls_next(sk);
  2453. + } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
  2454. +
  2455. + if (!sk) {
  2456. + if (state->bucket < UDP_HTABLE_SIZE)
  2457. + spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
  2458. + return udp_get_first(seq, state->bucket + 1);
  2459. + }
  2460. + return sk;
  2461. +}
  2462. +
  2463. +static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
  2464. +{
  2465. + struct sock *sk = udp_get_first(seq, 0);
  2466. +
  2467. + if (sk)
  2468. + while (pos && (sk = udp_get_next(seq, sk)) != NULL)
  2469. + --pos;
  2470. + return pos ? NULL : sk;
  2471. +}
  2472. +
  2473. +static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
  2474. +{
  2475. + struct udp_iter_state *state = seq->private;
  2476. + state->bucket = UDP_HTABLE_SIZE;
  2477. +
  2478. + return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
  2479. +}
  2480. +
  2481. +static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  2482. +{
  2483. + struct sock *sk;
  2484. +
  2485. + if (v == SEQ_START_TOKEN)
  2486. + sk = udp_get_idx(seq, 0);
  2487. + else
  2488. + sk = udp_get_next(seq, v);
  2489. +
  2490. + ++*pos;
  2491. + return sk;
  2492. +}
  2493. +
  2494. +static void udp_seq_stop(struct seq_file *seq, void *v)
  2495. +{
  2496. + struct udp_iter_state *state = seq->private;
  2497. +
  2498. + if (state->bucket < UDP_HTABLE_SIZE)
  2499. + spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
  2500. +}
  2501. +
  2502. +static int udp_seq_open(struct inode *inode, struct file *file)
  2503. +{
  2504. + struct udp_seq_afinfo *afinfo = PDE(inode)->data;
  2505. + struct udp_iter_state *s;
  2506. + int err;
  2507. +
  2508. + err = seq_open_net(inode, file, &afinfo->seq_ops,
  2509. + sizeof(struct udp_iter_state));
  2510. + if (err < 0)
  2511. + return err;
  2512. +
  2513. + s = ((struct seq_file *)file->private_data)->private;
  2514. + s->family = afinfo->family;
  2515. + s->udp_table = afinfo->udp_table;
  2516. + return err;
  2517. +}
  2518. +
  2519. +/* ------------------------------------------------------------------------ */
  2520. +int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
  2521. +{
  2522. + struct proc_dir_entry *p;
  2523. + int rc = 0;
  2524. +
  2525. + afinfo->seq_fops.open = udp_seq_open;
  2526. + afinfo->seq_fops.read = seq_read;
  2527. + afinfo->seq_fops.llseek = seq_lseek;
  2528. + afinfo->seq_fops.release = seq_release_net;
  2529. +
  2530. + afinfo->seq_ops.start = udp_seq_start;
  2531. + afinfo->seq_ops.next = udp_seq_next;
  2532. + afinfo->seq_ops.stop = udp_seq_stop;
  2533. +
  2534. + p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
  2535. + &afinfo->seq_fops, afinfo);
  2536. + if (!p)
  2537. + rc = -ENOMEM;
  2538. + return rc;
  2539. +}
  2540. +
  2541. +void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
  2542. +{
  2543. + proc_net_remove(net, afinfo->name);
  2544. +}
  2545. +
  2546. +/* ------------------------------------------------------------------------ */
  2547. +static void udp4_format_sock(struct sock *sp, struct seq_file *f,
  2548. + int bucket, int *len)
  2549. +{
  2550. + struct inet_sock *inet = inet_sk(sp);
  2551. + __be32 dest = inet->daddr;
  2552. + __be32 src = inet->rcv_saddr;
  2553. + __u16 destp = ntohs(inet->dport);
  2554. + __u16 srcp = ntohs(inet->sport);
  2555. +
  2556. + seq_printf(f, "%4d: %08X:%04X %08X:%04X"
  2557. + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
  2558. + bucket, src, srcp, dest, destp, sp->sk_state,
  2559. + atomic_read(&sp->sk_wmem_alloc),
  2560. + atomic_read(&sp->sk_rmem_alloc),
  2561. + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
  2562. + atomic_read(&sp->sk_refcnt), sp,
  2563. + atomic_read(&sp->sk_drops), len);
  2564. +}
  2565. +
  2566. +int udp4_seq_show(struct seq_file *seq, void *v)
  2567. +{
  2568. + if (v == SEQ_START_TOKEN)
  2569. + seq_printf(seq, "%-127s\n",
  2570. + " sl local_address rem_address st tx_queue "
  2571. + "rx_queue tr tm->when retrnsmt uid timeout "
  2572. + "inode ref pointer drops");
  2573. + else {
  2574. + struct udp_iter_state *state = seq->private;
  2575. + int len;
  2576. +
  2577. + udp4_format_sock(v, seq, state->bucket, &len);
  2578. + seq_printf(seq, "%*s\n", 127 - len ,"");
  2579. + }
  2580. + return 0;
  2581. +}
  2582. +
  2583. +/* ------------------------------------------------------------------------ */
  2584. +static struct udp_seq_afinfo udp4_seq_afinfo = {
  2585. + .name = "udp",
  2586. + .family = AF_INET,
  2587. + .udp_table = &udp_table,
  2588. + .seq_fops = {
  2589. + .owner = THIS_MODULE,
  2590. + },
  2591. + .seq_ops = {
  2592. + .show = udp4_seq_show,
  2593. + },
  2594. +};
  2595. +
  2596. +static int udp4_proc_init_net(struct net *net)
  2597. +{
  2598. + return udp_proc_register(net, &udp4_seq_afinfo);
  2599. +}
  2600. +
  2601. +static void udp4_proc_exit_net(struct net *net)
  2602. +{
  2603. + udp_proc_unregister(net, &udp4_seq_afinfo);
  2604. +}
  2605. +
  2606. +static struct pernet_operations udp4_net_ops = {
  2607. + .init = udp4_proc_init_net,
  2608. + .exit = udp4_proc_exit_net,
  2609. +};
  2610. +
  2611. +int __init udp4_proc_init(void)
  2612. +{
  2613. + return register_pernet_subsys(&udp4_net_ops);
  2614. +}
  2615. +
  2616. +void udp4_proc_exit(void)
  2617. +{
  2618. + unregister_pernet_subsys(&udp4_net_ops);
  2619. +}
  2620. +#endif /* CONFIG_PROC_FS */
  2621. +
  2622. +void __init udp_table_init(struct udp_table *table)
  2623. +{
  2624. + int i;
  2625. +
  2626. + for (i = 0; i < UDP_HTABLE_SIZE; i++) {
  2627. + INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
  2628. + spin_lock_init(&table->hash[i].lock);
  2629. + }
  2630. +}
  2631. +
  2632. +void __init udp_init(void)
  2633. +{
  2634. + unsigned long nr_pages, limit;
  2635. +
  2636. + udp_table_init(&udp_table);
  2637. + /* Set the pressure threshold up by the same strategy of TCP. It is a
  2638. + * fraction of global memory that is up to 1/2 at 256 MB, decreasing
  2639. + * toward zero with the amount of memory, with a floor of 128 pages.
  2640. + */
  2641. + nr_pages = totalram_pages - totalhigh_pages;
  2642. + limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
  2643. + limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
  2644. + limit = max(limit, 128UL);
  2645. + sysctl_udp_mem[0] = limit / 4 * 3;
  2646. + sysctl_udp_mem[1] = limit;
  2647. + sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
  2648. +
  2649. + sysctl_udp_rmem_min = SK_MEM_QUANTUM;
  2650. + sysctl_udp_wmem_min = SK_MEM_QUANTUM;
  2651. +}
  2652. +
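As a worked example of the sizing arithmetic above: for an assumed machine with 1 GiB of low memory and 4 KiB pages (PAGE_SHIFT == 12), the formula yields a pressure threshold of 131072 pages, i.e. 512 MiB. A stand-alone sketch that reproduces the calculation (not kernel code):

        #include <stdio.h>

        int main(void)
        {
                unsigned long nr_pages = 1UL << 18;   /* 1 GiB / 4 KiB       */
                unsigned long limit, udp_mem[3];
                int page_shift = 12;

                limit = nr_pages < (1UL << (28 - page_shift))
                        ? nr_pages : (1UL << (28 - page_shift));
                limit >>= 20 - page_shift;
                limit = (limit * (nr_pages >> (20 - page_shift))) >> (page_shift - 11);
                if (limit < 128)
                        limit = 128;

                udp_mem[0] = limit / 4 * 3;           /* 98304 pages          */
                udp_mem[1] = limit;                   /* 131072 pages, 512 MiB */
                udp_mem[2] = udp_mem[0] * 2;          /* 196608 pages         */

                printf("udp_mem = { %lu, %lu, %lu } pages\n",
                       udp_mem[0], udp_mem[1], udp_mem[2]);
                return 0;
        }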
  2653. +EXPORT_SYMBOL(udp_disconnect);
  2654. +EXPORT_SYMBOL(udp_ioctl);
  2655. +EXPORT_SYMBOL(udp_prot);
  2656. +EXPORT_SYMBOL(udp_sendmsg);
  2657. +EXPORT_SYMBOL(udp_lib_getsockopt);
  2658. +EXPORT_SYMBOL(udp_lib_setsockopt);
  2659. +EXPORT_SYMBOL(udp_poll);
  2660. +EXPORT_SYMBOL(udp_lib_get_port);
  2661. +
  2662. +#ifdef CONFIG_PROC_FS
  2663. +EXPORT_SYMBOL(udp_proc_register);
  2664. +EXPORT_SYMBOL(udp_proc_unregister);
  2665. +#endif