#
# Block device driver configuration
#

menuconfig MD
	bool "Multiple devices driver support (RAID and LVM)"
	depends on BLOCK
	help
	  Support multiple physical spindles through a single logical device.
	  Required for RAID and logical volume management.

if MD

config BLK_DEV_MD
	tristate "RAID support"
	---help---
	  This driver lets you combine several hard disk partitions into one
	  logical block device. This can be used to simply append one
	  partition to another one or to combine several redundant hard disks
	  into a RAID1/4/5 device so as to provide protection against hard
	  disk failures. This is called "Software RAID" since the combining of
	  the partitions is done by the kernel. "Hardware RAID" means that the
	  combining is done by a dedicated controller; if you have such a
	  controller, you do not need to say Y here.

	  More information about Software RAID on Linux is contained in the
	  Software RAID mini-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>. There you will also learn
	  where to get the supporting user space utilities raidtools.

	  If unsure, say N.
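
# An illustrative .config fragment (a sketch using only symbols defined in
# this file) that builds the MD core into the kernel and the common RAID
# personalities as modules:
#
#   CONFIG_MD=y
#   CONFIG_BLK_DEV_MD=y
#   CONFIG_MD_RAID0=m
#   CONFIG_MD_RAID1=m
#   CONFIG_MD_RAID456=m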

config MD_AUTODETECT
	bool "Autodetect RAID arrays during kernel boot"
	depends on BLK_DEV_MD=y
	default y
	---help---
	  If you say Y here, then the kernel will try to autodetect RAID
	  arrays as part of its boot process.

	  If you don't use RAID and say Y, this autodetection can add
	  several seconds to the boot time due to the various
	  synchronisation steps it involves.

	  If unsure, say Y.
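
# Note: when BLK_DEV_MD=y, boot-time autodetection can also be suppressed
# at runtime by appending the following parameter to the kernel command
# line (see Documentation/md.txt):
#
#   raid=noautodetect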

config MD_LINEAR
	tristate "Linear (append) mode"
	depends on BLK_DEV_MD
	---help---
	  If you say Y here, then your multiple devices driver will be able to
	  use the so-called linear mode, i.e. it will combine the hard disk
	  partitions by simply appending one to the other.

	  To compile this as a module, choose M here: the module
	  will be called linear.

	  If unsure, say Y.
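
# A worked example (illustrative sizes): appending a 300 MB partition to a
# 500 MB partition in linear mode yields one 800 MB device; data simply
# fills the first partition before spilling over onto the second.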

config MD_RAID0
	tristate "RAID-0 (striping) mode"
	depends on BLK_DEV_MD
	---help---
	  If you say Y here, then your multiple devices driver will be able to
	  use the so-called raid0 mode, i.e. it will combine the hard disk
	  partitions into one logical device in such a fashion as to fill them
	  up evenly, one chunk here and one chunk there. This will increase
	  the throughput rate if the partitions reside on distinct disks.

	  Information about Software RAID on Linux is contained in the
	  Software-RAID mini-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>. There you will also
	  learn where to get the supporting user space utilities raidtools.

	  To compile this as a module, choose M here: the module
	  will be called raid0.

	  If unsure, say Y.
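
# A sketch of the striping layout (assuming a 64 KiB chunk size and two
# member disks): consecutive 64 KiB chunks are dealt out round-robin, so
# sequential I/O is spread across both spindles.
#
#   logical chunk:  0  1  2  3  4  5 ...
#   member disk:    A  B  A  B  A  B ...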

config MD_RAID1
	tristate "RAID-1 (mirroring) mode"
	depends on BLK_DEV_MD
	---help---
	  A RAID-1 set consists of several disk drives which are exact copies
	  of each other.  In the event of a mirror failure, the RAID driver
	  will continue to use the operational mirrors in the set, providing
	  an error free MD (multiple device) to the higher levels of the
	  kernel.  In a set with N drives, the available space is the capacity
	  of a single drive, and the set protects against a failure of (N - 1)
	  drives.

	  Information about Software RAID on Linux is contained in the
	  Software-RAID mini-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.  There you will also
	  learn where to get the supporting user space utilities raidtools.

	  If you want to use such a RAID-1 set, say Y.  To compile this code
	  as a module, choose M here: the module will be called raid1.

	  If unsure, say Y.
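
# A worked capacity example (illustrative numbers): a RAID-1 set of three
# 500 MB drives exposes 500 MB of usable space, keeps three identical
# copies of every block, and remains usable as long as one drive survives,
# i.e. it tolerates the loss of N - 1 = 2 drives.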

config MD_RAID10
	tristate "RAID-10 (mirrored striping) mode"
	depends on BLK_DEV_MD
	---help---
	  RAID-10 provides a combination of striping (RAID-0) and
	  mirroring (RAID-1) with easier configuration and more flexible
	  layout.
	  Unlike RAID-0, but like RAID-1, RAID-10 requires all devices to
	  be the same size (or at least, only as much space as is available
	  on the smallest device will be used).
	  RAID-10 provides a variety of layouts that provide different levels
	  of redundancy and performance.

	  RAID-10 requires mdadm-1.7.0 or later, available at:

	  ftp://ftp.kernel.org/pub/linux/utils/raid/mdadm/

	  If unsure, say Y.
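
# A worked example for the common "near" layout with two copies of each
# block (illustrative numbers): four 500 MB drives yield roughly
# 4 * 500 / 2 = 1000 MB of usable space, since each block is stored on
# two different drives.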

config MD_RAID456
	tristate "RAID-4/RAID-5/RAID-6 mode"
	depends on BLK_DEV_MD
	select RAID6_PQ
	select ASYNC_MEMCPY
	select ASYNC_XOR
	select ASYNC_PQ
	select ASYNC_RAID6_RECOV
	---help---
	  A RAID-5 set of N drives with a capacity of C MB per drive provides
	  the capacity of C * (N - 1) MB, and protects against a failure
	  of a single drive. For a given sector (row) number, (N - 1) drives
	  contain data sectors, and one drive contains the parity protection.
	  For a RAID-4 set, the parity blocks are present on a single drive,
	  while a RAID-5 set distributes the parity across the drives in one
	  of the available parity distribution methods.

	  A RAID-6 set of N drives with a capacity of C MB per drive
	  provides the capacity of C * (N - 2) MB, and protects
	  against a failure of any two drives. For a given sector
	  (row) number, (N - 2) drives contain data sectors, and two
	  drives contain two independent redundancy syndromes.  Like
	  RAID-5, RAID-6 distributes the syndromes across the drives
	  in one of the available parity distribution methods.

	  Information about Software RAID on Linux is contained in the
	  Software-RAID mini-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>. There you will also
	  learn where to get the supporting user space utilities raidtools.

	  If you want to use such a RAID-4/RAID-5/RAID-6 set, say Y.  To
	  compile this code as a module, choose M here: the module
	  will be called raid456.

	  If unsure, say Y.
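
# Worked capacity arithmetic from the formulas above (illustrative
# numbers): with N = 4 drives of C = 1000 MB each,
#
#   RAID-5: C * (N - 1) = 1000 * 3 = 3000 MB, survives any single failure
#   RAID-6: C * (N - 2) = 1000 * 2 = 2000 MB, survives any two failures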

config MULTICORE_RAID456
	bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
	depends on MD_RAID456
	depends on SMP
	depends on EXPERIMENTAL
	---help---
	  Enable the raid456 module to dispatch per-stripe raid operations to a
	  thread pool.

	  If unsure, say N.

config MD_MULTIPATH
	tristate "Multipath I/O support"
	depends on BLK_DEV_MD
	help
	  MD_MULTIPATH provides a simple multi-path personality for use
	  in the MD framework.  It is not under active development.  New
	  projects should consider using DM_MULTIPATH which has more
	  features and more testing.

	  If unsure, say N.

config MD_FAULTY
	tristate "Faulty test module for MD"
	depends on BLK_DEV_MD
	help
	  The "faulty" module allows for a block device that occasionally returns
	  read or write errors.  It is useful for testing.

	  If unsure, say N.

config BLK_DEV_DM
	tristate "Device mapper support"
	---help---
	  Device-mapper is a low level volume manager.  It works by allowing
	  people to specify mappings for ranges of logical sectors.  Various
	  mapping types are available, in addition people may write their own
	  modules containing custom mappings if they wish.

	  Higher level volume managers such as LVM2 use this driver.

	  To compile this as a module, choose M here: the module will be
	  called dm-mod.

	  If unsure, say N.
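
# A minimal illustration of such a mapping (device name is a placeholder):
# each line of a device-mapper table has the form
# "<start_sector> <num_sectors> <target> <args>", so a straight linear
# mapping of the first 409600 sectors of /dev/sdb could be created with
#
#   dmsetup create example --table '0 409600 linear /dev/sdb 0'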

config DM_DEBUG
	boolean "Device mapper debugging support"
	depends on BLK_DEV_DM
	---help---
	  Enable this for messages that may help debug device-mapper problems.

	  If unsure, say N.

config DM_BUFIO
       tristate
       depends on BLK_DEV_DM && EXPERIMENTAL
       ---help---
	 This interface allows you to do buffered I/O on a device and acts
	 as a cache, holding recently-read blocks in memory and performing
	 delayed writes.

source "drivers/md/persistent-data/Kconfig"

config DM_CRYPT
	tristate "Crypt target support"
	depends on BLK_DEV_DM
	select CRYPTO
	select CRYPTO_CBC
	---help---
	  This device-mapper target allows you to create a device that
	  transparently encrypts the data on it. You'll need to activate
	  the ciphers you're going to use in the cryptoapi configuration.

	  Information on how to use dm-crypt can be found at

	  <http://www.saout.de/misc/dm-crypt/>

	  To compile this code as a module, choose M here: the module will
	  be called dm-crypt.

	  If unsure, say N.
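
# A hedged sketch of a crypt table line (placeholder device and key): the
# target arguments are "<cipher> <key> <iv_offset> <device> <offset>",
# e.g.
#
#   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits of key> 0 /dev/sdb 0
#
# In practice the key is normally handled by a tool such as cryptsetup
# rather than written out by hand.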

config DM_SNAPSHOT
       tristate "Snapshot target"
       depends on BLK_DEV_DM
       ---help---
         Allow volume managers to take writable snapshots of a device.

config DM_THIN_PROVISIONING
       tristate "Thin provisioning target (EXPERIMENTAL)"
       depends on BLK_DEV_DM && EXPERIMENTAL
       select DM_PERSISTENT_DATA
       ---help---
         Provides thin provisioning and snapshots that share a data store.

config DM_DEBUG_BLOCK_STACK_TRACING
	boolean "Keep stack trace of thin provisioning block lock holders"
	depends on STACKTRACE_SUPPORT && DM_THIN_PROVISIONING
	select STACKTRACE
	---help---
	  Enable this for messages that may help debug problems with the
	  block manager locking used by thin provisioning.

	  If unsure, say N.

config DM_DEBUG_SPACE_MAPS
	boolean "Extra validation for thin provisioning space maps"
	depends on DM_THIN_PROVISIONING
	---help---
	  Enable this for messages that may help debug problems with the
	  space maps used by thin provisioning.

	  If unsure, say N.

config DM_MIRROR
       tristate "Mirror target"
       depends on BLK_DEV_DM
       ---help---
         Allow volume managers to mirror logical volumes, also
         needed for live data migration tools such as 'pvmove'.

config DM_RAID
       tristate "RAID 1/4/5/6 target (EXPERIMENTAL)"
       depends on BLK_DEV_DM && EXPERIMENTAL
       select MD_RAID1
       select MD_RAID456
       select BLK_DEV_MD
       ---help---
	 A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings.

	 A RAID-5 set of N drives with a capacity of C MB per drive provides
	 the capacity of C * (N - 1) MB, and protects against a failure
	 of a single drive. For a given sector (row) number, (N - 1) drives
	 contain data sectors, and one drive contains the parity protection.
	 For a RAID-4 set, the parity blocks are present on a single drive,
	 while a RAID-5 set distributes the parity across the drives in one
	 of the available parity distribution methods.

	 A RAID-6 set of N drives with a capacity of C MB per drive
	 provides the capacity of C * (N - 2) MB, and protects
	 against a failure of any two drives. For a given sector
	 (row) number, (N - 2) drives contain data sectors, and two
	 drives contain two independent redundancy syndromes.  Like
	 RAID-5, RAID-6 distributes the syndromes across the drives
	 in one of the available parity distribution methods.

config DM_LOG_USERSPACE
	tristate "Mirror userspace logging (EXPERIMENTAL)"
	depends on DM_MIRROR && EXPERIMENTAL && NET
	select CONNECTOR
	---help---
	  The userspace logging module provides a mechanism for
	  relaying the dm-dirty-log API to userspace.  Log designs
	  which are more suited to userspace implementation (e.g.
	  shared storage logs) or experimental logs can be implemented
	  by leveraging this framework.

config DM_ZERO
	tristate "Zero target"
	depends on BLK_DEV_DM
	---help---
	  A target that discards writes, and returns all zeroes for
	  reads.  Useful in some recovery situations.
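
# Example table line (the zero target takes no arguments): this maps a
# 1 GiB region (2097152 512-byte sectors) that reads as zeroes and
# discards writes:
#
#   0 2097152 zero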

config DM_MULTIPATH
	tristate "Multipath target"
	depends on BLK_DEV_DM
	# This awkward-looking dependency makes DM_MULTIPATH independent
	# of SCSI_DH when the latter isn't defined, but dependent on it
	# when it is; otherwise we would get a build error when SCSI_DH=m
	# and DM_MULTIPATH=y.
	depends on SCSI_DH || !SCSI_DH
	---help---
	  Allow volume managers to support multipath hardware.

config DM_MULTIPATH_QL
	tristate "I/O Path Selector based on the number of in-flight I/Os"
	depends on DM_MULTIPATH
	---help---
	  This path selector is a dynamic load balancer which selects
	  the path with the least number of in-flight I/Os.

	  If unsure, say N.

config DM_MULTIPATH_ST
	tristate "I/O Path Selector based on the service time"
	depends on DM_MULTIPATH
	---help---
	  This path selector is a dynamic load balancer which selects
	  the path expected to complete the incoming I/O in the shortest
	  time.

	  If unsure, say N.

config DM_DELAY
	tristate "I/O delaying target (EXPERIMENTAL)"
	depends on BLK_DEV_DM && EXPERIMENTAL
	---help---
	  A target that delays reads and/or writes and can send
	  them to different devices.  Useful for testing.

	  If unsure, say N.
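
# A hedged sketch of a delay table line (placeholder device; the arguments
# are "<device> <offset> <delay_ms>", optionally followed by a separate
# "<write_device> <write_offset> <write_delay_ms>" triple). For example,
# delaying reads by 20 ms and writes by 100 ms on the same device:
#
#   0 409600 delay /dev/sdb 0 20 /dev/sdb 0 100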

config DM_UEVENT
	bool "DM uevents (EXPERIMENTAL)"
	depends on BLK_DEV_DM && EXPERIMENTAL
	---help---
	  Generate udev events for DM events.

config DM_FLAKEY
       tristate "Flakey target (EXPERIMENTAL)"
       depends on BLK_DEV_DM && EXPERIMENTAL
       ---help---
         A target that intermittently fails I/O for debugging purposes.

endif # MD