#ifndef S390_DEVICE_H
#define S390_DEVICE_H

#include <asm/ccwdev.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include "io_sch.h"

/*
 * states of the device state machine
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_PGID,
	DEV_STATE_SENSE_ID,
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,
	DEV_STATE_ONLINE,
	DEV_STATE_W4SENSE,
	DEV_STATE_DISBAND_PGID,
	DEV_STATE_BOXED,
	/* states in which we wait for I/O completion before taking further action */
	DEV_STATE_TIMEOUT_KILL,
	DEV_STATE_QUIESCE,
	/* special states for devices that have become not operational */
	DEV_STATE_DISCONNECTED,
	DEV_STATE_DISCONNECTED_SENSE_ID,
	DEV_STATE_CMFCHANGE,
	DEV_STATE_CMFUPDATE,
	DEV_STATE_STEAL_LOCK,
	/* last element! */
	NR_DEV_STATES
};

/*
 * asynchronous events of the device state machine
 */
enum dev_event {
	DEV_EVENT_NOTOPER,
	DEV_EVENT_INTERRUPT,
	DEV_EVENT_TIMEOUT,
	DEV_EVENT_VERIFY,
	/* last element! */
	NR_DEV_EVENTS
};

struct ccw_device;

/*
 * actions called through the jump table
 */
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];

static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
	int state = cdev->private->state;

	if (dev_event == DEV_EVENT_INTERRUPT) {
		if (state == DEV_STATE_ONLINE)
			kstat_cpu(smp_processor_id()).
				irqs[cdev->private->int_class]++;
		else if (state != DEV_STATE_CMFCHANGE &&
			 state != DEV_STATE_CMFUPDATE)
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	}
	dev_jumptable[state][dev_event](cdev, dev_event);
}
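
/*
 * Illustrative sketch (not part of the original header): callers such as
 * the subchannel interrupt handler drive the state machine by feeding an
 * event into dev_fsm_event(), which dispatches through
 * dev_jumptable[current state][event].  The function name below is
 * hypothetical and the block is not compiled; it only shows the intended
 * use of the declarations above.
 */
#if 0
static void example_handle_io_interrupt(struct ccw_device *cdev)
{
	/* Routes to the handler registered for the device's current state. */
	dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
#endif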

/*
 * Returns 1 if the device state is final.
 */
static inline int
dev_fsm_final_state(struct ccw_device *cdev)
{
	return (cdev->private->state == DEV_STATE_NOT_OPER ||
		cdev->private->state == DEV_STATE_OFFLINE ||
		cdev->private->state == DEV_STATE_ONLINE ||
		cdev->private->state == DEV_STATE_BOXED);
}
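
/*
 * Illustrative sketch (assumption, not part of the original header): code
 * that brings a device online or offline typically waits until the state
 * machine settles in one of the final states above.  The function name is
 * hypothetical; wait_q is assumed to be the wait queue head embedded in
 * struct ccw_device_private (see io_sch.h).  The block is not compiled.
 */
#if 0
static void example_wait_for_final_state(struct ccw_device *cdev)
{
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
}
#endif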

extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);

void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);

int ccw_device_cancel_halt_clear(struct ccw_device *);

int ccw_device_is_orphan(struct ccw_device *);

void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);

/* Function prototypes for device status and basic sense handling. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);

/* Function prototype for internal request handling. */
int lpm_adjust(int lpm, int mask);
void ccw_request_start(struct ccw_device *);
int ccw_request_cancel(struct ccw_device *cdev);
void ccw_request_handler(struct ccw_device *cdev);
void ccw_request_timeout(struct ccw_device *cdev);
void ccw_request_notoper(struct ccw_device *cdev);

/* Function prototypes for sense ID handling. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_done(struct ccw_device *, int);

/* Function prototypes for path grouping handling. */
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_done(struct ccw_device *, int);

void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_done(struct ccw_device *, int);

void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
void ccw_device_stlck_done(struct ccw_device *, void *, int);

int ccw_device_call_handler(struct ccw_device *);

int ccw_device_stlck(struct ccw_device *);

/* Helper functions for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);

/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);

/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif