-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathsel4_virt_drv.h
226 lines (177 loc) · 4.94 KB
/
sel4_virt_drv.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright 2022, 2023, 2024, Technology Innovation Institute
*
*/
#ifndef __SEL4_VIRT_DRV_H
#define __SEL4_VIRT_DRV_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include "sel4/sel4_virt.h"
#include <sel4/rpc.h>
#define SEL4_MEM_IOVA 0
#define SEL4_MEM_LOGICAL 1
#define SEL4_MEM_VIRTUAL 2
/* vm_server is the interface that implements the VMM create/destroy calls.
 * The vm-server is responsible for resource management. It is up to the
 * driver either to implement the resource management mechanisms itself or
 * to pass the calls on to the hypervisor. */
struct sel4_vm_server {
	/* Allocate and initialize a VMM instance for the given parameters. */
	struct sel4_vmm *(*create_vm)(struct sel4_vm_params params);
	/* Tear down a VMM previously returned by create_vm(). */
	int (*destroy_vm)(struct sel4_vmm *vmm);
	/* Driver-private context; opaque to this header. */
	void *private;
};
struct sel4_vmm;
/* One memory region belonging to a VMM. */
struct sel4_mem_map {
	int type;		/* SEL4_MEM_IOVA/SEL4_MEM_LOGICAL/SEL4_MEM_VIRTUAL */
	phys_addr_t paddr;	/* physical base address */
	void *addr;		/* kernel-side address; NOTE(review): meaning
				 * presumably depends on 'type' — confirm */
	resource_size_t size;	/* region length in bytes */
	struct sel4_vmm *vmm;	/* back-pointer; set by sel4_vmm_mem_map_set() */
};
/* Driver-supplied callbacks for a VMM instance. */
struct sel4_vmm_ops {
	/* irq handler; invoked via sel4_vm_call_irqhandler() with the VMM
	 * as context. */
	irqreturn_t (*upcall_irqhandler)(int irq, struct sel4_vmm *);
};
/* Use this to indicate that the VMM uses some other upcall mechanism,
* thus skipping irq_request/free. The driver should call
* sel4_vm_upcall_notify directly. */
#define SEL4_IRQ_NONE 0
/* Per-VMM state, created by a vm_server's create_vm() callback. */
struct sel4_vmm {
	int id;				/* VMM identifier (see sel4_notify_vmm_dying()) */
	int irq;			/* upcall IRQ line, or SEL4_IRQ_NONE */
	unsigned long irq_flags;	/* flags used when requesting 'irq' —
					 * NOTE(review): presumably IRQF_*; confirm */
	struct sel4_vmm_ops ops;	/* driver callbacks (upcall_irqhandler) */
	struct sel4_mem_map maps[NUM_SEL4_MEM_MAP];	/* memory regions */
	struct sel4_vm *vm;		/* owning VM */
	vso_rpc_t rpc;			/* RPC channel used by the sel4_* helpers below */
	vso_rpc_t user_rpc;		/* second RPC channel — NOTE(review):
					 * presumably for userspace; confirm */
};
/* Indicates whether ioeventfd processed the ioreq */
#define SEL4_IOEVENTFD_PROCESSED (1)
#define SEL4_IOEVENTFD_NONE (0)
/* A virtual machine as seen by the core driver. */
struct sel4_vm {
	struct list_head vm_list;	/* linkage in the global VM list */
	spinlock_t lock;		/* guards this struct; take via sel4_vm_lock() */
	refcount_t refcount;		/* lifetime reference count */
	wait_queue_head_t ioreq_wait;	/* waiters for I/O requests */
	struct list_head ioeventfds;	/* registered ioeventfds */
	struct list_head irqfds;	/* registered irqfds */
	struct sel4_vmm *vmm;		/* backing VMM; may be NULL (checked by
					 * the helpers below) */
};
/*
 * Install a copy of @map into slot @index of @vmm's memory-map table and
 * point the slot's back-pointer at @vmm. An out-of-range index is a fatal
 * programming error (BUG).
 */
static inline void sel4_vmm_mem_map_set(struct sel4_vmm *vmm,
					unsigned int index,
					struct sel4_mem_map *map)
{
	struct sel4_mem_map *slot;

	BUG_ON(index >= NUM_SEL4_MEM_MAP);

	slot = &vmm->maps[index];
	*slot = *map;
	slot->vmm = vmm;
}
/*
 * Acquire @vm's spinlock with local interrupts disabled.
 *
 * Returns the saved interrupt flags; the caller must hand them back to
 * sel4_vm_unlock() (enforced by __must_check).
 */
static inline __must_check unsigned long sel4_vm_lock(struct sel4_vm *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->lock, flags);

	return flags;
}
/*
 * Release @vm's spinlock and restore the interrupt flags previously
 * returned by sel4_vm_lock().
 */
static inline void sel4_vm_unlock(struct sel4_vm *vm, unsigned long flags)
{
	spin_unlock_irqrestore(&vm->lock, flags);
}
/*
 * Request the VM to start running, via the VMM's RPC channel.
 *
 * Returns -EINVAL if @vm is NULL, -ENODEV if no VMM is attached,
 * otherwise propagates device_rpc_req_start_vm()'s return value.
 */
static inline int sel4_start_vm(struct sel4_vm *vm)
{
	if (WARN_ON(!vm))
		return -EINVAL;
	if (WARN_ON(!vm->vmm))
		return -ENODEV;

	return device_rpc_req_start_vm(&vm->vmm->rpc);
}
/*
 * Forward a virtual-PCI device creation request for @vpci->pcidev over
 * the VMM's RPC channel.
 *
 * Returns -EINVAL on NULL arguments, -ENODEV if no VMM is attached,
 * otherwise propagates device_rpc_req_create_vpci_device()'s result.
 */
static inline int sel4_vm_create_vpci(struct sel4_vm *vm,
				      struct sel4_vpci_device *vpci)
{
	if (WARN_ON(!vm || !vpci))
		return -EINVAL;
	if (WARN_ON(!vm->vmm))
		return -ENODEV;

	return device_rpc_req_create_vpci_device(&vm->vmm->rpc, vpci->pcidev);
}
/*
 * Destroy a virtual-PCI device. Arguments are validated the same way as
 * sel4_vm_create_vpci(), but the RPC itself is not implemented yet, so
 * this always reports -ENOSYS for valid input.
 */
static inline int sel4_vm_destroy_vpci(struct sel4_vm *vm,
				       struct sel4_vpci_device *vpci)
{
	if (WARN_ON(!vm || !vpci))
		return -EINVAL;
	if (WARN_ON(!vm->vmm))
		return -ENODEV;

	/* Not implemented */
	return -ENOSYS;
}
/*
 * Drive virtual IRQ line @irq: set, clear, or pulse it according to @op
 * (one of SEL4_IRQ_OP_*), via the VMM's RPC channel.
 *
 * Returns -EINVAL on a NULL @vm or unknown @op, -ENODEV if no VMM is
 * attached, otherwise the result of the corresponding RPC request.
 */
static inline int sel4_vm_set_irqline(struct sel4_vm *vm, u32 irq, u32 op)
{
	if (WARN_ON(!vm))
		return -EINVAL;
	if (WARN_ON(!vm->vmm))
		return -ENODEV;

	switch (op) {
	case SEL4_IRQ_OP_SET:
		return device_rpc_req_set_irqline(&vm->vmm->rpc, irq);
	case SEL4_IRQ_OP_CLR:
		return device_rpc_req_clear_irqline(&vm->vmm->rpc, irq);
	case SEL4_IRQ_OP_PULSE:
		return device_rpc_req_pulse_irqline(&vm->vmm->rpc, irq);
	default:
		return -EINVAL;
	}
}
static inline irqreturn_t sel4_vm_call_irqhandler(struct sel4_vm *vm, int irq)
{
if (WARN_ON(!vm))
return IRQ_NONE;
if (WARN_ON(!vm->vmm || !vm->vmm->ops.upcall_irqhandler)) {
return IRQ_NONE;
}
return vm->vmm->ops.upcall_irqhandler(irq, vm->vmm);
}
/*
 * Forward an MMIO region configuration (@config->gpa, ->len, ->flags)
 * over the VMM's RPC channel.
 *
 * Returns -EINVAL on NULL arguments, -ENODEV if no VMM is attached,
 * otherwise propagates device_rpc_req_mmio_region_config()'s result.
 */
static inline int sel4_vm_mmio_region_config(struct sel4_vm *vm,
					     struct sel4_mmio_region_config *config)
{
	/* Single combined WARN_ON, consistent with the sibling helpers
	 * (sel4_vm_create_vpci() etc.) in this header. */
	if (WARN_ON(!vm || !config))
		return -EINVAL;
	if (WARN_ON(!vm->vmm))
		return -ENODEV;

	return device_rpc_req_mmio_region_config(&vm->vmm->rpc, config->gpa,
						 config->len, config->flags);
}
/* Deliver an upcall notification for @vm. Drivers that use SEL4_IRQ_NONE
 * call this directly instead of registering an IRQ handler (see above). */
void sel4_vm_upcall_notify(struct sel4_vm *vm);

/* One-time init/teardown of the irqfd machinery. */
int sel4_irqfd_init(void);
void sel4_irqfd_exit(void);

/* Configure an irqfd for @vm according to @config. */
int sel4_vm_irqfd_config(struct sel4_vm *vm,
			 struct sel4_irqfd_config *config);
/* Configure an ioeventfd for @vm according to @config. */
int sel4_vm_ioeventfd_config(struct sel4_vm *vm,
			     struct sel4_ioeventfd_config *config);
/* Process an MMIO RPC message @req for @vm — NOTE(review): semantics
 * defined in the .c implementation; confirm there. */
int rpc_process_mmio(struct sel4_vm *vm, rpcmsg_t *req);
/* Core module init: the driver registers its vm_server implementation
 * and owning module. Paired with sel4_exit(). */
int sel4_init(struct sel4_vm_server *vm_server, struct module *module);
void sel4_exit(void);
/* Called when vmm is killed/about to be killed and still used by VM. */
int sel4_notify_vmm_dying(int id);
/* For driver modules with custom ioctls */
long sel4_module_ioctl(struct file *filp, unsigned int ioctl,
		       unsigned long arg);
/* mmap handler for VM memory regions, for driver file_operations. */
int sel4_vm_mmap(struct file *filp, struct vm_area_struct *vma);
/* Allocate a VMM with the given callbacks / validate a VMM pointer. */
struct sel4_vmm *sel4_vmm_alloc(struct sel4_vmm_ops ops);
bool sel4_vmm_valid(struct sel4_vmm *vmm);
#endif /* __SEL4_VIRT_DRV_H */