-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathsel4_irqfd.c
206 lines (163 loc) · 4.35 KB
/
sel4_irqfd.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023, Technology Innovation Institute
*
*/
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <linux/eventfd.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "sel4_virt_drv.h"
/*
 * One irqfd binding: an eventfd whose signaling pulses a virtual IRQ
 * into a seL4 VM. Lives on vm->irqfds, protected by the vm lock.
 */
struct sel4_irqfd {
	struct sel4_vm *vm;		/* VM that receives the injected IRQ */
	struct list_head list;		/* entry in vm->irqfds */
	struct eventfd_ctx *eventfd;	/* held reference; put on cleanup */
	wait_queue_entry_t wait;	/* hooked into the eventfd's waitqueue */
	struct work_struct cleanup;	/* deferred teardown (see wakeup handler) */
	poll_table pt;			/* used once at assign time via vfs_poll() */
	u32 virq;			/* virtual IRQ line to pulse */
};

/* Workqueue for teardown that cannot run in wakeup-callback context. */
static struct workqueue_struct *irqfd_cleanup_wq;
/*
 * Consume the pending eventfd count and pulse the bound virtual IRQ.
 * Called from the wakeup callback and from assign-time pending-event check.
 */
static void sel4_irqfd_inject(struct sel4_irqfd *irqfd)
{
	u64 cnt;

	/* Drain the counter so the eventfd is re-armed for the next signal */
	eventfd_ctx_do_read(irqfd->eventfd, &cnt);

	/* Pulse irq */
	sel4_vm_set_irqline(irqfd->vm, irqfd->virq, SEL4_IRQ_OP_PULSE);
}
/* Called with wqh->lock held and interrupts disabled */
/*
 * Wakeup callback invoked when the bound eventfd is signaled or closed.
 *
 * EPOLLIN:  the eventfd was written — inject (pulse) the virtual IRQ.
 * EPOLLHUP: the eventfd was closed — teardown must take the vm lock and
 *           remove us from the eventfd waitqueue, which cannot be done
 *           here (wqh->lock is held, interrupts disabled), so defer it
 *           to the cleanup workqueue.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int sel4_irqfd_wakeywakey(wait_queue_entry_t *wait,
				 unsigned int mode,
				 int sync, void *key)
{
	struct sel4_irqfd *irqfd;
	/* Key carries the poll event bits in in-kernel (EPOLL*) encoding */
	unsigned long poll_bits = (unsigned long)key;

	irqfd = container_of(wait, struct sel4_irqfd, wait);

	if (poll_bits & EPOLLIN)
		/* An event has been signaled, inject an interrupt */
		sel4_irqfd_inject(irqfd);

	if (poll_bits & EPOLLHUP)
		/* Do shutdown work in thread to hold wqh->lock */
		queue_work(irqfd_cleanup_wq, &irqfd->cleanup);

	return 0;
}
/*
 * poll_table callback: invoked once by vfs_poll() at assign time to hook
 * our wait entry into the eventfd's waitqueue, so subsequent signals hit
 * sel4_irqfd_wakeywakey(). Priority insertion keeps us ahead of
 * non-exclusive waiters.
 */
static void sel4_irqfd_poll(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{
	struct sel4_irqfd *irqfd = container_of(pt, struct sel4_irqfd, pt);

	add_wait_queue_priority(wqh, &irqfd->wait);
}
/*
 * Tear down one irqfd: unlink it from the VM's list, detach from the
 * eventfd's waitqueue, drop the eventfd reference, and free it.
 *
 * Order matters: the wait entry must be removed from the waitqueue
 * before the eventfd ref is put and the struct freed, or a concurrent
 * signal could run the wakeup callback on freed memory.
 *
 * Caller must hold the vm lock (asserted below).
 */
static void sel4_irqfd_cleanup(struct sel4_irqfd *irqfd)
{
	u64 cnt;

	lockdep_assert_held(&irqfd->vm->lock);

	/* remove from wait queue */
	list_del_init(&irqfd->list);
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}
/*
 * Deferred teardown, queued from the wakeup callback on EPOLLHUP.
 * Runs in process context where taking the vm lock is allowed.
 */
static void sel4_irqfd_cleanup_work(struct work_struct *work)
{
	struct sel4_irqfd *irqfd;
	unsigned long irqflags;

	irqfd = container_of(work, struct sel4_irqfd, cleanup);

	irqflags = sel4_vm_lock(irqfd->vm);
	/* Re-check under the lock: a racing deassign may have cleaned up
	 * already (cleanup resets the list entry via list_del_init). */
	if (!list_empty(&irqfd->list))
		sel4_irqfd_cleanup(irqfd);
	sel4_vm_unlock(irqfd->vm, irqflags);
}
/*
 * Bind an eventfd to a virtual IRQ line of @vm.
 *
 * Takes its own reference on the eventfd context, registers a wakeup
 * callback on the eventfd's waitqueue, and links the binding onto
 * vm->irqfds. If the eventfd already has a pending signal, the IRQ is
 * injected immediately.
 *
 * Returns 0 on success, -ENOMEM/-EBADF on allocation or fd failure,
 * -EBUSY if the eventfd is already assigned to this VM.
 */
static int sel4_irqfd_assign(struct sel4_vm *vm,
			     struct sel4_irqfd_config *config)
{
	struct sel4_irqfd *irqfd, *tmp;
	struct fd fd;
	__poll_t events;
	unsigned long irqflags;
	int rc;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->vm = vm;
	irqfd->virq = config->virq;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->cleanup, sel4_irqfd_cleanup_work);

	fd = fdget(config->fd);
	if (!fd.file) {
		rc = -EBADF;
		goto err_free;
	}

	irqfd->eventfd = eventfd_ctx_fileget(fd.file);
	if (IS_ERR(irqfd->eventfd)) {
		rc = PTR_ERR(irqfd->eventfd);
		goto err_fdput;
	}

	init_waitqueue_func_entry(&irqfd->wait, sel4_irqfd_wakeywakey);
	init_poll_funcptr(&irqfd->pt, sel4_irqfd_poll);

	/* Reject duplicate bindings of the same eventfd to this VM */
	irqflags = sel4_vm_lock(vm);
	list_for_each_entry(tmp, &vm->irqfds, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		rc = -EBUSY;
		sel4_vm_unlock(vm, irqflags);
		goto err_ctx_put;
	}
	list_add_tail(&irqfd->list, &vm->irqfds);
	sel4_vm_unlock(vm, irqflags);

	/* Check the pending event in this stage; vfs_poll() also hooks our
	 * wait entry into the eventfd waitqueue via sel4_irqfd_poll(). */
	events = vfs_poll(fd.file, &irqfd->pt);
	if (events & EPOLLIN)
		sel4_irqfd_inject(irqfd);

	fdput(fd);
	return 0;

	/* Tiered unwind: each label releases exactly what was acquired */
err_ctx_put:
	eventfd_ctx_put(irqfd->eventfd);
err_fdput:
	fdput(fd);
err_free:
	kfree(irqfd);
	return rc;
}
/*
 * Unbind the eventfd named by @config->fd from @vm, if it is bound.
 *
 * Looks up the matching irqfd under the vm lock and tears it down.
 * Succeeds (returns 0) even when no binding exists; only a bad fd
 * yields an error.
 */
static int sel4_irqfd_deassign(struct sel4_vm *vm,
			       struct sel4_irqfd_config *config)
{
	struct sel4_irqfd *pos, *next;
	struct eventfd_ctx *ctx;
	unsigned long flags;

	ctx = eventfd_ctx_fdget(config->fd);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* Safe iteration: cleanup unlinks and frees the matched entry */
	flags = sel4_vm_lock(vm);
	list_for_each_entry_safe(pos, next, &vm->irqfds, list) {
		if (pos->eventfd != ctx)
			continue;
		sel4_irqfd_cleanup(pos);
		break;
	}
	sel4_vm_unlock(vm, flags);

	/* Drop the lookup reference taken above */
	eventfd_ctx_put(ctx);

	return 0;
}
/*
 * ioctl entry point: assign or deassign an irqfd for @vm depending on
 * the SEL4_IRQFD_FLAG_DEASSIGN flag in @config.
 *
 * Returns -EINVAL on NULL arguments (with a WARN), otherwise the result
 * of the assign/deassign operation.
 */
int sel4_vm_irqfd_config(struct sel4_vm *vm,
			 struct sel4_irqfd_config *config)
{
	if (WARN_ON(!vm || !config))
		return -EINVAL;

	return (config->flags & SEL4_IRQFD_FLAG_DEASSIGN) ?
		sel4_irqfd_deassign(vm, config) :
		sel4_irqfd_assign(vm, config);
}
/*
 * Module init: create the workqueue used for deferred irqfd teardown.
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */
int sel4_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("sel4-irqfd-cleanup", 0, 0);

	return irqfd_cleanup_wq ? 0 : -ENOMEM;
}
/*
 * Module exit: flush pending cleanup work and destroy the workqueue.
 * (destroy_workqueue() drains queued items before freeing.)
 */
void sel4_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}