summary refs log tree commit diff
path: root/src/hw/virtio/queue.rs
blob: 6e704f78df956cdc140e5299e963a322a6068646 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::result;

use sys_util::{GuestAddress, GuestMemory};

const VIRTQ_DESC_F_NEXT: u16 = 0x1;
const VIRTQ_DESC_F_WRITE: u16 = 0x2;
#[allow(dead_code)]
const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;

#[derive(Clone, Debug)]
pub enum Error {
    RequestOutOfBounds,
    SectorOutOfBounds,
}
pub type Result<T> = result::Result<T, Error>;

/// A virtio descriptor chain.
pub struct DescriptorChain<'a> {
    mem: &'a GuestMemory,
    desc_table: GuestAddress,
    queue_size: u16,
    ttl: u16, // used to prevent infinite chain cycles

    /// Index into the descriptor table
    pub index: u16,

    /// Guest physical address of device specific data
    pub addr: GuestAddress,

    /// Length of device specific data
    pub len: u32,

    /// Includes next, write, and indirect bits
    pub flags: u16,

    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set
    pub next: u16,
}

impl<'a> DescriptorChain<'a> {
    fn checked_new(mem: &GuestMemory,
                   desc_table: GuestAddress,
                   queue_size: u16,
                   index: u16)
                   -> Option<DescriptorChain> {
        if index >= queue_size {
            return None;
        }

        let desc_head = match mem.checked_offset(desc_table, (index as usize) * 16) {
            Some(a) => a,
            None => return None,
        };
        // These reads can't fail unless Guest memory is hopelessly broken.
        let addr = GuestAddress(mem.read_obj_from_addr::<u64>(desc_head).unwrap() as usize);
        if mem.checked_offset(desc_head, 16).is_none() {
            return None;
        }
        let len: u32 = mem.read_obj_from_addr(desc_head.unchecked_add(8))
            .unwrap();
        let flags: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(12))
            .unwrap();
        let next: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(14))
            .unwrap();
        let chain = DescriptorChain {
            mem: mem,
            desc_table: desc_table,
            queue_size: queue_size,
            ttl: queue_size,
            index: index,
            addr: addr,
            len: len,
            flags: flags,
            next: next,
        };

        if chain.is_valid() { Some(chain) } else { None }
    }

    fn is_valid(&self) -> bool {
        if self.mem
               .checked_offset(self.addr, self.len as usize)
               .is_none() {
            false
        } else if self.has_next() && self.next >= self.queue_size {
            false
        } else {
            true
        }
    }

    /// Gets if this descriptor chain has another descriptor chain linked after it.
    pub fn has_next(&self) -> bool {
        self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
    }

    /// If the driver designated this as a write only descriptor.
    ///
    /// If this is false, this descriptor is read only.
    pub fn is_write_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE != 0
    }

    /// Gets the next descriptor in this descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
    /// the head of the next _available_ descriptor chain.
    pub fn next_descriptor(&self) -> Option<DescriptorChain<'a>> {
        if self.has_next() {
            DescriptorChain::checked_new(self.mem, self.desc_table, self.queue_size, self.next)
                .map(|mut c| {
                         c.ttl = self.ttl - 1;
                         c
                     })
        } else {
            None
        }
    }
}

/// Consuming iterator over all available descriptor chain heads in the queue.
pub struct AvailIter<'a, 'b> {
    mem: &'a GuestMemory,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    next_index: usize,
    last_index: usize,
    queue_size: usize,
    next_avail: &'b mut u16,
}

impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
    type Item = DescriptorChain<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.next_index == self.last_index {
            return None;
        }

        let avail_addr = match self.mem
                  .checked_offset(self.avail_ring, 4 + self.next_index * 2) {
            Some(a) => a,
            None => return None,
        };
        // This index is checked below in checked_new
        let desc_index: u16 = self.mem.read_obj_from_addr(avail_addr).unwrap();

        self.next_index += 1;
        self.next_index %= self.queue_size;

        let ret = DescriptorChain::checked_new(self.mem,
                                               self.desc_table,
                                               self.queue_size as u16,
                                               desc_index);
        if ret.is_some() {
            *self.next_avail += 1;
            *self.next_avail %= self.queue_size as u16;
        }
        ret
    }
}

#[derive(Clone)]
/// A virtio queue's parameters.
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,

    /// The queue size in elements the driver selected
    pub size: u16,

    /// Inidcates if the queue is finished with configuration
    pub ready: bool,

    /// Guest physical address of the descriptor table
    pub desc_table: GuestAddress,

    /// Guest physical address of the available ring
    pub avail_ring: GuestAddress,

    /// Guest physical address of the used ring
    pub used_ring: GuestAddress,

    next_avail: u16,
    next_used: u16,
}

impl Queue {
    /// Constructs an empty virtio queue with the given `max_size`.
    pub fn new(max_size: u16) -> Queue {
        Queue {
            max_size: max_size,
            size: 0,
            ready: false,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            next_avail: 0,
            next_used: 0,
        }
    }

    fn actual_size(&self) -> u16 {
        min(self.size, self.max_size)
    }

    fn is_valid(&self, mem: &GuestMemory) -> bool {
        let queue_size = self.actual_size() as usize;
        let desc_table = self.desc_table;
        let desc_table_size = 16 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = 6 + 2 * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = 6 + 8 * queue_size;
        if !self.ready {
            println!("error: attempt to use virtio queue that is not marked ready");
            false
        } else if self.size > self.max_size || self.size == 0 ||
                  (self.size & (self.size - 1)) != 0 {
            println!("error: virtio queue with invalid size: {}", self.size);
            false
        } else if desc_table
                      .checked_add(desc_table_size)
                      .map_or(true, |v| !mem.address_in_range(v)) {
            println!("error: virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                     desc_table.offset(),
                     desc_table_size);
            false
        } else if avail_ring
                      .checked_add(avail_ring_size)
                      .map_or(true, |v| !mem.address_in_range(v)) {
            println!("error: virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                     avail_ring.offset(),
                     avail_ring_size);
            false
        } else if used_ring
                      .checked_add(used_ring_size)
                      .map_or(true, |v| !mem.address_in_range(v)) {
            println!("error: virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                     used_ring.offset(),
                     used_ring_size);
            false
        } else {
            true
        }

    }

    /// A consuming iterator over all available descriptor chain heads offered by the driver.
    pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemory) -> AvailIter<'a, 'b> {
        if !self.is_valid(mem) {
            return AvailIter {
                       mem: mem,
                       desc_table: GuestAddress(0),
                       avail_ring: GuestAddress(0),
                       next_index: 0,
                       last_index: 0,
                       queue_size: 0,
                       next_avail: &mut self.next_avail,
                   };
        }
        let queue_size = self.actual_size();
        let avail_ring = self.avail_ring;

        let index_addr = mem.checked_offset(avail_ring, 2).unwrap();
        // Note that last_index has no invalid values
        let last_index: u16 = mem.read_obj_from_addr::<u16>(index_addr).unwrap() % queue_size;
        AvailIter {
            mem: mem,
            desc_table: self.desc_table,
            avail_ring: avail_ring,
            next_index: self.next_avail as usize,
            last_index: last_index as usize,
            queue_size: queue_size as usize,
            next_avail: &mut self.next_avail,
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, mem: &GuestMemory, desc_index: u16, len: u32) {
        if desc_index >= self.actual_size() {
            println!("error: attempted to add out of bounds descriptor to used ring: {}",
                     desc_index);
            return;
        }

        let used_ring = self.used_ring;
        let next_used = (self.next_used % self.actual_size()) as usize;
        let used_elem = used_ring.unchecked_add(4 + next_used * 8);

        // These writes can't fail as we are guaranteed to be within the descriptor ring.
        mem.write_obj_at_addr(desc_index as u32, used_elem)
            .unwrap();
        mem.write_obj_at_addr(len as u32, used_elem.unchecked_add(4))
            .unwrap();

        self.next_used = self.next_used.wrapping_add(1);
        mem.write_obj_at_addr(self.next_used as u16, used_ring.unchecked_add(2))
            .unwrap();
    }
}