summary refs log tree commit diff
path: root/devices/src/virtio
diff options
context:
space:
mode:
authorDavid Tolnay <dtolnay@chromium.org>2018-12-01 17:49:30 -0800
committerchrome-bot <chrome-bot@chromium.org>2018-12-03 20:32:03 -0800
commit5bbbf610828e975fd308b90543359a85ef59b67f (patch)
tree4cd736628475d702b7ac45feb2e359c3fb74d220 /devices/src/virtio
parent21fb34fb937678d85e9bfa4c721ab4a29196c764 (diff)
downloadcrosvm-5bbbf610828e975fd308b90543359a85ef59b67f.tar
crosvm-5bbbf610828e975fd308b90543359a85ef59b67f.tar.gz
crosvm-5bbbf610828e975fd308b90543359a85ef59b67f.tar.bz2
crosvm-5bbbf610828e975fd308b90543359a85ef59b67f.tar.lz
crosvm-5bbbf610828e975fd308b90543359a85ef59b67f.tar.xz
crosvm-5bbbf610828e975fd308b90543359a85ef59b67f.tar.zst
crosvm-5bbbf610828e975fd308b90543359a85ef59b67f.zip
lint: Resolve the easier clippy lints
Hopefully the changes are self-explanatory and uncontroversial. This
eliminates much of the noise from `cargo clippy` and, for my purposes,
gives me a reasonable way to use it as a tool when writing and reviewing
code.

Here is the Clippy invocation I was using:

    cargo +nightly clippy -- -W clippy::correctness -A renamed_and_removed_lints -Aclippy::{blacklisted_name,borrowed_box,cast_lossless,cast_ptr_alignment,enum_variant_names,identity_op,if_same_then_else,mut_from_ref,needless_pass_by_value,new_without_default,new_without_default_derive,or_fun_call,ptr_arg,should_implement_trait,single_match,too_many_arguments,trivially_copy_pass_by_ref,unreadable_literal,unsafe_vector_initialization,useless_transmute}

TEST=cargo check --features wl-dmabuf,gpu,usb-emulation
TEST=boot linux

Change-Id: I55eb1b4a72beb2f762480e3333a921909314a0a2
Reviewed-on: https://chromium-review.googlesource.com/1356911
Commit-Ready: David Tolnay <dtolnay@chromium.org>
Tested-by: David Tolnay <dtolnay@chromium.org>
Reviewed-by: Dylan Reid <dgreid@chromium.org>
Diffstat (limited to 'devices/src/virtio')
-rw-r--r--devices/src/virtio/balloon.rs48
-rw-r--r--devices/src/virtio/block.rs18
-rw-r--r--devices/src/virtio/gpu/backend.rs2
-rw-r--r--devices/src/virtio/gpu/mod.rs4
-rw-r--r--devices/src/virtio/gpu/protocol.rs36
-rw-r--r--devices/src/virtio/net.rs37
-rw-r--r--devices/src/virtio/p9.rs26
-rw-r--r--devices/src/virtio/rng.rs2
-rw-r--r--devices/src/virtio/vhost/net.rs2
-rw-r--r--devices/src/virtio/vhost/vsock.rs2
-rw-r--r--devices/src/virtio/wl.rs46
11 files changed, 105 insertions, 118 deletions
diff --git a/devices/src/virtio/balloon.rs b/devices/src/virtio/balloon.rs
index 168e0c4..c7614c4 100644
--- a/devices/src/virtio/balloon.rs
+++ b/devices/src/virtio/balloon.rs
@@ -32,7 +32,7 @@ pub type Result<T> = std::result::Result<T, BalloonError>;
 // Balloon has three virt IO queues: Inflate, Deflate, and Stats.
 // Stats is currently not used.
 const QUEUE_SIZE: u16 = 128;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE, QUEUE_SIZE];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
 
 const VIRTIO_BALLOON_PFN_SHIFT: u32 = 12;
 
@@ -73,29 +73,27 @@ impl Worker {
         let mut used_desc_heads = [0; QUEUE_SIZE as usize];
         let mut used_count = 0;
         for avail_desc in queue.iter(&self.mem) {
-            if inflate {
-                if valid_inflate_desc(&avail_desc) {
-                    let num_addrs = avail_desc.len / 4;
-                    'addr_loop: for i in 0..num_addrs as usize {
-                        let addr = match avail_desc.addr.checked_add((i * 4) as u64) {
-                            Some(a) => a,
-                            None => break,
-                        };
-                        let guest_input: u32 = match self.mem.read_obj_from_addr(addr) {
-                            Ok(a) => a,
-                            Err(_) => continue,
-                        };
-                        let guest_address =
-                            GuestAddress((guest_input as u64) << VIRTIO_BALLOON_PFN_SHIFT);
-
-                        if self
-                            .mem
-                            .remove_range(guest_address, 1 << VIRTIO_BALLOON_PFN_SHIFT)
-                            .is_err()
-                        {
-                            warn!("Marking pages unused failed {:?}", guest_address);
-                            continue;
-                        }
+            if inflate && valid_inflate_desc(&avail_desc) {
+                let num_addrs = avail_desc.len / 4;
+                for i in 0..num_addrs as usize {
+                    let addr = match avail_desc.addr.checked_add((i * 4) as u64) {
+                        Some(a) => a,
+                        None => break,
+                    };
+                    let guest_input: u32 = match self.mem.read_obj_from_addr(addr) {
+                        Ok(a) => a,
+                        Err(_) => continue,
+                    };
+                    let guest_address =
+                        GuestAddress((guest_input as u64) << VIRTIO_BALLOON_PFN_SHIFT);
+
+                    if self
+                        .mem
+                        .remove_range(guest_address, 1 << VIRTIO_BALLOON_PFN_SHIFT)
+                        .is_err()
+                    {
+                        warn!("Marking pages unused failed {:?}", guest_address);
+                        continue;
                     }
                 }
             }
@@ -302,7 +300,7 @@ impl VirtioDevice for Balloon {
     }
 
     fn ack_features(&mut self, value: u64) {
-        self.features = self.features & value;
+        self.features &= value;
     }
 
     fn activate(
diff --git a/devices/src/virtio/block.rs b/devices/src/virtio/block.rs
index 7d5fb02..f817e89 100644
--- a/devices/src/virtio/block.rs
+++ b/devices/src/virtio/block.rs
@@ -28,7 +28,7 @@ use super::{
 };
 
 const QUEUE_SIZE: u16 = 256;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
 const SECTOR_SHIFT: u8 = 9;
 const SECTOR_SIZE: u64 = 0x01 << SECTOR_SHIFT;
 const MAX_DISCARD_SECTORS: u32 = u32::MAX;
@@ -212,14 +212,14 @@ enum ExecuteError {
 impl ExecuteError {
     fn status(&self) -> u8 {
         match self {
-            &ExecuteError::Flush(_) => VIRTIO_BLK_S_IOERR,
-            &ExecuteError::Read { .. } => VIRTIO_BLK_S_IOERR,
-            &ExecuteError::Seek { .. } => VIRTIO_BLK_S_IOERR,
-            &ExecuteError::TimerFd(_) => VIRTIO_BLK_S_IOERR,
-            &ExecuteError::Write { .. } => VIRTIO_BLK_S_IOERR,
-            &ExecuteError::DiscardWriteZeroes { .. } => VIRTIO_BLK_S_IOERR,
-            &ExecuteError::ReadOnly { .. } => VIRTIO_BLK_S_IOERR,
-            &ExecuteError::Unsupported(_) => VIRTIO_BLK_S_UNSUPP,
+            ExecuteError::Flush(_) => VIRTIO_BLK_S_IOERR,
+            ExecuteError::Read { .. } => VIRTIO_BLK_S_IOERR,
+            ExecuteError::Seek { .. } => VIRTIO_BLK_S_IOERR,
+            ExecuteError::TimerFd(_) => VIRTIO_BLK_S_IOERR,
+            ExecuteError::Write { .. } => VIRTIO_BLK_S_IOERR,
+            ExecuteError::DiscardWriteZeroes { .. } => VIRTIO_BLK_S_IOERR,
+            ExecuteError::ReadOnly { .. } => VIRTIO_BLK_S_IOERR,
+            ExecuteError::Unsupported(_) => VIRTIO_BLK_S_UNSUPP,
         }
     }
 }
diff --git a/devices/src/virtio/gpu/backend.rs b/devices/src/virtio/gpu/backend.rs
index 00a327f..634cc55 100644
--- a/devices/src/virtio/gpu/backend.rs
+++ b/devices/src/virtio/gpu/backend.rs
@@ -494,7 +494,7 @@ impl Backend {
             }
         }
 
-        return response;
+        response
     }
 
     /// Copies the given rectangle of pixels of the given resource's backing memory to the host side
diff --git a/devices/src/virtio/gpu/mod.rs b/devices/src/virtio/gpu/mod.rs
index 5c1806b..f13c06d 100644
--- a/devices/src/virtio/gpu/mod.rs
+++ b/devices/src/virtio/gpu/mod.rs
@@ -38,7 +38,7 @@ use self::protocol::*;
 
 // First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
 // there to be fewer of.
-const QUEUE_SIZES: &'static [u16] = &[256, 16];
+const QUEUE_SIZES: &[u16] = &[256, 16];
 const FENCE_POLL_MS: u64 = 1;
 
 struct QueueDescriptor {
@@ -503,7 +503,7 @@ impl Worker {
 
         'poll: loop {
             // If there are outstanding fences, wake up early to poll them.
-            let duration = if self.state.fence_descriptors.len() != 0 {
+            let duration = if !self.state.fence_descriptors.is_empty() {
                 Duration::from_millis(FENCE_POLL_MS)
             } else {
                 Duration::new(i64::MAX as u64, 0)
diff --git a/devices/src/virtio/gpu/protocol.rs b/devices/src/virtio/gpu/protocol.rs
index 2001cd7..f3e53a8 100644
--- a/devices/src/virtio/gpu/protocol.rs
+++ b/devices/src/virtio/gpu/protocol.rs
@@ -613,8 +613,8 @@ impl GpuResponse {
             ctx_id: Le32::from(ctx_id),
             padding: Le32::from(0),
         };
-        let len = match self {
-            &GpuResponse::OkDisplayInfo(ref info) => {
+        let len = match *self {
+            GpuResponse::OkDisplayInfo(ref info) => {
                 if info.len() > VIRTIO_GPU_MAX_SCANOUTS {
                     return Err(GpuResponseEncodeError::TooManyDisplays(info.len()));
                 }
@@ -630,7 +630,7 @@ impl GpuResponse {
                 resp.get_ref(0)?.store(disp_info);
                 size_of_val(&disp_info)
             }
-            &GpuResponse::OkCapsetInfo { id, version, size } => {
+            GpuResponse::OkCapsetInfo { id, version, size } => {
                 resp.get_ref(0)?.store(virtio_gpu_resp_capset_info {
                     hdr,
                     capset_id: Le32::from(id),
@@ -640,7 +640,7 @@ impl GpuResponse {
                 });
                 size_of::<virtio_gpu_resp_capset_info>()
             }
-            &GpuResponse::OkCapset(ref data) => {
+            GpuResponse::OkCapset(ref data) => {
                 resp.get_ref(0)?.store(hdr);
                 let resp_data_slice =
                     resp.get_slice(size_of_val(&hdr) as u64, data.len() as u64)?;
@@ -658,26 +658,26 @@ impl GpuResponse {
     /// Gets the `VIRTIO_GPU_*` enum value that corresponds to this variant.
     pub fn get_type(&self) -> u32 {
         match self {
-            &GpuResponse::OkNoData => VIRTIO_GPU_RESP_OK_NODATA,
-            &GpuResponse::OkDisplayInfo(_) => VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
-            &GpuResponse::OkCapsetInfo { .. } => VIRTIO_GPU_RESP_OK_CAPSET_INFO,
-            &GpuResponse::OkCapset(_) => VIRTIO_GPU_RESP_OK_CAPSET,
-            &GpuResponse::ErrUnspec => VIRTIO_GPU_RESP_ERR_UNSPEC,
-            &GpuResponse::ErrOutOfMemory => VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
-            &GpuResponse::ErrInvalidScanoutId => VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
-            &GpuResponse::ErrInvalidResourceId => VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
-            &GpuResponse::ErrInvalidContextId => VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
-            &GpuResponse::ErrInvalidParameter => VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
+            GpuResponse::OkNoData => VIRTIO_GPU_RESP_OK_NODATA,
+            GpuResponse::OkDisplayInfo(_) => VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+            GpuResponse::OkCapsetInfo { .. } => VIRTIO_GPU_RESP_OK_CAPSET_INFO,
+            GpuResponse::OkCapset(_) => VIRTIO_GPU_RESP_OK_CAPSET,
+            GpuResponse::ErrUnspec => VIRTIO_GPU_RESP_ERR_UNSPEC,
+            GpuResponse::ErrOutOfMemory => VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
+            GpuResponse::ErrInvalidScanoutId => VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
+            GpuResponse::ErrInvalidResourceId => VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
+            GpuResponse::ErrInvalidContextId => VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
+            GpuResponse::ErrInvalidParameter => VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
         }
     }
 
     /// Returns true if this response indicates success.
     pub fn is_ok(&self) -> bool {
         match self {
-            &GpuResponse::OkNoData => true,
-            &GpuResponse::OkDisplayInfo(_) => true,
-            &GpuResponse::OkCapsetInfo { .. } => true,
-            &GpuResponse::OkCapset(_) => true,
+            GpuResponse::OkNoData => true,
+            GpuResponse::OkDisplayInfo(_) => true,
+            GpuResponse::OkCapsetInfo { .. } => true,
+            GpuResponse::OkCapset(_) => true,
             _ => false,
         }
     }
diff --git a/devices/src/virtio/net.rs b/devices/src/virtio/net.rs
index c127946..8fa4d8d 100644
--- a/devices/src/virtio/net.rs
+++ b/devices/src/virtio/net.rs
@@ -25,7 +25,7 @@ use super::{Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_NET};
 /// http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.html#x1-1740003
 const MAX_BUFFER_SIZE: usize = 65562;
 const QUEUE_SIZE: u16 = 256;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE, QUEUE_SIZE];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
 
 #[derive(Debug)]
 pub enum NetError {
@@ -175,31 +175,24 @@ where
             let mut read_count = 0;
 
             // Copy buffer from across multiple descriptors.
-            loop {
-                match next_desc {
-                    Some(desc) => {
-                        if desc.is_write_only() {
-                            break;
-                        }
-                        let limit = cmp::min(read_count + desc.len as usize, frame.len());
-                        let read_result = self
-                            .mem
-                            .read_slice_at_addr(&mut frame[read_count..limit as usize], desc.addr);
-                        match read_result {
-                            Ok(sz) => {
-                                read_count += sz;
-                            }
-                            Err(e) => {
-                                warn!("net: tx: failed to read slice: {:?}", e);
-                                break;
-                            }
-                        }
-                        next_desc = desc.next_descriptor();
+            while let Some(desc) = next_desc {
+                if desc.is_write_only() {
+                    break;
+                }
+                let limit = cmp::min(read_count + desc.len as usize, frame.len());
+                let read_result = self
+                    .mem
+                    .read_slice_at_addr(&mut frame[read_count..limit as usize], desc.addr);
+                match read_result {
+                    Ok(sz) => {
+                        read_count += sz;
                     }
-                    None => {
+                    Err(e) => {
+                        warn!("net: tx: failed to read slice: {:?}", e);
                         break;
                     }
                 }
+                next_desc = desc.next_descriptor();
             }
 
             let write_result = self.tap.write(&frame[..read_count as usize]);
diff --git a/devices/src/virtio/p9.rs b/devices/src/virtio/p9.rs
index 1984096..2c1faa6 100644
--- a/devices/src/virtio/p9.rs
+++ b/devices/src/virtio/p9.rs
@@ -22,7 +22,7 @@ use virtio_sys::vhost::VIRTIO_F_VERSION_1;
 use super::{DescriptorChain, Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_9P};
 
 const QUEUE_SIZE: u16 = 128;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
 
 // The only virtio_9p feature.
 const VIRTIO_9P_MOUNT_TAG: u8 = 0;
@@ -61,39 +61,35 @@ impl error::Error for P9Error {
 impl fmt::Display for P9Error {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
-            &P9Error::TagTooLong(len) => write!(
+            P9Error::TagTooLong(len) => write!(
                 f,
                 "P9 device tag is too long: len = {}, max = {}",
                 len,
                 ::std::u16::MAX
             ),
-            &P9Error::RootNotAbsolute(ref buf) => write!(
+            P9Error::RootNotAbsolute(buf) => write!(
                 f,
                 "P9 root directory is not absolute: root = {}",
                 buf.display()
             ),
-            &P9Error::CreatePollContext(ref err) => {
-                write!(f, "failed to create PollContext: {:?}", err)
-            }
-            &P9Error::PollError(ref err) => write!(f, "failed to poll events: {:?}", err),
-            &P9Error::ReadQueueEventFd(ref err) => {
+            P9Error::CreatePollContext(err) => write!(f, "failed to create PollContext: {:?}", err),
+            P9Error::PollError(err) => write!(f, "failed to poll events: {:?}", err),
+            P9Error::ReadQueueEventFd(err) => {
                 write!(f, "failed to read from virtio queue EventFd: {:?}", err)
             }
-            &P9Error::NoReadableDescriptors => {
+            P9Error::NoReadableDescriptors => {
                 write!(f, "request does not have any readable descriptors")
             }
-            &P9Error::NoWritableDescriptors => {
+            P9Error::NoWritableDescriptors => {
                 write!(f, "request does not have any writable descriptors")
             }
-            &P9Error::InvalidGuestAddress(addr, len) => write!(
+            P9Error::InvalidGuestAddress(addr, len) => write!(
                 f,
                 "descriptor contained invalid guest address range: address = {:?}, len = {}",
                 addr, len
             ),
-            &P9Error::SignalUsedQueue(ref err) => {
-                write!(f, "failed to signal used queue: {:?}", err)
-            }
-            &P9Error::Internal(ref err) => write!(f, "P9 internal server error: {}", err),
+            P9Error::SignalUsedQueue(err) => write!(f, "failed to signal used queue: {:?}", err),
+            P9Error::Internal(err) => write!(f, "P9 internal server error: {}", err),
         }
     }
 }
diff --git a/devices/src/virtio/rng.rs b/devices/src/virtio/rng.rs
index 2aec07e..ee83980 100644
--- a/devices/src/virtio/rng.rs
+++ b/devices/src/virtio/rng.rs
@@ -15,7 +15,7 @@ use sys_util::{EventFd, GuestMemory, PollContext, PollToken};
 use super::{Queue, VirtioDevice, INTERRUPT_STATUS_USED_RING, TYPE_RNG};
 
 const QUEUE_SIZE: u16 = 256;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE];
 
 #[derive(Debug)]
 pub enum RngError {
diff --git a/devices/src/virtio/vhost/net.rs b/devices/src/virtio/vhost/net.rs
index d95a7db..2b8c08b 100644
--- a/devices/src/virtio/vhost/net.rs
+++ b/devices/src/virtio/vhost/net.rs
@@ -22,7 +22,7 @@ use super::{Error, Result};
 
 const QUEUE_SIZE: u16 = 256;
 const NUM_QUEUES: usize = 2;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE; NUM_QUEUES];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];
 
 pub struct Net<T: TapT, U: VhostNetT<T>> {
     workers_kill_evt: Option<EventFd>,
diff --git a/devices/src/virtio/vhost/vsock.rs b/devices/src/virtio/vhost/vsock.rs
index 4b8fe85..ff4e575 100644
--- a/devices/src/virtio/vhost/vsock.rs
+++ b/devices/src/virtio/vhost/vsock.rs
@@ -19,7 +19,7 @@ use super::{Error, Result};
 
 const QUEUE_SIZE: u16 = 256;
 const NUM_QUEUES: usize = 3;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE; NUM_QUEUES];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];
 
 pub struct Vsock {
     worker_kill_evt: Option<EventFd>,
diff --git a/devices/src/virtio/wl.rs b/devices/src/virtio/wl.rs
index a3a1ac0..9c4e424 100644
--- a/devices/src/virtio/wl.rs
+++ b/devices/src/virtio/wl.rs
@@ -101,7 +101,7 @@ const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
 const VIRTIO_WL_F_TRANS_FLAGS: u32 = 0x01;
 
 const QUEUE_SIZE: u16 = 16;
-const QUEUE_SIZES: &'static [u16] = &[QUEUE_SIZE, QUEUE_SIZE];
+const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
 
 const NEXT_VFD_ID_BASE: u32 = 0x40000000;
 const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
@@ -486,7 +486,7 @@ impl VmRequester {
 
     fn request(&self, request: VmRequest) -> WlResult<VmResponse> {
         let mut inner = self.inner.borrow_mut();
-        let ref mut vm_socket = *inner;
+        let vm_socket = &mut *inner;
         vm_socket.send(&request).map_err(WlError::VmControl)?;
         vm_socket.recv().map_err(WlError::VmControl)
     }
@@ -633,9 +633,9 @@ enum WlResp<'a> {
 
 impl<'a> WlResp<'a> {
     fn get_code(&self) -> u32 {
-        match self {
-            &WlResp::Ok => VIRTIO_WL_RESP_OK,
-            &WlResp::VfdNew { resp, .. } => {
+        match *self {
+            WlResp::Ok => VIRTIO_WL_RESP_OK,
+            WlResp::VfdNew { resp, .. } => {
                 if resp {
                     VIRTIO_WL_RESP_VFD_NEW
                 } else {
@@ -643,15 +643,15 @@ impl<'a> WlResp<'a> {
                 }
             }
             #[cfg(feature = "wl-dmabuf")]
-            &WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
-            &WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
-            &WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
-            &WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
-            &WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
-            &WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
-            &WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
-            &WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
-            &WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
+            WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
+            WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
+            WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
+            WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
+            WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
+            WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
+            WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
+            WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
+            WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
         }
     }
 }
@@ -1160,7 +1160,7 @@ impl WlState {
         let mut to_delete = Set::new();
         for &(dest_vfd_id, ref q) in self.in_queue.iter() {
             if dest_vfd_id == vfd_id {
-                if let &WlRecv::Vfd { id } = q {
+                if let WlRecv::Vfd { id } = *q {
                     to_delete.insert(id);
                 }
             }
@@ -1277,8 +1277,8 @@ impl WlState {
 
     fn next_recv(&self) -> Option<WlResp> {
         if let Some(q) = self.in_queue.front() {
-            match q {
-                &(vfd_id, WlRecv::Vfd { id }) => {
+            match *q {
+                (vfd_id, WlRecv::Vfd { id }) => {
                     if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                         match self.vfds.get(&id) {
                             Some(vfd) => Some(WlResp::VfdNew {
@@ -1304,7 +1304,7 @@ impl WlState {
                         })
                     }
                 }
-                &(vfd_id, WlRecv::Data { ref buf }) => {
+                (vfd_id, WlRecv::Data { ref buf }) => {
                     if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                         Some(WlResp::VfdRecv {
                             id: vfd_id,
@@ -1319,7 +1319,7 @@ impl WlState {
                         })
                     }
                 }
-                &(vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
+                (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
             }
         } else {
             None
@@ -1328,8 +1328,8 @@ impl WlState {
 
     fn pop_recv(&mut self) {
         if let Some(q) = self.in_queue.front() {
-            match q {
-                &(vfd_id, WlRecv::Vfd { id }) => {
+            match *q {
+                (vfd_id, WlRecv::Vfd { id }) => {
                     if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                         self.recv_vfds.push(id);
                         self.current_recv_vfd = Some(vfd_id);
@@ -1339,14 +1339,14 @@ impl WlState {
                         return;
                     }
                 }
-                &(vfd_id, WlRecv::Data { .. }) => {
+                (vfd_id, WlRecv::Data { .. }) => {
                     self.recv_vfds.clear();
                     self.current_recv_vfd = None;
                     if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
                         return;
                     }
                 }
-                &(_, WlRecv::Hup) => {
+                (_, WlRecv::Hup) => {
                     self.recv_vfds.clear();
                     self.current_recv_vfd = None;
                 }