// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use libc;
use libc::EFD_NONBLOCK;
use std::cmp;
use std::io::Write;
use std::os::unix::prelude::*;
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, RwLock};
use std::thread;
use std::vec::Vec;
use sync::Mutex;
use sys_util::{error, warn, EventFd, GuestMemory};

use super::cloud_hypervisor::net_util::{MacAddr, MAC_ADDR_LEN};
use super::super::{Queue, VirtioDevice, TYPE_NET};
use super::handler::*;
use super::vu_common_ctrl::*;
use super::{Error, Result};
use crate::{pci::MsixConfig, virtio::Interrupt};
use vhost_rs::vhost_user::message::VhostUserVirtioFeatures;
use vhost_rs::vhost_user::{Master, VhostUserMaster};
use vhost_rs::VhostBackend;
use virtio_bindings::virtio_net;
use virtio_bindings::virtio_ring;

/// vhost-user-net device: the VMM side (master) of a vhost-user network backend.
pub struct Net {
    vhost_user_net: Master,
    kill_evt: vmm_sys_util::eventfd::EventFd,
    avail_features: u64,
    acked_features: u64,
    config_space: Vec<u8>,
    queue_sizes: Vec<u16>,
}

impl<'a> Net {
    /// Create a new vhost-user-net device
    pub fn new(mac_addr: MacAddr, vu_cfg: VhostUserConfig<'a>) -> Result<Net> {
        let mut vhost_user_net = Master::connect(vu_cfg.sock, vu_cfg.num_queues as u64)
            .map_err(Error::VhostUserCreateMaster)?;

        let kill_evt =
            vmm_sys_util::eventfd::EventFd::new(EFD_NONBLOCK).map_err(Error::CreateKillEventFd)?;

        // Fill in the device and vring features supported by the VMM.
        let mut avail_features = 1 << virtio_net::VIRTIO_NET_F_GUEST_CSUM
            | 1 << virtio_net::VIRTIO_NET_F_CSUM
            | 1 << virtio_net::VIRTIO_NET_F_GUEST_TSO4
            | 1 << virtio_net::VIRTIO_NET_F_GUEST_TSO6
            | 1 << virtio_net::VIRTIO_NET_F_GUEST_ECN
            | 1 << virtio_net::VIRTIO_NET_F_GUEST_UFO
            | 1 << virtio_net::VIRTIO_NET_F_HOST_TSO4
            | 1 << virtio_net::VIRTIO_NET_F_HOST_TSO6
            | 1 << virtio_net::VIRTIO_NET_F_HOST_ECN
            | 1 << virtio_net::VIRTIO_NET_F_HOST_UFO
            | 1 << virtio_net::VIRTIO_NET_F_MRG_RXBUF
            | 1 << virtio_net::VIRTIO_F_NOTIFY_ON_EMPTY
            | 1 << virtio_net::VIRTIO_F_VERSION_1
            | 1 << virtio_ring::VIRTIO_RING_F_EVENT_IDX
            | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();

        vhost_user_net
            .set_owner()
            .map_err(Error::VhostUserSetOwner)?;

        // Get the features from the backend and negotiate the set of features that
        // both the VMM and the backend support.
        let backend_features = vhost_user_net.get_features().unwrap();
        avail_features &= backend_features;
        // Setting the features back is required by the vhost crate, since later
        // vhost calls check that features have been set on the master before they
        // execute.
        vhost_user_net
            .set_features(backend_features)
            .map_err(Error::VhostUserSetFeatures)?;

        let mut acked_features = 0;
        if backend_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0 {
            acked_features |= VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
            vhost_user_net
                .get_protocol_features()
                .map_err(Error::VhostUserGetProtocolFeatures)?;
        } else {
            return Err(Error::VhostUserProtocolNotSupport);
        }

        // Expose the MAC address through the device config space.
        let mut config_space = vec![0u8; MAC_ADDR_LEN];
        config_space[..].copy_from_slice(mac_addr.get_bytes());
        avail_features |= 1 << virtio_net::VIRTIO_NET_F_MAC;

        // Send set_vring_base here, since it tells backends such as OVS + DPDK how
        // many virtqueues are going to be handled, which the backend needs to know
        // at an early stage.
        for i in 0..vu_cfg.num_queues {
            vhost_user_net
                .set_vring_base(i, 0)
                .map_err(Error::VhostUserSetVringBase)?;
        }

        Ok(Net {
            vhost_user_net,
            kill_evt,
            avail_features,
            acked_features,
            config_space,
            queue_sizes: vec![vu_cfg.queue_size; vu_cfg.num_queues],
        })
    }
}

impl Drop for Net {
    fn drop(&mut self) {
        // Signal the worker thread to exit when the device goes away.
        if let Err(_e) = self.kill_evt.write(1) {
            error!("failed to kill vhost-user-net with error {}", _e);
        }
    }
}

impl VirtioDevice for Net {
    fn keep_fds(&self) -> Vec<RawFd> {
        vec![self.kill_evt.as_raw_fd()]
    }

    fn device_type(&self) -> u32 {
        TYPE_NET as u32
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.queue_sizes
    }

    fn features(&self) -> u64 {
        self.avail_features
    }

    fn ack_features(&mut self, mut value: u64) {
        // Check if the guest is ACK'ing a feature that we didn't claim to have.
        let unrequested_features = value & !self.avail_features;
        if unrequested_features != 0 {
            warn!(
                "Received acknowledge request for unknown feature: {:x}",
                value
            );
            // Don't count these features as acked.
            value &= !unrequested_features;
        }
        self.acked_features |= value;
    }

    fn read_config(&self, offset: u64, mut data: &mut [u8]) {
        let config_len = self.config_space.len() as u64;
        if offset >= config_len {
            error!("Failed to read config space");
            return;
        }
        if let Some(end) = offset.checked_add(data.len() as u64) {
            // This write can't fail, offset and end are checked against config_len.
            data.write_all(
                &self.config_space[offset as usize..cmp::min(end, config_len) as usize],
            )
            .unwrap();
        }
    }

    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let data_len = data.len() as u64;
        let config_len = self.config_space.len() as u64;
        if offset + data_len > config_len {
            error!("Failed to write config space");
            return;
        }
        // Only overwrite the bytes covered by `data`; the check above guarantees
        // the range is within the config space.
        self.config_space[offset as usize..(offset + data_len) as usize].copy_from_slice(data);
    }

    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt_evt: sys_util::EventFd,
        interrupt_resample_evt: sys_util::EventFd,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        status: Arc<AtomicUsize>,
        queues: Vec<Queue>,
        queue_evts: Vec<EventFd>,
    ) {
        let handler_kill_evt = match self.kill_evt.try_clone() {
            Ok(v) => v,
            Err(e) => {
                error!("failed creating kill EventFd pair: {}", e);
                return;
            }
        };

        // Configure the vhost-user backend with the guest memory, queues and queue
        // event fds, using the features acked by the guest.
        let vu_interrupt_list = match setup_vhost_user(
            &mut self.vhost_user_net,
            &mem,
            queues,
            queue_evts,
            self.acked_features,
        ) {
            Ok(v) => v,
            Err(e) => {
                error!("failed to set up vhost_user: {:?}", e);
                return;
            }
        };

        let vu_epoll_cfg = VhostUserEpollConfig {
            interrupt: Interrupt::new(status, interrupt_evt, interrupt_resample_evt, msix_config),
            kill_evt: handler_kill_evt,
            vu_interrupt_list,
        };

        // Spawn the epoll handler thread that relays backend interrupts to the
        // guest and watches the kill event.
        let _handler_result = thread::Builder::new()
            .name("vhost_user_net".to_string())
            .spawn(move || {
                let mut handler = VhostUserEpollHandler::new(vu_epoll_cfg);
                let result = handler.run();
                if let Err(_e) = result {
                    error!("net worker thread exited with error {:?}!", _e);
                }
            });
        if let Err(_e) = _handler_result {
            error!("vhost-user net thread create failed with error {:?}", _e);
        }
    }
}
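
// Usage sketch (not part of the device implementation): constructing the device
// from a vhost-user socket path and a MAC address. This assumes the `VhostUserConfig`
// fields referenced above (`sock`, `num_queues`, `queue_size`) and a hypothetical
// `MacAddr::parse_str` constructor in net_util; adjust names to the actual APIs.
//
//     let vu_cfg = VhostUserConfig {
//         sock: "/tmp/vhost-user-net.sock",
//         num_queues: 2,
//         queue_size: 256,
//     };
//     let mac_addr = MacAddr::parse_str("52:54:00:12:34:56").unwrap();
//     let net = Net::new(mac_addr, vu_cfg).expect("failed to create vhost-user-net");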