From 190e58559479bec42075736c0be5ada89d7319ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9C=B1=E8=80=BF=E5=AE=87?= Date: Tue, 10 Sep 2024 01:06:55 +0000 Subject: [PATCH 1/5] =?UTF-8?q?=E5=9C=A8=20RTEMS-LibBSD=20=E5=9F=BA?= =?UTF-8?q?=E7=BA=BF=E4=B8=AD=E5=A2=9E=E5=8A=A0=20Phytium=20=E9=A9=B1?= =?UTF-8?q?=E5=8A=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 1 + freebsd/sbin/ping/ping.c | 2 + freebsd/sys/dev/cadence64/if_cgem.c | 2207 ++++++++++ freebsd/sys/dev/cadence64/if_cgem_hw.h | 447 ++ freebsd/sys/dev/mii/mii_physubr.c | 8 +- freebsd/sys/dev/mmc/mmc.c | 5 + freebsd/sys/dev/pci/pci.c | 8 + freebsd/sys/dev/pci/pci_host_generic.c | 495 +++ freebsd/sys/dev/pci/pci_host_generic.h | 75 + freebsd/sys/dev/pci/pci_host_generic_fdt.c | 669 +++ freebsd/sys/dev/pci/pci_host_generic_fdt.h | 49 + freebsd/sys/dev/phytium/phytium_sdif.c | 936 +++++ freebsd/sys/dev/phytium/phytium_sdmmc.c | 0 freebsd/sys/dev/usb/controller/xhci.c | 4373 ++++++++++++++++++++ freebsd/sys/dev/usb/controller/xhci.h | 538 +++ freebsd/sys/dev/usb/controller/xhci_pci.c | 473 +++ freebsd/sys/dev/usb/controller/xhci_plat.c | 238 ++ freebsd/sys/dev/usb/controller/xhcireg.h | 18 + libbsd.py | 138 +- rtemsbsd/include/bsp/nexus-devices.h | 53 + rtemsbsd/sys/dev/nvd/nvd.c | 32 + 21 files changed, 10762 insertions(+), 3 deletions(-) create mode 100644 freebsd/sys/dev/cadence64/if_cgem.c create mode 100644 freebsd/sys/dev/cadence64/if_cgem_hw.h create mode 100644 freebsd/sys/dev/pci/pci_host_generic.c create mode 100644 freebsd/sys/dev/pci/pci_host_generic.h create mode 100644 freebsd/sys/dev/pci/pci_host_generic_fdt.c create mode 100644 freebsd/sys/dev/pci/pci_host_generic_fdt.h create mode 100644 freebsd/sys/dev/phytium/phytium_sdif.c create mode 100644 freebsd/sys/dev/phytium/phytium_sdmmc.c create mode 100644 freebsd/sys/dev/usb/controller/xhci.c create mode 100644 freebsd/sys/dev/usb/controller/xhci.h create mode 100644 freebsd/sys/dev/usb/controller/xhci_pci.c create mode 100644 freebsd/sys/dev/usb/controller/xhci_plat.c diff --git a/.gitignore b/.gitignore index d7a78f78..8dc27768 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ testsuite/include/rtems/bsd/test/network-config.h .lock-waf* .waf* build +/.vscode \ No newline at end of file diff --git a/freebsd/sbin/ping/ping.c b/freebsd/sbin/ping/ping.c index fae24e53..29eec0e6 100644 --- a/freebsd/sbin/ping/ping.c +++ b/freebsd/sbin/ping/ping.c @@ -374,6 +374,8 @@ main(int argc, char *const *argv) #ifndef __rtems__ alarmtimeout = df = preload = tos = 0; +#else + df = preload = tos = 0; #endif /* __rtems__ */ outpack = outpackhdr + sizeof(struct ip); diff --git a/freebsd/sys/dev/cadence64/if_cgem.c b/freebsd/sys/dev/cadence64/if_cgem.c new file mode 100644 index 00000000..5a855ab5 --- /dev/null +++ b/freebsd/sys/dev/cadence64/if_cgem.c @@ -0,0 +1,2207 @@ +#include + +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2012-2014 Thomas Skibo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * A network interface driver for Cadence GEM Gigabit Ethernet + * interface such as the one used in Xilinx Zynq-7000 SoC. + * + * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. + * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16 + * and register definitions are in appendix B.18. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef INET +#include +#include +#include +#include +#endif + +#include +#include + +#ifndef __rtems__ +#include +#include +#include + +#include +#include +#endif /* __rtems__ */ + +#include +#include + +#if BUS_SPACE_MAXADDR > BUS_SPACE_MAXADDR_32BIT +#define CGEM64 +#endif + +#ifdef __rtems__ +#include +#else +#include +#endif + +#include +#ifdef __rtems__ +#pragma GCC diagnostic ignored "-Wpointer-sign" +#pragma GCC diagnostic ignored "-Wincompatible-pointer-types" +#include +#endif /* __rtems__ */ + +#define IF_CGEM_NAME "cgem" + +#define CGEM_NUM_RX_DESCS 512 /* size of receive descriptor ring */ +#define CGEM_NUM_TX_DESCS 512 /* size of transmit descriptor ring */ + +/* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */ +#define DEFAULT_NUM_RX_BUFS 256 /* number of receive bufs to queue. 
*/ + +#define TX_MAX_DMA_SEGS 8 /* maximum segs in a tx mbuf dma */ + +#define CGEM_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \ + CSUM_TCP_IPV6 | CSUM_UDP_IPV6) + +#define HWQUIRK_NONE 0 +#define HWQUIRK_NEEDNULLQS 1 +#define HWQUIRK_RXHANGWAR 2 +#define HWQUIRK_TXCLK 4 +#define HWQUIRK_PCLK 8 + +#ifndef __rtems__ +static struct ofw_compat_data compat_data[] = { + { "cdns,zynq-gem", HWQUIRK_RXHANGWAR | HWQUIRK_TXCLK }, + { "cdns,zynqmp-gem", HWQUIRK_NEEDNULLQS | HWQUIRK_TXCLK }, + { "microchip,mpfs-mss-gem", HWQUIRK_NEEDNULLQS | HWQUIRK_TXCLK }, + { "sifive,fu540-c000-gem", HWQUIRK_PCLK }, + { "sifive,fu740-c000-gem", HWQUIRK_PCLK }, + { "cdns,gem", HWQUIRK_NONE }, + { "cdns,macb", HWQUIRK_NONE }, + { "cadence,gem", HWQUIRK_NONE }, + { NULL, 0 } +}; +#endif /* __rtems__ */ + +struct cgem_softc { + if_t ifp; + struct mtx sc_mtx; + device_t dev; + device_t miibus; + u_int mii_media_active; /* last active media */ + int if_old_flags; + struct resource *mem_res; + struct resource *irq_res; + void *intrhand; + struct callout tick_ch; + uint32_t net_ctl_shadow; + uint32_t net_cfg_shadow; +#ifndef __rtems__ + clk_t ref_clk; +#endif + int neednullqs; + int phy_contype; + + bus_dma_tag_t desc_dma_tag; + bus_dma_tag_t mbuf_dma_tag; + + /* receive descriptor ring */ + struct cgem_rx_desc *rxring; + bus_addr_t rxring_physaddr; + struct mbuf *rxring_m[CGEM_NUM_RX_DESCS]; +#ifndef __rtems__ + bus_dmamap_t rxring_m_dmamap[CGEM_NUM_RX_DESCS]; +#endif /* __rtems__ */ + int rxring_hd_ptr; /* where to put rcv bufs */ + int rxring_tl_ptr; /* where to get receives */ + int rxring_queued; /* how many rcv bufs queued */ + bus_dmamap_t rxring_dma_map; + int rxbufs; /* tunable number rcv bufs */ + int rxhangwar; /* rx hang work-around */ + u_int rxoverruns; /* rx overruns */ + u_int rxnobufs; /* rx buf ring empty events */ + u_int rxdmamapfails; /* rx dmamap failures */ + uint32_t rx_frames_prev; + + /* transmit descriptor ring */ + struct cgem_tx_desc *txring; + bus_addr_t txring_physaddr; + struct mbuf *txring_m[CGEM_NUM_TX_DESCS]; +#ifndef __rtems__ + bus_dmamap_t txring_m_dmamap[CGEM_NUM_TX_DESCS]; +#endif /* __rtems__ */ + int txring_hd_ptr; /* where to put next xmits */ + int txring_tl_ptr; /* next xmit mbuf to free */ + int txring_queued; /* num xmits segs queued */ + bus_dmamap_t txring_dma_map; + u_int txfull; /* tx ring full events */ + u_int txdefrags; /* tx calls to m_defrag() */ + u_int txdefragfails; /* tx m_defrag() failures */ + u_int txdmamapfails; /* tx dmamap failures */ + + /* null descriptor rings */ + void *null_qs; + bus_addr_t null_qs_physaddr; + + /* hardware provided statistics */ + struct cgem_hw_stats { + uint64_t tx_bytes; + uint32_t tx_frames; + uint32_t tx_frames_bcast; + uint32_t tx_frames_multi; + uint32_t tx_frames_pause; + uint32_t tx_frames_64b; + uint32_t tx_frames_65to127b; + uint32_t tx_frames_128to255b; + uint32_t tx_frames_256to511b; + uint32_t tx_frames_512to1023b; + uint32_t tx_frames_1024to1536b; + uint32_t tx_under_runs; + uint32_t tx_single_collisn; + uint32_t tx_multi_collisn; + uint32_t tx_excsv_collisn; + uint32_t tx_late_collisn; + uint32_t tx_deferred_frames; + uint32_t tx_carrier_sense_errs; + + uint64_t rx_bytes; + uint32_t rx_frames; + uint32_t rx_frames_bcast; + uint32_t rx_frames_multi; + uint32_t rx_frames_pause; + uint32_t rx_frames_64b; + uint32_t rx_frames_65to127b; + uint32_t rx_frames_128to255b; + uint32_t rx_frames_256to511b; + uint32_t rx_frames_512to1023b; + uint32_t rx_frames_1024to1536b; + uint32_t rx_frames_undersize; + uint32_t 
rx_frames_oversize; + uint32_t rx_frames_jabber; + uint32_t rx_frames_fcs_errs; + uint32_t rx_frames_length_errs; + uint32_t rx_symbol_errs; + uint32_t rx_align_errs; + uint32_t rx_resource_errs; + uint32_t rx_overrun_errs; + uint32_t rx_ip_hdr_csum_errs; + uint32_t rx_tcp_csum_errs; + uint32_t rx_udp_csum_errs; + } stats; +}; + +#define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) +#define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) +#define BARRIER(sc, off, len, flags) \ + (bus_barrier((sc)->mem_res, (off), (len), (flags)) + +#define CGEM_LOCK(sc) mtx_lock(&(sc)->sc_mtx) +#define CGEM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) +#define CGEM_LOCK_INIT(sc) \ + mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \ + MTX_NETWORK_LOCK, MTX_DEF) +#define CGEM_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) +#define CGEM_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) + +/* Allow platforms to optionally provide a way to set the reference clock. */ +static int cgem_default_set_ref_clk(int unit, int frequency); + +static devclass_t cgem_devclass; +static int interface_type; +static int interface_speed; +struct cgem_softc *interface_sc = NULL; + +static int cgem_probe(device_t dev); +static int cgem_attach(device_t dev); +static int cgem_detach(device_t dev); +static void cgem_tick(void *); +static void cgem_intr(void *); + +static void cgem_mediachange(struct cgem_softc *, struct mii_data *); + +static void +cgem_get_mac(struct cgem_softc *sc, u_char eaddr[]) +{ + int i; +#ifndef __rtems__ + uint32_t rnd; +#endif /* __rtems__ */ + + /* See if boot loader gave us a MAC address already. */ + for (i = 0; i < 4; i++) { + uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i)); + uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff; + if (low != 0 || high != 0) { + eaddr[0] = low & 0xff; + eaddr[1] = (low >> 8) & 0xff; + eaddr[2] = (low >> 16) & 0xff; + eaddr[3] = (low >> 24) & 0xff; + eaddr[4] = high & 0xff; + eaddr[5] = (high >> 8) & 0xff; + break; + } + } + + /* No MAC from boot loader? Assign a random one. */ + if (i == 4) { +#ifndef __rtems__ + rnd = arc4random(); + + eaddr[0] = 'b'; + eaddr[1] = 's'; + eaddr[2] = 'd'; + eaddr[3] = (rnd >> 16) & 0xff; + eaddr[4] = (rnd >> 8) & 0xff; + eaddr[5] = rnd & 0xff; +#else /* __rtems__ */ + rtems_bsd_get_mac_address(device_get_name(sc->dev), +- device_get_unit(sc->dev), eaddr); +#endif /* __rtems__ */ + + device_printf(sc->dev, "no mac address found, assigning " + "random: %02x:%02x:%02x:%02x:%02x:%02x\n", + eaddr[0], eaddr[1], eaddr[2], + eaddr[3], eaddr[4], eaddr[5]); + } + + /* Move address to first slot and zero out the rest. */ + WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) | + (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]); + WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]); + + for (i = 1; i < 4; i++) { + WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0); + WR4(sc, CGEM_SPEC_ADDR_HI(i), 0); + } +} + +/* + * cgem_mac_hash(): map 48-bit address to a 6-bit hash. The 6-bit hash + * corresponds to a bit in a 64-bit hash register. Setting that bit in the + * hash register enables reception of all frames with a destination address + * that hashes to that 6-bit value. + * + * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech + * Reference Manual. Bits 0-5 in the hash are the exclusive-or of + * every sixth bit in the destination address. 
+ */ +static int +cgem_mac_hash(u_char eaddr[]) +{ + int hash; + int i, j; + + hash = 0; + for (i = 0; i < 6; i++) + for (j = i; j < 48; j += 6) + if ((eaddr[j >> 3] & (1 << (j & 7))) != 0) + hash ^= (1 << i); + + return hash; +} + +static u_int +cgem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) +{ + uint32_t *hashes = arg; + int index; + + index = cgem_mac_hash(LLADDR(sdl)); + if (index > 31) + hashes[0] |= (1U << (index - 32)); + else + hashes[1] |= (1U << index); + + return (1); +} + +/* + * After any change in rx flags or multi-cast addresses, set up hash registers + * and net config register bits. + */ +static void +cgem_rx_filter(struct cgem_softc *sc) +{ + if_t ifp = sc->ifp; + uint32_t hashes[2] = { 0, 0 }; + + sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN | + CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL); + +#if 0 + if ((if_getflags(ifp) & IFF_PROMISC) != 0) + sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL; + else { + if ((if_getflags(ifp) & IFF_BROADCAST) == 0) + sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST; + if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { + hashes[0] = 0xffffffff; + hashes[1] = 0xffffffff; + } else + if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes); + + if (hashes[0] != 0 || hashes[1] != 0) + sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN; + } +#endif + + WR4(sc, CGEM_HASH_TOP, hashes[0]); + WR4(sc, CGEM_HASH_BOT, hashes[1]); + WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); +} + +/* For bus_dmamap_load() callback. */ +static void +cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + + if (nsegs != 1 || error != 0) + return; + *(bus_addr_t *)arg = segs[0].ds_addr; +} + +/* Set up null queues for priority queues we actually can't disable. */ +static void +cgem_null_qs(struct cgem_softc *sc) +{ + struct cgem_rx_desc *rx_desc; + struct cgem_tx_desc *tx_desc; + uint32_t queue_mask; + int n; + + /* Read design config register 6 to determine number of queues. */ + queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) & + CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1; + if (queue_mask == 0) + return; + + /* Create empty RX queue and empty TX buf queues. */ + memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) + + sizeof(struct cgem_tx_desc)); + rx_desc = sc->null_qs; + rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP; + tx_desc = (struct cgem_tx_desc *)(rx_desc + 1); + tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP; + + /* Point all valid ring base pointers to the null queues. */ + for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) { + WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr); + WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr + + sizeof(struct cgem_rx_desc)); + } +} + +/* Create DMA'able descriptor rings. */ +static int +cgem_setup_descs(struct cgem_softc *sc) +{ + int i, err; + int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) + + CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc); + + if (sc->neednullqs) + desc_rings_size += sizeof(struct cgem_rx_desc) + + sizeof(struct cgem_tx_desc); + + sc->txring = NULL; + sc->rxring = NULL; + + /* Allocate non-cached DMA space for RX and TX descriptors. */ + err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, +#ifdef CGEM64 + 1ULL << 32, /* Do not cross a 4G boundary. */ +#else + 0, +#endif + BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, + desc_rings_size, 1, desc_rings_size, 0, + busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag); + if (err) + return (err); + + /* Set up a bus_dma_tag for mbufs. 
*/ + err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, + BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, + TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, + &sc->mbuf_dma_tag); + if (err) + return (err); + + /* + * Allocate DMA memory. We allocate transmit, receive and null + * descriptor queues all at once because the hardware only provides + * one register for the upper 32 bits of rx and tx descriptor queues + * hardware addresses. + */ + err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring, + BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, + &sc->rxring_dma_map); + if (err) + return (err); + + /* Load descriptor DMA memory. */ + err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map, + (void *)sc->rxring, desc_rings_size, + cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT); + if (err) + return (err); + + /* Initialize RX descriptors. */ + for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { + sc->rxring[i].addr = CGEM_RXDESC_OWN; + sc->rxring[i].ctl = 0; + sc->rxring_m[i] = NULL; +#ifndef __rtems__ + sc->rxring_m_dmamap[i] = NULL; +#endif /* __rtems__ */ + } + sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; + + sc->rxring_hd_ptr = 0; + sc->rxring_tl_ptr = 0; + sc->rxring_queued = 0; + + sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS); + sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS * + sizeof(struct cgem_rx_desc); + + /* Initialize TX descriptor ring. */ + for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { + sc->txring[i].addr = 0; + sc->txring[i].ctl = CGEM_TXDESC_USED; + sc->txring_m[i] = NULL; +#ifndef __rtems__ + sc->txring_m_dmamap[i] = NULL; +#endif /* __rtems__ */ + } + sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; + + sc->txring_hd_ptr = 0; + sc->txring_tl_ptr = 0; + sc->txring_queued = 0; + + if (sc->neednullqs) { + sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS); + sc->null_qs_physaddr = sc->txring_physaddr + + CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc); + + cgem_null_qs(sc); + } + + return (0); +} + +/* Fill receive descriptor ring with mbufs. */ +static void +cgem_fill_rqueue(struct cgem_softc *sc) +{ + struct mbuf *m = NULL; +#ifndef __rtems__ + bus_dma_segment_t segs[TX_MAX_DMA_SEGS]; + int nsegs; +#else /* __rtems__ */ + bus_dma_segment_t segs[1]; +#endif /* __rtems__ */ + + CGEM_ASSERT_LOCKED(sc); + + while (sc->rxring_queued < sc->rxbufs) { + /* Get a cluster mbuf. */ + m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + if (m == NULL) + break; + + m->m_len = MCLBYTES; + m->m_pkthdr.len = MCLBYTES; + m->m_pkthdr.rcvif = sc->ifp; + + /* Load map and plug in physical address. */ +#ifndef __rtems__ + if (bus_dmamap_create(sc->mbuf_dma_tag, 0, + &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) { + sc->rxdmamapfails++; + m_free(m); + break; + } + if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_hd_ptr], m, + segs, &nsegs, BUS_DMA_NOWAIT)) { + sc->rxdmamapfails++; + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_hd_ptr]); + sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL; + m_free(m); + break; + } +#endif /* __rtems__ */ + sc->rxring_m[sc->rxring_hd_ptr] = m; + + /* Sync cache with receive buffer. */ +#ifndef __rtems__ + bus_dmamap_sync(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_hd_ptr], + BUS_DMASYNC_PREREAD); +#else /* __rtems__ */ + rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len); + segs[0].ds_addr = mtod(m, bus_addr_t); +#endif /* __rtems__ */ + + /* Write rx descriptor and increment head pointer. 
*/ + sc->rxring[sc->rxring_hd_ptr].ctl = 0; +#ifdef CGEM64 + sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32; +#endif + if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) { + sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr | + CGEM_RXDESC_WRAP; + sc->rxring_hd_ptr = 0; + } else + sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr; + + sc->rxring_queued++; + } +} + +/* Pull received packets off of receive descriptor ring. */ +static void +cgem_recv(struct cgem_softc *sc) +{ + if_t ifp = sc->ifp; + struct mbuf *m, *m_hd, **m_tl; + uint32_t ctl; + + CGEM_ASSERT_LOCKED(sc); + + /* Pick up all packets in which the OWN bit is set. */ + m_hd = NULL; + m_tl = &m_hd; + while (sc->rxring_queued > 0 && + (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) { + ctl = sc->rxring[sc->rxring_tl_ptr].ctl; + + /* Grab filled mbuf. */ + m = sc->rxring_m[sc->rxring_tl_ptr]; + sc->rxring_m[sc->rxring_tl_ptr] = NULL; + + /* Sync cache with receive buffer. */ +#ifndef __rtems__ + bus_dmamap_sync(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_tl_ptr], + BUS_DMASYNC_POSTREAD); +#else /* __rtems__ */ + rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len); +#endif /* __rtems__ */ + +#ifndef __rtems__ + /* Unload and destroy dmamap. */ + bus_dmamap_unload(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_tl_ptr]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_tl_ptr]); + sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL; +#endif /* __rtems__ */ + + /* Increment tail pointer. */ + if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS) + sc->rxring_tl_ptr = 0; + sc->rxring_queued--; + + /* + * Check FCS and make sure entire packet landed in one mbuf + * cluster (which is much bigger than the largest ethernet + * packet). + */ + if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 || + (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) != + (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) { + /* discard. */ + m_free(m); + if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); + continue; + } + + /* Ready it to hand off to upper layers. */ + m->m_data += ETHER_ALIGN; + m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK); + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = m->m_len; + + /* + * Are we using hardware checksumming? Check the status in the + * receive descriptor. + */ + if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { + /* TCP or UDP checks out, IP checks out too. */ + if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) == + CGEM_RXDESC_CKSUM_STAT_TCP_GOOD || + (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) == + CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) { + m->m_pkthdr.csum_flags |= + CSUM_IP_CHECKED | CSUM_IP_VALID | + CSUM_DATA_VALID | CSUM_PSEUDO_HDR; + m->m_pkthdr.csum_data = 0xffff; + } else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) == + CGEM_RXDESC_CKSUM_STAT_IP_GOOD) { + /* Only IP checks out. */ + m->m_pkthdr.csum_flags |= + CSUM_IP_CHECKED | CSUM_IP_VALID; + m->m_pkthdr.csum_data = 0xffff; + } + } + + /* Queue it up for delivery below. */ + *m_tl = m; + m_tl = &m->m_next; + } + + /* Replenish receive buffers. */ + cgem_fill_rqueue(sc); + + /* Unlock and send up packets. */ + CGEM_UNLOCK(sc); + while (m_hd != NULL) { + m = m_hd; + m_hd = m_hd->m_next; + m->m_next = NULL; + if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); + if_input(ifp, m); + } + CGEM_LOCK(sc); +} + +/* Find completed transmits and free their mbufs. */ +static void +cgem_clean_tx(struct cgem_softc *sc) +{ + struct mbuf *m; + uint32_t ctl; + + CGEM_ASSERT_LOCKED(sc); + + /* free up finished transmits. 
*/ + while (sc->txring_queued > 0 && + ((ctl = sc->txring[sc->txring_tl_ptr].ctl) & + CGEM_TXDESC_USED) != 0) { + /* Sync cache. */ +#ifndef __rtems__ + bus_dmamap_sync(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_tl_ptr], + BUS_DMASYNC_POSTWRITE); + + /* Unload and destroy DMA map. */ + bus_dmamap_unload(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_tl_ptr]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_tl_ptr]); + sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL; +#endif /* __rtems__ */ + + /* Free up the mbuf. */ + m = sc->txring_m[sc->txring_tl_ptr]; + sc->txring_m[sc->txring_tl_ptr] = NULL; + m_freem(m); + + /* Check the status. */ + if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) { + /* Serious bus error. log to console. */ +#ifdef CGEM64 + device_printf(sc->dev, + "cgem_clean_tx: AHB error, addr=0x%x%08x\n", + sc->txring[sc->txring_tl_ptr].addrhi, + sc->txring[sc->txring_tl_ptr].addr); +#else + device_printf(sc->dev, + "cgem_clean_tx: AHB error, addr=0x%x\n", + sc->txring[sc->txring_tl_ptr].addr); +#endif + } else if ((ctl & (CGEM_TXDESC_RETRY_ERR | + CGEM_TXDESC_LATE_COLL)) != 0) { + if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); + } else + if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); + + /* + * If the packet spanned more than one tx descriptor, skip + * descriptors until we find the end so that only + * start-of-frame descriptors are processed. + */ + while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) { + if ((ctl & CGEM_TXDESC_WRAP) != 0) + sc->txring_tl_ptr = 0; + else + sc->txring_tl_ptr++; + sc->txring_queued--; + + ctl = sc->txring[sc->txring_tl_ptr].ctl; + + sc->txring[sc->txring_tl_ptr].ctl = + ctl | CGEM_TXDESC_USED; + } + + /* Next descriptor. */ + if ((ctl & CGEM_TXDESC_WRAP) != 0) + sc->txring_tl_ptr = 0; + else + sc->txring_tl_ptr++; + sc->txring_queued--; + + if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE); + } +} + +#ifdef __rtems__ +static int +cgem_get_segs_for_tx(struct mbuf *m, bus_dma_segment_t segs[TX_MAX_DMA_SEGS], + int *nsegs) +{ + int i = 0; + + do { + if (m->m_len > 0) { + segs[i].ds_addr = mtod(m, bus_addr_t); + segs[i].ds_len = m->m_len; + rtems_cache_flush_multiple_data_lines(m->m_data, m->m_len); + ++i; + } + + m = m->m_next; + + if (m == NULL) { + *nsegs = i; + + return (0); + } + } while (i < TX_MAX_DMA_SEGS); + + return (EFBIG); +} +#endif /* __rtems__ */ +/* Start transmits. */ +static void +cgem_start_locked(if_t ifp) +{ + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); + struct mbuf *m; + bus_dma_segment_t segs[TX_MAX_DMA_SEGS]; + uint32_t ctl; + int i, nsegs, wrap, err; + + CGEM_ASSERT_LOCKED(sc); + + if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0) + return; + + for (;;) { + /* Check that there is room in the descriptor ring. */ + if (sc->txring_queued >= + CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) { + /* Try to make room. */ + cgem_clean_tx(sc); + + /* Still no room? */ + if (sc->txring_queued >= + CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) { + if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); + sc->txfull++; + break; + } + } + + /* Grab next transmit packet. */ + m = if_dequeue(ifp); + if (m == NULL) + break; + +#ifndef __rtems__ + /* Create and load DMA map. 
*/ + if (bus_dmamap_create(sc->mbuf_dma_tag, 0, + &sc->txring_m_dmamap[sc->txring_hd_ptr])) { + m_freem(m); + sc->txdmamapfails++; + continue; + } + err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_hd_ptr], + m, segs, &nsegs, BUS_DMA_NOWAIT); +#else /* __rtems__ */ + err = cgem_get_segs_for_tx(m, segs, &nsegs); +#endif /* __rtems__ */ + if (err == EFBIG) { + /* Too many segments! defrag and try again. */ + struct mbuf *m2 = m_defrag(m, M_NOWAIT); + + if (m2 == NULL) { + sc->txdefragfails++; + m_freem(m); +#ifndef __rtems__ + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_hd_ptr]); + sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL; +#endif /* __rtems__ */ + continue; + } + m = m2; +#ifndef __rtems__ + err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_hd_ptr], + m, segs, &nsegs, BUS_DMA_NOWAIT); +#else /* __rtems__ */ + err = cgem_get_segs_for_tx(m, segs, &nsegs); +#endif /* __rtems__ */ + sc->txdefrags++; + } + if (err) { + /* Give up. */ + m_freem(m); +#ifndef __rtems__ + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_hd_ptr]); + sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL; +#endif /* __rtems__ */ + sc->txdmamapfails++; + continue; + } + sc->txring_m[sc->txring_hd_ptr] = m; + +#ifndef __rtems__ + /* Sync tx buffer with cache. */ + bus_dmamap_sync(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_hd_ptr], + BUS_DMASYNC_PREWRITE); +#endif /* __rtems__ */ + + /* Set wrap flag if next packet might run off end of ring. */ + wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >= + CGEM_NUM_TX_DESCS; + + /* + * Fill in the TX descriptors back to front so that USED bit in + * first descriptor is cleared last. + */ + for (i = nsegs - 1; i >= 0; i--) { + /* Descriptor address. */ + sc->txring[sc->txring_hd_ptr + i].addr = + segs[i].ds_addr; +#ifdef CGEM64 + sc->txring[sc->txring_hd_ptr + i].addrhi = + segs[i].ds_addr >> 32; +#endif + /* Descriptor control word. */ + ctl = segs[i].ds_len; + if (i == nsegs - 1) { + ctl |= CGEM_TXDESC_LAST_BUF; + if (wrap) + ctl |= CGEM_TXDESC_WRAP; + } + sc->txring[sc->txring_hd_ptr + i].ctl = ctl; + + if (i != 0) + sc->txring_m[sc->txring_hd_ptr + i] = NULL; + } + + if (wrap) + sc->txring_hd_ptr = 0; + else + sc->txring_hd_ptr += nsegs; + sc->txring_queued += nsegs; + + /* Kick the transmitter. */ + WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow | + CGEM_NET_CTRL_START_TX); + + /* If there is a BPF listener, bounce a copy to him. 
*/ + ETHER_BPF_MTAP(ifp, m); + } +} + +static void +cgem_start(if_t ifp) +{ + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); + + CGEM_LOCK(sc); + cgem_start_locked(ifp); + CGEM_UNLOCK(sc); +} + +static void +cgem_poll_hw_stats(struct cgem_softc *sc) +{ + uint32_t n; + + CGEM_ASSERT_LOCKED(sc); + + sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT); + sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32; + + sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX); + sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX); + sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX); + sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX); + sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX); + sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX); + sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX); + sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX); + sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX); + sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX); + sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS); + + n = RD4(sc, CGEM_SINGLE_COLL_FRAMES); + sc->stats.tx_single_collisn += n; + if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); + n = RD4(sc, CGEM_MULTI_COLL_FRAMES); + sc->stats.tx_multi_collisn += n; + if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); + n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES); + sc->stats.tx_excsv_collisn += n; + if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); + n = RD4(sc, CGEM_LATE_COLL); + sc->stats.tx_late_collisn += n; + if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); + + sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES); + sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS); + + sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT); + sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32; + + sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX); + sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX); + sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX); + sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX); + sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX); + sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX); + sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX); + sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX); + sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX); + sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX); + sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX); + sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX); + sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX); + sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS); + sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS); + sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS); + sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS); + sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS); + sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS); + sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS); + sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS); + sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS); +} + +static void +cgem_tick(void *arg) +{ + struct cgem_softc *sc = (struct cgem_softc *)arg; + struct mii_data *mii; + + CGEM_ASSERT_LOCKED(sc); + + /* Poll the phy. 
*/ + if (sc->miibus != NULL) { + mii = device_get_softc(sc->miibus); + mii_tick(mii); + } + + /* Poll statistics registers. */ + cgem_poll_hw_stats(sc); + + /* Check for receiver hang. */ + if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) { + /* + * Reset receiver logic by toggling RX_EN bit. 1usec + * delay is necessary especially when operating at 100mbps + * and 10mbps speeds. + */ + WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow & + ~CGEM_NET_CTRL_RX_EN); + DELAY(1); + WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); + } + sc->rx_frames_prev = sc->stats.rx_frames; + + /* Next callout in one second. */ + callout_reset(&sc->tick_ch, hz, cgem_tick, sc); +} + +/* Interrupt handler. */ +static void +cgem_intr(void *arg) +{ + struct cgem_softc *sc = (struct cgem_softc *)arg; + if_t ifp = sc->ifp; + uint32_t istatus; + + CGEM_LOCK(sc); + + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { + CGEM_UNLOCK(sc); + return; + } + + /* Read interrupt status and immediately clear the bits. */ + istatus = RD4(sc, CGEM_INTR_STAT); + WR4(sc, CGEM_INTR_STAT, istatus); + + /* Packets received. */ + if ((istatus & CGEM_INTR_RX_COMPLETE) != 0) + cgem_recv(sc); + + /* Free up any completed transmit buffers. */ + cgem_clean_tx(sc); + + /* Hresp not ok. Something is very bad with DMA. Try to clear. */ + if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) { + device_printf(sc->dev, + "cgem_intr: hresp not okay! rx_status=0x%x\n", + RD4(sc, CGEM_RX_STAT)); + WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK); + } + + /* Receiver overrun. */ + if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) { + /* Clear status bit. */ + WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN); + sc->rxoverruns++; + } + + /* Receiver ran out of bufs. */ + if ((istatus & CGEM_INTR_RX_USED_READ) != 0) { + WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow | + CGEM_NET_CTRL_FLUSH_DPRAM_PKT); + cgem_fill_rqueue(sc); + sc->rxnobufs++; + } + + /* Restart transmitter if needed. */ + if (!if_sendq_empty(ifp)) + cgem_start_locked(ifp); + + CGEM_UNLOCK(sc); +} + +/* Reset hardware. */ +static void +cgem_reset(struct cgem_softc *sc) +{ + + CGEM_ASSERT_LOCKED(sc); + + /* Determine data bus width from design configuration register. */ + switch (RD4(sc, CGEM_DESIGN_CFG1) & + CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) { + case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64: + sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64; + break; + case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128: + sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128; + break; + default: + sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32; + } + + WR4(sc, CGEM_NET_CTRL, 0); + WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); + WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS); + WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL); + WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL); + WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL); + WR4(sc, CGEM_HASH_BOT, 0); + WR4(sc, CGEM_HASH_TOP, 0); + WR4(sc, CGEM_TX_QBAR, 0); /* manual says do this. */ + WR4(sc, CGEM_RX_QBAR, 0); + + /* Get management port running even if interface is down. */ + sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_32;//CGEM_NET_CFG_MDC_CLK_DIV_48; + WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); + + sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN; + WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); +} + +/* Bring up the hardware. */ +static void +cgem_config(struct cgem_softc *sc) +{ + if_t ifp = sc->ifp; + uint32_t dma_cfg; + u_char *eaddr = if_getlladdr(ifp); + + CGEM_ASSERT_LOCKED(sc); + + /* Program Net Config Register. 
*/ + sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK | + CGEM_NET_CFG_DBUS_WIDTH_MASK); + sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE | + CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) | + CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN | + CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100); + + /* Check connection type, enable SGMII bits if necessary. */ + if (sc->phy_contype == MII_CONTYPE_SGMII) { + sc->net_cfg_shadow |= CGEM_NET_CFG_SGMII_EN; + sc->net_cfg_shadow |= CGEM_NET_CFG_PCS_SEL; + } + + /* Enable receive checksum offloading? */ + if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) + sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; + + WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); + + /* Program DMA Config Register. */ + dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) | + CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K | + CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL | + CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 | +#ifdef CGEM64 + CGEM_DMA_CFG_ADDR_BUS_64 | +#endif + CGEM_DMA_CFG_DISC_WHEN_NO_AHB; + + /* Enable transmit checksum offloading? */ + if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) + dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN; + + WR4(sc, CGEM_DMA_CFG, dma_cfg); + + /* Write the rx and tx descriptor ring addresses to the QBAR regs. */ + WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr); + WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr); +#ifdef CGEM64 + WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32)); + WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32)); +#endif + + /* Enable rx and tx. */ + sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN); + WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); + + /* Set receive address in case it changed. */ + WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) | + (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]); + WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]); + + /* Set up interrupts. */ + WR4(sc, CGEM_INTR_EN, CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN | + CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ | + CGEM_INTR_HRESP_NOT_OK); +} + +/* Turn on interface and load up receive ring with buffers. */ +static void +cgem_init_locked(struct cgem_softc *sc) +{ + struct mii_data *mii; + + CGEM_ASSERT_LOCKED(sc); + + if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0) + return; + + cgem_config(sc); + cgem_fill_rqueue(sc); + + if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); + + if (sc->miibus != NULL) { + mii = device_get_softc(sc->miibus); + mii_mediachg(mii); + } + + callout_reset(&sc->tick_ch, hz, cgem_tick, sc); +} + +static void +cgem_init(void *arg) +{ + struct cgem_softc *sc = (struct cgem_softc *)arg; + + CGEM_LOCK(sc); + cgem_init_locked(sc); + CGEM_UNLOCK(sc); +} + +/* Turn off interface. Free up any buffers in transmit or receive queues. */ +static void +cgem_stop(struct cgem_softc *sc) +{ + int i; + + CGEM_ASSERT_LOCKED(sc); + + callout_stop(&sc->tick_ch); + + /* Shut down hardware. */ + cgem_reset(sc); + + /* Clear out transmit queue. */ + memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc)); + for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { + sc->txring[i].ctl = CGEM_TXDESC_USED; + if (sc->txring_m[i]) { +#ifndef __rtems__ + /* Unload and destroy dmamap. 
*/ + bus_dmamap_unload(sc->mbuf_dma_tag, + sc->txring_m_dmamap[i]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[i]); + sc->txring_m_dmamap[i] = NULL; +#endif /* __rtems__ */ + m_freem(sc->txring_m[i]); + sc->txring_m[i] = NULL; + } + } + sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; + + sc->txring_hd_ptr = 0; + sc->txring_tl_ptr = 0; + sc->txring_queued = 0; + + /* Clear out receive queue. */ + memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc)); + for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { + sc->rxring[i].addr = CGEM_RXDESC_OWN; + if (sc->rxring_m[i]) { +#ifndef __rtems__ + /* Unload and destroy dmamap. */ + bus_dmamap_unload(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[i]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[i]); + sc->rxring_m_dmamap[i] = NULL; +#endif /* __rtems__ */ + + m_freem(sc->rxring_m[i]); + sc->rxring_m[i] = NULL; + } + } + sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; + + sc->rxring_hd_ptr = 0; + sc->rxring_tl_ptr = 0; + sc->rxring_queued = 0; + + /* Force next statchg or linkchg to program net config register. */ + sc->mii_media_active = 0; +} + +static int +cgem_ioctl(if_t ifp, u_long cmd, caddr_t data) +{ + struct cgem_softc *sc = if_getsoftc(ifp); + struct ifreq *ifr = (struct ifreq *)data; + struct mii_data *mii; + int error = 0, mask; + + switch (cmd) { + case SIOCSIFFLAGS: + CGEM_LOCK(sc); + if ((if_getflags(ifp) & IFF_UP) != 0) { + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + if (((if_getflags(ifp) ^ sc->if_old_flags) & + (IFF_PROMISC | IFF_ALLMULTI)) != 0) { + cgem_rx_filter(sc); + } + } else { + cgem_init_locked(sc); + } + } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); + cgem_stop(sc); + } + sc->if_old_flags = if_getflags(ifp); + CGEM_UNLOCK(sc); + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + /* Set up multi-cast filters. */ + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + CGEM_LOCK(sc); + cgem_rx_filter(sc); + CGEM_UNLOCK(sc); + } + break; + + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + if (sc->miibus == NULL) + return (ENXIO); + mii = device_get_softc(sc->miibus); + error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); + break; + + case SIOCSIFCAP: + CGEM_LOCK(sc); + mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; + + if ((mask & IFCAP_TXCSUM) != 0) { + if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) { + /* Turn on TX checksumming. */ + if_setcapenablebit(ifp, IFCAP_TXCSUM | + IFCAP_TXCSUM_IPV6, 0); + if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0); + + WR4(sc, CGEM_DMA_CFG, + RD4(sc, CGEM_DMA_CFG) | + CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN); + } else { + /* Turn off TX checksumming. */ + if_setcapenablebit(ifp, 0, IFCAP_TXCSUM | + IFCAP_TXCSUM_IPV6); + if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST); + + WR4(sc, CGEM_DMA_CFG, + RD4(sc, CGEM_DMA_CFG) & + ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN); + } + } + if ((mask & IFCAP_RXCSUM) != 0) { + if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) { + /* Turn on RX checksumming. */ + if_setcapenablebit(ifp, IFCAP_RXCSUM | + IFCAP_RXCSUM_IPV6, 0); + sc->net_cfg_shadow |= + CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; + WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); + } else { + /* Turn off RX checksumming. 
*/ + if_setcapenablebit(ifp, 0, IFCAP_RXCSUM | + IFCAP_RXCSUM_IPV6); + sc->net_cfg_shadow &= + ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; + WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); + } + } + if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) == + (IFCAP_RXCSUM | IFCAP_TXCSUM)) + if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0); + else + if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM); + + CGEM_UNLOCK(sc); + break; + default: + error = ether_ioctl(ifp, cmd, data); + break; + } + + return (error); +} + +/* MII bus support routines. + */ +static void +cgem_child_detached(device_t dev, device_t child) +{ + struct cgem_softc *sc = device_get_softc(dev); + + if (child == sc->miibus) + sc->miibus = NULL; +} + +static int +cgem_ifmedia_upd(if_t ifp) +{ + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); + struct mii_data *mii; + struct mii_softc *miisc; + int error = 0; + + mii = device_get_softc(sc->miibus); + CGEM_LOCK(sc); + if ((if_getflags(ifp) & IFF_UP) != 0) { + LIST_FOREACH(miisc, &mii->mii_phys, mii_list) + PHY_RESET(miisc); + error = mii_mediachg(mii); + } + CGEM_UNLOCK(sc); + + return (error); +} + +static void +cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) +{ + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); + struct mii_data *mii; + + mii = device_get_softc(sc->miibus); + CGEM_LOCK(sc); + mii_pollstat(mii); + ifmr->ifm_active = mii->mii_media_active; + ifmr->ifm_status = mii->mii_media_status; + CGEM_UNLOCK(sc); +} + +static int +cgem_miibus_readreg(device_t dev, int phy, int reg) +{ + struct cgem_softc *sc = device_get_softc(dev); + int tries, val; + + WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 | + CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_READ | + (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) | + (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT)); + + /* Wait for completion. */ + tries=0; + while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) { + DELAY(5); + if (++tries > 200) { + device_printf(dev, "phy read timeout: %d\n", reg); + return (-1); + } + } + + val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK; + + if (reg == MII_EXTSR) + /* + * MAC does not support half-duplex at gig speeds. + * Let mii(4) exclude the capability. + */ + val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX); + + return (val); +} + +static int +cgem_miibus_writereg(device_t dev, int phy, int reg, int data) +{ + struct cgem_softc *sc = device_get_softc(dev); + int tries; + + WR4(sc, CGEM_PHY_MAINT, CGEM_PHY_MAINT_CLAUSE_22 | + CGEM_PHY_MAINT_MUST_10 | CGEM_PHY_MAINT_OP_WRITE | + (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) | + (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) | + (data & CGEM_PHY_MAINT_DATA_MASK)); + + /* Wait for completion. 
*/ + tries = 0; + while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) { + DELAY(5); + if (++tries > 200) { + device_printf(dev, "phy write timeout: %d\n", reg); + return (-1); + } + } + + return (0); +} + +static void +cgem_miibus_statchg(device_t dev) +{ + struct cgem_softc *sc = device_get_softc(dev); + struct mii_data *mii = device_get_softc(sc->miibus); + + CGEM_ASSERT_LOCKED(sc); + + if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == + (IFM_ACTIVE | IFM_AVALID) && + sc->mii_media_active != mii->mii_media_active) + cgem_mediachange(sc, mii); +} + +static void +cgem_miibus_linkchg(device_t dev) +{ + struct cgem_softc *sc = device_get_softc(dev); + struct mii_data *mii = device_get_softc(sc->miibus); + + CGEM_ASSERT_LOCKED(sc); + + if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == + (IFM_ACTIVE | IFM_AVALID) && + sc->mii_media_active != mii->mii_media_active) + device_printf(dev, "link changed: %d\n"); + cgem_mediachange(sc, mii); +} + +/* + * Overridable weak symbol cgem_set_ref_clk(). This allows platforms to + * provide a function to set the cgem's reference clock. + */ +static void cgem_set_high_speed_config(struct cgem_softc *sc, int speed) +{ + uint32_t reg_value; + uint32_t set_speed = 0; + switch (speed) + { + case 25000: + set_speed = 2; + break; + case 10000: + set_speed = 4; + break; + case 5000: + set_speed = 3; + break; + case 2500: + set_speed = 2; + break; + case 1000: + set_speed = 1; + break; + default: + set_speed = 0; + break; + } + + /* GEM_HSMAC(0x0050) provide rate to the external */ + reg_value = RD4(sc, CGEM_HSMAC); + reg_value &= ~CGEM_HSMACSPEED_MASK; + reg_value |= (set_speed) &CGEM_HSMACSPEED_MASK; + WR4(sc, CGEM_HSMAC, reg_value); + + reg_value = RD4(sc, CGEM_HSMAC); + + device_printf(sc->dev, "CGEM_HSMAC = %d\n", reg_value); +} + +static int __used +cgem_default_set_ref_clk(int unit, int frequency) +{ + struct cgem_softc *sc = interface_sc; + +#if defined(RTEMS_BSD_MODULE_DEV_CGEM64) && (RTEMS_BSD_MODULE_DEV_CGEM64) + if (interface_type == MII_CONTYPE_SGMII) + { + if (interface_speed == 1000) { + WR4(sc, 0x1c04, 0x1); /*0x1c04*/ + WR4(sc, 0x1c08, 0x4); /*0x1c08*/ + WR4(sc, 0x1c0c, 0x8); /*0x1c0c*/ + WR4(sc, 0x1c10, 0x1); /*0x1c10*/ + WR4(sc, 0x1c20, 0x0); /*0x1c20*/ + WR4(sc, 0x1c24, 0x0); /*0x1c24*/ + WR4(sc, 0x1c28, 0x0); /*0x1c28*/ + WR4(sc, 0x1c2c, 0x1); /*0x1c2c*/ + WR4(sc, 0x1c30, 0x1); /*0x1c30*/ + WR4(sc, 0x1c34, 0x0); /*0x1c34*/ + WR4(sc, 0x1c70, 0x0); /*0x1c70*/ + WR4(sc, 0x1c74, 0x0); /*0x1c74*/ + WR4(sc, 0x1c78, 0x0); /*0x1c78*/ + WR4(sc, 0x1c7c, 0x0); /*0x1c7c*/ + } else if (interface_speed == 100 || interface_speed == 10) { + WR4(sc, 0x1c04, 0x1); /*0x1c04*/ + WR4(sc, 0x1c08, 0x4); /*0x1c08*/ + WR4(sc, 0x1c0c, 0x8); /*0x1c0c*/ + WR4(sc, 0x1c10, 0x1); /*0x1c10*/ + WR4(sc, 0x1c20, 0x0); /*0x1c20*/ + WR4(sc, 0x1c24, 0x0); /*0x1c24*/ + WR4(sc, 0x1c28, 0x1); /*0x1c28*/ + WR4(sc, 0x1c2c, 0x1); /*0x1c2c*/ + WR4(sc, 0x1c30, 0x1); /*0x1c30*/ + WR4(sc, 0x1c34, 0x0); /*0x1c34*/ + WR4(sc, 0x1c70, 0x1); /*0x1c70*/ + WR4(sc, 0x1c74, 0x0); /*0x1c74*/ + WR4(sc, 0x1c78, 0x0); /*0x1c78*/ + WR4(sc, 0x1c7c, 0x1); /*0x1c7c*/ + } + } +#endif + + cgem_set_high_speed_config(sc, interface_speed); + + return 0; +} +__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk); + +/* Call to set reference clock and network config bits according to media. */ +static void +cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii) +{ + int ref_clk_freq; + + CGEM_ASSERT_LOCKED(sc); + + /* Update hardware to reflect media. 
*/ + sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN | + CGEM_NET_CFG_FULL_DUPLEX); + + switch (IFM_SUBTYPE(mii->mii_media_active)) { + case IFM_1000_T: + sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 | + CGEM_NET_CFG_GIGE_EN); + ref_clk_freq = 125000000; + break; + case IFM_100_TX: + sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100; + ref_clk_freq = 25000000; + break; + default: + ref_clk_freq = 2500000; + } + + if ((mii->mii_media_active & IFM_FDX) != 0) + sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX; + + WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow); + +#ifndef __rtems__ + if (sc->ref_clk != NULL) { + CGEM_UNLOCK(sc); + if (clk_set_freq(sc->ref_clk, ref_clk_freq, 0)) + device_printf(sc->dev, "could not set ref clk to %d\n", + ref_clk_freq); + CGEM_LOCK(sc); + } +#else + /* Set the reference clock if necessary. */ + interface_sc = sc; + interface_speed = 1000; + interface_type = MII_CONTYPE_SGMII; + if (cgem_default_set_ref_clk(0, ref_clk_freq)) + device_printf(sc->dev, "cgem_mediachange: " + "could not set ref clk%d to %d.\n", + 0, ref_clk_freq); +#endif + + sc->mii_media_active = mii->mii_media_active; +} + +static void +cgem_add_sysctls(device_t dev) +{ + struct cgem_softc *sc = device_get_softc(dev); + struct sysctl_ctx_list *ctx; + struct sysctl_oid_list *child; + struct sysctl_oid *tree; + + ctx = device_get_sysctl_ctx(dev); + child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); + + SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW, + &sc->rxbufs, 0, "Number receive buffers to provide"); + + SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW, + &sc->rxhangwar, 0, "Enable receive hang work-around"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD, + &sc->rxoverruns, 0, "Receive overrun events"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD, + &sc->rxnobufs, 0, "Receive buf queue empty events"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD, + &sc->rxdmamapfails, 0, "Receive DMA map failures"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD, + &sc->txfull, 0, "Transmit ring full events"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD, + &sc->txdmamapfails, 0, "Transmit DMA map failures"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD, + &sc->txdefrags, 0, "Transmit m_defrag() calls"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD, + &sc->txdefragfails, 0, "Transmit m_defrag() failures"); + + tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", + CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "GEM statistics"); + child = SYSCTL_CHILDREN(tree); + + SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD, + &sc->stats.tx_bytes, "Total bytes transmitted"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD, + &sc->stats.tx_frames, 0, "Total frames transmitted"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD, + &sc->stats.tx_frames_bcast, 0, + "Number broadcast frames transmitted"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD, + &sc->stats.tx_frames_multi, 0, + "Number multicast frames transmitted"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause", + CTLFLAG_RD, &sc->stats.tx_frames_pause, 0, + "Number pause frames transmitted"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD, + &sc->stats.tx_frames_64b, 0, + "Number frames transmitted of size 64 bytes or less"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, 
"tx_frames_65to127b", CTLFLAG_RD, + &sc->stats.tx_frames_65to127b, 0, + "Number frames transmitted of size 65-127 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b", + CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0, + "Number frames transmitted of size 128-255 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b", + CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0, + "Number frames transmitted of size 256-511 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b", + CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0, + "Number frames transmitted of size 512-1023 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b", + CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0, + "Number frames transmitted of size 1024-1536 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs", + CTLFLAG_RD, &sc->stats.tx_under_runs, 0, + "Number transmit under-run events"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn", + CTLFLAG_RD, &sc->stats.tx_single_collisn, 0, + "Number single-collision transmit frames"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn", + CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0, + "Number multi-collision transmit frames"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn", + CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0, + "Number excessive collision transmit frames"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn", + CTLFLAG_RD, &sc->stats.tx_late_collisn, 0, + "Number late-collision transmit frames"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames", + CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0, + "Number deferred transmit frames"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs", + CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0, + "Number carrier sense errors on transmit"); + + SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD, + &sc->stats.rx_bytes, "Total bytes received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD, + &sc->stats.rx_frames, 0, "Total frames received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast", + CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0, + "Number broadcast frames received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi", + CTLFLAG_RD, &sc->stats.rx_frames_multi, 0, + "Number multicast frames received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause", + CTLFLAG_RD, &sc->stats.rx_frames_pause, 0, + "Number pause frames received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b", + CTLFLAG_RD, &sc->stats.rx_frames_64b, 0, + "Number frames received of size 64 bytes or less"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b", + CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0, + "Number frames received of size 65-127 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b", + CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0, + "Number frames received of size 128-255 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b", + CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0, + "Number frames received of size 256-511 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b", + CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0, + "Number frames received of size 512-1023 bytes"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b", + CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0, + "Number frames received of size 1024-1536 bytes"); + + SYSCTL_ADD_UINT(ctx, 
child, OID_AUTO, "rx_frames_undersize", + CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0, + "Number undersize frames received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize", + CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0, + "Number oversize frames received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber", + CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0, + "Number jabber frames received"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs", + CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0, + "Number frames received with FCS errors"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs", + CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0, + "Number frames received with length errors"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs", + CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0, + "Number receive symbol errors"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs", + CTLFLAG_RD, &sc->stats.rx_align_errs, 0, + "Number receive alignment errors"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs", + CTLFLAG_RD, &sc->stats.rx_resource_errs, 0, + "Number frames received when no rx buffer available"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs", + CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0, + "Number frames received but not copied due to receive overrun"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs", + CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0, + "Number frames received with IP header checksum errors"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs", + CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0, + "Number frames received with TCP checksum errors"); + + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs", + CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0, + "Number frames received with UDP checksum errors"); +} + +static int +cgem_probe(device_t dev) +{ + +#ifndef __rtems__ + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) + return (ENXIO); +#endif /* __rtems__ */ + + device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface"); + return (0); +} + +static int +cgem_attach(device_t dev) +{ + struct cgem_softc *sc = device_get_softc(dev); + if_t ifp = NULL; +#ifndef __rtems__ + phandle_t node; + pcell_t cell; +#endif /* __rtems__ */ + int rid, err; + u_char eaddr[ETHER_ADDR_LEN]; + int hwquirks; + + sc->dev = dev; + CGEM_LOCK_INIT(sc); + + /* Key off of compatible string and set hardware-specific options. 
*/ +#ifndef __rtems__ + hwquirks = ofw_bus_search_compatible(dev, compat_data)->ocd_data; + if ((hwquirks & HWQUIRK_NEEDNULLQS) != 0) + sc->neednullqs = 1; + if ((hwquirks & HWQUIRK_RXHANGWAR) != 0) + sc->rxhangwar = 1; + if ((hwquirks & HWQUIRK_TXCLK) != 0) { + if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->ref_clk) != 0) + device_printf(dev, + "could not retrieve reference clock.\n"); + else if (clk_enable(sc->ref_clk) != 0) + device_printf(dev, "could not enable clock.\n"); + } + if ((hwquirks & HWQUIRK_PCLK) != 0) { + if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->ref_clk) != 0) + device_printf(dev, + "could not retrieve reference clock.\n"); + else if (clk_enable(sc->ref_clk) != 0) + device_printf(dev, "could not enable clock.\n"); + } +#else + hwquirks = 0; + sc->neednullqs = 1; + sc->rxhangwar = 0; +#endif + +#ifndef __rtems__ + node = ofw_bus_get_node(dev); + sc->phy_contype = mii_fdt_get_contype(node); +#else + sc->phy_contype = MII_CONTYPE_SGMII; +#endif + + /* Get memory resource. */ + rid = 0; + sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE); + if (sc->mem_res == NULL) { + device_printf(dev, "could not allocate memory resources.\n"); + return (ENOMEM); + } + + /* Get IRQ resource. */ + rid = 0; + sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_ACTIVE); + if (sc->irq_res == NULL) { + device_printf(dev, "could not allocate interrupt resource.\n"); + cgem_detach(dev); + return (ENOMEM); + } + + /* Set up ifnet structure. */ + ifp = sc->ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(dev, "could not allocate ifnet structure\n"); + cgem_detach(dev); + return (ENOMEM); + } + if_setsoftc(ifp, sc); + if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev)); + if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); + if_setinitfn(ifp, cgem_init); + if_setioctlfn(ifp, cgem_ioctl); + if_setstartfn(ifp, cgem_start); + // if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | + // IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0); + if_setsendqlen(ifp, CGEM_NUM_TX_DESCS); + if_setsendqready(ifp); + + /* Disable hardware checksumming by default. */ + if_sethwassist(ifp, 0); + if_setcapenable(ifp, if_getcapabilities(ifp) & + ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM)); + + sc->if_old_flags = if_getflags(ifp); + sc->rxbufs = DEFAULT_NUM_RX_BUFS; + + /* Reset hardware. */ + CGEM_LOCK(sc); + cgem_reset(sc); + CGEM_UNLOCK(sc); + + /* Attach phy to mii bus. */ + err = mii_attach(dev, &sc->miibus, ifp, + cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK, + MII_PHY_ANY, MII_OFFSET_ANY, 0); + if (err) + device_printf(dev, "warning: attaching PHYs failed\n"); + + /* Set up TX and RX descriptor area. */ + err = cgem_setup_descs(sc); + if (err) { + device_printf(dev, "could not set up dma mem for descs.\n"); + cgem_detach(dev); + return (ENOMEM); + } + + /* Get a MAC address. */ + cgem_get_mac(sc, eaddr); + + /* Start ticks. 
*/ + callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); + + ether_ifattach(ifp, eaddr); + + err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE | + INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand); + if (err) { + device_printf(dev, "could not set interrupt handler.\n"); + ether_ifdetach(ifp); + cgem_detach(dev); + return (err); + } + + cgem_add_sysctls(dev); + + return (0); +} + +static int +cgem_detach(device_t dev) +{ + struct cgem_softc *sc = device_get_softc(dev); +#ifndef __rtems__ + int i; +#endif /* __rtems__ */ + + if (sc == NULL) + return (ENODEV); + + if (device_is_attached(dev)) { + CGEM_LOCK(sc); + cgem_stop(sc); + CGEM_UNLOCK(sc); + callout_drain(&sc->tick_ch); + if_setflagbits(sc->ifp, 0, IFF_UP); + ether_ifdetach(sc->ifp); + } + + if (sc->miibus != NULL) { + device_delete_child(dev, sc->miibus); + sc->miibus = NULL; + } + + /* Release resources. */ + if (sc->mem_res != NULL) { + bus_release_resource(dev, SYS_RES_MEMORY, + rman_get_rid(sc->mem_res), sc->mem_res); + sc->mem_res = NULL; + } + if (sc->irq_res != NULL) { + if (sc->intrhand) + bus_teardown_intr(dev, sc->irq_res, sc->intrhand); + bus_release_resource(dev, SYS_RES_IRQ, + rman_get_rid(sc->irq_res), sc->irq_res); + sc->irq_res = NULL; + } + + /* Release DMA resources. */ + if (sc->rxring != NULL) { + if (sc->rxring_physaddr != 0) { + bus_dmamap_unload(sc->desc_dma_tag, + sc->rxring_dma_map); + sc->rxring_physaddr = 0; + sc->txring_physaddr = 0; + sc->null_qs_physaddr = 0; + } + bus_dmamem_free(sc->desc_dma_tag, sc->rxring, + sc->rxring_dma_map); + sc->rxring = NULL; + sc->txring = NULL; + sc->null_qs = NULL; + +#ifndef __rtems__ + for (i = 0; i < CGEM_NUM_RX_DESCS; i++) + if (sc->rxring_m_dmamap[i] != NULL) { + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[i]); + sc->rxring_m_dmamap[i] = NULL; + } +#endif /* __rtems__ */ + } + if (sc->txring != NULL) { + if (sc->txring_physaddr != 0) { + bus_dmamap_unload(sc->desc_dma_tag, + sc->txring_dma_map); + sc->txring_physaddr = 0; + } + bus_dmamem_free(sc->desc_dma_tag, sc->txring, + sc->txring_dma_map); + sc->txring = NULL; +#ifndef __rtems__ + for (i = 0; i < CGEM_NUM_TX_DESCS; i++) + if (sc->txring_m_dmamap[i] != NULL) { + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[i]); + sc->txring_m_dmamap[i] = NULL; + } +#endif /* __rtems__ */ + } + if (sc->desc_dma_tag != NULL) { + bus_dma_tag_destroy(sc->desc_dma_tag); + sc->desc_dma_tag = NULL; + } + if (sc->mbuf_dma_tag != NULL) { + bus_dma_tag_destroy(sc->mbuf_dma_tag); + sc->mbuf_dma_tag = NULL; + } + +#ifndef __rtems__ + if (sc->ref_clk != NULL) { + clk_release(sc->ref_clk); + sc->ref_clk = NULL; + } +#endif + + bus_generic_detach(dev); + + CGEM_LOCK_DESTROY(sc); + + return (0); +} + +static device_method_t cgem_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, cgem_probe), + DEVMETHOD(device_attach, cgem_attach), + DEVMETHOD(device_detach, cgem_detach), + + /* Bus interface */ + DEVMETHOD(bus_child_detached, cgem_child_detached), + + /* MII interface */ + DEVMETHOD(miibus_readreg, cgem_miibus_readreg), + DEVMETHOD(miibus_writereg, cgem_miibus_writereg), + DEVMETHOD(miibus_statchg, cgem_miibus_statchg), + DEVMETHOD(miibus_linkchg, cgem_miibus_linkchg), + + DEVMETHOD_END +}; + +static driver_t cgem_driver = { + "cgem", + cgem_methods, + sizeof(struct cgem_softc), +}; + +#ifndef __rtems__ +DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL); +#else /* __rtems__ */ +DRIVER_MODULE(cgem, nexus, cgem_driver, cgem_devclass, NULL, NULL); +#endif /* __rtems__ */ 
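+/*
+ * Note for RTEMS: with the nexus variant of DRIVER_MODULE() above, the
+ * controller is not discovered via FDT; the BSP's nexus device table
+ * (rtemsbsd/include/bsp/nexus-devices.h) is expected to supply the MMIO
+ * base address and interrupt vector and to reference the driver, e.g.
+ * with a line along the lines of
+ *
+ *   SYSINIT_DRIVER_REFERENCE(cgem, nexus);
+ *
+ * where the actual resource values are board-specific.
+ */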
+DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
+MODULE_DEPEND(cgem, miibus, 1, 1, 1);
+MODULE_DEPEND(cgem, ether, 1, 1, 1);
diff --git a/freebsd/sys/dev/cadence64/if_cgem_hw.h b/freebsd/sys/dev/cadence64/if_cgem_hw.h
new file mode 100644
index 00000000..ac9fd3ec
--- /dev/null
+++ b/freebsd/sys/dev/cadence64/if_cgem_hw.h
@@ -0,0 +1,473 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2012-2013 Thomas Skibo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Hardware and register defines for Cadence GEM Gigabit Ethernet
+ * controller such as the one used in Zynq-7000 SoC.
+ *
+ * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
+ * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16
+ * and register definitions are in appendix B.18.
+ *
+ * Additional Reference: Zynq UltraScale+ Device Register Reference
+ * (UG1087 v1.7 Feb 8, 2019):
+ * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers.html
+ */
+
+#ifndef _IF_CGEM_HW_H_
+#define _IF_CGEM_HW_H_
+
+/* Cadence GEM hardware register definitions. 
*/ +#define CGEM_NET_CTRL 0x000 /* Network Control */ +#define CGEM_NET_CTRL_FLUSH_DPRAM_PKT (1 << 18) +#define CGEM_NET_CTRL_TX_PFC_PRI_PAUSE_FRAME (1 << 17) +#define CGEM_NET_CTRL_EN_PFC_PRI_PAUSE_RX (1 << 16) +#define CGEM_NET_CTRL_STORE_RX_TSTAMP (1 << 15) +#define CGEM_NET_CTRL_TX_ZEROQ_PAUSE_FRAME (1 << 12) +#define CGEM_NET_CTRL_TX_PAUSE_FRAME (1 << 11) +#define CGEM_NET_CTRL_TX_HALT (1 << 10) +#define CGEM_NET_CTRL_START_TX (1 << 9) +#define CGEM_NET_CTRL_BACK_PRESSURE (1 << 8) +#define CGEM_NET_CTRL_WREN_STAT_REGS (1 << 7) +#define CGEM_NET_CTRL_INCR_STAT_REGS (1 << 6) +#define CGEM_NET_CTRL_CLR_STAT_REGS (1 << 5) +#define CGEM_NET_CTRL_MGMT_PORT_EN (1 << 4) +#define CGEM_NET_CTRL_TX_EN (1 << 3) +#define CGEM_NET_CTRL_RX_EN (1 << 2) +#define CGEM_NET_CTRL_LOOP_LOCAL (1 << 1) + +#define CGEM_NET_CFG 0x004 /* Network Configuration */ +#define CGEM_NET_CFG_UNIDIR_EN (1U << 31) +#define CGEM_NET_CFG_IGNORE_IPG_RX_ER (1 << 30) +#define CGEM_NET_CFG_RX_BAD_PREAMBLE (1 << 29) +#define CGEM_NET_CFG_IPG_STRETCH_EN (1 << 28) +#define CGEM_NET_CFG_SGMII_EN (1 << 27) +#define CGEM_NET_CFG_IGNORE_RX_FCS (1 << 26) +#define CGEM_NET_CFG_RX_HD_WHILE_TX (1 << 25) +#define CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN (1 << 24) +#define CGEM_NET_CFG_DIS_CP_PAUSE_FRAME (1 << 23) +#define CGEM_NET_CFG_DBUS_WIDTH_32 (0 << 21) +#define CGEM_NET_CFG_DBUS_WIDTH_64 (1 << 21) +#define CGEM_NET_CFG_DBUS_WIDTH_128 (2 << 21) +#define CGEM_NET_CFG_DBUS_WIDTH_MASK (3 << 21) +#define CGEM_NET_CFG_MDC_CLK_DIV_8 (0 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_16 (1 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_32 (2 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_48 (3 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_64 (4 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_96 (5 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_128 (6 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_224 (7 << 18) +#define CGEM_NET_CFG_MDC_CLK_DIV_MASK (7 << 18) +#define CGEM_NET_CFG_FCS_REMOVE (1 << 17) +#define CGEM_NET_CFG_LEN_ERR_FRAME_DISC (1 << 16) +#define CGEM_NET_CFG_RX_BUF_OFFSET_SHFT 14 +#define CGEM_NET_CFG_RX_BUF_OFFSET_MASK (3 << 14) +#define CGEM_NET_CFG_RX_BUF_OFFSET(n) ((n) << 14) +#define CGEM_NET_CFG_PAUSE_EN (1 << 13) +#define CGEM_NET_CFG_RETRY_TEST (1 << 12) +#define CGEM_NET_CFG_PCS_SEL (1 << 11) +#define CGEM_NET_CFG_GIGE_EN (1 << 10) +#define CGEM_NET_CFG_EXT_ADDR_MATCH_EN (1 << 9) +#define CGEM_NET_CFG_1536RXEN (1 << 8) +#define CGEM_NET_CFG_UNI_HASH_EN (1 << 7) +#define CGEM_NET_CFG_MULTI_HASH_EN (1 << 6) +#define CGEM_NET_CFG_NO_BCAST (1 << 5) +#define CGEM_NET_CFG_COPY_ALL (1 << 4) +#define CGEM_NET_CFG_DISC_NON_VLAN (1 << 2) +#define CGEM_NET_CFG_FULL_DUPLEX (1 << 1) +#define CGEM_NET_CFG_SPEED100 (1 << 0) + +#define CGEM_NET_STAT 0x008 /* Network Status */ +#define CGEM_NET_STAT_PFC_PRI_PAUSE_NEG (1 << 6) +#define CGEM_NET_STAT_PCS_AUTONEG_PAUSE_TX_RES (1 << 5) +#define CGEM_NET_STAT_PCS_AUTONEG_PAUSE_RX_RES (1 << 4) +#define CGEM_NET_STAT_PCS_AUTONEG_DUP_RES (1 << 3) +#define CGEM_NET_STAT_PHY_MGMT_IDLE (1 << 2) +#define CGEM_NET_STAT_MDIO_IN_PIN_STATUS (1 << 1) +#define CGEM_NET_STAT_PCS_LINK_STATE (1 << 0) + +#define CGEM_USER_IO 0x00C /* User I/O */ + +#define CGEM_DMA_CFG 0x010 /* DMA Config */ +#define CGEM_DMA_CFG_ADDR_BUS_64 (1 << 30) +#define CGEM_DMA_CFG_DISC_WHEN_NO_AHB (1 << 24) +#define CGEM_DMA_CFG_RX_BUF_SIZE_SHIFT 16 +#define CGEM_DMA_CFG_RX_BUF_SIZE_MASK (0xff << 16) +#define CGEM_DMA_CFG_RX_BUF_SIZE(sz) ((((sz) + 63) / 64) << 16) +#define CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN (1 << 11) +#define CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL (1 << 10) +#define 
CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_1K (0 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_2K (1 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_4K (2 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K (3 << 8)
+#define CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_MASK (3 << 8)
+#define CGEM_DMA_CFG_AHB_ENDIAN_SWAP_PKT_EN (1 << 7)
+#define CGEM_DMA_CFG_AHB_ENDIAN_SWAP_MGMT_EN (1 << 6)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_1 (1 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_4 (4 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_8 (8 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 (16 << 0)
+#define CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_MASK (0x1f << 0)
+
+#define CGEM_TX_STAT 0x014 /* Transmit Status */
+#define CGEM_TX_STAT_HRESP_NOT_OK (1 << 8)
+#define CGEM_TX_STAT_LATE_COLL (1 << 7)
+#define CGEM_TX_STAT_UNDERRUN (1 << 6)
+#define CGEM_TX_STAT_COMPLETE (1 << 5)
+#define CGEM_TX_STAT_CORRUPT_AHB_ERR (1 << 4)
+#define CGEM_TX_STAT_GO (1 << 3)
+#define CGEM_TX_STAT_RETRY_LIMIT_EXC (1 << 2)
+#define CGEM_TX_STAT_COLLISION (1 << 1)
+#define CGEM_TX_STAT_USED_BIT_READ (1 << 0)
+#define CGEM_TX_STAT_ALL 0x1ff
+
+#define CGEM_RX_QBAR 0x018 /* Receive Buf Q Base Addr */
+#define CGEM_TX_QBAR 0x01C /* Transmit Buf Q Base Addr */
+
+#define CGEM_RX_STAT 0x020 /* Receive Status */
+#define CGEM_RX_STAT_HRESP_NOT_OK (1 << 3)
+#define CGEM_RX_STAT_OVERRUN (1 << 2)
+#define CGEM_RX_STAT_FRAME_RECD (1 << 1)
+#define CGEM_RX_STAT_BUF_NOT_AVAIL (1 << 0)
+#define CGEM_RX_STAT_ALL 0xf
+
+#define CGEM_INTR_STAT 0x024 /* Interrupt Status */
+#define CGEM_INTR_EN 0x028 /* Interrupt Enable */
+#define CGEM_INTR_DIS 0x02C /* Interrupt Disable */
+#define CGEM_INTR_MASK 0x030 /* Interrupt Mask */
+#define CGEM_INTR_TSU_SEC_INCR (1 << 26)
+#define CGEM_INTR_PDELAY_RESP_TX (1 << 25)
+#define CGEM_INTR_PDELAY_REQ_TX (1 << 24)
+#define CGEM_INTR_PDELAY_RESP_RX (1 << 23)
+#define CGEM_INTR_PDELAY_REQ_RX (1 << 22)
+#define CGEM_INTR_SYNX_TX (1 << 21)
+#define CGEM_INTR_DELAY_REQ_TX (1 << 20)
+#define CGEM_INTR_SYNC_RX (1 << 19)
+#define CGEM_INTR_DELAY_REQ_RX (1 << 18)
+#define CGEM_INTR_PARTNER_PG_RX (1 << 17)
+#define CGEM_INTR_AUTONEG_COMPL (1 << 16)
+#define CGEM_INTR_EXT_INTR (1 << 15)
+#define CGEM_INTR_PAUSE_TX (1 << 14)
+#define CGEM_INTR_PAUSE_ZERO (1 << 13)
+#define CGEM_INTR_PAUSE_NONZEROQ_RX (1 << 12)
+#define CGEM_INTR_HRESP_NOT_OK (1 << 11)
+#define CGEM_INTR_RX_OVERRUN (1 << 10)
+#define CGEM_INTR_LINK_CHNG (1 << 9)
+#define CGEM_INTR_TX_COMPLETE (1 << 7)
+#define CGEM_INTR_TX_CORRUPT_AHB_ERR (1 << 6)
+#define CGEM_INTR_RETRY_EX_LATE_COLLISION (1 << 5)
+#define CGEM_INTR_TX_USED_READ (1 << 3)
+#define CGEM_INTR_RX_USED_READ (1 << 2)
+#define CGEM_INTR_RX_COMPLETE (1 << 1)
+#define CGEM_INTR_MGMT_SENT (1 << 0)
+#define CGEM_INTR_ALL 0x7FFFEFF
+
+#define CGEM_PHY_MAINT 0x034 /* PHY Maintenance */
+#define CGEM_PHY_MAINT_CLAUSE_22 (1 << 30)
+#define CGEM_PHY_MAINT_OP_SHIFT 28
+#define CGEM_PHY_MAINT_OP_MASK (3 << 28)
+#define CGEM_PHY_MAINT_OP_READ (2 << 28)
+#define CGEM_PHY_MAINT_OP_WRITE (1 << 28)
+#define CGEM_PHY_MAINT_PHY_ADDR_SHIFT 23
+#define CGEM_PHY_MAINT_PHY_ADDR_MASK (0x1f << 23)
+#define CGEM_PHY_MAINT_REG_ADDR_SHIFT 18
+#define CGEM_PHY_MAINT_REG_ADDR_MASK (0x1f << 18)
+#define CGEM_PHY_MAINT_MUST_10 (2 << 16)
+#define CGEM_PHY_MAINT_DATA_MASK 0xffff
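+
+/*
+ * A minimal sketch of how these fields combine, mirroring what the
+ * driver's miibus read routine does: a Clause-22 read of register
+ * reg on PHY address phy writes
+ *
+ *   CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_OP_READ |
+ *   CGEM_PHY_MAINT_MUST_10 |
+ *   ((phy) << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
+ *   ((reg) << CGEM_PHY_MAINT_REG_ADDR_SHIFT)
+ *
+ * to CGEM_PHY_MAINT, waits for CGEM_NET_STAT_PHY_MGMT_IDLE in
+ * CGEM_NET_STAT, and takes the result from the low 16 bits
+ * (CGEM_PHY_MAINT_DATA_MASK).
+ */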
+
+#define CGEM_RX_PAUSEQ 0x038 /* Received Pause Quantum */
+#define CGEM_TX_PAUSEQ 0x03C /* Transmit Pause Quantum */
+
+#define CGEM_HSMAC 0x050
+#define CGEM_HSMACSPEED_MASK 0x7
+
+#define CGEM_HASH_BOT 0x080 /* Hash Reg Bottom [31:0] */
+#define CGEM_HASH_TOP 0x084 /* Hash Reg Top [63:32] */
+#define CGEM_SPEC_ADDR_LOW(n) (0x088 + (n) * 8)
+#define CGEM_SPEC_ADDR_HI(n) (0x08C + (n) * 8)
+
+#define CGEM_TYPE_ID_MATCH1 0x0A8 /* Type ID Match 1 */
+#define CGEM_TYPE_ID_MATCH_COPY_EN (1U << 31)
+#define CGEM_TYPE_ID_MATCH2 0x0AC /* Type ID Match 2 */
+#define CGEM_TYPE_ID_MATCH3 0x0B0 /* Type ID Match 3 */
+#define CGEM_TYPE_ID_MATCH4 0x0B4 /* Type ID Match 4 */
+
+#define CGEM_WAKE_ON_LAN 0x0B8 /* Wake on LAN Register */
+#define CGEM_WOL_MULTI_HASH_EN (1 << 19)
+#define CGEM_WOL_SPEC_ADDR1_EN (1 << 18)
+#define CGEM_WOL_ARP_REQ_EN (1 << 17)
+#define CGEM_WOL_MAGIC_PKT_EN (1 << 16)
+#define CGEM_WOL_ARP_REQ_IP_ADDR_MASK 0xffff
+
+#define CGEM_IPG_STRETCH 0x0BC /* IPG Stretch Register */
+
+#define CGEM_STACKED_VLAN 0x0C0 /* Stacked VLAN Register */
+#define CGEM_STACKED_VLAN_EN (1U << 31)
+
+#define CGEM_TX_PFC_PAUSE 0x0C4 /* Transmit PFC Pause Reg */
+#define CGEM_TX_PFC_PAUSEQ_SEL_SHIFT 8
+#define CGEM_TX_PFC_PAUSEQ_SEL_MASK (0xff << 8)
+#define CGEM_TX_PFC_PAUSE_PRI_EN_VEC_VAL_MASK 0xff
+
+#define CGEM_SPEC_ADDR1_MASK_BOT 0x0C8 /* Specific Addr Mask1 [31:0]*/
+#define CGEM_SPEC_ADDR1_MASK_TOP 0x0CC /* Specific Addr Mask1[47:32]*/
+#define CGEM_MODULE_ID 0x0FC /* Module ID */
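+
+/*
+ * Statistics registers. The octet counters below are 48 bits wide,
+ * split across a BOT/TOP register pair; a reader is expected to
+ * assemble them along the lines of
+ *
+ *   (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32 |
+ *       RD4(sc, CGEM_OCTETS_TX_BOT)
+ *
+ * where RD4 stands for the driver's 32-bit register read accessor.
+ */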
+#define CGEM_OCTETS_TX_BOT 0x100 /* Octets xmitted [31:0] */
+#define CGEM_OCTETS_TX_TOP 0x104 /* Octets xmitted [47:32] */
+#define CGEM_FRAMES_TX 0x108 /* Frames xmitted */
+#define CGEM_BCAST_FRAMES_TX 0x10C /* Broadcast Frames xmitted */
+#define CGEM_MULTI_FRAMES_TX 0x110 /* Multicast Frames xmitted */
+#define CGEM_PAUSE_FRAMES_TX 0x114 /* Pause Frames xmitted */
+#define CGEM_FRAMES_64B_TX 0x118 /* 64-Byte Frames xmitted */
+#define CGEM_FRAMES_65_127B_TX 0x11C /* 65-127 Byte Frames xmitted*/
+#define CGEM_FRAMES_128_255B_TX 0x120 /* 128-255 Byte Frames xmit */
+#define CGEM_FRAMES_256_511B_TX 0x124 /* 256-511 Byte Frames xmit */
+#define CGEM_FRAMES_512_1023B_TX 0x128 /* 512-1023 Byte frames xmit */
+#define CGEM_FRAMES_1024_1518B_TX 0x12C /* 1024-1518 Byte frames xmit*/
+#define CGEM_TX_UNDERRUNS 0x134 /* Transmit Under-runs */
+#define CGEM_SINGLE_COLL_FRAMES 0x138 /* Single-Collision Frames */
+#define CGEM_MULTI_COLL_FRAMES 0x13C /* Multi-Collision Frames */
+#define CGEM_EXCESSIVE_COLL_FRAMES 0x140 /* Excessive Collision Frames*/
+#define CGEM_LATE_COLL 0x144 /* Late Collisions */
+#define CGEM_DEFERRED_TX_FRAMES 0x148 /* Deferred Transmit Frames */
+#define CGEM_CARRIER_SENSE_ERRS 0x14C /* Carrier Sense Errors */
+#define CGEM_OCTETS_RX_BOT 0x150 /* Octets Received [31:0] */
+#define CGEM_OCTETS_RX_TOP 0x154 /* Octets Received [47:32] */
+#define CGEM_FRAMES_RX 0x158 /* Frames Received */
+#define CGEM_BCAST_FRAMES_RX 0x15C /* Broadcast Frames Received */
+#define CGEM_MULTI_FRAMES_RX 0x160 /* Multicast Frames Received */
+#define CGEM_PAUSE_FRAMES_RX 0x164 /* Pause Frames Received */
+#define CGEM_FRAMES_64B_RX 0x168 /* 64-Byte Frames Received */
+#define CGEM_FRAMES_65_127B_RX 0x16C /* 65-127 Byte Frames Rx'd */
+#define CGEM_FRAMES_128_255B_RX 0x170 /* 128-255 Byte Frames Rx'd */
+#define CGEM_FRAMES_256_511B_RX 0x174 /* 256-511 Byte Frames Rx'd */
+#define CGEM_FRAMES_512_1023B_RX 0x178 /* 512-1023 Byte Frames Rx'd */
+#define CGEM_FRAMES_1024_1518B_RX 0x17C /* 1024-1518 Byte Frames Rx'd*/
+#define CGEM_UNDERSZ_RX 0x184 /* Undersize Frames Rx'd */
+#define CGEM_OVERSZ_RX 0x188 /* Oversize Frames Rx'd */
+#define CGEM_JABBERS_RX 0x18C /* Jabbers received */
+#define CGEM_FCS_ERRS 0x190 /* Frame Check Sequence Errs */
+#define CGEM_LENGTH_FIELD_ERRS 0x194 /* Length Field Frame Errs */
+#define CGEM_RX_SYMBOL_ERRS 0x198 /* Receive Symbol Errs */
+#define CGEM_ALIGN_ERRS 0x19C /* Alignment Errors */
+#define CGEM_RX_RESOURCE_ERRS 0x1A0 /* Receive Resource Errors */
+#define CGEM_RX_OVERRUN_ERRS 0x1A4 /* Receive Overrun Errors */
+#define CGEM_IP_HDR_CKSUM_ERRS 0x1A8 /* IP Hdr Checksum Errors */
+#define CGEM_TCP_CKSUM_ERRS 0x1AC /* TCP Checksum Errors */
+#define CGEM_UDP_CKSUM_ERRS 0x1B0 /* UDP Checksum Errors */
+#define CGEM_TIMER_STROBE_S 0x1C8 /* 1588 timer sync strobe s */
+#define CGEM_TIMER_STROBE_NS 0x1CC /* timer sync strobe ns */
+#define CGEM_TIMER_S 0x1D0 /* 1588 timer seconds */
+#define CGEM_TIMER_NS 0x1D4 /* 1588 timer ns */
+#define CGEM_ADJUST 0x1D8 /* 1588 timer adjust */
+#define CGEM_INCR 0x1DC /* 1588 timer increment */
+#define CGEM_PTP_TX_S 0x1E0 /* PTP Event Frame xmit secs */
+#define CGEM_PTP_TX_NS 0x1E4 /* PTP Event Frame xmit ns */
+#define CGEM_PTP_RX_S 0x1E8 /* PTP Event Frame rcv'd s */
+#define CGEM_PTP_RX_NS 0x1EC /* PTP Event Frame rcv'd ns */
+#define CGEM_PTP_PEER_TX_S 0x1F0 /* PTP Peer Event xmit s */
+#define CGEM_PTP_PEER_TX_NS 0x1F4 /* PTP Peer Event xmit ns */
+#define CGEM_PTP_PEER_RX_S 0x1F8 /* PTP Peer Event rcv'd s */
+#define CGEM_PTP_PEER_RX_NS 0x1FC /* PTP Peer Event rcv'd ns */
+
+#define CGEM_DESIGN_CFG1 0x280 /* Design Configuration 1 */
+#define CGEM_DESIGN_CFG1_AXI_CACHE_WIDTH_MASK (0xfU << 28)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK (7 << 25)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_32 (1 << 25)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64 (2 << 25)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128 (4 << 25)
+#define CGEM_DESIGN_CFG1_IRQ_READ_CLR (1 << 23)
+#define CGEM_DESIGN_CFG1_NO_SNAPSHOT (1 << 22)
+#define CGEM_DESIGN_CFG1_NO_STATS (1 << 21)
+#define CGEM_DESIGN_CFG1_NO_SCAN_PINS (1 << 20)
+#define CGEM_DESIGN_CFG1_USER_IN_WIDTH_MASK (0x1f << 15)
+#define CGEM_DESIGN_CFG1_USER_OUT_WIDTH_MASK (0x1f << 10)
+#define CGEM_DESIGN_CFG1_USER_IO (1 << 9)
+#define CGEM_DESIGN_CFG1_APB_REV2 (1 << 8)
+#define CGEM_DESIGN_CFG1_APB_REV1 (1 << 7)
+#define CGEM_DESIGN_CFG1_EXT_FIFO_INTERFACE (1 << 6)
+#define CGEM_DESIGN_CFG1_NO_INT_LOOPBACK (1 << 5)
+#define CGEM_DESIGN_CFG1_INT_LOOPBACK (1 << 4)
+#define CGEM_DESIGN_CFG1_TDC_50 (1 << 3)
+#define CGEM_DESIGN_CFG1_RDC_50 (1 << 2)
+#define CGEM_DESIGN_CFG1_SERDES (1 << 1)
+#define CGEM_DESIGN_CFG1_NO_PCS (1 << 0)
+
+#define CGEM_DESIGN_CFG2 0x284 /* Design Configuration 2 */
+#define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_SHIFT 26
+#define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_MASK (0xf << 26)
+#define CGEM_DESIGN_CFG2_RX_PBUF_ADDR_SHIFT 22
+#define CGEM_DESIGN_CFG2_RX_PBUF_ADDR_MASK (0xf << 22)
+#define CGEM_DESIGN_CFG2_TX_PKT_BUF (1 << 21)
+#define CGEM_DESIGN_CFG2_RX_PKT_BUF (1 << 20)
+#define CGEM_DESIGN_CFG2_HPROT_VAL_SHIFT 16
+#define CGEM_DESIGN_CFG2_HPROT_VAL_MASK (0xf << 16)
+#define CGEM_DESIGN_CFG2_JUMBO_MAX_LEN_MASK 0xffff
+
+#define CGEM_DESIGN_CFG3 0x288 /* Design Configuration 3 */
+#define CGEM_DESIGN_CFG3_RX_BASE2_FIFO_SZ_MASK (0xffffU << 16)
+#define CGEM_DESIGN_CFG3_RX_BASE2_FIFO_SZ_SHIFT 16
+#define CGEM_DESIGN_CFG3_RX_FIFO_SIZE_MASK 0xffff
+
+#define CGEM_DESIGN_CFG4 0x28C /* Design Configuration 4 */
+#define CGEM_DESIGN_CFG4_TX_BASE2_FIFO_SZ_SHIFT 16
+#define CGEM_DESIGN_CFG4_TX_BASE2_FIFO_SZ_MASK (0xffffU << 16)
+#define CGEM_DESIGN_CFG4_TX_FIFO_SIZE_MASK 0xffff
+
+#define CGEM_DESIGN_CFG5 0x290 /* Design Configuration 5 */
+#define CGEM_DESIGN_CFG5_TSU_CLK (1 << 28)
+#define CGEM_DESIGN_CFG5_RX_BUF_LEN_DEF_SHIFT 20
+#define 
CGEM_DESIGN_CFG5_RX_BUF_LEN_DEF_MASK (0xff << 20) +#define CGEM_DESIGN_CFG5_TX_PBUF_SIZE_DEF (1 << 19) +#define CGEM_DESIGN_CFG5_RX_PBUF_SIZE_DEF_SHIFT 17 +#define CGEM_DESIGN_CFG5_RX_PBUF_SIZE_DEF_MASK (3 << 17) +#define CGEM_DESIGN_CFG5_ENDIAN_SWAP_DEF_SHIFT 15 +#define CGEM_DESIGN_CFG5_ENDIAN_SWAP_DEF_MASK (3 << 15) +#define CGEM_DESIGN_CFG5_MDC_CLOCK_DIV_SHIFT 12 +#define CGEM_DESIGN_CFG5_MDC_CLOCK_DIV_MASK (7 << 12) +#define CGEM_DESIGN_CFG5_DMA_BUS_WIDTH_SHIFT 10 +#define CGEM_DESIGN_CFG5_DMA_BUS_WIDTH_MASK (3 << 10) +#define CGEM_DESIGN_CFG5_PHY_IDENT (1 << 9) +#define CGEM_DESIGN_CFG5_TSU (1 << 8) +#define CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_SHIFT 4 +#define CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_MASK (0xf << 4) +#define CGEM_DESIGN_CFG5_RX_FIFO_CNT_WIDTH_MASK 0xf + +#define CGEM_DESIGN_CFG6 0x294 /* Design Configuration 6 */ +#define CGEM_DESIGN_CFG6_ADDR_64B (1 << 23) /* 64-bit addr cap */ +#define CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK 0xfffe +#define CGEM_DESIGN_CFG6_DMA_PRIO_Q(n) (1 << (n)) + +#define CGEM_TX_QN_BAR(n) (0x440 + ((n) - 1) * 4) +#define CGEM_RX_QN_BAR(n) (0x480 + ((n) - 1) * 4) + +#define CGEM_TX_QBAR_HI 0x4C8 +#define CGEM_RX_QBAR_HI 0x4D4 + +/* + * Transmit Descriptors: two or four 32-bit words: + * word0: address + * word1: length and control + * word2: address upper 32-bits (64-bit mode) + * word3: unused (64-bit mode) + */ + +struct cgem_tx_desc { + uint32_t addr; + uint32_t ctl; +#define CGEM_TXDESC_USED (1U << 31) /* done txmitting */ +#define CGEM_TXDESC_WRAP (1 << 30) /* end descr ring */ +#define CGEM_TXDESC_RETRY_ERR (1 << 29) +#define CGEM_TXDESC_AHB_ERR (1 << 27) +#define CGEM_TXDESC_LATE_COLL (1 << 26) +#define CGEM_TXDESC_CKSUM_GEN_STAT_MASK (7 << 20) +#define CGEM_TXDESC_CKSUM_GEN_STAT_VLAN_HDR_ERR (1 << 20) +#define CGEM_TXDESC_CKSUM_GEN_STAT_SNAP_HDR_ERR (2 << 20) +#define CGEM_TXDESC_CKSUM_GEN_STAT_IP_HDR_ERR (3 << 20) +#define CGEM_TXDESC_CKSUM_GEN_STAT_UNKNOWN_TYPE (4 << 20) +#define CGEM_TXDESC_CKSUM_GEN_STAT_UNSUPP_FRAG (5 << 20) +#define CGEM_TXDESC_CKSUM_GEN_STAT_NOT_TCPUDP (6 << 20) +#define CGEM_TXDESC_CKSUM_GEN_STAT_SHORT_PKT (7 << 20) +#define CGEM_TXDESC_NO_CRC_APPENDED (1 << 16) +#define CGEM_TXDESC_LAST_BUF (1 << 15) /* last in frame */ +#define CGEM_TXDESC_LENGTH_MASK 0x3fff +#ifdef CGEM64 + uint32_t addrhi; + uint32_t unused; +#endif +}; + +/* + * Receive Descriptors: two or four 32-bit words: + * word0: address | WRAP and OWN flags + * word1: length and control + * word2: address upper 32 bits (64-bit mode) + * word3: unused + */ + +struct cgem_rx_desc { + uint32_t addr; +#define CGEM_RXDESC_WRAP (1 << 1) /* goes in addr! 
*/
+#define CGEM_RXDESC_OWN (1 << 0) /* buf filled */
+	uint32_t ctl;
+#define CGEM_RXDESC_BCAST (1U << 31)/* all 1's bcast */
+#define CGEM_RXDESC_MULTI_MATCH (1 << 30) /* multicast match */
+#define CGEM_RXDESC_UNICAST_MATCH (1 << 29)
+#define CGEM_RXDESC_EXTERNAL_MATCH (1 << 28) /* ext addr match */
+#define CGEM_RXDESC_SPEC_MATCH_SHIFT 25
+#define CGEM_RXDESC_SPEC_MATCH_MASK (3 << 25)
+#define CGEM_RXDESC_TYPE_ID_MATCH_SHIFT 22
+#define CGEM_RXDESC_TYPE_ID_MATCH_MASK (3 << 22)
+#define CGEM_RXDESC_CKSUM_STAT_MASK (3 << 22) /* same as above */
+#define CGEM_RXDESC_CKSUM_STAT_NONE (0 << 22)
+#define CGEM_RXDESC_CKSUM_STAT_IP_GOOD (1 << 22)
+#define CGEM_RXDESC_CKSUM_STAT_TCP_GOOD (2 << 22) /* and ip good */
+#define CGEM_RXDESC_CKSUM_STAT_UDP_GOOD (3 << 22) /* and ip good */
+#define CGEM_RXDESC_VLAN_DETECTED (1 << 21)
+#define CGEM_RXDESC_PRIO_DETECTED (1 << 20)
+#define CGEM_RXDESC_VLAN_PRIO_SHIFT 17
+#define CGEM_RXDESC_VLAN_PRIO_MASK (7 << 17)
+#define CGEM_RXDESC_CFI (1 << 16)
+#define CGEM_RXDESC_EOF (1 << 15) /* end of frame */
+#define CGEM_RXDESC_SOF (1 << 14) /* start of frame */
+#define CGEM_RXDESC_BAD_FCS (1 << 13)
+#define CGEM_RXDESC_LENGTH_MASK 0x1fff
+#ifdef CGEM64
+	uint32_t addrhi;
+	uint32_t unused;
+#endif
+};
+
+#endif /* _IF_CGEM_HW_H_ */
diff --git a/freebsd/sys/dev/mii/mii_physubr.c b/freebsd/sys/dev/mii/mii_physubr.c
index 9ba68190..2498b634 100644
--- a/freebsd/sys/dev/mii/mii_physubr.c
+++ b/freebsd/sys/dev/mii/mii_physubr.c
@@ -554,13 +554,20 @@ const struct mii_phydesc *
 mii_phy_match_gen(const struct mii_attach_args *ma,
     const struct mii_phydesc *mpd, size_t len)
 {
-
+#ifndef __rtems__
 	for (; mpd->mpd_name != NULL;
 	    mpd = (const struct mii_phydesc *)((const char *)mpd + len)) {
 		if (MII_OUI(ma->mii_id1, ma->mii_id2) == mpd->mpd_oui &&
 		    MII_MODEL(ma->mii_id2) == mpd->mpd_model)
 			return (mpd);
 	}
+#else
+	/* For RTEMS, accept the first descriptor without matching PHY IDs. */
+	for (; mpd->mpd_name != NULL;
+	    mpd = (const struct mii_phydesc *)((const char *)mpd + len)) {
+		return (mpd);
+	}
+#endif
 	return (NULL);
 }
diff --git a/freebsd/sys/dev/mmc/mmc.c b/freebsd/sys/dev/mmc/mmc.c
index 5bc3bbf7..9e1d12b0 100644
--- a/freebsd/sys/dev/mmc/mmc.c
+++ b/freebsd/sys/dev/mmc/mmc.c
@@ -1956,9 +1956,14 @@ free_ivar:
 	 * is broken (in which case it also may impact the remainder
 	 * of the bus anyway, though).
*/ +#ifndef __rtems__ if ((newcard && child == NULL) || mmcbr_get_mode(sc->dev) == mode_sd) return; +#else + /* for RTEMS just probe one MMC device and exit */ + return; +#endif } } diff --git a/freebsd/sys/dev/pci/pci.c b/freebsd/sys/dev/pci/pci.c index 5402cb66..7b2db8a0 100644 --- a/freebsd/sys/dev/pci/pci.c +++ b/freebsd/sys/dev/pci/pci.c @@ -388,11 +388,19 @@ SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN, &pci_do_power_suspend, 1, "Transition from D0 -> D3 on suspend."); +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) static int pci_do_msi = 1; +#else +static int pci_do_msi = 0; +#endif SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1, "Enable support for MSI interrupts"); +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) static int pci_do_msix = 1; +#else +static int pci_do_msix = 0; +#endif SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1, "Enable support for MSI-X interrupts"); diff --git a/freebsd/sys/dev/pci/pci_host_generic.c b/freebsd/sys/dev/pci/pci_host_generic.c new file mode 100644 index 00000000..9895a8a1 --- /dev/null +++ b/freebsd/sys/dev/pci/pci_host_generic.c @@ -0,0 +1,495 @@ +#include + +/*- + * Copyright (c) 2015 Ruslan Bukin + * Copyright (c) 2014 The FreeBSD Foundation + * All rights reserved. + * + * This software was developed by Semihalf under + * the sponsorship of the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* Generic ECAM PCIe driver */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +/* Assembling ECAM Configuration Address */ +#define PCIE_BUS_SHIFT 20 +#define PCIE_SLOT_SHIFT 15 +#define PCIE_FUNC_SHIFT 12 +#define PCIE_BUS_MASK 0xFF +#define PCIE_SLOT_MASK 0x1F +#define PCIE_FUNC_MASK 0x07 +#define PCIE_REG_MASK 0xFFF + +#define PCIE_ADDR_OFFSET(bus, slot, func, reg) \ + ((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT) | \ + (((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT) | \ + (((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT) | \ + ((reg) & PCIE_REG_MASK)) + +typedef void (*pci_host_generic_quirk_function)(device_t); + +struct pci_host_generic_quirk_entry { + int impl; + int part; + int var; + int rev; + pci_host_generic_quirk_function func; +}; + +struct pci_host_generic_block_entry { + int impl; + int part; + int var; + int rev; + int bus; + int slot; +}; + +/* Forward prototypes */ + +static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, + u_int func, u_int reg, int bytes); +static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, + u_int func, u_int reg, uint32_t val, int bytes); +static int generic_pcie_maxslots(device_t dev); +static int generic_pcie_read_ivar(device_t dev, device_t child, int index, + uintptr_t *result); +static int generic_pcie_write_ivar(device_t dev, device_t child, int index, + uintptr_t value); + +#if defined(__aarch64__) +static void pci_host_generic_apply_quirks(device_t); +static void thunderx2_ahci_bar_quirk(device_t); + +struct pci_host_generic_quirk_entry pci_host_generic_quirks[] = +{ + {CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0, 0, thunderx2_ahci_bar_quirk}, + {0, 0, 0, 0, NULL} +}; + +struct pci_host_generic_block_entry pci_host_generic_blocked[] = +{ + /* ThunderX2 AHCI on second socket */ + {CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0, 0, 0x80, 0x10}, + {0, 0, 0, 0, 0, 0} +}; +#endif + +int +pci_host_generic_core_attach(device_t dev) +{ + struct generic_pcie_core_softc *sc; + int error; + int rid; + + sc = device_get_softc(dev); + sc->dev = dev; + + /* Create the parent DMA tag to pass down the coherent flag */ + error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE, /* maxsize */ + BUS_SPACE_UNRESTRICTED, /* nsegments */ + BUS_SPACE_MAXSIZE, /* maxsegsize */ + sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc->dmat); + if (error != 0) + return (error); + + rid = 0; + sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); + if (sc->res == NULL) { + device_printf(dev, "could not map memory.\n"); + return (ENXIO); + } + + sc->bst = rman_get_bustag(sc->res); + sc->bsh = rman_get_bushandle(sc->res); + + sc->mem_rman.rm_type = RMAN_ARRAY; + sc->mem_rman.rm_descr = "PCIe Memory"; + sc->io_rman.rm_type = RMAN_ARRAY; + sc->io_rman.rm_descr = "PCIe IO window"; + + /* Initialize rman and allocate memory regions */ + error = rman_init(&sc->mem_rman); + if (error) { + device_printf(dev, "rman_init() failed. error = %d\n", error); + return (error); + } + + error = rman_init(&sc->io_rman); + if (error) { + device_printf(dev, "rman_init() failed. 
error = %d\n", error); + return (error); + } + +#if defined(__aarch64__) + pci_host_generic_apply_quirks(dev); +#endif + + return (0); +} + +#if defined(__aarch64__) +static void +pci_host_generic_apply_quirks(device_t dev) +{ + struct pci_host_generic_quirk_entry *quirk; + + quirk = pci_host_generic_quirks; + while (1) { + if (quirk->impl == 0) + break; + + if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, + quirk->impl, quirk->part, quirk->var, quirk->rev) && + quirk->func != NULL) + quirk->func(dev); + + quirk++; + } +} +#endif + +static uint32_t +generic_pcie_read_config(device_t dev, u_int bus, u_int slot, + u_int func, u_int reg, int bytes) +{ + struct generic_pcie_core_softc *sc; + bus_space_handle_t h; + bus_space_tag_t t; + uint64_t offset; + uint32_t data; +#if defined(__aarch64__) + struct pci_host_generic_block_entry *block; +#endif + + if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || + (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) + return (~0U); + +#if defined(__aarch64__) + block = pci_host_generic_blocked; + while (1) { + if (block->impl == 0) + break; + + if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, + block->impl, block->part, block->var, block->rev) && + block->bus == bus && block->slot == slot) + return (~0); + + block++; + } +#endif + + sc = device_get_softc(dev); + + offset = PCIE_ADDR_OFFSET(bus, slot, func, reg); + t = sc->bst; + h = sc->bsh; + + switch (bytes) { + case 1: + data = bus_space_read_1(t, h, offset); + break; + case 2: + data = le16toh(bus_space_read_2(t, h, offset)); + break; + case 4: + data = le32toh(bus_space_read_4(t, h, offset)); + break; + default: + return (~0U); + } + + return (data); +} + +static void +generic_pcie_write_config(device_t dev, u_int bus, u_int slot, + u_int func, u_int reg, uint32_t val, int bytes) +{ + struct generic_pcie_core_softc *sc; + bus_space_handle_t h; + bus_space_tag_t t; + uint64_t offset; + + if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || + (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) + return; + + sc = device_get_softc(dev); + + offset = PCIE_ADDR_OFFSET(bus, slot, func, reg); + + t = sc->bst; + h = sc->bsh; + + switch (bytes) { + case 1: + bus_space_write_1(t, h, offset, val); + break; + case 2: + bus_space_write_2(t, h, offset, htole16(val)); + break; + case 4: + bus_space_write_4(t, h, offset, htole32(val)); + break; + default: + return; + } +} + +static int +generic_pcie_maxslots(device_t dev) +{ + + return (31); /* max slots per bus acc. 
to standard */ +} + +static int +generic_pcie_read_ivar(device_t dev, device_t child, int index, + uintptr_t *result) +{ + struct generic_pcie_core_softc *sc; + int secondary_bus; + + sc = device_get_softc(dev); + + if (index == PCIB_IVAR_BUS) { + /* this pcib adds only pci bus 0 as child */ + secondary_bus = 0; + *result = secondary_bus; + return (0); + + } + + if (index == PCIB_IVAR_DOMAIN) { + *result = sc->ecam; + return (0); + } + + if (bootverbose) + device_printf(dev, "ERROR: Unknown index %d.\n", index); + return (ENOENT); +} + +static int +generic_pcie_write_ivar(device_t dev, device_t child, int index, + uintptr_t value) +{ + + return (ENOENT); +} + +static struct rman * +generic_pcie_rman(struct generic_pcie_core_softc *sc, int type) +{ + + switch (type) { + case SYS_RES_IOPORT: + return (&sc->io_rman); + case SYS_RES_MEMORY: + return (&sc->mem_rman); + default: + break; + } + + return (NULL); +} + +int +pci_host_generic_core_release_resource(device_t dev, device_t child, int type, + int rid, struct resource *res) +{ + struct generic_pcie_core_softc *sc; + struct rman *rm; + + sc = device_get_softc(dev); + +#if defined(NEW_PCIB) && defined(PCI_RES_BUS) + if (type == PCI_RES_BUS) { + return (pci_domain_release_bus(sc->ecam, child, rid, res)); + } +#endif + + rm = generic_pcie_rman(sc, type); + if (rm != NULL) { + KASSERT(rman_is_region_manager(res, rm), ("rman mismatch")); + rman_release_resource(res); + } + + return (bus_generic_release_resource(dev, child, type, rid, res)); +} + +struct resource * +pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, + int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) +{ + struct generic_pcie_core_softc *sc; + struct resource *res; + struct rman *rm; + + sc = device_get_softc(dev); + +#if defined(NEW_PCIB) && defined(PCI_RES_BUS) + if (type == PCI_RES_BUS) { + return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end, + count, flags)); + } +#endif + + rm = generic_pcie_rman(sc, type); + if (rm == NULL) + return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, + type, rid, start, end, count, flags)); + + if (bootverbose) { + device_printf(dev, + "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n", + start, end, count); + } + + res = rman_reserve_resource(rm, start, end, count, flags, child); + if (res == NULL) + goto fail; + + rman_set_rid(res, *rid); + + if (flags & RF_ACTIVE) + if (bus_activate_resource(child, type, *rid, res)) { + rman_release_resource(res); + goto fail; + } + + return (res); + +fail: + device_printf(dev, "%s FAIL: type=%d, rid=%d, " + "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", + __func__, type, *rid, start, end, count, flags); + + return (NULL); +} + +static int +generic_pcie_adjust_resource(device_t dev, device_t child, int type, + struct resource *res, rman_res_t start, rman_res_t end) +{ + struct generic_pcie_core_softc *sc; + struct rman *rm; + + sc = device_get_softc(dev); +#if defined(NEW_PCIB) && defined(PCI_RES_BUS) + if (type == PCI_RES_BUS) + return (pci_domain_adjust_bus(sc->ecam, child, res, start, + end)); +#endif + + rm = generic_pcie_rman(sc, type); + if (rm != NULL) + return (rman_adjust_resource(res, start, end)); + return (bus_generic_adjust_resource(dev, child, type, res, start, end)); +} + +static bus_dma_tag_t +generic_pcie_get_dma_tag(device_t dev, device_t child) +{ + struct generic_pcie_core_softc *sc; + + sc = device_get_softc(dev); + return (sc->dmat); +} + +static device_method_t generic_pcie_methods[] = { + 
DEVMETHOD(device_attach, pci_host_generic_core_attach), + DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), + DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), + DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), + DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), + DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), + DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), + DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), + + DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), + + /* pcib interface */ + DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), + DEVMETHOD(pcib_read_config, generic_pcie_read_config), + DEVMETHOD(pcib_write_config, generic_pcie_write_config), + + DEVMETHOD_END +}; + +DEFINE_CLASS_0(pcib, generic_pcie_core_driver, + generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); + +#if defined(__aarch64__) +static void thunderx2_ahci_bar_quirk(device_t dev) +{ + + /* + * XXX: + * On ThunderX2, AHCI BAR2 address is wrong. It needs to precisely + * match the one described in datasheet. Fixup it unconditionally. + */ + if (device_get_unit(dev) == 0) { + device_printf(dev, "running AHCI BAR fixup\n"); + PCIB_WRITE_CONFIG(dev, 0, 16, 0, 0x18, 0x01440000, 4); + PCIB_WRITE_CONFIG(dev, 0, 16, 0, 0x1c, 0x40, 4); + PCIB_WRITE_CONFIG(dev, 0, 16, 1, 0x18, 0x01450000, 4); + PCIB_WRITE_CONFIG(dev, 0, 16, 1, 0x1c, 0x40, 4); + } +} +#endif diff --git a/freebsd/sys/dev/pci/pci_host_generic.h b/freebsd/sys/dev/pci/pci_host_generic.h new file mode 100644 index 00000000..4afec262 --- /dev/null +++ b/freebsd/sys/dev/pci/pci_host_generic.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2015 Ruslan Bukin + * Copyright (c) 2015 The FreeBSD Foundation + * All rights reserved. + * + * This software was developed by Semihalf. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * + * $FreeBSD$ + * + */ + +#ifndef __PCI_HOST_GENERIC_H_ +#define __PCI_HOST_GENERIC_H_ + +#include + +#define MAX_RANGES_TUPLES 16 +#define MIN_RANGES_TUPLES 2 + +struct pcie_range { + uint64_t pci_base; + uint64_t phys_base; + uint64_t size; + uint64_t flags; +#define FLAG_IO (1 << 0) +#define FLAG_MEM (1 << 1) +}; + +struct generic_pcie_core_softc { + struct pcie_range ranges[MAX_RANGES_TUPLES]; + int nranges; + int coherent; + struct rman mem_rman; + struct rman io_rman; + struct resource *res; + struct resource *res1; + int ecam; + bus_space_tag_t bst; + bus_space_handle_t bsh; + device_t dev; + bus_space_handle_t ioh; + bus_dma_tag_t dmat; +}; + +DECLARE_CLASS(generic_pcie_core_driver); + +int pci_host_generic_core_attach(device_t); +struct resource *pci_host_generic_core_alloc_resource(device_t, device_t, int, + int *, rman_res_t, rman_res_t, rman_res_t, u_int); +int pci_host_generic_core_release_resource(device_t, device_t, int, int, + struct resource *); + +#endif /* __PCI_HOST_GENERIC_H_ */ diff --git a/freebsd/sys/dev/pci/pci_host_generic_fdt.c b/freebsd/sys/dev/pci/pci_host_generic_fdt.c new file mode 100644 index 00000000..d1330615 --- /dev/null +++ b/freebsd/sys/dev/pci/pci_host_generic_fdt.c @@ -0,0 +1,669 @@ +#include + +/*- + * Copyright (c) 2015 Ruslan Bukin + * Copyright (c) 2014,2016 The FreeBSD Foundation + * All rights reserved. + * + * This software was developed by Andrew Turner under + * the sponsorship of the FreeBSD Foundation. + * + * This software was developed by Semihalf under + * the sponsorship of the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */
+
+/* Generic ECAM PCIe driver FDT attachment */
+
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if defined(INTRNG)
+#include
+#endif
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef __rtems__
+#include
+#include
+#endif
+
+#define PCI_IO_WINDOW_OFFSET 0x1000
+
+#define SPACE_CODE_SHIFT 24
+#define SPACE_CODE_MASK 0x3
+#define SPACE_CODE_IO_SPACE 0x1
+#define PROPS_CELL_SIZE 1
+#define PCI_ADDR_CELL_SIZE 2
+
+/* OFW bus interface */
+struct generic_pcie_ofw_devinfo {
+	struct ofw_bus_devinfo di_dinfo;
+	struct resource_list di_rl;
+};
+
+/* Forward prototypes */
+
+static int generic_pcie_fdt_probe(device_t dev);
+static int parse_pci_mem_ranges(device_t, struct generic_pcie_core_softc *);
+static int generic_pcie_fdt_release_resource(device_t dev, device_t child,
+    int type, int rid, struct resource *res);
+static int generic_pcie_ofw_bus_attach(device_t);
+static const struct ofw_bus_devinfo *generic_pcie_ofw_get_devinfo(device_t,
+    device_t);
+
+static __inline void
+get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells)
+{
+
+	*addr_cells = 2;
+	/* Find address cells if present */
+	OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells));
+
+	*size_cells = 2;
+	/* Find size cells if present */
+	OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells));
+}
+
+static int
+generic_pcie_fdt_probe(device_t dev)
+{
+
+	if (!ofw_bus_status_okay(dev))
+		return (ENXIO);
+
+	if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic")) {
+		device_set_desc(dev, "Generic PCI host controller");
+		return (BUS_PROBE_GENERIC);
+	}
+	if (ofw_bus_is_compatible(dev, "arm,gem5_pcie")) {
+		device_set_desc(dev, "GEM5 PCIe host controller");
+		return (BUS_PROBE_DEFAULT);
+	}
+
+	return (ENXIO);
+}
+
+int
+pci_host_generic_attach(device_t dev)
+{
+	struct generic_pcie_fdt_softc *sc;
+	uint64_t phys_base;
+	uint64_t pci_base;
+	uint64_t size;
+	phandle_t node;
+	int error;
+	int tuple;
+
+	sc = device_get_softc(dev);
+
+	/* Retrieve 'ranges' property from FDT */
+	if (bootverbose)
+		device_printf(dev, "parsing FDT for ECAM%d:\n", sc->base.ecam);
+	if (parse_pci_mem_ranges(dev, &sc->base))
+		return (ENXIO);
+
+	/* Attach OFW bus */
+	if (generic_pcie_ofw_bus_attach(dev) != 0)
+		return (ENXIO);
+
+	node = ofw_bus_get_node(dev);
+	if (sc->base.coherent == 0) {
+		sc->base.coherent = OF_hasprop(node, "dma-coherent");
+	}
+	if (bootverbose)
+		device_printf(dev, "Bus is%s cache-coherent\n",
+		    sc->base.coherent ? "" : " not");
+
+	error = pci_host_generic_core_attach(dev);
+	if (error != 0)
+		return (error);
+
+	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
+		phys_base = sc->base.ranges[tuple].phys_base;
+		pci_base = sc->base.ranges[tuple].pci_base;
+		size = sc->base.ranges[tuple].size;
+		if (phys_base == 0 || size == 0)
+			continue; /* empty range element */
+		if (sc->base.ranges[tuple].flags & FLAG_MEM) {
+			error = rman_manage_region(&sc->base.mem_rman,
+			    phys_base, phys_base + size - 1);
+		} else if (sc->base.ranges[tuple].flags & FLAG_IO) {
+			error = rman_manage_region(&sc->base.io_rman,
+			    pci_base + PCI_IO_WINDOW_OFFSET,
+			    pci_base + PCI_IO_WINDOW_OFFSET + size - 1);
+		} else
+			continue;
+		if (error) {
+			device_printf(dev, "rman_manage_region() failed. "
+ "error = %d\n", error); + rman_fini(&sc->base.mem_rman); + return (error); + } + } + + ofw_bus_setup_iinfo(node, &sc->pci_iinfo, sizeof(cell_t)); + + device_add_child(dev, "pci", -1); + return (bus_generic_attach(dev)); +} + +static int +parse_pci_mem_ranges(device_t dev, struct generic_pcie_core_softc *sc) +{ + pcell_t pci_addr_cells, parent_addr_cells; + pcell_t attributes, size_cells; + cell_t *base_ranges; + int nbase_ranges; + phandle_t node; + int i, j, k; + int tuple; + + node = ofw_bus_get_node(dev); + + OF_getencprop(node, "#address-cells", &pci_addr_cells, + sizeof(pci_addr_cells)); + OF_getencprop(node, "#size-cells", &size_cells, + sizeof(size_cells)); + OF_getencprop(OF_parent(node), "#address-cells", &parent_addr_cells, + sizeof(parent_addr_cells)); + + if (parent_addr_cells > 2 || pci_addr_cells != 3 || size_cells > 2) { + device_printf(dev, + "Unexpected number of address or size cells in FDT\n"); + return (ENXIO); + } + + nbase_ranges = OF_getproplen(node, "ranges"); + sc->nranges = nbase_ranges / sizeof(cell_t) / + (parent_addr_cells + pci_addr_cells + size_cells); + base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); + OF_getencprop(node, "ranges", base_ranges, nbase_ranges); + + for (i = 0, j = 0; i < sc->nranges; i++) { + attributes = (base_ranges[j++] >> SPACE_CODE_SHIFT) & \ + SPACE_CODE_MASK; + if (attributes == SPACE_CODE_IO_SPACE) { + sc->ranges[i].flags |= FLAG_IO; + } else { + sc->ranges[i].flags |= FLAG_MEM; + } + + sc->ranges[i].pci_base = 0; + for (k = 0; k < (pci_addr_cells - 1); k++) { + sc->ranges[i].pci_base <<= 32; + sc->ranges[i].pci_base |= base_ranges[j++]; + } + sc->ranges[i].phys_base = 0; + for (k = 0; k < parent_addr_cells; k++) { + sc->ranges[i].phys_base <<= 32; + sc->ranges[i].phys_base |= base_ranges[j++]; + } + sc->ranges[i].size = 0; + for (k = 0; k < size_cells; k++) { + sc->ranges[i].size <<= 32; + sc->ranges[i].size |= base_ranges[j++]; + } + } + + for (; i < MAX_RANGES_TUPLES; i++) { + /* zero-fill remaining tuples to mark empty elements in array */ + sc->ranges[i].pci_base = 0; + sc->ranges[i].phys_base = 0; + sc->ranges[i].size = 0; + } + + if (bootverbose) { + for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { + device_printf(dev, + "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx\n", + sc->ranges[tuple].pci_base, + sc->ranges[tuple].phys_base, + sc->ranges[tuple].size); + } + } + + free(base_ranges, M_DEVBUF); + return (0); +} + +static int +generic_pcie_fdt_route_interrupt(device_t bus, device_t dev, int pin) +{ +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) + struct generic_pcie_fdt_softc *sc; + struct ofw_pci_register reg; + uint32_t pintr, mintr[4]; + phandle_t iparent; + int intrcells; + + sc = device_get_softc(bus); + pintr = pin; + + bzero(®, sizeof(reg)); + reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | + (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | + (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); + + intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), + &sc->pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), + mintr, sizeof(mintr), &iparent); + if (intrcells) { + pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); + return (pintr); + } + + device_printf(bus, "could not route pin %d for device %d.%d\n", + pin, pci_get_slot(dev), pci_get_function(dev)); + return (PCI_INVALID_IRQ); +#else + return (BSP_PHYTIUM_INTA_IRQ); +#endif +} + +static int +generic_pcie_fdt_release_resource(device_t dev, device_t child, int type, + int rid, struct resource *res) +{ + +#if defined(NEW_PCIB) 
&& defined(PCI_RES_BUS) + if (type == PCI_RES_BUS) { + return (pci_host_generic_core_release_resource(dev, child, type, + rid, res)); + } +#endif + + /* For PCIe devices that do not have FDT nodes, use PCIB method */ + if ((int)ofw_bus_get_node(child) <= 0) { + return (pci_host_generic_core_release_resource(dev, child, type, + rid, res)); + } + + /* For other devices use OFW method */ + return (bus_generic_release_resource(dev, child, type, rid, res)); +} + +struct resource * +pci_host_generic_alloc_resource(device_t dev, device_t child, int type, + int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) +{ + struct generic_pcie_fdt_softc *sc; + struct generic_pcie_ofw_devinfo *di; + struct resource_list_entry *rle; + int i; + +#if defined(NEW_PCIB) && defined(PCI_RES_BUS) + if (type == PCI_RES_BUS) { + return (pci_host_generic_core_alloc_resource(dev, child, type, rid, + start, end, count, flags)); + } +#endif + + /* For PCIe devices that do not have FDT nodes, use PCIB method */ + if ((int)ofw_bus_get_node(child) <= 0) + return (pci_host_generic_core_alloc_resource(dev, child, type, + rid, start, end, count, flags)); + + /* For other devices use OFW method */ + sc = device_get_softc(dev); + + if (RMAN_IS_DEFAULT_RANGE(start, end)) { + if ((di = device_get_ivars(child)) == NULL) + return (NULL); + if (type == SYS_RES_IOPORT) + type = SYS_RES_MEMORY; + + /* Find defaults for this rid */ + rle = resource_list_find(&di->di_rl, type, *rid); + if (rle == NULL) + return (NULL); + + start = rle->start; + end = rle->end; + count = rle->count; + } + + if (type == SYS_RES_MEMORY) { + /* Remap through ranges property */ + for (i = 0; i < MAX_RANGES_TUPLES; i++) { + if (start >= sc->base.ranges[i].phys_base && + end < (sc->base.ranges[i].pci_base + + sc->base.ranges[i].size)) { + start -= sc->base.ranges[i].phys_base; + start += sc->base.ranges[i].pci_base; + end -= sc->base.ranges[i].phys_base; + end += sc->base.ranges[i].pci_base; + break; + } + } + + if (i == MAX_RANGES_TUPLES) { + device_printf(dev, "Could not map resource " + "%#jx-%#jx\n", start, end); + return (NULL); + } + } + + return (bus_generic_alloc_resource(dev, child, type, rid, start, + end, count, flags)); +} + +static int +generic_pcie_fdt_activate_resource(device_t dev, device_t child, int type, + int rid, struct resource *r) +{ + struct generic_pcie_fdt_softc *sc; + uint64_t phys_base; + uint64_t pci_base; + uint64_t size; + int found; + int res; + int i; + + sc = device_get_softc(dev); + + if ((res = rman_activate_resource(r)) != 0) + return (res); + + switch(type) { + case SYS_RES_IOPORT: + found = 0; + for (i = 0; i < MAX_RANGES_TUPLES; i++) { + pci_base = sc->base.ranges[i].pci_base; + phys_base = sc->base.ranges[i].phys_base; + size = sc->base.ranges[i].size; + + if ((rid > pci_base) && (rid < (pci_base + size))) { + found = 1; + break; + } + } + if (found) { + rman_set_start(r, rman_get_start(r) + phys_base); + rman_set_end(r, rman_get_end(r) + phys_base); + res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev), + child, type, rid, r); + } else { + device_printf(dev, + "Failed to activate IOPORT resource\n"); + res = 0; + } + break; + case SYS_RES_MEMORY: + res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, + type, rid, r); + break; + default: + break; + } + + return (res); +} + +static int +generic_pcie_fdt_deactivate_resource(device_t dev, device_t child, int type, + int rid, struct resource *r) +{ + int res; + + if ((res = rman_deactivate_resource(r)) != 0) + return (res); + + switch(type) { + case 
SYS_RES_IOPORT: + case SYS_RES_MEMORY: + res = BUS_DEACTIVATE_RESOURCE(device_get_parent(dev), child, + type, rid, r); + break; + default: + break; + } + + return (res); +} + +static int +generic_pcie_fdt_alloc_msi(device_t pci, device_t child, int count, + int maxcount, int *irqs) +{ +#if defined(INTRNG) + phandle_t msi_parent; + int err; + + err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), + &msi_parent, NULL); + if (err != 0) + return (err); + return (intr_alloc_msi(pci, child, msi_parent, count, maxcount, + irqs)); +#else + return (ENXIO); +#endif +} + +static int +generic_pcie_fdt_release_msi(device_t pci, device_t child, int count, int *irqs) +{ +#if defined(INTRNG) + phandle_t msi_parent; + int err; + + err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), + &msi_parent, NULL); + if (err != 0) + return (err); + return (intr_release_msi(pci, child, msi_parent, count, irqs)); +#else + return (ENXIO); +#endif +} + +static int +generic_pcie_fdt_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, + uint32_t *data) +{ +#if defined(INTRNG) + phandle_t msi_parent; + int err; + + err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), + &msi_parent, NULL); + if (err != 0) + return (err); + return (intr_map_msi(pci, child, msi_parent, irq, addr, data)); +#else + return (ENXIO); +#endif +} + +static int +generic_pcie_fdt_alloc_msix(device_t pci, device_t child, int *irq) +{ +#if defined(INTRNG) + phandle_t msi_parent; + int err; + + err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), + &msi_parent, NULL); + if (err != 0) + return (err); + return (intr_alloc_msix(pci, child, msi_parent, irq)); +#else + return (ENXIO); +#endif +} + +static int +generic_pcie_fdt_release_msix(device_t pci, device_t child, int irq) +{ +#if defined(INTRNG) + phandle_t msi_parent; + int err; + + err = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), + &msi_parent, NULL); + if (err != 0) + return (err); + return (intr_release_msix(pci, child, msi_parent, irq)); +#else + return (ENXIO); +#endif +} + +int +generic_pcie_get_id(device_t pci, device_t child, enum pci_id_type type, + uintptr_t *id) +{ + phandle_t node; + int err; + uint32_t rid; + uint16_t pci_rid; + + if (type != PCI_ID_MSI) + return (pcib_get_id(pci, child, type, id)); + + node = ofw_bus_get_node(pci); + pci_rid = pci_get_rid(child); + + err = ofw_bus_msimap(node, pci_rid, NULL, &rid); + if (err != 0) + return (err); + *id = rid; + + return (0); +} + +static const struct ofw_bus_devinfo * +generic_pcie_ofw_get_devinfo(device_t bus __unused, device_t child) +{ + struct generic_pcie_ofw_devinfo *di; + + di = device_get_ivars(child); + return (&di->di_dinfo); +} + +/* Helper functions */ + +static int +generic_pcie_ofw_bus_attach(device_t dev) +{ + struct generic_pcie_ofw_devinfo *di; + device_t child; + phandle_t parent, node; + pcell_t addr_cells, size_cells; + + parent = ofw_bus_get_node(dev); + if (parent > 0) { + get_addr_size_cells(parent, &addr_cells, &size_cells); + /* Iterate through all bus subordinates */ + for (node = OF_child(parent); node > 0; node = OF_peer(node)) { + + /* Allocate and populate devinfo. */ + di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO); + if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) { + free(di, M_DEVBUF); + continue; + } + + /* Initialize and populate resource list. 
*/ + resource_list_init(&di->di_rl); + ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells, + &di->di_rl); + ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL); + + /* Add newbus device for this FDT node */ + child = device_add_child(dev, NULL, -1); + if (child == NULL) { + resource_list_free(&di->di_rl); + ofw_bus_gen_destroy_devinfo(&di->di_dinfo); + free(di, M_DEVBUF); + continue; + } + + device_set_ivars(child, di); + } + } + + return (0); +} + +static device_method_t generic_pcie_fdt_methods[] = { + DEVMETHOD(device_probe, generic_pcie_fdt_probe), + DEVMETHOD(device_attach, pci_host_generic_attach), + DEVMETHOD(bus_alloc_resource, pci_host_generic_alloc_resource), + DEVMETHOD(bus_release_resource, generic_pcie_fdt_release_resource), + DEVMETHOD(bus_activate_resource, generic_pcie_fdt_activate_resource), + DEVMETHOD(bus_deactivate_resource,generic_pcie_fdt_deactivate_resource), + + /* pcib interface */ + DEVMETHOD(pcib_route_interrupt, generic_pcie_fdt_route_interrupt), + DEVMETHOD(pcib_alloc_msi, generic_pcie_fdt_alloc_msi), + DEVMETHOD(pcib_release_msi, generic_pcie_fdt_release_msi), + DEVMETHOD(pcib_alloc_msix, generic_pcie_fdt_alloc_msix), + DEVMETHOD(pcib_release_msix, generic_pcie_fdt_release_msix), + DEVMETHOD(pcib_map_msi, generic_pcie_fdt_map_msi), + DEVMETHOD(pcib_get_id, generic_pcie_get_id), + DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), + + /* ofw_bus interface */ + DEVMETHOD(ofw_bus_get_devinfo, generic_pcie_ofw_get_devinfo), + DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), + DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), + DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), + DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), + DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), + + DEVMETHOD_END +}; + +DEFINE_CLASS_1(pcib, generic_pcie_fdt_driver, generic_pcie_fdt_methods, + sizeof(struct generic_pcie_fdt_softc), generic_pcie_core_driver); + +static devclass_t generic_pcie_fdt_devclass; + +DRIVER_MODULE(pcib, simplebus, generic_pcie_fdt_driver, + generic_pcie_fdt_devclass, 0, 0); +DRIVER_MODULE(pcib, ofwbus, generic_pcie_fdt_driver, generic_pcie_fdt_devclass, + 0, 0); diff --git a/freebsd/sys/dev/pci/pci_host_generic_fdt.h b/freebsd/sys/dev/pci/pci_host_generic_fdt.h new file mode 100644 index 00000000..fba3ddcd --- /dev/null +++ b/freebsd/sys/dev/pci/pci_host_generic_fdt.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2015 Ruslan Bukin + * Copyright (c) 2015 The FreeBSD Foundation + * All rights reserved. + * + * This software was developed by Semihalf. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
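The MSI and MSI-X bus methods above all funnel through ofw_bus_msimap(), which resolves a child's PCI requester ID against the devicetree msi-map property before handing the request to INTRNG. A minimal sketch of that lookup, assuming the standard (rid-base, msi-parent, msi-base, length) entry layout; the types, table, and values here are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* One msi-map entry: (rid-base, msi-parent phandle, msi-base, length). */
struct msi_map_entry { uint32_t rid_base, msi_parent, msi_base, length; };

static int
msi_map_rid(const struct msi_map_entry *map, int n, uint32_t rid,
    uint32_t *parent, uint32_t *msi_rid)
{
	int i;

	for (i = 0; i < n; i++) {
		if (rid >= map[i].rid_base &&
		    rid < map[i].rid_base + map[i].length) {
			*parent = map[i].msi_parent;
			*msi_rid = map[i].msi_base + (rid - map[i].rid_base);
			return (0);
		}
	}
	return (-1);	/* no translation; the methods above return ENXIO */
}

int main(void)
{
	struct msi_map_entry map[] = { { 0x0000, 1, 0x0100, 0x10000 } };
	uint32_t parent, msi_rid;

	if (msi_map_rid(map, 1, 0x0008, &parent, &msi_rid) == 0)
		printf("parent=%u msi rid=0x%x\n", parent, msi_rid);
	return (0);
}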
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * + * $FreeBSD$ + * + */ + +#ifndef __PCI_HOST_GENERIC_FDT_H_ +#define __PCI_HOST_GENERIC_FDT_H_ + +struct generic_pcie_fdt_softc { + struct generic_pcie_core_softc base; + struct ofw_bus_iinfo pci_iinfo; +}; + +DECLARE_CLASS(generic_pcie_fdt_driver); + +struct resource *pci_host_generic_alloc_resource(device_t, + device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); +int pci_host_generic_attach(device_t); +int generic_pcie_get_id(device_t, device_t, enum pci_id_type, uintptr_t *); + +#endif /* __PCI_HOST_GENERIC_FDT_H_ */ diff --git a/freebsd/sys/dev/phytium/phytium_sdif.c b/freebsd/sys/dev/phytium/phytium_sdif.c new file mode 100644 index 00000000..2b47af0b --- /dev/null +++ b/freebsd/sys/dev/phytium/phytium_sdif.c @@ -0,0 +1,936 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (C) 2024 Phytium Technology Co., Ltd. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
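The generic_pcie_fdt_softc defined above leans on its base member being first: methods inherited through DEFINE_CLASS_1 from the core driver treat the very same softc as a generic_pcie_core_softc. A standalone sketch of why that layout makes the implicit conversion safe (field names hypothetical):

#include <stddef.h>

struct core_softc { int res_count; };	/* stand-in for the core softc */
struct fdt_softc { struct core_softc base; int extra; };

/* C puts the first member at offset zero, so a pointer to the derived
 * softc converts safely to the base type the inherited methods expect. */
_Static_assert(offsetof(struct fdt_softc, base) == 0, "base must stay first");

int main(void) { return (0); }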
+ * + * Modify History: + * Ver   Who        Date         Changes + * ----- ------     --------    -------------------------------------- + * 1.0 zhugengyu 2024/04/13 init commit + */ + +#include +#include +#include + +#undef max +#undef min + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include +#include + +#include + +#define SDIF_HC_DEVSTR "Phytium SDIF Host" + +#define SDIF_BLK_SIZE 512U +#define SDIF_MAX_BLK 1024U +#define SDIF_PAGE_SIZE 4096 +#define SDIF_TIME_OUT 10000U /* 10s */ + +/* using polling instead of interrupt */ +/* #define SDIF_POLLING */ + +struct phytium_sdif_softc { + device_t dev; + rtems_id task_id; + struct mtx sc_mtx; + struct mtx bus_mtx; + struct resource *io_res; + struct resource *irq_res; + bus_space_handle_t bushandle; + void *intrhandle; + int bus_busy; + + uint32_t card_clock; + uint32_t power_mode; + uint32_t bus_width; + struct mmc_host host; + + /* FSdif */ + uint32_t id; + FSdif hc; + FSdifConfig hc_cfg; + FSdifCmdData cmd_pkg; + FSdifData dat_pkg; + FSdifIDmaDesc *dma_desc; + uintptr_t dma_desc_phy; + uint32_t desc_num; + void *dma_buf; + uintptr_t dma_buf_phy; + + volatile boolean err_occur; + volatile boolean cmd_done; + volatile boolean data_done; +}; + +static void +PHYTIUM_MMC_LOCK(struct phytium_sdif_softc *sc) +{ + mtx_lock(&sc->sc_mtx); + sc->task_id = rtems_task_self(); +} + +#define PHYTIUM_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) +#define PHYTIUM_MMC_LOCK_INIT(_sc) \ + mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ + "phytium_mmc", MTX_DEF) + +static void phytium_sdif_relax(void) +{ + rtems_interval delay_ticks = RTEMS_MICROSECONDS_TO_TICKS(10); + rtems_interval start_time = rtems_clock_get_ticks_since_boot(); + rtems_interval curr_time; + + do { + curr_time = rtems_clock_get_ticks_since_boot(); + } while ((curr_time - start_time) < delay_ticks); +} + +#ifndef SDIF_POLLING +static void phyium_sdif_intr(void *arg) +{ + struct phytium_sdif_softc *sc = (struct phytium_sdif_softc *) arg; + rtems_status_code rs; + + FSdifInterruptHandler(0, &(sc->hc)); + + rs = rtems_event_transient_send(sc->task_id); + BSD_ASSERT(rs == RTEMS_SUCCESSFUL); +} +#endif + +static struct ofw_compat_data compat_data[] = { + {"phytium,sdif", 1}, + {NULL, 0}, +}; + + +static int phytium_sdif_probe(device_t dev) +{ + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) { + return (ENXIO); + } + + device_set_desc(dev, SDIF_HC_DEVSTR); + return (0); +} + +#ifndef SDIF_POLLING +static void phytium_sdif_card_detected(FSdif *const instance_p, void *args, u32 status, u32 dmac_status) +{ + struct phytium_sdif_softc *sc = (struct phytium_sdif_softc *)args; + + device_printf(sc->dev, "Card link changed !!!\n"); +} + +static void phytium_sdif_command_done(FSdif *const instance_p, void *args, u32 status, u32 dmac_status) +{ + struct phytium_sdif_softc *sc = (struct phytium_sdif_softc *)args; + + sc->cmd_done = true; +} + +static void phytium_sdif_data_done(FSdif *const instance_p, void *args, u32 status, u32 dmac_status) +{ + struct phytium_sdif_softc *sc = (struct phytium_sdif_softc *)args; + device_t dev = sc->dev; + uint32_t check_status = status & (FSDIF_INT_DTO_BIT | FSDIF_INT_RCRC_BIT | + FSDIF_INT_DCRC_BIT | 
FSDIF_INT_RE_BIT | + FSDIF_INT_DRTO_BIT | FSDIF_INT_EBE_BIT | + FSDIF_INT_SBE_BCI_BIT | FSDIF_INT_RTO_BIT); + uint32_t check_dmac = dmac_status & (FSDIF_DMAC_STATUS_AIS | FSDIF_DMAC_STATUS_DU); + + if (NULL == sc->cmd_pkg.data_p) + { + sc->data_done = true; + } + else if (check_status | check_dmac) + { + if (check_status & FSDIF_INT_DTO_BIT) + { + sc->data_done = true; + } + else + { + device_printf(dev, "Xfer data error, status: 0x%x, dmac status: 0x%x\n", + check_status, check_dmac); + } + } +} + +static void phytium_sdif_error_occurred(FSdif *const instance_p, void *args, u32 status, u32 dmac_status) +{ + struct phytium_sdif_softc *sc = (struct phytium_sdif_softc *)args; + device_t dev = sc->dev; + device_printf(dev, "Error occur !!! \n"); + device_printf(dev, "Status: 0x%x, dmac status: 0x%x. \n", status, dmac_status); + + if (status & FSDIF_INT_RE_BIT) + device_printf(dev, "Response err. 0x%x\n", FSDIF_INT_RE_BIT); + + if (status & FSDIF_INT_RTO_BIT) + device_printf(dev, "Response timeout. 0x%x\n", FSDIF_INT_RTO_BIT); + + if (dmac_status & FSDIF_DMAC_STATUS_DU) + device_printf(dev, "Descriptor un-readable. 0x%x\n", FSDIF_DMAC_STATUS_DU); + + if (status & FSDIF_INT_DCRC_BIT) + device_printf(dev, "Data CRC error. 0x%x\n", FSDIF_INT_DCRC_BIT); + + if (status & FSDIF_INT_RCRC_BIT) + device_printf(dev, "Data CRC error. 0x%x\n", FSDIF_INT_RCRC_BIT); + + sc->err_occur = true; +} +#endif + +static int phytium_sdif_platform_init(struct phytium_sdif_softc *sc) +{ + device_t dev = sc->dev; + phandle_t node = ofw_bus_get_node(dev); + pcell_t cell; + + memset(&sc->hc, 0U, sizeof(sc->hc)); + memset(&sc->hc_cfg, 0U, sizeof(sc->hc_cfg)); + + sc->bus_width = 0xff; + sc->card_clock = 0xff; + sc->power_mode = 0xff; + + sc->hc_cfg = *FSdifLookupConfig(sc->id); + if (OF_hasprop(node, "non-removable") > 0) { + sc->hc_cfg.non_removable = TRUE; + } else { + sc->hc_cfg.non_removable = FALSE; + } + sc->hc_cfg.trans_mode = FSDIF_IDMA_TRANS_MODE; + sc->hc_cfg.get_tuning = FSdifGetTimingSetting; + + if (FSDIF_SUCCESS != FSdifCfgInitialize(&sc->hc, &sc->hc_cfg)) { + device_printf(dev, "Failed to init sdif by configure\n"); + return (ENXIO); + } + + FSdifRegisterRelaxHandler(&sc->hc, phytium_sdif_relax); +#ifndef SDIF_POLLING + FSdifRegisterEvtHandler(&sc->hc, FSDIF_EVT_CARD_DETECTED, phytium_sdif_card_detected, (void *)sc); + FSdifRegisterEvtHandler(&sc->hc, FSDIF_EVT_ERR_OCCURE, phytium_sdif_error_occurred, (void *)sc); + FSdifRegisterEvtHandler(&sc->hc, FSDIF_EVT_CMD_DONE, phytium_sdif_command_done, (void *)sc); + FSdifRegisterEvtHandler(&sc->hc, FSDIF_EVT_DATA_DONE, phytium_sdif_data_done, (void *)sc); +#endif + + if (FSDIF_SUCCESS != FSdifSetIDMAList(&sc->hc, sc->dma_desc, sc->dma_desc_phy, sc->desc_num)) { + device_printf(dev, "Failed to init sdif DMA\n"); + return (ENXIO); + } + + PHYTIUM_MMC_LOCK_INIT(sc); + + sc->host.f_min = FSDIF_CLK_SPEED_400KHZ; + if (OF_getprop(node, "bus-frequency", &cell, sizeof(cell)) > 0) { + sc->host.f_max = fdt32_to_cpu(cell); + } else { + sc->host.f_max = FSDIF_CLK_SPEED_50_MHZ; + } + + if (OF_hasprop(node, "no-1-8-v") > 0) { + sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; + sc->host.caps = MMC_CAP_SIGNALING_330; + } else { + sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340 | MMC_OCR_LOW_VOLTAGE; + sc->host.caps = MMC_CAP_SIGNALING_330 | MMC_CAP_SIGNALING_180; + } + + if (OF_hasprop(node, "high-speed") > 0) { + sc->host.caps |= MMC_CAP_HSPEED; + } + + if (OF_hasprop(node, "non-removable") > 0) { + sc->host.caps |= MMC_CAP_SIGNALING_120; + if (OF_hasprop(node, 
"hs200") > 0) { + sc->host.caps |= MMC_CAP_MMC_HS200; + } + } + + sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; + + if ((!sc->hc.config.non_removable) && + (!FSdifCheckIfCardExists(sc->hc.config.base_addr))) { + device_printf(dev, "Card not exists\n"); + return (ENXIO); + } + + device_add_child(dev, "mmc", -1); + return bus_generic_attach(dev); +} + +static int phytium_sdif_detach(device_t dev) +{ + struct phytium_sdif_softc *sc = device_get_softc(dev); + int err; + + if ((sc->irq_res != NULL) && (sc->intrhandle != NULL)) { + err = bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); + if (err != 0) { + device_printf(dev, "Could not tear down irq, %d\n", err); + sc->intrhandle = NULL; + } + } + + FSdifDeInitialize(&(sc->hc)); + + if (sc->irq_res != NULL) { + bus_release_resource(dev, SYS_RES_IRQ, + rman_get_rid(sc->irq_res), sc->irq_res); + sc->irq_res = NULL; + } + + if (sc->io_res != NULL) { + bus_release_resource(dev, SYS_RES_MEMORY, + rman_get_rid(sc->io_res), sc->io_res); + sc->io_res = NULL; + } + + if (sc->dma_desc_phy != 0) { + rtems_cache_coherent_free(__DEVOLATILE(void *, sc->dma_desc_phy)); + } + + if (sc->dma_buf_phy != 0) { + rtems_cache_coherent_free(__DEVOLATILE(void *, sc->dma_buf_phy)); + } + + return 0; +} + +static int phytium_sdif_attach(device_t dev) +{ + struct phytium_sdif_softc *sc = device_get_softc(dev); + const uint32_t desc_num = ((SDIF_MAX_BLK * SDIF_BLK_SIZE) / FSDIF_IDMAC_MAX_BUF_SIZE) + 1; + const size_t buf_size = SDIF_BLK_SIZE * SDIF_MAX_BLK; + phandle_t node; + pcell_t cell; + int err; + int rid = 0; + + sc->dev = dev; + + /* get sdif propority from fdt */ + node = ofw_bus_get_node(dev); + if (OF_getprop(node, "id", &cell, sizeof(cell)) > 0) { + sc->id = fdt32_to_cpu(cell); + } else { + device_printf(dev, "Failed to get host id\n"); + phytium_sdif_detach(dev); + return (ENXIO); + } + + /* IO memory */ + sc->io_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE); + if (sc->io_res == NULL) { + device_printf(dev, "Failed to map memory\n"); + phytium_sdif_detach(dev); + return (ENXIO); + } + + sc->bushandle = rman_get_bushandle(sc->io_res); + device_printf(dev, "SDIF-%d Register base 0x%x\n", sc->id, sc->bushandle); + + /* IRQ */ + sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); + if (sc->irq_res == NULL) { + device_printf(dev, "Failed to allocate IRQ\n"); + phytium_sdif_detach(dev); + return (ENXIO); + } + + /* allocate DMA memory */ + sc->dma_desc_phy = (uintptr_t)rtems_cache_coherent_allocate(desc_num * sizeof(FSdifIDmaDesc), + SDIF_BLK_SIZE, 0); + if (sc->dma_desc_phy == 0) { + device_printf(dev, "Failed to allocate Descriptor DMA memory\n"); + phytium_sdif_detach(dev); + return (ENOMEM); + } + + /* physical address == virtual address in RTEMS */ + sc->dma_desc = (void *)sc->dma_desc_phy; + sc->desc_num = desc_num; + memset(__DEVOLATILE(void *, sc->dma_desc), 0, desc_num * sizeof(FSdifIDmaDesc)); + + sc->dma_buf_phy = (uintptr_t)rtems_cache_coherent_allocate(buf_size, SDIF_BLK_SIZE, 0); + if (sc->dma_buf_phy == 0) { + device_printf(dev, "Failed to allocate DMA memory\n"); + phytium_sdif_detach(dev); + return (ENOMEM); + } + + sc->dma_buf = (void *)sc->dma_buf_phy; + memset(__DEVOLATILE(void *, sc->dma_buf), 0, buf_size); + + device_printf(dev, "DMA descriptor 0x%x buffer 0x%x\n", sc->dma_desc, sc->dma_buf); + + err = phytium_sdif_platform_init(sc); + if (err != 0) { + device_printf(dev, "Failed to init Sdif host\n"); + phytium_sdif_detach(dev); + } + + /* install IRQ handle */ +#ifndef 
SDIF_POLLING + err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, phyium_sdif_intr, sc, &(sc->intrhandle)); + if (err != 0) { + device_printf(dev, "Failed to setup error IRQ, %d\n", err); + sc->intrhandle = NULL; + phytium_sdif_detach(dev); + return (err); + } +#endif + + return err; +} + +static int phytium_sdif_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) +{ + struct phytium_sdif_softc *sc = device_get_softc(bus); + + switch (which) { + default: + return (EINVAL); + case MMCBR_IVAR_BUS_MODE: + *(int *)result = sc->host.ios.bus_mode; + break; + case MMCBR_IVAR_BUS_WIDTH: + *(int *)result = sc->host.ios.bus_width; + break; + case MMCBR_IVAR_CHIP_SELECT: + *(int *)result = sc->host.ios.chip_select; + break; + case MMCBR_IVAR_CLOCK: + *(int *)result = sc->host.ios.clock; + break; + case MMCBR_IVAR_F_MIN: + *(int *)result = sc->host.f_min; + break; + case MMCBR_IVAR_F_MAX: + *(int *)result = sc->host.f_max; + break; + case MMCBR_IVAR_HOST_OCR: + *(int *)result = sc->host.host_ocr; + break; + case MMCBR_IVAR_MODE: + *(int *)result = sc->host.mode; + break; + case MMCBR_IVAR_OCR: + *(int *)result = sc->host.ocr; + break; + case MMCBR_IVAR_POWER_MODE: + *(int *)result = sc->host.ios.power_mode; + break; + case MMCBR_IVAR_VDD: + *(int *)result = sc->host.ios.vdd; + break; + case MMCBR_IVAR_CAPS: + *(int *)result = sc->host.caps; + break; + case MMCBR_IVAR_TIMING: + *result = sc->host.ios.timing; + break; + case MMCBR_IVAR_MAX_DATA: + *(int *)result = SDIF_MAX_BLK; + break; + } + return (0); +} + +static int phytium_sdif_write_ivar(device_t bus, device_t child, int which, uintptr_t value) +{ + struct phytium_sdif_softc *sc = device_get_softc(bus); + + switch (which) { + default: + return (EINVAL); + case MMCBR_IVAR_BUS_MODE: + sc->host.ios.bus_mode = value; + break; + case MMCBR_IVAR_BUS_WIDTH: + sc->host.ios.bus_width = value; + break; + case MMCBR_IVAR_CHIP_SELECT: + sc->host.ios.chip_select = value; + break; + case MMCBR_IVAR_CLOCK: + sc->host.ios.clock = value; + break; + case MMCBR_IVAR_MODE: + sc->host.mode = value; + break; + case MMCBR_IVAR_OCR: + sc->host.ocr = value; + break; + case MMCBR_IVAR_POWER_MODE: + sc->host.ios.power_mode = value; + break; + case MMCBR_IVAR_VDD: + sc->host.ios.vdd = value; + break; + case MMCBR_IVAR_TIMING: + sc->host.ios.timing = value; + break; + /* These are read-only */ + case MMCBR_IVAR_CAPS: + case MMCBR_IVAR_HOST_OCR: + case MMCBR_IVAR_F_MIN: + case MMCBR_IVAR_F_MAX: + case MMCBR_IVAR_MAX_DATA: + return (EINVAL); + } + return (0); +} + +static int phytium_sdif_update_ios(device_t brdev, device_t reqdev) +{ + struct phytium_sdif_softc *sc = device_get_softc(brdev); + struct mmc_host *host; + struct mmc_ios *ios; + int err = 0; + + PHYTIUM_MMC_LOCK(sc); + + host = &sc->host; + ios = &host->ios; + + if (sc->card_clock != ios->clock) { + if (FSdifSetClkFreq(&sc->hc, ios->clock) != FSDIF_SUCCESS) { + device_printf(brdev, "Failed to update sdif clock\n"); + err = EBUSY; + goto err_exit; + } + sc->card_clock = ios->clock; + device_printf(brdev, "Set sdif clock to %d\n", ios->clock); + } + + if (sc->power_mode != ios->power_mode) { + if (ios->power_mode == power_off) { + FSdifSetPower(sc->hc.config.base_addr, FALSE); + } else { + FSdifSetPower(sc->hc.config.base_addr, TRUE); + } + sc->power_mode = ios->power_mode; + } + + if (sc->bus_width != ios->bus_width) { + switch (ios->bus_width) { + default: + FSdifSetBusWidth(sc->hc.config.base_addr, 1); + device_printf(brdev, "Set bus width bit-1\n"); + break; + case 
bus_width_4: + FSdifSetBusWidth(sc->hc.config.base_addr, 4); + device_printf(brdev, "Set bus width bit-4\n"); + break; + case bus_width_8: + FSdifSetBusWidth(sc->hc.config.base_addr, 8); + device_printf(brdev, "Set bus width bit-8\n"); + break; + } + sc->bus_width = ios->bus_width; + } + +err_exit: + PHYTIUM_MMC_UNLOCK(sc); + return (err); +} + +static int phytium_sdif_set_block_count(struct phytium_sdif_softc *sc, size_t block_cnt) +{ + device_t brdev = sc->dev; + FSdifCmdData *cmd_data = &sc->cmd_pkg; + int err = 0; +#ifndef SDIF_POLLING + rtems_interval sdif_timeout = RTEMS_MILLISECONDS_TO_TICKS(SDIF_TIME_OUT); + rtems_interval start_time, curr_time; +#endif + + PHYTIUM_MMC_LOCK(sc); + + memset(cmd_data, 0U, sizeof(*cmd_data)); + + cmd_data->cmdidx = MMC_SET_BLOCK_COUNT; + cmd_data->cmdarg = (u32)block_cnt; + /* MMC_RSP_R1 */ + cmd_data->flag = FSDIF_CMD_FLAG_EXP_RESP | FSDIF_CMD_FLAG_NEED_RESP_CRC | FSDIF_CMD_FLAG_ADTC; + cmd_data->data_p = NULL; + + sc->err_occur = 0; + sc->cmd_done = 0; + + if (FSDIF_SUCCESS != FSdifDMATransfer(&sc->hc, cmd_data)) { + device_printf(brdev, "Failed to start DMA transfer(set_block)\n"); + err = (ENXIO); + goto err_exit; + } + +#ifndef SDIF_POLLING + start_time = rtems_clock_get_ticks_since_boot(); + while (!sc->cmd_done) { + curr_time = rtems_clock_get_ticks_since_boot(); + if ((curr_time - start_time) > sdif_timeout) { + device_printf(brdev, "Wait command response timeout(set_block)\n"); + sc->err_occur = 1; + break; + } + } +#else + if (FSDIF_SUCCESS != FSdifPollWaitDMAEnd(&sc->hc, cmd_data)) { + device_printf(brdev, "Failed to wait DMA transfer end\n"); + err = (ENXIO); + goto err_exit; + } +#endif + +err_exit: + PHYTIUM_MMC_UNLOCK(sc); + return err; +} + +static int phytium_sdif_pre_request(struct phytium_sdif_softc *sc, struct mmc_request *req) +{ + int err = 0; + + if ((MMC_READ_MULTIPLE_BLOCK == req->cmd->opcode) || + (MMC_WRITE_MULTIPLE_BLOCK == req->cmd->opcode)) { + if ((req->cmd->data ) && (req->cmd->data->len > SDIF_BLK_SIZE)) { + err = phytium_sdif_set_block_count(sc, req->cmd->data->len / SDIF_BLK_SIZE); + } + } + + if (sc->hc.config.non_removable) { + /* ignore micro SD detect command, not in eMMC spec. 
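The completion wait in phytium_sdif_set_block_count() above, and the matching loops later in phytium_sdif_request(), bound a flag spin with RTEMS tick counts while the interrupt handler flips the flag. A minimal sketch of that wait shape; the flag and helper names are hypothetical:

#include <rtems.h>
#include <stdbool.h>

static volatile bool cmd_done;		/* set from the interrupt handler */

static bool
wait_cmd_done(rtems_interval timeout_ticks)
{
	rtems_interval start = rtems_clock_get_ticks_since_boot();

	while (!cmd_done) {
		/* unsigned tick arithmetic stays correct across wrap */
		if ((rtems_clock_get_ticks_since_boot() - start) > timeout_ticks)
			return (false);	/* caller then sets err_occur */
	}
	return (true);
}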
*/ + if ((ACMD_SD_SEND_OP_COND == req->cmd->opcode) || + (MMC_APP_CMD == req->cmd->opcode)) { + (*req->done)(req); + err = -1; + } + + /* ignore mmcsd_send_if_cond(CMD-8) which will failed for eMMC + but check cmd arg to let SEND_EXT_CSD (CMD-8) run */ + if ((SD_SEND_IF_COND == req->cmd->opcode) && + (0x1aa == req->cmd->arg)) { + (*req->done)(req); + err = -1; + } + } + + return err; +} + +static void phytium_sdif_convert_command_info(struct phytium_sdif_softc *sc, + struct mmc_request *in_trans, + FSdifCmdData *out_trans) +{ + struct mmc_command *in_cmd = in_trans->cmd; + struct mmc_data *in_data = in_trans->cmd->data; + FSdifCmdData *out_cmd = out_trans; + FSdifData *out_data = out_trans->data_p; + + if (MMC_GO_IDLE_STATE == in_cmd->opcode) { + out_cmd->flag |= FSDIF_CMD_FLAG_NEED_INIT; + } + + if (MMC_GO_INACTIVE_STATE == in_cmd->opcode) { + out_cmd->flag |= FSDIF_CMD_FLAG_ABORT; + } + + if (MMC_RSP_PRESENT & in_cmd->flags) { + out_cmd->flag |= FSDIF_CMD_FLAG_EXP_RESP; + + if (MMC_RSP_136 & in_cmd->flags) { + /* need 136 bits long response */ + out_cmd->flag |= FSDIF_CMD_FLAG_EXP_LONG_RESP; + } + + if (MMC_RSP_CRC & in_cmd->flags) { + /* most cmds need CRC */ + out_cmd->flag |= FSDIF_CMD_FLAG_NEED_RESP_CRC; + } + } + + if ((in_data) && (in_data->data)) { + assert(out_data); + out_cmd->flag |= FSDIF_CMD_FLAG_EXP_DATA; + + out_data->blksz = (in_data->len < SDIF_BLK_SIZE) ? + in_data->len: + SDIF_BLK_SIZE; + out_data->blkcnt = (in_data->len < SDIF_BLK_SIZE) ? + 1U: + in_data->len / SDIF_BLK_SIZE; + out_data->datalen = in_data->len; + + if (MMC_DATA_READ & in_data->flags) { + out_cmd->flag |= FSDIF_CMD_FLAG_READ_DATA; + + if (((uintptr_t)in_data->data % 512) == 0) { + out_data->buf = (void *)in_data->data; + out_data->buf_dma = (uintptr)in_data->data; + } else { + out_data->buf = (void *)sc->dma_buf; + out_data->buf_dma = (uintptr)sc->dma_buf; + } + + } else if (MMC_DATA_WRITE & in_data->flags) { + out_cmd->flag |= FSDIF_CMD_FLAG_WRITE_DATA; + if (((uintptr_t)in_data->data % 512) == 0) { + out_data->buf = (void *)in_data->data; + out_data->buf_dma = (uintptr)in_data->data; + } else { + out_data->buf = (void *)sc->dma_buf; + out_data->buf_dma = (uintptr)sc->dma_buf; + memcpy(out_data->buf, in_data->data, in_data->len); + } + } else { + assert(0); + } + } + + out_cmd->cmdidx = in_cmd->opcode; + out_cmd->cmdarg = in_cmd->arg; + + return; +} + +static int phytium_sdif_request(device_t brdev, device_t reqdev, struct mmc_request *req) +{ + struct phytium_sdif_softc *sc = device_get_softc(brdev); + FSdifCmdData *cmd_data = &(sc->cmd_pkg); + FSdifData *trans_data = &(sc->dat_pkg); +#ifndef SDIF_POLLING + rtems_interval sdif_timeout = RTEMS_MILLISECONDS_TO_TICKS(SDIF_TIME_OUT); + rtems_interval start_time, curr_time; +#endif + int err; + + err = phytium_sdif_pre_request(sc, req); + if (err != 0) { + if (err == -1) { + err = 0; /* skip command without error */ + } + return err; + } + + PHYTIUM_MMC_LOCK(sc); + + memset(cmd_data, 0U, sizeof(*cmd_data)); + + sc->err_occur = 0; + sc->cmd_done = 0; + if ((req->cmd->data) && (req->cmd->data->data)) { + memset(trans_data, 0, sizeof(*trans_data)); + cmd_data->data_p = trans_data; + sc->data_done = 0; /* need to wait data transfer done */ + } else { + cmd_data->data_p = NULL; + sc->data_done = 1; /* do not need to wait data transfer */ + } + + phytium_sdif_convert_command_info(sc, req, cmd_data); + + if (FSDIF_SUCCESS != FSdifDMATransfer(&sc->hc, cmd_data)) { + device_printf(brdev, "Failed to start DMA transfer\n"); + err = (ENXIO); + goto err_exit; + } + 
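phytium_sdif_convert_command_info() above hands the IDMA engine the caller's buffer only when it is 512-byte aligned; otherwise it substitutes the coherent bounce buffer, staging write data on the way in (reads are copied back after completion in phytium_sdif_request()). A sketch of that decision as a hypothetical helper:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Pick the DMA target: the caller's buffer when it is 512-byte aligned,
 * else the pre-allocated coherent bounce buffer.
 */
static void *
pick_dma_buf(void *user_buf, void *bounce_buf, size_t len, int is_write)
{
	if (((uintptr_t)user_buf % 512) == 0)
		return (user_buf);	/* aligned: DMA directly in place */
	if (is_write)
		memcpy(bounce_buf, user_buf, len);
	return (bounce_buf);
}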
+#ifndef SDIF_POLLING + start_time = rtems_clock_get_ticks_since_boot(); + while (!sc->cmd_done || !sc->data_done) { + curr_time = rtems_clock_get_ticks_since_boot(); + if ((curr_time - start_time) > sdif_timeout) { + device_printf(brdev, "Wait command response timeout %d %d\n", + sc->cmd_done, sc->data_done); + sc->err_occur = 1; + break; + } + } +#else + if (FSDIF_SUCCESS != FSdifPollWaitDMAEnd(&sc->hc, cmd_data)) { + device_printf(brdev, "Failed to wait DMA transfer timeout\n"); + err = (ENXIO); + goto err_exit; + } +#endif + + if (!sc->err_occur) { + if (FSDIF_SUCCESS != FSdifGetCmdResponse(&sc->hc, cmd_data)) { + device_printf(brdev, "Failed to get command response\n"); + err = (ENXIO); + goto err_exit; + } + + if (MMC_RSP_136 & req->cmd->flags) { + req->cmd->resp[3] = cmd_data->response[0]; + req->cmd->resp[2] = cmd_data->response[1]; + req->cmd->resp[1] = cmd_data->response[2]; + req->cmd->resp[0] = cmd_data->response[3]; + } else if (MMC_RSP_PRESENT & req->cmd->flags) { + req->cmd->resp[0] = cmd_data->response[0]; + req->cmd->resp[1] = 0; + req->cmd->resp[2] = 0; + req->cmd->resp[3] = 0; + } + + if (cmd_data->data_p) { + if (cmd_data->data_p->buf != req->cmd->data->data) { + if (req->cmd->data->flags & MMC_DATA_READ) { + memcpy(req->cmd->data->data, cmd_data->data_p->buf, req->cmd->data->len); + } + } + } + +#ifndef SDIF_POLLING + start_time = rtems_clock_get_ticks_since_boot(); + do { + curr_time = rtems_clock_get_ticks_since_boot(); + } while(FSdifCheckIfCardBusy(sc->hc.config.base_addr) && + ((curr_time - start_time) <= sdif_timeout)); + + if ((curr_time - start_time) > sdif_timeout) { + device_printf(brdev, "Failed to wait card ready\n"); + err = (ENXIO); + goto err_exit; + } +#endif + + (*req->done)(req); + } + +err_exit: + PHYTIUM_MMC_UNLOCK(sc); + return err; +} + +static int phytium_sdif_get_ro(device_t brdev, device_t reqdev) +{ + return (0); +} + +static int phytium_sdif_acquire_host(device_t brdev, device_t reqdev) +{ + struct phytium_sdif_softc *sc = device_get_softc(brdev); + + PHYTIUM_MMC_LOCK(sc); + while (sc->bus_busy) + msleep(sc, &sc->sc_mtx, PZERO, "phytium_mmc: acquire host", 0); + sc->bus_busy = 1; + PHYTIUM_MMC_UNLOCK(sc); + return (0); +} + +static int phytium_sdif_release_host(device_t brdev, device_t reqdev) +{ + struct phytium_sdif_softc *sc = device_get_softc(brdev); + + PHYTIUM_MMC_LOCK(sc); + sc->bus_busy = 0; + wakeup(sc); + PHYTIUM_MMC_UNLOCK(sc); + return (0); +} + +static device_method_t phytium_sdif_methods[] = { + /* device_if */ + DEVMETHOD(device_probe, phytium_sdif_probe), + DEVMETHOD(device_attach, phytium_sdif_attach), + DEVMETHOD(device_detach, phytium_sdif_detach), + + /* Bus interface */ + DEVMETHOD(bus_read_ivar, phytium_sdif_read_ivar), + DEVMETHOD(bus_write_ivar, phytium_sdif_write_ivar), + + /* mmcbr_if */ + DEVMETHOD(mmcbr_update_ios, phytium_sdif_update_ios), + DEVMETHOD(mmcbr_request, phytium_sdif_request), + DEVMETHOD(mmcbr_get_ro, phytium_sdif_get_ro), + DEVMETHOD(mmcbr_acquire_host, phytium_sdif_acquire_host), + DEVMETHOD(mmcbr_release_host, phytium_sdif_release_host), + + DEVMETHOD_END +}; + +static devclass_t phytium_sdif_devclass; + +static driver_t phytium_sdif_driver = { + "sdif_phytium", + phytium_sdif_methods, + sizeof(struct phytium_sdif_softc), +}; + +DRIVER_MODULE(sdif_phytium, simplebus, phytium_sdif_driver, phytium_sdif_devclass, NULL, NULL); +MMC_DECLARE_BRIDGE(sdif_phytium); \ No newline at end of file diff --git a/freebsd/sys/dev/phytium/phytium_sdmmc.c b/freebsd/sys/dev/phytium/phytium_sdmmc.c new file mode 100644 
index 00000000..e69de29b diff --git a/freebsd/sys/dev/usb/controller/xhci.c b/freebsd/sys/dev/usb/controller/xhci.c new file mode 100644 index 00000000..7f8852e7 --- /dev/null +++ b/freebsd/sys/dev/usb/controller/xhci.c @@ -0,0 +1,4373 @@ +#include + +/* $FreeBSD$ */ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2010 Hans Petter Selasky. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * USB eXtensible Host Controller Interface, a.k.a. USB 3.0 controller. + * + * The XHCI 1.0 spec can be found at + * http://www.intel.com/technology/usb/download/xHCI_Specification_for_USB.pdf + * and the USB 3.0 spec at + * http://www.usb.org/developers/docs/usb_30_spec_060910.zip + */ + +/* + * A few words about the design implementation: this driver emulates + * the concept of TDs found in the EHCI specification. That way the + * USB controller drivers look similar to each other, which makes the + * code easier to understand.
+ */ + +#ifdef USB_GLOBAL_INCLUDE_FILE +#include USB_GLOBAL_INCLUDE_FILE +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define USB_DEBUG_VAR xhcidebug + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#endif /* USB_GLOBAL_INCLUDE_FILE */ + +#include +#include + +#define XHCI_BUS2SC(bus) \ + ((struct xhci_softc *)(((uint8_t *)(bus)) - \ + ((uint8_t *)&(((struct xhci_softc *)0)->sc_bus)))) + +static SYSCTL_NODE(_hw_usb, OID_AUTO, xhci, CTLFLAG_RW, 0, "USB XHCI"); + +static int xhcistreams; +SYSCTL_INT(_hw_usb_xhci, OID_AUTO, streams, CTLFLAG_RWTUN, + &xhcistreams, 0, "Set to enable streams mode support"); + +#ifdef USB_DEBUG +static int xhcidebug; +static int xhciroute; +static int xhcipolling; +static int xhcidma32; +static int xhcictlstep; + +SYSCTL_INT(_hw_usb_xhci, OID_AUTO, debug, CTLFLAG_RWTUN, + &xhcidebug, 0, "Debug level"); +SYSCTL_INT(_hw_usb_xhci, OID_AUTO, xhci_port_route, CTLFLAG_RWTUN, + &xhciroute, 0, "Routing bitmap for switching EHCI ports to the XHCI controller"); +SYSCTL_INT(_hw_usb_xhci, OID_AUTO, use_polling, CTLFLAG_RWTUN, + &xhcipolling, 0, "Set to enable software interrupt polling for the XHCI controller"); +SYSCTL_INT(_hw_usb_xhci, OID_AUTO, dma32, CTLFLAG_RWTUN, + &xhcidma32, 0, "Set to only use 32-bit DMA for the XHCI controller"); +SYSCTL_INT(_hw_usb_xhci, OID_AUTO, ctlstep, CTLFLAG_RWTUN, + &xhcictlstep, 0, "Set to enable control endpoint status stage stepping"); +#else +#define xhciroute 0 +#define xhcidma32 0 +#define xhcictlstep 0 +#endif + +#define XHCI_INTR_ENDPT 1 + +struct xhci_std_temp { + struct xhci_softc *sc; + struct usb_page_cache *pc; + struct xhci_td *td; + struct xhci_td *td_next; + uint32_t len; + uint32_t offset; + uint32_t max_packet_size; + uint32_t average; + uint16_t isoc_delta; + uint16_t isoc_frame; + uint8_t shortpkt; + uint8_t multishort; + uint8_t last_frame; + uint8_t trb_type; + uint8_t direction; + uint8_t tbc; + uint8_t tlbpc; + uint8_t step_td; + uint8_t do_isoc_sync; +}; + +static void xhci_do_poll(struct usb_bus *); +static void xhci_device_done(struct usb_xfer *, usb_error_t); +static void xhci_root_intr(struct xhci_softc *); +static void xhci_free_device_ext(struct usb_device *); +static struct xhci_endpoint_ext *xhci_get_endpoint_ext(struct usb_device *, + struct usb_endpoint_descriptor *); +static usb_proc_callback_t xhci_configure_msg; +static usb_error_t xhci_configure_device(struct usb_device *); +static usb_error_t xhci_configure_endpoint(struct usb_device *, + struct usb_endpoint_descriptor *, struct xhci_endpoint_ext *, + uint16_t, uint8_t, uint8_t, uint8_t, uint16_t, uint16_t, + uint8_t); +static usb_error_t xhci_configure_mask(struct usb_device *, + uint32_t, uint8_t); +static usb_error_t xhci_cmd_evaluate_ctx(struct xhci_softc *, + uint64_t, uint8_t); +static void xhci_endpoint_doorbell(struct usb_xfer *); +static void xhci_ctx_set_le32(struct xhci_softc *sc, volatile uint32_t *ptr, uint32_t val); +static uint32_t xhci_ctx_get_le32(struct xhci_softc *sc, volatile uint32_t *ptr); +static void xhci_ctx_set_le64(struct xhci_softc *sc, volatile uint64_t *ptr, uint64_t val); +#ifdef USB_DEBUG +static uint64_t xhci_ctx_get_le64(struct xhci_softc *sc, volatile uint64_t *ptr); +#endif + +static const struct usb_bus_methods xhci_bus_methods; + +#ifdef USB_DEBUG +static void +xhci_dump_trb(struct xhci_trb 
*trb) +{ + DPRINTFN(5, "trb = %p\n", trb); + DPRINTFN(5, "qwTrb0 = 0x%016llx\n", (long long)le64toh(trb->qwTrb0)); + DPRINTFN(5, "dwTrb2 = 0x%08x\n", le32toh(trb->dwTrb2)); + DPRINTFN(5, "dwTrb3 = 0x%08x\n", le32toh(trb->dwTrb3)); +} + +static void +xhci_dump_endpoint(struct xhci_softc *sc, struct xhci_endp_ctx *pep) +{ + DPRINTFN(5, "pep = %p\n", pep); + DPRINTFN(5, "dwEpCtx0=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx0)); + DPRINTFN(5, "dwEpCtx1=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx1)); + DPRINTFN(5, "qwEpCtx2=0x%016llx\n", (long long)xhci_ctx_get_le64(sc, &pep->qwEpCtx2)); + DPRINTFN(5, "dwEpCtx4=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx4)); + DPRINTFN(5, "dwEpCtx5=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx5)); + DPRINTFN(5, "dwEpCtx6=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx6)); + DPRINTFN(5, "dwEpCtx7=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx7)); +} + +static void +xhci_dump_device(struct xhci_softc *sc, struct xhci_slot_ctx *psl) +{ + DPRINTFN(5, "psl = %p\n", psl); + DPRINTFN(5, "dwSctx0=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx0)); + DPRINTFN(5, "dwSctx1=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx1)); + DPRINTFN(5, "dwSctx2=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx2)); + DPRINTFN(5, "dwSctx3=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx3)); +} +#endif + +uint8_t +xhci_use_polling(void) +{ +#ifdef USB_DEBUG + return (xhcipolling != 0); +#else + return (0); +#endif +} + +static void +xhci_iterate_hw_softc(struct usb_bus *bus, usb_bus_mem_sub_cb_t *cb) +{ + struct xhci_softc *sc = XHCI_BUS2SC(bus); + uint16_t i; + + cb(bus, &sc->sc_hw.root_pc, &sc->sc_hw.root_pg, + sizeof(struct xhci_hw_root), XHCI_PAGE_SIZE); + + cb(bus, &sc->sc_hw.ctx_pc, &sc->sc_hw.ctx_pg, + sizeof(struct xhci_dev_ctx_addr), XHCI_PAGE_SIZE); + + for (i = 0; i != sc->sc_noscratch; i++) { + cb(bus, &sc->sc_hw.scratch_pc[i], &sc->sc_hw.scratch_pg[i], + XHCI_PAGE_SIZE, XHCI_PAGE_SIZE); + } +} + +static void +xhci_ctx_set_le32(struct xhci_softc *sc, volatile uint32_t *ptr, uint32_t val) +{ + if (sc->sc_ctx_is_64_byte) { + uint32_t offset; + /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ + /* all contexts are initially 32-bytes */ + offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); + ptr = (volatile uint32_t *)(((volatile uint8_t *)ptr) + offset); + } + *ptr = htole32(val); +} + +static uint32_t +xhci_ctx_get_le32(struct xhci_softc *sc, volatile uint32_t *ptr) +{ + if (sc->sc_ctx_is_64_byte) { + uint32_t offset; + /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ + /* all contexts are initially 32-bytes */ + offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); + ptr = (volatile uint32_t *)(((volatile uint8_t *)ptr) + offset); + } + return (le32toh(*ptr)); +} + +static void +xhci_ctx_set_le64(struct xhci_softc *sc, volatile uint64_t *ptr, uint64_t val) +{ + if (sc->sc_ctx_is_64_byte) { + uint32_t offset; + /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ + /* all contexts are initially 32-bytes */ + offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); + ptr = (volatile uint64_t *)(((volatile uint8_t *)ptr) + offset); + } + *ptr = htole64(val); +} + +#ifdef USB_DEBUG +static uint64_t +xhci_ctx_get_le64(struct xhci_softc *sc, volatile uint64_t *ptr) +{ + if (sc->sc_ctx_is_64_byte) { + uint32_t offset; + /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ + /* all contexts are initially 32-bytes */ + offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); + ptr = (volatile 
uint64_t *)(((volatile uint8_t *)ptr) + offset); + } + return (le64toh(*ptr)); +} +#endif + +static int +xhci_reset_command_queue_locked(struct xhci_softc *sc) +{ + struct usb_page_search buf_res; + struct xhci_hw_root *phwr; + uint64_t addr; + uint32_t temp; + + DPRINTF("\n"); + + temp = XREAD4(sc, oper, XHCI_CRCR_LO); + if (temp & XHCI_CRCR_LO_CRR) { + DPRINTF("Command ring running\n"); + temp &= ~(XHCI_CRCR_LO_CS | XHCI_CRCR_LO_CA); + + /* + * Try to abort the last command as per section + * 4.6.1.2 "Aborting a Command" of the XHCI + * specification: + */ + + /* stop and cancel */ + XWRITE4(sc, oper, XHCI_CRCR_LO, temp | XHCI_CRCR_LO_CS); + XWRITE4(sc, oper, XHCI_CRCR_HI, 0); + + XWRITE4(sc, oper, XHCI_CRCR_LO, temp | XHCI_CRCR_LO_CA); + XWRITE4(sc, oper, XHCI_CRCR_HI, 0); + + /* wait 250ms */ + usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 4); + + /* check if command ring is still running */ + temp = XREAD4(sc, oper, XHCI_CRCR_LO); + if (temp & XHCI_CRCR_LO_CRR) { + DPRINTF("Comand ring still running\n"); + return (USB_ERR_IOERROR); + } + } + + /* reset command ring */ + sc->sc_command_ccs = 1; + sc->sc_command_idx = 0; + + usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); + + /* set up command ring control base address */ + addr = buf_res.physaddr; + phwr = buf_res.buffer; + addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_commands[0]; + + DPRINTF("CRCR=0x%016llx\n", (unsigned long long)addr); + + memset(phwr->hwr_commands, 0, sizeof(phwr->hwr_commands)); + phwr->hwr_commands[XHCI_MAX_COMMANDS - 1].qwTrb0 = htole64(addr); + + usb_pc_cpu_flush(&sc->sc_hw.root_pc); + + XWRITE4(sc, oper, XHCI_CRCR_LO, ((uint32_t)addr) | XHCI_CRCR_LO_RCS); + XWRITE4(sc, oper, XHCI_CRCR_HI, (uint32_t)(addr >> 32)); + + return (0); +} + +usb_error_t +xhci_start_controller(struct xhci_softc *sc) +{ + struct usb_page_search buf_res; + struct xhci_hw_root *phwr; + struct xhci_dev_ctx_addr *pdctxa; + usb_error_t err; + uint64_t addr; + uint32_t temp; + uint16_t i; + + DPRINTF("\n"); + + sc->sc_event_ccs = 1; + sc->sc_event_idx = 0; + sc->sc_command_ccs = 1; + sc->sc_command_idx = 0; + + err = xhci_reset_controller(sc); + if (err) + return (err); + + /* set up number of device slots */ + DPRINTF("CONFIG=0x%08x -> 0x%08x\n", + XREAD4(sc, oper, XHCI_CONFIG), sc->sc_noslot); + + XWRITE4(sc, oper, XHCI_CONFIG, sc->sc_noslot); + + temp = XREAD4(sc, oper, XHCI_USBSTS); + + /* clear interrupts */ + XWRITE4(sc, oper, XHCI_USBSTS, temp); + /* disable all device notifications */ + XWRITE4(sc, oper, XHCI_DNCTRL, 0); + + /* set up device context base address */ + usbd_get_page(&sc->sc_hw.ctx_pc, 0, &buf_res); + pdctxa = buf_res.buffer; + memset(pdctxa, 0, sizeof(*pdctxa)); + + addr = buf_res.physaddr; + addr += (uintptr_t)&((struct xhci_dev_ctx_addr *)0)->qwSpBufPtr[0]; + + /* slot 0 points to the table of scratchpad pointers */ + pdctxa->qwBaaDevCtxAddr[0] = htole64(addr); + + for (i = 0; i != sc->sc_noscratch; i++) { + struct usb_page_search buf_scp; + usbd_get_page(&sc->sc_hw.scratch_pc[i], 0, &buf_scp); + pdctxa->qwSpBufPtr[i] = htole64((uint64_t)buf_scp.physaddr); + } + + addr = buf_res.physaddr; + + XWRITE4(sc, oper, XHCI_DCBAAP_LO, (uint32_t)addr); + XWRITE4(sc, oper, XHCI_DCBAAP_HI, (uint32_t)(addr >> 32)); + XWRITE4(sc, oper, XHCI_DCBAAP_LO, (uint32_t)addr); + XWRITE4(sc, oper, XHCI_DCBAAP_HI, (uint32_t)(addr >> 32)); + + /* set up event table size */ + DPRINTF("ERSTSZ=0x%08x -> 0x%08x\n", + XREAD4(sc, runt, XHCI_ERSTSZ(0)), sc->sc_erst_max); + + XWRITE4(sc, runt, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(sc->sc_erst_max)); + + /* 
set up interrupt rate */ + XWRITE4(sc, runt, XHCI_IMOD(0), sc->sc_imod_default); + + usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); + + phwr = buf_res.buffer; + addr = buf_res.physaddr; + addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_events[0]; + + /* reset hardware root structure */ + memset(phwr, 0, sizeof(*phwr)); + + phwr->hwr_ring_seg[0].qwEvrsTablePtr = htole64(addr); + phwr->hwr_ring_seg[0].dwEvrsTableSize = htole32(XHCI_MAX_EVENTS); + + DPRINTF("ERDP(0)=0x%016llx\n", (unsigned long long)addr); + + XWRITE4(sc, runt, XHCI_ERDP_LO(0), (uint32_t)addr); + XWRITE4(sc, runt, XHCI_ERDP_HI(0), (uint32_t)(addr >> 32)); + + addr = buf_res.physaddr; + + DPRINTF("ERSTBA(0)=0x%016llx\n", (unsigned long long)addr); + + XWRITE4(sc, runt, XHCI_ERSTBA_LO(0), (uint32_t)addr); + XWRITE4(sc, runt, XHCI_ERSTBA_HI(0), (uint32_t)(addr >> 32)); + + /* set up interrupter registers */ + temp = XREAD4(sc, runt, XHCI_IMAN(0)); + temp |= XHCI_IMAN_INTR_ENA; + XWRITE4(sc, runt, XHCI_IMAN(0), temp); + + /* set up command ring control base address */ + addr = buf_res.physaddr; + addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_commands[0]; + + DPRINTF("CRCR=0x%016llx\n", (unsigned long long)addr); + + XWRITE4(sc, oper, XHCI_CRCR_LO, ((uint32_t)addr) | XHCI_CRCR_LO_RCS); + XWRITE4(sc, oper, XHCI_CRCR_HI, (uint32_t)(addr >> 32)); + + phwr->hwr_commands[XHCI_MAX_COMMANDS - 1].qwTrb0 = htole64(addr); + + usb_bus_mem_flush_all(&sc->sc_bus, &xhci_iterate_hw_softc); + + /* Go! */ + XWRITE4(sc, oper, XHCI_USBCMD, XHCI_CMD_RS | + XHCI_CMD_INTE | XHCI_CMD_HSEE); + + for (i = 0; i != 100; i++) { + usb_pause_mtx(NULL, hz / 100); + temp = XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_HCH; + if (!temp) + break; + } + if (temp) { + XWRITE4(sc, oper, XHCI_USBCMD, 0); + device_printf(sc->sc_bus.parent, "Run timeout.\n"); + return (USB_ERR_IOERROR); + } + + /* catch any lost interrupts */ + xhci_do_poll(&sc->sc_bus); + + if (sc->sc_port_route != NULL) { + /* Route all ports to the XHCI by default */ + sc->sc_port_route(sc->sc_bus.parent, + ~xhciroute, xhciroute); + } + return (0); +} + +usb_error_t +xhci_halt_controller(struct xhci_softc *sc) +{ + uint32_t temp; + uint16_t i; + + DPRINTF("\n"); + + sc->sc_capa_off = 0; + sc->sc_oper_off = XREAD1(sc, capa, XHCI_CAPLENGTH); + sc->sc_runt_off = XREAD4(sc, capa, XHCI_RTSOFF) & ~0xF; + sc->sc_door_off = XREAD4(sc, capa, XHCI_DBOFF) & ~0x3; + + /* Halt controller */ + XWRITE4(sc, oper, XHCI_USBCMD, 0); + + for (i = 0; i != 100; i++) { + usb_pause_mtx(NULL, hz / 100); + temp = XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_HCH; + if (temp) + break; + } + + if (!temp) { + device_printf(sc->sc_bus.parent, "Controller halt timeout.\n"); + return (USB_ERR_IOERROR); + } + return (0); +} + +usb_error_t +xhci_reset_controller(struct xhci_softc *sc) +{ + uint32_t temp = 0; + uint16_t i; + + DPRINTF("\n"); + + /* Reset controller */ + XWRITE4(sc, oper, XHCI_USBCMD, XHCI_CMD_HCRST); + + for (i = 0; i != 100; i++) { + usb_pause_mtx(NULL, hz / 100); + temp = (XREAD4(sc, oper, XHCI_USBCMD) & XHCI_CMD_HCRST) | + (XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_CNR); + if (!temp) + break; + } + + if (temp) { + device_printf(sc->sc_bus.parent, "Controller " + "reset timeout.\n"); + return (USB_ERR_IOERROR); + } + return (0); +} + +usb_error_t +xhci_init(struct xhci_softc *sc, device_t self, uint8_t dma32) +{ + uint32_t temp; + uint32_t version; + + DPRINTF("\n"); + + /* initialize some bus fields */ + sc->sc_bus.parent = self; + + /* set the bus revision */ + sc->sc_bus.usbrev = USB_REV_3_0; + + /* set up the 
bus struct */ + sc->sc_bus.methods = &xhci_bus_methods; + + /* set up devices array */ + sc->sc_bus.devices = sc->sc_devices; + sc->sc_bus.devices_max = XHCI_MAX_DEVICES; + + /* set default cycle state in case of early interrupts */ + sc->sc_event_ccs = 1; + sc->sc_command_ccs = 1; + + /* set up bus space offsets */ + sc->sc_capa_off = 0; + sc->sc_oper_off = XREAD1(sc, capa, XHCI_CAPLENGTH); + sc->sc_runt_off = XREAD4(sc, capa, XHCI_RTSOFF) & ~0x1F; + sc->sc_door_off = XREAD4(sc, capa, XHCI_DBOFF) & ~0x3; + + DPRINTF("CAPLENGTH=0x%x\n", sc->sc_oper_off); + DPRINTF("RUNTIMEOFFSET=0x%x\n", sc->sc_runt_off); + DPRINTF("DOOROFFSET=0x%x\n", sc->sc_door_off); + + version = (XREAD4(sc, capa, XHCI_CAPLENGTH) >> 16) & 0xFFFF; + DPRINTF("xHCI version = 0x%04x\n", version); + + if (!(XREAD4(sc, oper, XHCI_PAGESIZE) & XHCI_PAGESIZE_4K)) { + device_printf(sc->sc_bus.parent, "Controller does " + "not support 4K page size.\n"); + return (ENXIO); + } + + temp = XREAD4(sc, capa, XHCI_HCSPARAMS0); + + DPRINTF("HCS0 = 0x%08x\n", temp); + + /* set up context size */ + if (XHCI_HCS0_CSZ(temp)) { + sc->sc_ctx_is_64_byte = 1; + } else { + sc->sc_ctx_is_64_byte = 0; + } + + /* get DMA bits */ + sc->sc_bus.dma_bits = (XHCI_HCS0_AC64(temp) && + xhcidma32 == 0 && dma32 == 0) ? 64 : 32; + + device_printf(self, "%d bytes context size, %d-bit DMA\n", + sc->sc_ctx_is_64_byte ? 64 : 32, (int)sc->sc_bus.dma_bits); + + temp = XREAD4(sc, capa, XHCI_HCSPARAMS1); + + /* get number of device slots */ + sc->sc_noport = XHCI_HCS1_N_PORTS(temp); + + if (sc->sc_noport == 0) { + device_printf(sc->sc_bus.parent, "Invalid number " + "of ports: %u\n", sc->sc_noport); + return (ENXIO); + } + + sc->sc_noport = sc->sc_noport; + sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(temp); + + DPRINTF("Max slots: %u\n", sc->sc_noslot); + + if (sc->sc_noslot > XHCI_MAX_DEVICES) + sc->sc_noslot = XHCI_MAX_DEVICES; + + temp = XREAD4(sc, capa, XHCI_HCSPARAMS2); + + DPRINTF("HCS2=0x%08x\n", temp); + + /* get number of scratchpads */ + sc->sc_noscratch = XHCI_HCS2_SPB_MAX(temp); + + if (sc->sc_noscratch > XHCI_MAX_SCRATCHPADS) { + device_printf(sc->sc_bus.parent, "XHCI request " + "too many scratchpads\n"); + return (ENOMEM); + } + + DPRINTF("Max scratch: %u\n", sc->sc_noscratch); + + /* get event table size */ + sc->sc_erst_max = 1U << XHCI_HCS2_ERST_MAX(temp); + if (sc->sc_erst_max > XHCI_MAX_RSEG) + sc->sc_erst_max = XHCI_MAX_RSEG; + + temp = XREAD4(sc, capa, XHCI_HCSPARAMS3); + + /* get maximum exit latency */ + sc->sc_exit_lat_max = XHCI_HCS3_U1_DEL(temp) + + XHCI_HCS3_U2_DEL(temp) + 250 /* us */; + + /* Check if we should use the default IMOD value. */ + if (sc->sc_imod_default == 0) + sc->sc_imod_default = XHCI_IMOD_DEFAULT; + + /* get all DMA memory */ + if (usb_bus_mem_alloc_all(&sc->sc_bus, + USB_GET_DMA_TAG(self), &xhci_iterate_hw_softc)) { + return (ENOMEM); + } + + /* set up command queue mutex and condition varible */ + cv_init(&sc->sc_cmd_cv, "CMDQ"); + sx_init(&sc->sc_cmd_sx, "CMDQ lock"); + + sc->sc_config_msg[0].hdr.pm_callback = &xhci_configure_msg; + sc->sc_config_msg[0].bus = &sc->sc_bus; + sc->sc_config_msg[1].hdr.pm_callback = &xhci_configure_msg; + sc->sc_config_msg[1].bus = &sc->sc_bus; + + return (0); +} + +void +xhci_uninit(struct xhci_softc *sc) +{ + /* + * NOTE: At this point the control transfer process is gone + * and "xhci_configure_msg" is no longer called. Consequently + * waiting for the configuration messages to complete is not + * needed. 
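xhci_init() above sizes the whole driver from packed capability bitfields. The sketch below restates the layouts it relies on, per the xHCI spec: MaxSlots in HCSPARAMS1 bits 7:0, MaxPorts in bits 31:24, and ERST Max as a power-of-two exponent in HCSPARAMS2 bits 7:4. The macros and register values here are illustrative, not the driver's headers.

#include <stdint.h>
#include <stdio.h>

#define HCS1_DEVSLOT_MAX(x)	((x) & 0xFF)		/* HCSPARAMS1[7:0] */
#define HCS1_N_PORTS(x)		(((x) >> 24) & 0xFF)	/* HCSPARAMS1[31:24] */
#define HCS2_ERST_MAX(x)	(((x) >> 4) & 0xF)	/* HCSPARAMS2[7:4] */

int main(void)
{
	uint32_t hcs1 = 0x20000420;	/* hypothetical register reads */
	uint32_t hcs2 = 0x00000040;

	printf("slots=%u ports=%u erst segments=%u\n",
	    (unsigned)HCS1_DEVSLOT_MAX(hcs1), (unsigned)HCS1_N_PORTS(hcs1),
	    1U << HCS2_ERST_MAX(hcs2));
	return (0);
}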
+ */ + usb_bus_mem_free_all(&sc->sc_bus, &xhci_iterate_hw_softc); + + cv_destroy(&sc->sc_cmd_cv); + sx_destroy(&sc->sc_cmd_sx); +} + +static void +xhci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state) +{ + struct xhci_softc *sc = XHCI_BUS2SC(bus); + + switch (state) { + case USB_HW_POWER_SUSPEND: + DPRINTF("Stopping the XHCI\n"); + xhci_halt_controller(sc); + xhci_reset_controller(sc); + break; + case USB_HW_POWER_SHUTDOWN: + DPRINTF("Stopping the XHCI\n"); + xhci_halt_controller(sc); + xhci_reset_controller(sc); + break; + case USB_HW_POWER_RESUME: + DPRINTF("Starting the XHCI\n"); + xhci_start_controller(sc); + break; + default: + break; + } +} + +static usb_error_t +xhci_generic_done_sub(struct usb_xfer *xfer) +{ + struct xhci_td *td; + struct xhci_td *td_alt_next; + uint32_t len; + uint8_t status; + + td = xfer->td_transfer_cache; + td_alt_next = td->alt_next; + + if (xfer->aframes != xfer->nframes) + usbd_xfer_set_frame_len(xfer, xfer->aframes, 0); + + while (1) { + + usb_pc_cpu_invalidate(td->page_cache); + + status = td->status; + len = td->remainder; + + DPRINTFN(4, "xfer=%p[%u/%u] rem=%u/%u status=%u\n", + xfer, (unsigned int)xfer->aframes, + (unsigned int)xfer->nframes, + (unsigned int)len, (unsigned int)td->len, + (unsigned int)status); + + /* + * Verify the status length and + * add the length to "frlengths[]": + */ + if (len > td->len) { + /* should not happen */ + DPRINTF("Invalid status length, " + "0x%04x/0x%04x bytes\n", len, td->len); + status = XHCI_TRB_ERROR_LENGTH; + } else if (xfer->aframes != xfer->nframes) { + xfer->frlengths[xfer->aframes] += td->len - len; + } + /* Check for last transfer */ + if (((void *)td) == xfer->td_transfer_last) { + td = NULL; + break; + } + /* Check for transfer error */ + if (status != XHCI_TRB_ERROR_SHORT_PKT && + status != XHCI_TRB_ERROR_SUCCESS) { + /* the transfer is finished */ + td = NULL; + break; + } + /* Check for short transfer */ + if (len > 0) { + if (xfer->flags_int.short_frames_ok || + xfer->flags_int.isochronous_xfr || + xfer->flags_int.control_xfr) { + /* follow alt next */ + td = td->alt_next; + } else { + /* the transfer is finished */ + td = NULL; + } + break; + } + td = td->obj_next; + + if (td->alt_next != td_alt_next) { + /* this USB frame is complete */ + break; + } + } + + /* update transfer cache */ + + xfer->td_transfer_cache = td; + + return ((status == XHCI_TRB_ERROR_STALL) ? USB_ERR_STALLED : + (status != XHCI_TRB_ERROR_SHORT_PKT && + status != XHCI_TRB_ERROR_SUCCESS) ? 
USB_ERR_IOERROR : + USB_ERR_NORMAL_COMPLETION); +} + +static void +xhci_generic_done(struct usb_xfer *xfer) +{ + usb_error_t err = 0; + + DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n", + xfer, xfer->endpoint); + + /* reset scanner */ + + xfer->td_transfer_cache = xfer->td_transfer_first; + + if (xfer->flags_int.control_xfr) { + + if (xfer->flags_int.control_hdr) + err = xhci_generic_done_sub(xfer); + + xfer->aframes = 1; + + if (xfer->td_transfer_cache == NULL) + goto done; + } + + while (xfer->aframes != xfer->nframes) { + + err = xhci_generic_done_sub(xfer); + xfer->aframes++; + + if (xfer->td_transfer_cache == NULL) + goto done; + } + + if (xfer->flags_int.control_xfr && + !xfer->flags_int.control_act) + err = xhci_generic_done_sub(xfer); +done: + /* transfer is complete */ + xhci_device_done(xfer, err); +} + +static void +xhci_activate_transfer(struct usb_xfer *xfer) +{ + struct xhci_td *td; + + td = xfer->td_transfer_cache; + + usb_pc_cpu_invalidate(td->page_cache); + + if (!(td->td_trb[0].dwTrb3 & htole32(XHCI_TRB_3_CYCLE_BIT))) { + + /* activate the transfer */ + + td->td_trb[0].dwTrb3 |= htole32(XHCI_TRB_3_CYCLE_BIT); + usb_pc_cpu_flush(td->page_cache); + + xhci_endpoint_doorbell(xfer); + } +} + +static void +xhci_skip_transfer(struct usb_xfer *xfer) +{ + struct xhci_td *td; + struct xhci_td *td_last; + + td = xfer->td_transfer_cache; + td_last = xfer->td_transfer_last; + + td = td->alt_next; + + usb_pc_cpu_invalidate(td->page_cache); + + if (!(td->td_trb[0].dwTrb3 & htole32(XHCI_TRB_3_CYCLE_BIT))) { + + usb_pc_cpu_invalidate(td_last->page_cache); + + /* copy LINK TRB to current waiting location */ + + td->td_trb[0].qwTrb0 = td_last->td_trb[td_last->ntrb].qwTrb0; + td->td_trb[0].dwTrb2 = td_last->td_trb[td_last->ntrb].dwTrb2; + usb_pc_cpu_flush(td->page_cache); + + td->td_trb[0].dwTrb3 = td_last->td_trb[td_last->ntrb].dwTrb3; + usb_pc_cpu_flush(td->page_cache); + + xhci_endpoint_doorbell(xfer); + } +} + +/*------------------------------------------------------------------------* + * xhci_check_transfer + *------------------------------------------------------------------------*/ +static void +xhci_check_transfer(struct xhci_softc *sc, struct xhci_trb *trb) +{ + struct xhci_endpoint_ext *pepext; + int64_t offset; + uint64_t td_event; + uint32_t temp; + uint32_t remainder; + uint16_t stream_id; + uint16_t i; + uint8_t status; + uint8_t halted; + uint8_t epno; + uint8_t index; + + /* decode TRB */ + td_event = le64toh(trb->qwTrb0); + temp = le32toh(trb->dwTrb2); + + remainder = XHCI_TRB_2_REM_GET(temp); + status = XHCI_TRB_2_ERROR_GET(temp); + stream_id = XHCI_TRB_2_STREAM_GET(temp); + + temp = le32toh(trb->dwTrb3); + epno = XHCI_TRB_3_EP_GET(temp); + index = XHCI_TRB_3_SLOT_GET(temp); + + /* check if error means halted */ + halted = (status != XHCI_TRB_ERROR_SHORT_PKT && + status != XHCI_TRB_ERROR_SUCCESS); + + DPRINTF("slot=%u epno=%u stream=%u remainder=%u status=%u\n", + index, epno, stream_id, remainder, status); + + if (index > sc->sc_noslot) { + DPRINTF("Invalid slot.\n"); + return; + } + + if ((epno == 0) || (epno >= XHCI_MAX_ENDPOINTS)) { + DPRINTF("Invalid endpoint.\n"); + return; + } + + pepext = &sc->sc_hw.devs[index].endp[epno]; + + if (pepext->trb_ep_mode != USB_EP_MODE_STREAMS) { + stream_id = 0; + DPRINTF("stream_id=0\n"); + } else if (stream_id >= XHCI_MAX_STREAMS) { + DPRINTF("Invalid stream ID.\n"); + return; + } + + /* try to find the USB transfer that generated the event */ + for (i = 0; i != (XHCI_MAX_TRANSFERS - 1); i++) { + struct usb_xfer *xfer; + struct 
xhci_td *td; + + xfer = pepext->xfer[i + (XHCI_MAX_TRANSFERS * stream_id)]; + if (xfer == NULL) + continue; + + td = xfer->td_transfer_cache; + + DPRINTFN(5, "Checking if 0x%016llx == (0x%016llx .. 0x%016llx)\n", + (long long)td_event, + (long long)td->td_self, + (long long)td->td_self + sizeof(td->td_trb)); + + /* + * NOTE: Some XHCI implementations might not trigger + * an event on the last LINK TRB so we need to + * consider both the last and second last event + * address as conditions for a successful transfer. + * + * NOTE: We assume that the XHCI will only trigger one + * event per chain of TRBs. + */ + + offset = td_event - td->td_self; + + if (offset >= 0 && + offset < (int64_t)sizeof(td->td_trb)) { + + usb_pc_cpu_invalidate(td->page_cache); + + /* compute rest of remainder, if any */ + for (i = (offset / 16) + 1; i < td->ntrb; i++) { + temp = le32toh(td->td_trb[i].dwTrb2); + remainder += XHCI_TRB_2_BYTES_GET(temp); + } + + DPRINTFN(5, "New remainder: %u\n", remainder); + + /* clear isochronous transfer errors */ + if (xfer->flags_int.isochronous_xfr) { + if (halted) { + halted = 0; + status = XHCI_TRB_ERROR_SUCCESS; + remainder = td->len; + } + } + + /* "td->remainder" is verified later */ + td->remainder = remainder; + td->status = status; + + usb_pc_cpu_flush(td->page_cache); + + /* + * 1) Last transfer descriptor makes the + * transfer done + */ + if (((void *)td) == xfer->td_transfer_last) { + DPRINTF("TD is last\n"); + xhci_generic_done(xfer); + break; + } + + /* + * 2) Any kind of error makes the transfer + * done + */ + if (halted) { + DPRINTF("TD has I/O error\n"); + xhci_generic_done(xfer); + break; + } + + /* + * 3) If there is no alternate next transfer, + * a short packet also makes the transfer done + */ + if (td->remainder > 0) { + if (td->alt_next == NULL) { + DPRINTF( + "short TD has no alternate next\n"); + xhci_generic_done(xfer); + break; + } + DPRINTF("TD has short pkt\n"); + if (xfer->flags_int.short_frames_ok || + xfer->flags_int.isochronous_xfr || + xfer->flags_int.control_xfr) { + /* follow the alt next */ + xfer->td_transfer_cache = td->alt_next; + xhci_activate_transfer(xfer); + break; + } + xhci_skip_transfer(xfer); + xhci_generic_done(xfer); + break; + } + + /* + * 4) Transfer complete - go to next TD + */ + DPRINTF("Following next TD\n"); + xfer->td_transfer_cache = td->obj_next; + xhci_activate_transfer(xfer); + break; /* there should only be one match */ + } + } +} + +static int +xhci_check_command(struct xhci_softc *sc, struct xhci_trb *trb) +{ + if (sc->sc_cmd_addr == trb->qwTrb0) { + DPRINTF("Received command event\n"); + sc->sc_cmd_result[0] = trb->dwTrb2; + sc->sc_cmd_result[1] = trb->dwTrb3; + cv_signal(&sc->sc_cmd_cv); + return (1); /* command match */ + } + return (0); +} + +static int +xhci_interrupt_poll(struct xhci_softc *sc) +{ + struct usb_page_search buf_res; + struct xhci_hw_root *phwr; + uint64_t addr; + uint32_t temp; + int retval = 0; + uint16_t i; + uint8_t event; + uint8_t j; + uint8_t k; + uint8_t t; + + usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); + + phwr = buf_res.buffer; + + /* Receive any events */ + + usb_pc_cpu_invalidate(&sc->sc_hw.root_pc); + + i = sc->sc_event_idx; + j = sc->sc_event_ccs; + t = 2; + + while (1) { + + temp = le32toh(phwr->hwr_events[i].dwTrb3); + + k = (temp & XHCI_TRB_3_CYCLE_BIT) ? 
1 : 0; + + if (j != k) + break; + + event = XHCI_TRB_3_TYPE_GET(temp); + + DPRINTFN(10, "event[%u] = %u (0x%016llx 0x%08lx 0x%08lx)\n", + i, event, (long long)le64toh(phwr->hwr_events[i].qwTrb0), + (long)le32toh(phwr->hwr_events[i].dwTrb2), + (long)le32toh(phwr->hwr_events[i].dwTrb3)); + + switch (event) { + case XHCI_TRB_EVENT_TRANSFER: + xhci_check_transfer(sc, &phwr->hwr_events[i]); + break; + case XHCI_TRB_EVENT_CMD_COMPLETE: + retval |= xhci_check_command(sc, &phwr->hwr_events[i]); + break; + default: + DPRINTF("Unhandled event = %u\n", event); + break; + } + + i++; + + if (i == XHCI_MAX_EVENTS) { + i = 0; + j ^= 1; + + /* check for timeout */ + if (!--t) + break; + } + } + + sc->sc_event_idx = i; + sc->sc_event_ccs = j; + + /* + * NOTE: The Event Ring Dequeue Pointer Register is 64-bit + * latched. That means to activate the register we need to + * write both the low and high double word of the 64-bit + * register. + */ + + addr = buf_res.physaddr; + addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_events[i]; + + /* try to clear busy bit */ + addr |= XHCI_ERDP_LO_BUSY; + + XWRITE4(sc, runt, XHCI_ERDP_LO(0), (uint32_t)addr); + XWRITE4(sc, runt, XHCI_ERDP_HI(0), (uint32_t)(addr >> 32)); + + return (retval); +} + +static usb_error_t +xhci_do_command(struct xhci_softc *sc, struct xhci_trb *trb, + uint16_t timeout_ms) +{ + struct usb_page_search buf_res; + struct xhci_hw_root *phwr; + uint64_t addr; + uint32_t temp; + uint8_t i; + uint8_t j; + uint8_t timeout = 0; + int err; + + XHCI_CMD_ASSERT_LOCKED(sc); + + /* get hardware root structure */ + + usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); + + phwr = buf_res.buffer; + + /* Queue command */ + + USB_BUS_LOCK(&sc->sc_bus); +retry: + i = sc->sc_command_idx; + j = sc->sc_command_ccs; + + DPRINTFN(10, "command[%u] = %u (0x%016llx, 0x%08lx, 0x%08lx)\n", + i, XHCI_TRB_3_TYPE_GET(le32toh(trb->dwTrb3)), + (long long)le64toh(trb->qwTrb0), + (long)le32toh(trb->dwTrb2), + (long)le32toh(trb->dwTrb3)); + + phwr->hwr_commands[i].qwTrb0 = trb->qwTrb0; + phwr->hwr_commands[i].dwTrb2 = trb->dwTrb2; + + usb_pc_cpu_flush(&sc->sc_hw.root_pc); + + temp = trb->dwTrb3; + + if (j) + temp |= htole32(XHCI_TRB_3_CYCLE_BIT); + else + temp &= ~htole32(XHCI_TRB_3_CYCLE_BIT); + + temp &= ~htole32(XHCI_TRB_3_TC_BIT); + + phwr->hwr_commands[i].dwTrb3 = temp; + + usb_pc_cpu_flush(&sc->sc_hw.root_pc); + + addr = buf_res.physaddr; + addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_commands[i]; + + sc->sc_cmd_addr = htole64(addr); + + i++; + + if (i == (XHCI_MAX_COMMANDS - 1)) { + + if (j) { + temp = htole32(XHCI_TRB_3_TC_BIT | + XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | + XHCI_TRB_3_CYCLE_BIT); + } else { + temp = htole32(XHCI_TRB_3_TC_BIT | + XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); + } + + phwr->hwr_commands[i].dwTrb3 = temp; + + usb_pc_cpu_flush(&sc->sc_hw.root_pc); + + i = 0; + j ^= 1; + } + + sc->sc_command_idx = i; + sc->sc_command_ccs = j; + + XWRITE4(sc, door, XHCI_DOORBELL(0), 0); + + err = cv_timedwait(&sc->sc_cmd_cv, &sc->sc_bus.bus_mtx, + USB_MS_TO_TICKS(timeout_ms)); + + /* + * In some error cases event interrupts are not generated. + * Poll one time to see if the command has completed. 
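+	 * The bus lock is still held at this point, so polling the
+	 * event ring directly is safe; xhci_check_command() only
+	 * signals completion after matching the event TRB address
+	 * against "sc_cmd_addr".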
+ */ + if (err != 0 && xhci_interrupt_poll(sc) != 0) { + DPRINTF("Command was completed when polling\n"); + err = 0; + } + if (err != 0) { + DPRINTF("Command timeout!\n"); + /* + * After some weeks of continuous operation, it has + * been observed that the ASMedia Technology, ASM1042 + * SuperSpeed USB Host Controller can suddenly stop + * accepting commands via the command queue. Try to + * first reset the command queue. If that fails do a + * host controller reset. + */ + if (timeout == 0 && + xhci_reset_command_queue_locked(sc) == 0) { + temp = le32toh(trb->dwTrb3); + + /* + * Avoid infinite XHCI reset loops if the set + * address command fails to respond due to a + * non-enumerating device: + */ + if (XHCI_TRB_3_TYPE_GET(temp) == XHCI_TRB_TYPE_ADDRESS_DEVICE && + (temp & XHCI_TRB_3_BSR_BIT) == 0) { + DPRINTF("Set address timeout\n"); + } else { + timeout = 1; + goto retry; + } + } else { + DPRINTF("Controller reset!\n"); + usb_bus_reset_async_locked(&sc->sc_bus); + } + err = USB_ERR_TIMEOUT; + trb->dwTrb2 = 0; + trb->dwTrb3 = 0; + } else { + temp = le32toh(sc->sc_cmd_result[0]); + if (XHCI_TRB_2_ERROR_GET(temp) != XHCI_TRB_ERROR_SUCCESS) + err = USB_ERR_IOERROR; + + trb->dwTrb2 = sc->sc_cmd_result[0]; + trb->dwTrb3 = sc->sc_cmd_result[1]; + } + + USB_BUS_UNLOCK(&sc->sc_bus); + + return (err); +} + +#if 0 +static usb_error_t +xhci_cmd_nop(struct xhci_softc *sc) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = 0; + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NOOP); + + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} +#endif + +static usb_error_t +xhci_cmd_enable_slot(struct xhci_softc *sc, uint8_t *pslot) +{ + struct xhci_trb trb; + uint32_t temp; + usb_error_t err; + + DPRINTF("\n"); + + trb.qwTrb0 = 0; + trb.dwTrb2 = 0; + trb.dwTrb3 = htole32(XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT)); + + err = xhci_do_command(sc, &trb, 100 /* ms */); + if (err) + goto done; + + temp = le32toh(trb.dwTrb3); + + *pslot = XHCI_TRB_3_SLOT_GET(temp); + +done: + return (err); +} + +static usb_error_t +xhci_cmd_disable_slot(struct xhci_softc *sc, uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = 0; + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT) | + XHCI_TRB_3_SLOT_SET(slot_id); + + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} + +static usb_error_t +xhci_cmd_set_address(struct xhci_softc *sc, uint64_t input_ctx, + uint8_t bsr, uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = htole64(input_ctx); + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | + XHCI_TRB_3_SLOT_SET(slot_id); + + if (bsr) + temp |= XHCI_TRB_3_BSR_BIT; + + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 500 /* ms */)); +} + +static usb_error_t +xhci_set_address(struct usb_device *udev, struct mtx *mtx, uint16_t address) +{ + struct usb_page_search buf_inp; + struct usb_page_search buf_dev; + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + struct xhci_hw_dev *hdev; + struct xhci_dev_ctx *pdev; + struct xhci_endpoint_ext *pepext; + uint32_t temp; + uint16_t mps; + usb_error_t err; + uint8_t index; + + /* the root HUB case is not handled here */ + if (udev->parent_hub == NULL) + return (USB_ERR_INVAL); + + index = udev->controller_slot_id; + + hdev = &sc->sc_hw.devs[index]; + + if (mtx != NULL) + mtx_unlock(mtx); + + XHCI_CMD_LOCK(sc); + + switch 
(hdev->state) { + case XHCI_ST_DEFAULT: + case XHCI_ST_ENABLED: + + hdev->state = XHCI_ST_ENABLED; + + /* set configure mask to slot and EP0 */ + xhci_configure_mask(udev, 3, 0); + + /* configure input slot context structure */ + err = xhci_configure_device(udev); + + if (err != 0) { + DPRINTF("Could not configure device\n"); + break; + } + + /* configure input endpoint context structure */ + switch (udev->speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + mps = 8; + break; + case USB_SPEED_HIGH: + mps = 64; + break; + default: + mps = 512; + break; + } + + pepext = xhci_get_endpoint_ext(udev, + &udev->ctrl_ep_desc); + + /* ensure the control endpoint is setup again */ + USB_BUS_LOCK(udev->bus); + pepext->trb_halted = 1; + pepext->trb_running = 0; + USB_BUS_UNLOCK(udev->bus); + + err = xhci_configure_endpoint(udev, + &udev->ctrl_ep_desc, pepext, + 0, 1, 1, 0, mps, mps, USB_EP_MODE_DEFAULT); + + if (err != 0) { + DPRINTF("Could not configure default endpoint\n"); + break; + } + + /* execute set address command */ + usbd_get_page(&hdev->input_pc, 0, &buf_inp); + + err = xhci_cmd_set_address(sc, buf_inp.physaddr, + (address == 0), index); + + if (err != 0) { + temp = le32toh(sc->sc_cmd_result[0]); + if (address == 0 && sc->sc_port_route != NULL && + XHCI_TRB_2_ERROR_GET(temp) == + XHCI_TRB_ERROR_PARAMETER) { + /* LynxPoint XHCI - ports are not switchable */ + /* Un-route all ports from the XHCI */ + sc->sc_port_route(sc->sc_bus.parent, 0, ~0); + } + DPRINTF("Could not set address " + "for slot %u.\n", index); + if (address != 0) + break; + } + + /* update device address to new value */ + + usbd_get_page(&hdev->device_pc, 0, &buf_dev); + pdev = buf_dev.buffer; + usb_pc_cpu_invalidate(&hdev->device_pc); + + temp = xhci_ctx_get_le32(sc, &pdev->ctx_slot.dwSctx3); + udev->address = XHCI_SCTX_3_DEV_ADDR_GET(temp); + + /* update device state to new value */ + + if (address != 0) + hdev->state = XHCI_ST_ADDRESSED; + else + hdev->state = XHCI_ST_DEFAULT; + break; + + default: + DPRINTF("Wrong state for set address.\n"); + err = USB_ERR_IOERROR; + break; + } + XHCI_CMD_UNLOCK(sc); + + if (mtx != NULL) + mtx_lock(mtx); + + return (err); +} + +static usb_error_t +xhci_cmd_configure_ep(struct xhci_softc *sc, uint64_t input_ctx, + uint8_t deconfigure, uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = htole64(input_ctx); + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP) | + XHCI_TRB_3_SLOT_SET(slot_id); + + if (deconfigure) + temp |= XHCI_TRB_3_DCEP_BIT; + + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} + +static usb_error_t +xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint64_t input_ctx, + uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = htole64(input_ctx); + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX) | + XHCI_TRB_3_SLOT_SET(slot_id); + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} + +static usb_error_t +xhci_cmd_reset_ep(struct xhci_softc *sc, uint8_t preserve, + uint8_t ep_id, uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = 0; + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP) | + XHCI_TRB_3_SLOT_SET(slot_id) | + XHCI_TRB_3_EP_SET(ep_id); + + if (preserve) + temp |= XHCI_TRB_3_PRSV_BIT; + + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} + +static usb_error_t 
+xhci_cmd_set_tr_dequeue_ptr(struct xhci_softc *sc, uint64_t dequeue_ptr, + uint16_t stream_id, uint8_t ep_id, uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = htole64(dequeue_ptr); + + temp = XHCI_TRB_2_STREAM_SET(stream_id); + trb.dwTrb2 = htole32(temp); + + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE) | + XHCI_TRB_3_SLOT_SET(slot_id) | + XHCI_TRB_3_EP_SET(ep_id); + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} + +static usb_error_t +xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t suspend, + uint8_t ep_id, uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = 0; + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) | + XHCI_TRB_3_SLOT_SET(slot_id) | + XHCI_TRB_3_EP_SET(ep_id); + + if (suspend) + temp |= XHCI_TRB_3_SUSP_EP_BIT; + + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} + +static usb_error_t +xhci_cmd_reset_dev(struct xhci_softc *sc, uint8_t slot_id) +{ + struct xhci_trb trb; + uint32_t temp; + + DPRINTF("\n"); + + trb.qwTrb0 = 0; + trb.dwTrb2 = 0; + temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_DEVICE) | + XHCI_TRB_3_SLOT_SET(slot_id); + + trb.dwTrb3 = htole32(temp); + + return (xhci_do_command(sc, &trb, 100 /* ms */)); +} + +/*------------------------------------------------------------------------* + * xhci_interrupt - XHCI interrupt handler + *------------------------------------------------------------------------*/ +void +xhci_interrupt(struct xhci_softc *sc) +{ + uint32_t status; + uint32_t temp; + + USB_BUS_LOCK(&sc->sc_bus); + + status = XREAD4(sc, oper, XHCI_USBSTS); + + /* acknowledge interrupts, if any */ + if (status != 0) { + XWRITE4(sc, oper, XHCI_USBSTS, status); + DPRINTFN(16, "real interrupt (status=0x%08x)\n", status); + } + + temp = XREAD4(sc, runt, XHCI_IMAN(0)); + + /* force clearing of pending interrupts */ + if (temp & XHCI_IMAN_INTR_PEND) + XWRITE4(sc, runt, XHCI_IMAN(0), temp); + + /* check for event(s) */ + xhci_interrupt_poll(sc); + + if (status & (XHCI_STS_PCD | XHCI_STS_HCH | + XHCI_STS_HSE | XHCI_STS_HCE)) { + + if (status & XHCI_STS_PCD) { + xhci_root_intr(sc); + } + + if (status & XHCI_STS_HCH) { + printf("%s: host controller halted\n", + __FUNCTION__); + } + + if (status & XHCI_STS_HSE) { + printf("%s: host system error\n", + __FUNCTION__); + } + + if (status & XHCI_STS_HCE) { + printf("%s: host controller error\n", + __FUNCTION__); + } + } + USB_BUS_UNLOCK(&sc->sc_bus); +} + +/*------------------------------------------------------------------------* + * xhci_timeout - XHCI timeout handler + *------------------------------------------------------------------------*/ +static void +xhci_timeout(void *arg) +{ + struct usb_xfer *xfer = arg; + + DPRINTF("xfer=%p\n", xfer); + + USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); + + /* transfer is transferred */ + xhci_device_done(xfer, USB_ERR_TIMEOUT); +} + +static void +xhci_do_poll(struct usb_bus *bus) +{ + struct xhci_softc *sc = XHCI_BUS2SC(bus); + + USB_BUS_LOCK(&sc->sc_bus); + xhci_interrupt_poll(sc); + USB_BUS_UNLOCK(&sc->sc_bus); +} + +static void +xhci_setup_generic_chain_sub(struct xhci_std_temp *temp) +{ + struct usb_page_search buf_res; + struct xhci_td *td; + struct xhci_td *td_next; + struct xhci_td *td_alt_next; + struct xhci_td *td_first; + uint32_t buf_offset; + uint32_t average; + uint32_t len_old; + uint32_t npkt_off; + uint32_t dword; + uint8_t shortpkt_old; + uint8_t precompute; + uint8_t x; 
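+
+	/*
+	 * NOTE: this function makes two passes over the TD list. The
+	 * first pass ("precompute" is set) only computes how the data
+	 * splits across the TDs and then jumps back to "restart". The
+	 * second pass fills out the actual TRBs.
+	 */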
+ + td_alt_next = NULL; + buf_offset = 0; + shortpkt_old = temp->shortpkt; + len_old = temp->len; + npkt_off = 0; + precompute = 1; + +restart: + + td = temp->td; + td_next = td_first = temp->td_next; + + while (1) { + + if (temp->len == 0) { + + if (temp->shortpkt) + break; + + /* send a Zero Length Packet, ZLP, last */ + + temp->shortpkt = 1; + average = 0; + + } else { + + average = temp->average; + + if (temp->len < average) { + if (temp->len % temp->max_packet_size) { + temp->shortpkt = 1; + } + average = temp->len; + } + } + + if (td_next == NULL) + panic("%s: out of XHCI transfer descriptors!", __FUNCTION__); + + /* get next TD */ + + td = td_next; + td_next = td->obj_next; + + /* check if we are pre-computing */ + + if (precompute) { + + /* update remaining length */ + + temp->len -= average; + + continue; + } + /* fill out current TD */ + + td->len = average; + td->remainder = 0; + td->status = 0; + + /* update remaining length */ + + temp->len -= average; + + /* reset TRB index */ + + x = 0; + + if (temp->trb_type == XHCI_TRB_TYPE_SETUP_STAGE) { + /* immediate data */ + + if (average > 8) + average = 8; + + td->td_trb[0].qwTrb0 = 0; + + usbd_copy_out(temp->pc, temp->offset + buf_offset, + (uint8_t *)(uintptr_t)&td->td_trb[0].qwTrb0, + average); + + dword = XHCI_TRB_2_BYTES_SET(8) | + XHCI_TRB_2_TDSZ_SET(0) | + XHCI_TRB_2_IRQ_SET(0); + + td->td_trb[0].dwTrb2 = htole32(dword); + + dword = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) | + XHCI_TRB_3_IDT_BIT | XHCI_TRB_3_CYCLE_BIT; + + /* check wLength */ + if (td->td_trb[0].qwTrb0 & + htole64(XHCI_TRB_0_WLENGTH_MASK)) { + if (td->td_trb[0].qwTrb0 & + htole64(XHCI_TRB_0_DIR_IN_MASK)) + dword |= XHCI_TRB_3_TRT_IN; + else + dword |= XHCI_TRB_3_TRT_OUT; + } + + td->td_trb[0].dwTrb3 = htole32(dword); +#ifdef USB_DEBUG + xhci_dump_trb(&td->td_trb[x]); +#endif + x++; + + } else do { + + uint32_t npkt; + + /* fill out buffer pointers */ + + if (average == 0) { + memset(&buf_res, 0, sizeof(buf_res)); + } else { + usbd_get_page(temp->pc, temp->offset + + buf_offset, &buf_res); + + /* get length to end of page */ + if (buf_res.length > average) + buf_res.length = average; + + /* check for maximum length */ + if (buf_res.length > XHCI_TD_PAGE_SIZE) + buf_res.length = XHCI_TD_PAGE_SIZE; + + npkt_off += buf_res.length; + } + + /* set up npkt */ + npkt = howmany(len_old - npkt_off, + temp->max_packet_size); + + if (npkt == 0) + npkt = 1; + else if (npkt > 31) + npkt = 31; + + /* fill out TRB's */ + td->td_trb[x].qwTrb0 = + htole64((uint64_t)buf_res.physaddr); + + dword = + XHCI_TRB_2_BYTES_SET(buf_res.length) | + XHCI_TRB_2_TDSZ_SET(npkt) | + XHCI_TRB_2_IRQ_SET(0); + + td->td_trb[x].dwTrb2 = htole32(dword); + + switch (temp->trb_type) { + case XHCI_TRB_TYPE_ISOCH: + dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | + XHCI_TRB_3_TBC_SET(temp->tbc) | + XHCI_TRB_3_TLBPC_SET(temp->tlbpc); + if (td != td_first) { + dword |= XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL); + } else if (temp->do_isoc_sync != 0) { + temp->do_isoc_sync = 0; + /* wait until "isoc_frame" */ + dword |= XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) | + XHCI_TRB_3_FRID_SET(temp->isoc_frame / 8); + } else { + /* start data transfer at next interval */ + dword |= XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) | + XHCI_TRB_3_ISO_SIA_BIT; + } + if (temp->direction == UE_DIR_IN) + dword |= XHCI_TRB_3_ISP_BIT; + break; + case XHCI_TRB_TYPE_DATA_STAGE: + dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | + XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE); + if (temp->direction == UE_DIR_IN) + 
dword |= XHCI_TRB_3_DIR_IN | XHCI_TRB_3_ISP_BIT; + /* + * Section 3.2.9 in the XHCI + * specification about control + * transfers says that we should use a + * normal-TRB if there are more TRBs + * extending the data-stage + * TRB. Update the "trb_type". + */ + temp->trb_type = XHCI_TRB_TYPE_NORMAL; + break; + case XHCI_TRB_TYPE_STATUS_STAGE: + dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | + XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE); + if (temp->direction == UE_DIR_IN) + dword |= XHCI_TRB_3_DIR_IN; + break; + default: /* XHCI_TRB_TYPE_NORMAL */ + dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | + XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL); + if (temp->direction == UE_DIR_IN) + dword |= XHCI_TRB_3_ISP_BIT; + break; + } + td->td_trb[x].dwTrb3 = htole32(dword); + + average -= buf_res.length; + buf_offset += buf_res.length; +#ifdef USB_DEBUG + xhci_dump_trb(&td->td_trb[x]); +#endif + x++; + + } while (average != 0); + + td->td_trb[x-1].dwTrb3 |= htole32(XHCI_TRB_3_IOC_BIT); + + /* store number of data TRB's */ + + td->ntrb = x; + + DPRINTF("NTRB=%u\n", x); + + /* fill out link TRB */ + + if (td_next != NULL) { + /* link the current TD with the next one */ + td->td_trb[x].qwTrb0 = htole64((uint64_t)td_next->td_self); + DPRINTF("LINK=0x%08llx\n", (long long)td_next->td_self); + } else { + /* this field will get updated later */ + DPRINTF("NOLINK\n"); + } + + dword = XHCI_TRB_2_IRQ_SET(0); + + td->td_trb[x].dwTrb2 = htole32(dword); + + dword = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | + XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_IOC_BIT | + /* + * CHAIN-BIT: Ensure that a multi-TRB IN-endpoint + * frame only receives a single short packet event + * by setting the CHAIN bit in the LINK field. In + * addition some XHCI controllers have problems + * sending a ZLP unless the CHAIN-BIT is set in + * the LINK TRB. 
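+	     * The CHAIN bit is cleared again from the very last
+	     * LINK TRB at the end of this function, once the
+	     * chain has been fully built.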
+ */ + XHCI_TRB_3_CHAIN_BIT; + + td->td_trb[x].dwTrb3 = htole32(dword); + + td->alt_next = td_alt_next; +#ifdef USB_DEBUG + xhci_dump_trb(&td->td_trb[x]); +#endif + usb_pc_cpu_flush(td->page_cache); + } + + if (precompute) { + precompute = 0; + + /* set up alt next pointer, if any */ + if (temp->last_frame) { + td_alt_next = NULL; + } else { + /* we use this field internally */ + td_alt_next = td_next; + } + + /* restore */ + temp->shortpkt = shortpkt_old; + temp->len = len_old; + goto restart; + } + + /* + * Remove cycle bit from the first TRB if we are + * stepping them: + */ + if (temp->step_td != 0) { + td_first->td_trb[0].dwTrb3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); + usb_pc_cpu_flush(td_first->page_cache); + } + + /* clear TD SIZE to zero, hence this is the last TRB */ + /* remove chain bit because this is the last data TRB in the chain */ + td->td_trb[td->ntrb - 1].dwTrb2 &= ~htole32(XHCI_TRB_2_TDSZ_SET(15)); + td->td_trb[td->ntrb - 1].dwTrb3 &= ~htole32(XHCI_TRB_3_CHAIN_BIT); + /* remove CHAIN-BIT from last LINK TRB */ + td->td_trb[td->ntrb].dwTrb3 &= ~htole32(XHCI_TRB_3_CHAIN_BIT); + + usb_pc_cpu_flush(td->page_cache); + + temp->td = td; + temp->td_next = td_next; +} + +static void +xhci_setup_generic_chain(struct usb_xfer *xfer) +{ + struct xhci_std_temp temp; + struct xhci_td *td; + uint32_t x; + uint32_t y; + uint8_t mult; + + temp.do_isoc_sync = 0; + temp.step_td = 0; + temp.tbc = 0; + temp.tlbpc = 0; + temp.average = xfer->max_hc_frame_size; + temp.max_packet_size = xfer->max_packet_size; + temp.sc = XHCI_BUS2SC(xfer->xroot->bus); + temp.pc = NULL; + temp.last_frame = 0; + temp.offset = 0; + temp.multishort = xfer->flags_int.isochronous_xfr || + xfer->flags_int.control_xfr || + xfer->flags_int.short_frames_ok; + + /* toggle the DMA set we are using */ + xfer->flags_int.curr_dma_set ^= 1; + + /* get next DMA set */ + td = xfer->td_start[xfer->flags_int.curr_dma_set]; + + temp.td = NULL; + temp.td_next = td; + + xfer->td_transfer_first = td; + xfer->td_transfer_cache = td; + + if (xfer->flags_int.isochronous_xfr) { + uint8_t shift; + + /* compute multiplier for ISOCHRONOUS transfers */ + mult = xfer->endpoint->ecomp ? + UE_GET_SS_ISO_MULT(xfer->endpoint->ecomp->bmAttributes) + : 0; + /* check for USB 2.0 multiplier */ + if (mult == 0) { + mult = (xfer->endpoint->edesc-> + wMaxPacketSize[1] >> 3) & 3; + } + /* range check */ + if (mult > 2) + mult = 3; + else + mult++; + + x = XREAD4(temp.sc, runt, XHCI_MFINDEX); + + DPRINTF("MFINDEX=0x%08x\n", x); + + switch (usbd_get_speed(xfer->xroot->udev)) { + case USB_SPEED_FULL: + shift = 3; + temp.isoc_delta = 8; /* 1ms */ + x += temp.isoc_delta - 1; + x &= ~(temp.isoc_delta - 1); + break; + default: + shift = usbd_xfer_get_fps_shift(xfer); + temp.isoc_delta = 1U << shift; + x += temp.isoc_delta - 1; + x &= ~(temp.isoc_delta - 1); + /* simple frame load balancing */ + x += xfer->endpoint->usb_uframe; + break; + } + + y = XHCI_MFINDEX_GET(x - xfer->endpoint->isoc_next); + + if ((xfer->endpoint->is_synced == 0) || + (y < (xfer->nframes << shift)) || + (XHCI_MFINDEX_GET(-y) >= (128 * 8))) { + /* + * If there is data underflow or the pipe + * queue is empty we schedule the transfer a + * few frames ahead of the current frame + * position. Else two isochronous transfers + * might overlap. 
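+			 * The "3 * 8" below is a three millisecond
+			 * margin expressed in 125us microframes,
+			 * which is how far ahead of the current
+			 * MFINDEX position the transfer is scheduled.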
+ */ + xfer->endpoint->isoc_next = XHCI_MFINDEX_GET(x + (3 * 8)); + xfer->endpoint->is_synced = 1; + temp.do_isoc_sync = 1; + + DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next); + } + + /* compute isochronous completion time */ + + y = XHCI_MFINDEX_GET(xfer->endpoint->isoc_next - (x & ~7)); + + xfer->isoc_time_complete = + usb_isoc_time_expand(&temp.sc->sc_bus, x / 8) + + (y / 8) + (((xfer->nframes << shift) + 7) / 8); + + x = 0; + temp.isoc_frame = xfer->endpoint->isoc_next; + temp.trb_type = XHCI_TRB_TYPE_ISOCH; + + xfer->endpoint->isoc_next += xfer->nframes << shift; + + } else if (xfer->flags_int.control_xfr) { + + /* check if we should prepend a setup message */ + + if (xfer->flags_int.control_hdr) { + + temp.len = xfer->frlengths[0]; + temp.pc = xfer->frbuffers + 0; + temp.shortpkt = temp.len ? 1 : 0; + temp.trb_type = XHCI_TRB_TYPE_SETUP_STAGE; + temp.direction = 0; + + /* check for last frame */ + if (xfer->nframes == 1) { + /* no STATUS stage yet, SETUP is last */ + if (xfer->flags_int.control_act) + temp.last_frame = 1; + } + + xhci_setup_generic_chain_sub(&temp); + } + x = 1; + mult = 1; + temp.isoc_delta = 0; + temp.isoc_frame = 0; + temp.trb_type = xfer->flags_int.control_did_data ? + XHCI_TRB_TYPE_NORMAL : XHCI_TRB_TYPE_DATA_STAGE; + } else { + x = 0; + mult = 1; + temp.isoc_delta = 0; + temp.isoc_frame = 0; + temp.trb_type = XHCI_TRB_TYPE_NORMAL; + } + + if (x != xfer->nframes) { + /* set up page_cache pointer */ + temp.pc = xfer->frbuffers + x; + /* set endpoint direction */ + temp.direction = UE_GET_DIR(xfer->endpointno); + } + + while (x != xfer->nframes) { + + /* DATA0 / DATA1 message */ + + temp.len = xfer->frlengths[x]; + temp.step_td = ((xfer->endpointno & UE_DIR_IN) && + x != 0 && temp.multishort == 0); + + x++; + + if (x == xfer->nframes) { + if (xfer->flags_int.control_xfr) { + /* no STATUS stage yet, DATA is last */ + if (xfer->flags_int.control_act) + temp.last_frame = 1; + } else { + temp.last_frame = 1; + } + } + if (temp.len == 0) { + + /* make sure that we send an USB packet */ + + temp.shortpkt = 0; + + temp.tbc = 0; + temp.tlbpc = mult - 1; + + } else if (xfer->flags_int.isochronous_xfr) { + + uint8_t tdpc; + + /* + * Isochronous transfers don't have short + * packet termination: + */ + + temp.shortpkt = 1; + + /* isochronous transfers have a transfer limit */ + + if (temp.len > xfer->max_frame_size) + temp.len = xfer->max_frame_size; + + /* compute TD packet count */ + tdpc = howmany(temp.len, xfer->max_packet_size); + + temp.tbc = howmany(tdpc, mult) - 1; + temp.tlbpc = (tdpc % mult); + + if (temp.tlbpc == 0) + temp.tlbpc = mult - 1; + else + temp.tlbpc--; + } else { + + /* regular data transfer */ + + temp.shortpkt = xfer->flags.force_short_xfer ? 0 : 1; + } + + xhci_setup_generic_chain_sub(&temp); + + if (xfer->flags_int.isochronous_xfr) { + temp.offset += xfer->frlengths[x - 1]; + temp.isoc_frame += temp.isoc_delta; + } else { + /* get next Page Cache pointer */ + temp.pc = xfer->frbuffers + x; + } + } + + /* check if we should append a status stage */ + + if (xfer->flags_int.control_xfr && + !xfer->flags_int.control_act) { + + /* + * Send a DATA1 message and invert the current + * endpoint direction. + */ + if (xhcictlstep || temp.sc->sc_ctlstep) { + /* + * Some XHCI controllers will not delay the + * status stage until the next SOF. Force this + * behaviour to avoid failed control + * transfers. 
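+			 * Stepping is enabled either globally via
+			 * "xhcictlstep" or per controller via the
+			 * "sc_ctlstep" flag.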
+ */ + temp.step_td = (xfer->nframes != 0); + } else { + temp.step_td = 0; + } + temp.direction = UE_GET_DIR(xfer->endpointno) ^ UE_DIR_IN; + temp.len = 0; + temp.pc = NULL; + temp.shortpkt = 0; + temp.last_frame = 1; + temp.trb_type = XHCI_TRB_TYPE_STATUS_STAGE; + + xhci_setup_generic_chain_sub(&temp); + } + + td = temp.td; + + /* must have at least one frame! */ + + xfer->td_transfer_last = td; + + DPRINTF("first=%p last=%p\n", xfer->td_transfer_first, td); +} + +static void +xhci_set_slot_pointer(struct xhci_softc *sc, uint8_t index, uint64_t dev_addr) +{ + struct usb_page_search buf_res; + struct xhci_dev_ctx_addr *pdctxa; + + usbd_get_page(&sc->sc_hw.ctx_pc, 0, &buf_res); + + pdctxa = buf_res.buffer; + + DPRINTF("addr[%u]=0x%016llx\n", index, (long long)dev_addr); + + pdctxa->qwBaaDevCtxAddr[index] = htole64(dev_addr); + + usb_pc_cpu_flush(&sc->sc_hw.ctx_pc); +} + +static usb_error_t +xhci_configure_mask(struct usb_device *udev, uint32_t mask, uint8_t drop) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + struct usb_page_search buf_inp; + struct xhci_input_dev_ctx *pinp; + uint32_t temp; + uint8_t index; + uint8_t x; + + index = udev->controller_slot_id; + + usbd_get_page(&sc->sc_hw.devs[index].input_pc, 0, &buf_inp); + + pinp = buf_inp.buffer; + + if (drop) { + mask &= XHCI_INCTX_NON_CTRL_MASK; + xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx0, mask); + xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx1, 0); + } else { + /* + * Some hardware requires that we drop the endpoint + * context before adding it again: + */ + xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx0, + mask & XHCI_INCTX_NON_CTRL_MASK); + + /* Add new endpoint context */ + xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx1, mask); + + /* find most significant set bit */ + for (x = 31; x != 1; x--) { + if (mask & (1 << x)) + break; + } + + /* adjust */ + x--; + + /* figure out the maximum number of contexts */ + if (x > sc->sc_hw.devs[index].context_num) + sc->sc_hw.devs[index].context_num = x; + else + x = sc->sc_hw.devs[index].context_num; + + /* update number of contexts */ + temp = xhci_ctx_get_le32(sc, &pinp->ctx_slot.dwSctx0); + temp &= ~XHCI_SCTX_0_CTX_NUM_SET(31); + temp |= XHCI_SCTX_0_CTX_NUM_SET(x + 1); + xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx0, temp); + } + usb_pc_cpu_flush(&sc->sc_hw.devs[index].input_pc); + return (0); +} + +static usb_error_t +xhci_configure_endpoint(struct usb_device *udev, + struct usb_endpoint_descriptor *edesc, struct xhci_endpoint_ext *pepext, + uint16_t interval, uint8_t max_packet_count, + uint8_t mult, uint8_t fps_shift, uint16_t max_packet_size, + uint16_t max_frame_size, uint8_t ep_mode) +{ + struct usb_page_search buf_inp; + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + struct xhci_input_dev_ctx *pinp; + uint64_t ring_addr = pepext->physaddr; + uint32_t temp; + uint8_t index; + uint8_t epno; + uint8_t type; + + index = udev->controller_slot_id; + + usbd_get_page(&sc->sc_hw.devs[index].input_pc, 0, &buf_inp); + + pinp = buf_inp.buffer; + + epno = edesc->bEndpointAddress; + type = edesc->bmAttributes & UE_XFERTYPE; + + if (type == UE_CONTROL) + epno |= UE_DIR_IN; + + epno = XHCI_EPNO2EPID(epno); + + if (epno == 0) + return (USB_ERR_NO_PIPE); /* invalid */ + + if (max_packet_count == 0) + return (USB_ERR_BAD_BUFSIZE); + + max_packet_count--; + + if (mult == 0) + return (USB_ERR_BAD_BUFSIZE); + + /* store endpoint mode */ + pepext->trb_ep_mode = ep_mode; + /* store bMaxPacketSize for control endpoints */ + pepext->trb_ep_maxp = edesc->wMaxPacketSize[0]; + 
usb_pc_cpu_flush(pepext->page_cache); + + if (ep_mode == USB_EP_MODE_STREAMS) { + temp = XHCI_EPCTX_0_EPSTATE_SET(0) | + XHCI_EPCTX_0_MAXP_STREAMS_SET(XHCI_MAX_STREAMS_LOG - 1) | + XHCI_EPCTX_0_LSA_SET(1); + + ring_addr += sizeof(struct xhci_trb) * + XHCI_MAX_TRANSFERS * XHCI_MAX_STREAMS; + } else { + temp = XHCI_EPCTX_0_EPSTATE_SET(0) | + XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | + XHCI_EPCTX_0_LSA_SET(0); + + ring_addr |= XHCI_EPCTX_2_DCS_SET(1); + } + + switch (udev->speed) { + case USB_SPEED_FULL: + case USB_SPEED_LOW: + /* 1ms -> 125us */ + fps_shift += 3; + break; + default: + break; + } + + switch (type) { + case UE_INTERRUPT: + if (fps_shift > 3) + fps_shift--; + temp |= XHCI_EPCTX_0_IVAL_SET(fps_shift); + break; + case UE_ISOCHRONOUS: + temp |= XHCI_EPCTX_0_IVAL_SET(fps_shift); + + switch (udev->speed) { + case USB_SPEED_SUPER: + if (mult > 3) + mult = 3; + temp |= XHCI_EPCTX_0_MULT_SET(mult - 1); + max_packet_count /= mult; + break; + default: + break; + } + break; + default: + break; + } + + xhci_ctx_set_le32(sc, &pinp->ctx_ep[epno - 1].dwEpCtx0, temp); + + temp = + XHCI_EPCTX_1_HID_SET(0) | + XHCI_EPCTX_1_MAXB_SET(max_packet_count) | + XHCI_EPCTX_1_MAXP_SIZE_SET(max_packet_size); + + /* + * Always enable the "three strikes and you are gone" feature + * except for ISOCHRONOUS endpoints. This is suggested by + * section 4.3.3 in the XHCI specification about device slot + * initialisation. + */ + if (type != UE_ISOCHRONOUS) + temp |= XHCI_EPCTX_1_CERR_SET(3); + + switch (type) { + case UE_CONTROL: + temp |= XHCI_EPCTX_1_EPTYPE_SET(4); + break; + case UE_ISOCHRONOUS: + temp |= XHCI_EPCTX_1_EPTYPE_SET(1); + break; + case UE_BULK: + temp |= XHCI_EPCTX_1_EPTYPE_SET(2); + break; + default: + temp |= XHCI_EPCTX_1_EPTYPE_SET(3); + break; + } + + /* check for IN direction */ + if (epno & 1) + temp |= XHCI_EPCTX_1_EPTYPE_SET(4); + + xhci_ctx_set_le32(sc, &pinp->ctx_ep[epno - 1].dwEpCtx1, temp); + xhci_ctx_set_le64(sc, &pinp->ctx_ep[epno - 1].qwEpCtx2, ring_addr); + + switch (edesc->bmAttributes & UE_XFERTYPE) { + case UE_INTERRUPT: + case UE_ISOCHRONOUS: + temp = XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(max_frame_size) | + XHCI_EPCTX_4_AVG_TRB_LEN_SET(MIN(XHCI_PAGE_SIZE, + max_frame_size)); + break; + case UE_CONTROL: + temp = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); + break; + default: + temp = XHCI_EPCTX_4_AVG_TRB_LEN_SET(XHCI_PAGE_SIZE); + break; + } + + xhci_ctx_set_le32(sc, &pinp->ctx_ep[epno - 1].dwEpCtx4, temp); + +#ifdef USB_DEBUG + xhci_dump_endpoint(sc, &pinp->ctx_ep[epno - 1]); +#endif + usb_pc_cpu_flush(&sc->sc_hw.devs[index].input_pc); + + return (0); /* success */ +} + +static usb_error_t +xhci_configure_endpoint_by_xfer(struct usb_xfer *xfer) +{ + struct xhci_endpoint_ext *pepext; + struct usb_endpoint_ss_comp_descriptor *ecomp; + usb_stream_t x; + + pepext = xhci_get_endpoint_ext(xfer->xroot->udev, + xfer->endpoint->edesc); + + ecomp = xfer->endpoint->ecomp; + + for (x = 0; x != XHCI_MAX_STREAMS; x++) { + uint64_t temp; + + /* halt any transfers */ + pepext->trb[x * XHCI_MAX_TRANSFERS].dwTrb3 = 0; + + /* compute start of TRB ring for stream "x" */ + temp = pepext->physaddr + + (x * XHCI_MAX_TRANSFERS * sizeof(struct xhci_trb)) + + XHCI_SCTX_0_SCT_SEC_TR_RING; + + /* make tree structure */ + pepext->trb[(XHCI_MAX_TRANSFERS * + XHCI_MAX_STREAMS) + x].qwTrb0 = htole64(temp); + + /* reserved fields */ + pepext->trb[(XHCI_MAX_TRANSFERS * + XHCI_MAX_STREAMS) + x].dwTrb2 = 0; + pepext->trb[(XHCI_MAX_TRANSFERS * + XHCI_MAX_STREAMS) + x].dwTrb3 = 0; + } + usb_pc_cpu_flush(pepext->page_cache); + + return 
(xhci_configure_endpoint(xfer->xroot->udev, + xfer->endpoint->edesc, pepext, + xfer->interval, xfer->max_packet_count, + (ecomp != NULL) ? UE_GET_SS_ISO_MULT(ecomp->bmAttributes) + 1 : 1, + usbd_xfer_get_fps_shift(xfer), xfer->max_packet_size, + xfer->max_frame_size, xfer->endpoint->ep_mode)); +} + +static usb_error_t +xhci_configure_device(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + struct usb_page_search buf_inp; + struct usb_page_cache *pcinp; + struct xhci_input_dev_ctx *pinp; + struct usb_device *hubdev; + uint32_t temp; + uint32_t route; + uint32_t rh_port; + uint8_t is_hub; + uint8_t index; + uint8_t depth; + + index = udev->controller_slot_id; + + DPRINTF("index=%u\n", index); + + pcinp = &sc->sc_hw.devs[index].input_pc; + + usbd_get_page(pcinp, 0, &buf_inp); + + pinp = buf_inp.buffer; + + rh_port = 0; + route = 0; + + /* figure out route string and root HUB port number */ + + for (hubdev = udev; hubdev != NULL; hubdev = hubdev->parent_hub) { + + if (hubdev->parent_hub == NULL) + break; + + depth = hubdev->parent_hub->depth; + + /* + * NOTE: HS/FS/LS devices and the SS root HUB can have + * more than 15 ports + */ + + rh_port = hubdev->port_no; + + if (depth == 0) + break; + + if (rh_port > 15) + rh_port = 15; + + if (depth < 6) + route |= rh_port << (4 * (depth - 1)); + } + + DPRINTF("Route=0x%08x\n", route); + + temp = XHCI_SCTX_0_ROUTE_SET(route) | + XHCI_SCTX_0_CTX_NUM_SET( + sc->sc_hw.devs[index].context_num + 1); + + switch (udev->speed) { + case USB_SPEED_LOW: + temp |= XHCI_SCTX_0_SPEED_SET(2); + if (udev->parent_hs_hub != NULL && + udev->parent_hs_hub->ddesc.bDeviceProtocol == + UDPROTO_HSHUBMTT) { + DPRINTF("Device inherits MTT\n"); + temp |= XHCI_SCTX_0_MTT_SET(1); + } + break; + case USB_SPEED_HIGH: + temp |= XHCI_SCTX_0_SPEED_SET(3); + if (sc->sc_hw.devs[index].nports != 0 && + udev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT) { + DPRINTF("HUB supports MTT\n"); + temp |= XHCI_SCTX_0_MTT_SET(1); + } + break; + case USB_SPEED_FULL: + temp |= XHCI_SCTX_0_SPEED_SET(1); + if (udev->parent_hs_hub != NULL && + udev->parent_hs_hub->ddesc.bDeviceProtocol == + UDPROTO_HSHUBMTT) { + DPRINTF("Device inherits MTT\n"); + temp |= XHCI_SCTX_0_MTT_SET(1); + } + break; + default: + temp |= XHCI_SCTX_0_SPEED_SET(4); + break; + } + + is_hub = sc->sc_hw.devs[index].nports != 0 && + (udev->speed == USB_SPEED_SUPER || + udev->speed == USB_SPEED_HIGH); + + if (is_hub) + temp |= XHCI_SCTX_0_HUB_SET(1); + + xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx0, temp); + + temp = XHCI_SCTX_1_RH_PORT_SET(rh_port); + + if (is_hub) { + temp |= XHCI_SCTX_1_NUM_PORTS_SET( + sc->sc_hw.devs[index].nports); + } + + switch (udev->speed) { + case USB_SPEED_SUPER: + switch (sc->sc_hw.devs[index].state) { + case XHCI_ST_ADDRESSED: + case XHCI_ST_CONFIGURED: + /* enable power save */ + temp |= XHCI_SCTX_1_MAX_EL_SET(sc->sc_exit_lat_max); + break; + default: + /* disable power save */ + break; + } + break; + default: + break; + } + + xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx1, temp); + + temp = XHCI_SCTX_2_IRQ_TARGET_SET(0); + + if (is_hub) { + temp |= XHCI_SCTX_2_TT_THINK_TIME_SET( + sc->sc_hw.devs[index].tt); + } + + hubdev = udev->parent_hs_hub; + + /* check if we should activate the transaction translator */ + switch (udev->speed) { + case USB_SPEED_FULL: + case USB_SPEED_LOW: + if (hubdev != NULL) { + temp |= XHCI_SCTX_2_TT_HUB_SID_SET( + hubdev->controller_slot_id); + temp |= XHCI_SCTX_2_TT_PORT_NUM_SET( + udev->hs_port_no); + } + break; + default: + break; + } + + 
xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx2, temp); + + /* + * These fields should be initialized to zero, according to + * XHCI section 6.2.2 - slot context: + */ + temp = XHCI_SCTX_3_DEV_ADDR_SET(0) | + XHCI_SCTX_3_SLOT_STATE_SET(0); + + xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx3, temp); + +#ifdef USB_DEBUG + xhci_dump_device(sc, &pinp->ctx_slot); +#endif + usb_pc_cpu_flush(pcinp); + + return (0); /* success */ +} + +static usb_error_t +xhci_alloc_device_ext(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + struct usb_page_search buf_dev; + struct usb_page_search buf_ep; + struct xhci_trb *trb; + struct usb_page_cache *pc; + struct usb_page *pg; + uint64_t addr; + uint8_t index; + uint8_t i; + + index = udev->controller_slot_id; + + pc = &sc->sc_hw.devs[index].device_pc; + pg = &sc->sc_hw.devs[index].device_pg; + + /* need to initialize the page cache */ + pc->tag_parent = sc->sc_bus.dma_parent_tag; + + if (usb_pc_alloc_mem(pc, pg, sc->sc_ctx_is_64_byte ? + (2 * sizeof(struct xhci_dev_ctx)) : + sizeof(struct xhci_dev_ctx), XHCI_PAGE_SIZE)) + goto error; + + usbd_get_page(pc, 0, &buf_dev); + + pc = &sc->sc_hw.devs[index].input_pc; + pg = &sc->sc_hw.devs[index].input_pg; + + /* need to initialize the page cache */ + pc->tag_parent = sc->sc_bus.dma_parent_tag; + + if (usb_pc_alloc_mem(pc, pg, sc->sc_ctx_is_64_byte ? + (2 * sizeof(struct xhci_input_dev_ctx)) : + sizeof(struct xhci_input_dev_ctx), XHCI_PAGE_SIZE)) { + goto error; + } + + /* initialize all endpoint LINK TRBs */ + + for (i = 0; i != XHCI_MAX_ENDPOINTS; i++) { + + pc = &sc->sc_hw.devs[index].endpoint_pc[i]; + pg = &sc->sc_hw.devs[index].endpoint_pg[i]; + + /* need to initialize the page cache */ + pc->tag_parent = sc->sc_bus.dma_parent_tag; + + if (usb_pc_alloc_mem(pc, pg, + sizeof(struct xhci_dev_endpoint_trbs), XHCI_TRB_ALIGN)) { + goto error; + } + + /* lookup endpoint TRB ring */ + usbd_get_page(pc, 0, &buf_ep); + + /* get TRB pointer */ + trb = buf_ep.buffer; + trb += XHCI_MAX_TRANSFERS - 1; + + /* get TRB start address */ + addr = buf_ep.physaddr; + + /* create LINK TRB */ + trb->qwTrb0 = htole64(addr); + trb->dwTrb2 = htole32(XHCI_TRB_2_IRQ_SET(0)); + trb->dwTrb3 = htole32(XHCI_TRB_3_CYCLE_BIT | + XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); + + usb_pc_cpu_flush(pc); + } + + xhci_set_slot_pointer(sc, index, buf_dev.physaddr); + + return (0); + +error: + xhci_free_device_ext(udev); + + return (USB_ERR_NOMEM); +} + +static void +xhci_free_device_ext(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + uint8_t index; + uint8_t i; + + index = udev->controller_slot_id; + xhci_set_slot_pointer(sc, index, 0); + + usb_pc_free_mem(&sc->sc_hw.devs[index].device_pc); + usb_pc_free_mem(&sc->sc_hw.devs[index].input_pc); + for (i = 0; i != XHCI_MAX_ENDPOINTS; i++) + usb_pc_free_mem(&sc->sc_hw.devs[index].endpoint_pc[i]); +} + +static struct xhci_endpoint_ext * +xhci_get_endpoint_ext(struct usb_device *udev, struct usb_endpoint_descriptor *edesc) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + struct xhci_endpoint_ext *pepext; + struct usb_page_cache *pc; + struct usb_page_search buf_ep; + uint8_t epno; + uint8_t index; + + epno = edesc->bEndpointAddress; + if ((edesc->bmAttributes & UE_XFERTYPE) == UE_CONTROL) + epno |= UE_DIR_IN; + + epno = XHCI_EPNO2EPID(epno); + + index = udev->controller_slot_id; + + pc = &sc->sc_hw.devs[index].endpoint_pc[epno]; + + usbd_get_page(pc, 0, &buf_ep); + + pepext = &sc->sc_hw.devs[index].endp[epno]; + pepext->page_cache = pc; + 
pepext->trb = buf_ep.buffer; + pepext->physaddr = buf_ep.physaddr; + + return (pepext); +} + +static void +xhci_endpoint_doorbell(struct usb_xfer *xfer) +{ + struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); + uint8_t epno; + uint8_t index; + + epno = xfer->endpointno; + if (xfer->flags_int.control_xfr) + epno |= UE_DIR_IN; + + epno = XHCI_EPNO2EPID(epno); + index = xfer->xroot->udev->controller_slot_id; + + if (xfer->xroot->udev->flags.self_suspended == 0) { + XWRITE4(sc, door, XHCI_DOORBELL(index), + epno | XHCI_DB_SID_SET(xfer->stream_id)); + } +} + +static void +xhci_transfer_remove(struct usb_xfer *xfer, usb_error_t error) +{ + struct xhci_endpoint_ext *pepext; + + if (xfer->flags_int.bandwidth_reclaimed) { + xfer->flags_int.bandwidth_reclaimed = 0; + + pepext = xhci_get_endpoint_ext(xfer->xroot->udev, + xfer->endpoint->edesc); + + pepext->trb_used[xfer->stream_id]--; + + pepext->xfer[xfer->qh_pos] = NULL; + + if (error && pepext->trb_running != 0) { + pepext->trb_halted = 1; + pepext->trb_running = 0; + } + } +} + +static usb_error_t +xhci_transfer_insert(struct usb_xfer *xfer) +{ + struct xhci_td *td_first; + struct xhci_td *td_last; + struct xhci_trb *trb_link; + struct xhci_endpoint_ext *pepext; + uint64_t addr; + usb_stream_t id; + uint8_t i; + uint8_t inext; + uint8_t trb_limit; + + DPRINTFN(8, "\n"); + + id = xfer->stream_id; + + /* check if already inserted */ + if (xfer->flags_int.bandwidth_reclaimed) { + DPRINTFN(8, "Already in schedule\n"); + return (0); + } + + pepext = xhci_get_endpoint_ext(xfer->xroot->udev, + xfer->endpoint->edesc); + + td_first = xfer->td_transfer_first; + td_last = xfer->td_transfer_last; + addr = pepext->physaddr; + + switch (xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE) { + case UE_CONTROL: + case UE_INTERRUPT: + /* single buffered */ + trb_limit = 1; + break; + default: + /* multi buffered */ + trb_limit = (XHCI_MAX_TRANSFERS - 2); + break; + } + + if (pepext->trb_used[id] >= trb_limit) { + DPRINTFN(8, "Too many TDs queued.\n"); + return (USB_ERR_NOMEM); + } + + /* check if bMaxPacketSize changed */ + if (xfer->flags_int.control_xfr != 0 && + pepext->trb_ep_maxp != xfer->endpoint->edesc->wMaxPacketSize[0]) { + + DPRINTFN(8, "Reconfigure control endpoint\n"); + + /* force driver to reconfigure endpoint */ + pepext->trb_halted = 1; + pepext->trb_running = 0; + } + + /* check for stopped condition, after putting transfer on interrupt queue */ + if (pepext->trb_running == 0) { + struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); + + DPRINTFN(8, "Not running\n"); + + /* start configuration */ + (void)usb_proc_msignal(USB_BUS_CONTROL_XFER_PROC(&sc->sc_bus), + &sc->sc_config_msg[0], &sc->sc_config_msg[1]); + return (0); + } + + pepext->trb_used[id]++; + + /* get current TRB index */ + i = pepext->trb_index[id]; + + /* get next TRB index */ + inext = (i + 1); + + /* the last entry of the ring is a hardcoded link TRB */ + if (inext >= (XHCI_MAX_TRANSFERS - 1)) + inext = 0; + + /* store next TRB index, before stream ID offset is added */ + pepext->trb_index[id] = inext; + + /* offset for stream */ + i += id * XHCI_MAX_TRANSFERS; + inext += id * XHCI_MAX_TRANSFERS; + + /* compute terminating return address */ + addr += (inext * sizeof(struct xhci_trb)); + + /* compute link TRB pointer */ + trb_link = td_last->td_trb + td_last->ntrb; + + /* update next pointer of last link TRB */ + trb_link->qwTrb0 = htole64(addr); + trb_link->dwTrb2 = htole32(XHCI_TRB_2_IRQ_SET(0)); + trb_link->dwTrb3 = htole32(XHCI_TRB_3_IOC_BIT | + XHCI_TRB_3_CYCLE_BIT | + 
XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); + +#ifdef USB_DEBUG + xhci_dump_trb(&td_last->td_trb[td_last->ntrb]); +#endif + usb_pc_cpu_flush(td_last->page_cache); + + /* write ahead chain end marker */ + + pepext->trb[inext].qwTrb0 = 0; + pepext->trb[inext].dwTrb2 = 0; + pepext->trb[inext].dwTrb3 = 0; + + /* update next pointer of link TRB */ + + pepext->trb[i].qwTrb0 = htole64((uint64_t)td_first->td_self); + pepext->trb[i].dwTrb2 = htole32(XHCI_TRB_2_IRQ_SET(0)); + +#ifdef USB_DEBUG + xhci_dump_trb(&pepext->trb[i]); +#endif + usb_pc_cpu_flush(pepext->page_cache); + + /* toggle cycle bit which activates the transfer chain */ + + pepext->trb[i].dwTrb3 = htole32(XHCI_TRB_3_CYCLE_BIT | + XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); + + usb_pc_cpu_flush(pepext->page_cache); + + DPRINTF("qh_pos = %u\n", i); + + pepext->xfer[i] = xfer; + + xfer->qh_pos = i; + + xfer->flags_int.bandwidth_reclaimed = 1; + + xhci_endpoint_doorbell(xfer); + + return (0); +} + +static void +xhci_root_intr(struct xhci_softc *sc) +{ + uint16_t i; + + USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); + + /* clear any old interrupt data */ + memset(sc->sc_hub_idata, 0, sizeof(sc->sc_hub_idata)); + + for (i = 1; i <= sc->sc_noport; i++) { + /* pick out CHANGE bits from the status register */ + if (XREAD4(sc, oper, XHCI_PORTSC(i)) & ( + XHCI_PS_CSC | XHCI_PS_PEC | + XHCI_PS_OCC | XHCI_PS_WRC | + XHCI_PS_PRC | XHCI_PS_PLC | + XHCI_PS_CEC)) { + sc->sc_hub_idata[i / 8] |= 1 << (i % 8); + DPRINTF("port %d changed\n", i); + } + } + uhub_root_intr(&sc->sc_bus, sc->sc_hub_idata, + sizeof(sc->sc_hub_idata)); +} + +/*------------------------------------------------------------------------* + * xhci_device_done - XHCI done handler + * + * NOTE: This function can be called two times in a row on + * the same USB transfer. From close and from interrupt. + *------------------------------------------------------------------------*/ +static void +xhci_device_done(struct usb_xfer *xfer, usb_error_t error) +{ + DPRINTFN(2, "xfer=%p, endpoint=%p, error=%d\n", + xfer, xfer->endpoint, error); + + /* remove transfer from HW queue */ + xhci_transfer_remove(xfer, error); + + /* dequeue transfer and start next transfer */ + usbd_transfer_done(xfer, error); +} + +/*------------------------------------------------------------------------* + * XHCI data transfer support (generic type) + *------------------------------------------------------------------------*/ +static void +xhci_device_generic_open(struct usb_xfer *xfer) +{ + if (xfer->flags_int.isochronous_xfr) { + switch (xfer->xroot->udev->speed) { + case USB_SPEED_FULL: + break; + default: + usb_hs_bandwidth_alloc(xfer); + break; + } + } +} + +static void +xhci_device_generic_close(struct usb_xfer *xfer) +{ + DPRINTF("\n"); + + xhci_device_done(xfer, USB_ERR_CANCELLED); + + if (xfer->flags_int.isochronous_xfr) { + switch (xfer->xroot->udev->speed) { + case USB_SPEED_FULL: + break; + default: + usb_hs_bandwidth_free(xfer); + break; + } + } +} + +static void +xhci_device_generic_multi_enter(struct usb_endpoint *ep, + usb_stream_t stream_id, struct usb_xfer *enter_xfer) +{ + struct usb_xfer *xfer; + + /* check if there is a current transfer */ + xfer = ep->endpoint_q[stream_id].curr; + if (xfer == NULL) + return; + + /* + * Check if the current transfer is started and then pickup + * the next one, if any. Else wait for next start event due to + * block on failure feature. 
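+	 * A transfer counts as started once it has been put on the
+	 * hardware queue, which is tracked by the
+	 * "bandwidth_reclaimed" flag set in xhci_transfer_insert().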
+ */ + if (!xfer->flags_int.bandwidth_reclaimed) + return; + + xfer = TAILQ_FIRST(&ep->endpoint_q[stream_id].head); + if (xfer == NULL) { + /* + * In case of enter we have to consider that the + * transfer is queued by the USB core after the enter + * method is called. + */ + xfer = enter_xfer; + + if (xfer == NULL) + return; + } + + /* try to multi buffer */ + xhci_transfer_insert(xfer); +} + +static void +xhci_device_generic_enter(struct usb_xfer *xfer) +{ + DPRINTF("\n"); + + /* set up TD's and QH */ + xhci_setup_generic_chain(xfer); + + xhci_device_generic_multi_enter(xfer->endpoint, + xfer->stream_id, xfer); +} + +static void +xhci_device_generic_start(struct usb_xfer *xfer) +{ + DPRINTF("\n"); + + /* try to insert xfer on HW queue */ + xhci_transfer_insert(xfer); + + /* try to multi buffer */ + xhci_device_generic_multi_enter(xfer->endpoint, + xfer->stream_id, NULL); + + /* add transfer last on interrupt queue */ + usbd_transfer_enqueue(&xfer->xroot->bus->intr_q, xfer); + + /* start timeout, if any */ + if (xfer->timeout != 0) + usbd_transfer_timeout_ms(xfer, &xhci_timeout, xfer->timeout); +} + +static const struct usb_pipe_methods xhci_device_generic_methods = +{ + .open = xhci_device_generic_open, + .close = xhci_device_generic_close, + .enter = xhci_device_generic_enter, + .start = xhci_device_generic_start, +}; + +/*------------------------------------------------------------------------* + * xhci root HUB support + *------------------------------------------------------------------------* + * Simulate a hardware HUB by handling all the necessary requests. + *------------------------------------------------------------------------*/ + +#define HSETW(ptr, val) ptr = { (uint8_t)(val), (uint8_t)((val) >> 8) } + +static const +struct usb_device_descriptor xhci_devd = +{ + .bLength = sizeof(xhci_devd), + .bDescriptorType = UDESC_DEVICE, /* type */ + HSETW(.bcdUSB, 0x0300), /* USB version */ + .bDeviceClass = UDCLASS_HUB, /* class */ + .bDeviceSubClass = UDSUBCLASS_HUB, /* subclass */ + .bDeviceProtocol = UDPROTO_SSHUB, /* protocol */ + .bMaxPacketSize = 9, /* max packet size */ + HSETW(.idVendor, 0x0000), /* vendor */ + HSETW(.idProduct, 0x0000), /* product */ + HSETW(.bcdDevice, 0x0100), /* device version */ + .iManufacturer = 1, + .iProduct = 2, + .iSerialNumber = 0, + .bNumConfigurations = 1, /* # of configurations */ +}; + +static const +struct xhci_bos_desc xhci_bosd = { + .bosd = { + .bLength = sizeof(xhci_bosd.bosd), + .bDescriptorType = UDESC_BOS, + HSETW(.wTotalLength, sizeof(xhci_bosd)), + .bNumDeviceCaps = 3, + }, + .usb2extd = { + .bLength = sizeof(xhci_bosd.usb2extd), + .bDescriptorType = 1, + .bDevCapabilityType = 2, + .bmAttributes[0] = 2, + }, + .usbdcd = { + .bLength = sizeof(xhci_bosd.usbdcd), + .bDescriptorType = UDESC_DEVICE_CAPABILITY, + .bDevCapabilityType = 3, + .bmAttributes = 0, /* XXX */ + HSETW(.wSpeedsSupported, 0x000C), + .bFunctionalitySupport = 8, + .bU1DevExitLat = 255, /* dummy - not used */ + .wU2DevExitLat = { 0x00, 0x08 }, + }, + .cidd = { + .bLength = sizeof(xhci_bosd.cidd), + .bDescriptorType = 1, + .bDevCapabilityType = 4, + .bReserved = 0, + .bContainerID = 0, /* XXX */ + }, +}; + +static const +struct xhci_config_desc xhci_confd = { + .confd = { + .bLength = sizeof(xhci_confd.confd), + .bDescriptorType = UDESC_CONFIG, + .wTotalLength[0] = sizeof(xhci_confd), + .bNumInterface = 1, + .bConfigurationValue = 1, + .iConfiguration = 0, + .bmAttributes = UC_SELF_POWERED, + .bMaxPower = 0 /* max power */ + }, + .ifcd = { + .bLength = 
sizeof(xhci_confd.ifcd), + .bDescriptorType = UDESC_INTERFACE, + .bNumEndpoints = 1, + .bInterfaceClass = UICLASS_HUB, + .bInterfaceSubClass = UISUBCLASS_HUB, + .bInterfaceProtocol = 0, + }, + .endpd = { + .bLength = sizeof(xhci_confd.endpd), + .bDescriptorType = UDESC_ENDPOINT, + .bEndpointAddress = UE_DIR_IN | XHCI_INTR_ENDPT, + .bmAttributes = UE_INTERRUPT, + .wMaxPacketSize[0] = 2, /* max 15 ports */ + .bInterval = 255, + }, + .endpcd = { + .bLength = sizeof(xhci_confd.endpcd), + .bDescriptorType = UDESC_ENDPOINT_SS_COMP, + .bMaxBurst = 0, + .bmAttributes = 0, + }, +}; + +static const +struct usb_hub_ss_descriptor xhci_hubd = { + .bLength = sizeof(xhci_hubd), + .bDescriptorType = UDESC_SS_HUB, +}; + +static usb_error_t +xhci_roothub_exec(struct usb_device *udev, + struct usb_device_request *req, const void **pptr, uint16_t *plength) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + const char *str_ptr; + const void *ptr; + uint32_t port; + uint32_t v; + uint16_t len; + uint16_t i; + uint16_t value; + uint16_t index; + uint8_t j; + usb_error_t err; + + USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); + + /* buffer reset */ + ptr = (const void *)&sc->sc_hub_desc; + len = 0; + err = 0; + + value = UGETW(req->wValue); + index = UGETW(req->wIndex); + + DPRINTFN(3, "type=0x%02x request=0x%02x wLen=0x%04x " + "wValue=0x%04x wIndex=0x%04x\n", + req->bmRequestType, req->bRequest, + UGETW(req->wLength), value, index); + +#define C(x,y) ((x) | ((y) << 8)) + switch (C(req->bRequest, req->bmRequestType)) { + case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE): + case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE): + case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT): + /* + * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops + * for the integrated root hub. + */ + break; + case C(UR_GET_CONFIG, UT_READ_DEVICE): + len = 1; + sc->sc_hub_desc.temp[0] = sc->sc_conf; + break; + case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): + switch (value >> 8) { + case UDESC_DEVICE: + if ((value & 0xff) != 0) { + err = USB_ERR_IOERROR; + goto done; + } + len = sizeof(xhci_devd); + ptr = (const void *)&xhci_devd; + break; + + case UDESC_BOS: + if ((value & 0xff) != 0) { + err = USB_ERR_IOERROR; + goto done; + } + len = sizeof(xhci_bosd); + ptr = (const void *)&xhci_bosd; + break; + + case UDESC_CONFIG: + if ((value & 0xff) != 0) { + err = USB_ERR_IOERROR; + goto done; + } + len = sizeof(xhci_confd); + ptr = (const void *)&xhci_confd; + break; + + case UDESC_STRING: + switch (value & 0xff) { + case 0: /* Language table */ + str_ptr = "\001"; + break; + + case 1: /* Vendor */ + str_ptr = sc->sc_vendor; + break; + + case 2: /* Product */ + str_ptr = "XHCI root HUB"; + break; + + default: + str_ptr = ""; + break; + } + + len = usb_make_str_desc( + sc->sc_hub_desc.temp, + sizeof(sc->sc_hub_desc.temp), + str_ptr); + break; + + default: + err = USB_ERR_IOERROR; + goto done; + } + break; + case C(UR_GET_INTERFACE, UT_READ_INTERFACE): + len = 1; + sc->sc_hub_desc.temp[0] = 0; + break; + case C(UR_GET_STATUS, UT_READ_DEVICE): + len = 2; + USETW(sc->sc_hub_desc.stat.wStatus, UDS_SELF_POWERED); + break; + case C(UR_GET_STATUS, UT_READ_INTERFACE): + case C(UR_GET_STATUS, UT_READ_ENDPOINT): + len = 2; + USETW(sc->sc_hub_desc.stat.wStatus, 0); + break; + case C(UR_SET_ADDRESS, UT_WRITE_DEVICE): + if (value >= XHCI_MAX_DEVICES) { + err = USB_ERR_IOERROR; + goto done; + } + break; + case C(UR_SET_CONFIG, UT_WRITE_DEVICE): + if (value != 0 && value != 1) { + err = USB_ERR_IOERROR; + goto done; + } + sc->sc_conf = value; + break; + case C(UR_SET_DESCRIPTOR, 
UT_WRITE_DEVICE): + break; + case C(UR_SET_FEATURE, UT_WRITE_DEVICE): + case C(UR_SET_FEATURE, UT_WRITE_INTERFACE): + case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT): + err = USB_ERR_IOERROR; + goto done; + case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE): + break; + case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT): + break; + /* Hub requests */ + case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): + break; + case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): + DPRINTFN(9, "UR_CLEAR_PORT_FEATURE\n"); + + if ((index < 1) || + (index > sc->sc_noport)) { + err = USB_ERR_IOERROR; + goto done; + } + port = XHCI_PORTSC(index); + + v = XREAD4(sc, oper, port); + i = XHCI_PS_PLS_GET(v); + v &= ~XHCI_PS_CLEAR; + + switch (value) { + case UHF_C_BH_PORT_RESET: + XWRITE4(sc, oper, port, v | XHCI_PS_WRC); + break; + case UHF_C_PORT_CONFIG_ERROR: + XWRITE4(sc, oper, port, v | XHCI_PS_CEC); + break; + case UHF_C_PORT_SUSPEND: + case UHF_C_PORT_LINK_STATE: + XWRITE4(sc, oper, port, v | XHCI_PS_PLC); + break; + case UHF_C_PORT_CONNECTION: + XWRITE4(sc, oper, port, v | XHCI_PS_CSC); + break; + case UHF_C_PORT_ENABLE: + XWRITE4(sc, oper, port, v | XHCI_PS_PEC); + break; + case UHF_C_PORT_OVER_CURRENT: + XWRITE4(sc, oper, port, v | XHCI_PS_OCC); + break; + case UHF_C_PORT_RESET: + XWRITE4(sc, oper, port, v | XHCI_PS_PRC); + break; + case UHF_PORT_ENABLE: + XWRITE4(sc, oper, port, v | XHCI_PS_PED); + break; + case UHF_PORT_POWER: + XWRITE4(sc, oper, port, v & ~XHCI_PS_PP); + break; + case UHF_PORT_INDICATOR: + XWRITE4(sc, oper, port, v & ~XHCI_PS_PIC_SET(3)); + break; + case UHF_PORT_SUSPEND: + + /* U3 -> U15 */ + if (i == 3) { + XWRITE4(sc, oper, port, v | + XHCI_PS_PLS_SET(0xF) | XHCI_PS_LWS); + } + + /* wait 20ms for resume sequence to complete */ + usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 50); + + /* U0 */ + XWRITE4(sc, oper, port, v | + XHCI_PS_PLS_SET(0) | XHCI_PS_LWS); + break; + default: + err = USB_ERR_IOERROR; + goto done; + } + break; + + case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): + if ((value & 0xff) != 0) { + err = USB_ERR_IOERROR; + goto done; + } + + v = XREAD4(sc, capa, XHCI_HCSPARAMS0); + + sc->sc_hub_desc.hubd = xhci_hubd; + + sc->sc_hub_desc.hubd.bNbrPorts = sc->sc_noport; + + if (XHCI_HCS0_PPC(v)) + i = UHD_PWR_INDIVIDUAL; + else + i = UHD_PWR_GANGED; + + if (XHCI_HCS0_PIND(v)) + i |= UHD_PORT_IND; + + i |= UHD_OC_INDIVIDUAL; + + USETW(sc->sc_hub_desc.hubd.wHubCharacteristics, i); + + /* see XHCI section 5.4.9: */ + sc->sc_hub_desc.hubd.bPwrOn2PwrGood = 10; + + for (j = 1; j <= sc->sc_noport; j++) { + + v = XREAD4(sc, oper, XHCI_PORTSC(j)); + if (v & XHCI_PS_DR) { + sc->sc_hub_desc.hubd. 
+ DeviceRemovable[j / 8] |= 1U << (j % 8); + } + } + len = sc->sc_hub_desc.hubd.bLength; + break; + + case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): + len = 16; + memset(sc->sc_hub_desc.temp, 0, 16); + break; + + case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): + DPRINTFN(9, "UR_GET_STATUS i=%d\n", index); + + if ((index < 1) || + (index > sc->sc_noport)) { + err = USB_ERR_IOERROR; + goto done; + } + + v = XREAD4(sc, oper, XHCI_PORTSC(index)); + + DPRINTFN(9, "port status=0x%08x\n", v); + + i = UPS_PORT_LINK_STATE_SET(XHCI_PS_PLS_GET(v)); + + switch (XHCI_PS_SPEED_GET(v)) { + case 3: + i |= UPS_HIGH_SPEED; + break; + case 2: + i |= UPS_LOW_SPEED; + break; + case 1: + /* FULL speed */ + break; + default: + i |= UPS_OTHER_SPEED; + break; + } + + if (v & XHCI_PS_CCS) + i |= UPS_CURRENT_CONNECT_STATUS; + if (v & XHCI_PS_PED) + i |= UPS_PORT_ENABLED; + if (v & XHCI_PS_OCA) + i |= UPS_OVERCURRENT_INDICATOR; + if (v & XHCI_PS_PR) + i |= UPS_RESET; + if (v & XHCI_PS_PP) { + /* + * The USB 3.0 RH is using the + * USB 2.0's power bit + */ + i |= UPS_PORT_POWER; + } + USETW(sc->sc_hub_desc.ps.wPortStatus, i); + + i = 0; + if (v & XHCI_PS_CSC) + i |= UPS_C_CONNECT_STATUS; + if (v & XHCI_PS_PEC) + i |= UPS_C_PORT_ENABLED; + if (v & XHCI_PS_OCC) + i |= UPS_C_OVERCURRENT_INDICATOR; + if (v & XHCI_PS_WRC) + i |= UPS_C_BH_PORT_RESET; + if (v & XHCI_PS_PRC) + i |= UPS_C_PORT_RESET; + if (v & XHCI_PS_PLC) + i |= UPS_C_PORT_LINK_STATE; + if (v & XHCI_PS_CEC) + i |= UPS_C_PORT_CONFIG_ERROR; + + USETW(sc->sc_hub_desc.ps.wPortChange, i); + len = sizeof(sc->sc_hub_desc.ps); + break; + + case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): + err = USB_ERR_IOERROR; + goto done; + + case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): + break; + + case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): + + i = index >> 8; + index &= 0x00FF; + + if ((index < 1) || + (index > sc->sc_noport)) { + err = USB_ERR_IOERROR; + goto done; + } + + port = XHCI_PORTSC(index); + v = XREAD4(sc, oper, port) & ~XHCI_PS_CLEAR; + + switch (value) { + case UHF_PORT_U1_TIMEOUT: + if (XHCI_PS_SPEED_GET(v) != 4) { + err = USB_ERR_IOERROR; + goto done; + } + port = XHCI_PORTPMSC(index); + v = XREAD4(sc, oper, port); + v &= ~XHCI_PM3_U1TO_SET(0xFF); + v |= XHCI_PM3_U1TO_SET(i); + XWRITE4(sc, oper, port, v); + break; + case UHF_PORT_U2_TIMEOUT: + if (XHCI_PS_SPEED_GET(v) != 4) { + err = USB_ERR_IOERROR; + goto done; + } + port = XHCI_PORTPMSC(index); + v = XREAD4(sc, oper, port); + v &= ~XHCI_PM3_U2TO_SET(0xFF); + v |= XHCI_PM3_U2TO_SET(i); + XWRITE4(sc, oper, port, v); + break; + case UHF_BH_PORT_RESET: + XWRITE4(sc, oper, port, v | XHCI_PS_WPR); + break; + case UHF_PORT_LINK_STATE: + XWRITE4(sc, oper, port, v | + XHCI_PS_PLS_SET(i) | XHCI_PS_LWS); + /* 4ms settle time */ + usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 250); + break; + case UHF_PORT_ENABLE: + DPRINTFN(3, "set port enable %d\n", index); + break; + case UHF_PORT_SUSPEND: + DPRINTFN(6, "suspend port %u (LPM=%u)\n", index, i); + j = XHCI_PS_SPEED_GET(v); + if ((j < 1) || (j > 3)) { + /* non-supported speed */ + err = USB_ERR_IOERROR; + goto done; + } + XWRITE4(sc, oper, port, v | + XHCI_PS_PLS_SET(i ? 
2 /* LPM */ : 3) | XHCI_PS_LWS); + break; + case UHF_PORT_RESET: + DPRINTFN(6, "reset port %d\n", index); + XWRITE4(sc, oper, port, v | XHCI_PS_PR); + break; + case UHF_PORT_POWER: + DPRINTFN(3, "set port power %d\n", index); + XWRITE4(sc, oper, port, v | XHCI_PS_PP); + break; + case UHF_PORT_TEST: + DPRINTFN(3, "set port test %d\n", index); + break; + case UHF_PORT_INDICATOR: + DPRINTFN(3, "set port indicator %d\n", index); + + v &= ~XHCI_PS_PIC_SET(3); + v |= XHCI_PS_PIC_SET(1); + + XWRITE4(sc, oper, port, v); + break; + default: + err = USB_ERR_IOERROR; + goto done; + } + break; + + case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): + case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): + case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): + case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): + break; + default: + err = USB_ERR_IOERROR; + goto done; + } +done: + *plength = len; + *pptr = ptr; + return (err); +} + +static void +xhci_xfer_setup(struct usb_setup_params *parm) +{ + struct usb_page_search page_info; + struct usb_page_cache *pc; + struct usb_xfer *xfer; + void *last_obj; + uint32_t ntd; + uint32_t n; + + xfer = parm->curr_xfer; + + /* + * The proof for the "ntd" formula is illustrated like this: + * + * +------------------------------------+ + * | | + * | |remainder -> | + * | +-----+---+ | + * | | xxx | x | frm 0 | + * | +-----+---++ | + * | | xxx | xx | frm 1 | + * | +-----+----+ | + * | ... | + * +------------------------------------+ + * + * "xxx" means a completely full USB transfer descriptor + * + * "x" and "xx" means a short USB packet + * + * For the remainder of an USB transfer modulo + * "max_data_length" we need two USB transfer descriptors. + * One to transfer the remaining data and one to finalise with + * a zero length packet in case the "force_short_xfer" flag is + * set. We only need two USB transfer descriptors in the case + * where the transfer length of the first one is a factor of + * "max_frame_size". The rest of the needed USB transfer + * descriptors is given by the buffer size divided by the + * maximum data payload. 
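+	 *
+	 * (Editor's worked example, not part of the original comment:
+	 * assuming a single-frame bulk transfer with max_data_length ==
+	 * max_hc_frame_size == XHCI_TD_PAYLOAD_MAX (65536 bytes), the
+	 * formula below gives ntd = (2 * 1) + (65536 / 65536) = 3.)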
+ */ + parm->hc_max_packet_size = 0x400; + parm->hc_max_packet_count = 16 * 3; + parm->hc_max_frame_size = XHCI_TD_PAYLOAD_MAX; + + xfer->flags_int.bdma_enable = 1; + + usbd_transfer_setup_sub(parm); + + if (xfer->flags_int.isochronous_xfr) { + ntd = ((1 * xfer->nframes) + + (xfer->max_data_length / xfer->max_hc_frame_size)); + } else if (xfer->flags_int.control_xfr) { + ntd = ((2 * xfer->nframes) + 1 /* STATUS */ + + (xfer->max_data_length / xfer->max_hc_frame_size)); + } else { + ntd = ((2 * xfer->nframes) + + (xfer->max_data_length / xfer->max_hc_frame_size)); + } + +alloc_dma_set: + + if (parm->err) + return; + + /* + * Allocate queue heads and transfer descriptors + */ + last_obj = NULL; + + if (usbd_transfer_setup_sub_malloc( + parm, &pc, sizeof(struct xhci_td), + XHCI_TD_ALIGN, ntd)) { + parm->err = USB_ERR_NOMEM; + return; + } + if (parm->buf) { + for (n = 0; n != ntd; n++) { + struct xhci_td *td; + + usbd_get_page(pc + n, 0, &page_info); + + td = page_info.buffer; + + /* init TD */ + td->td_self = page_info.physaddr; + td->obj_next = last_obj; + td->page_cache = pc + n; + + last_obj = td; + + usb_pc_cpu_flush(pc + n); + } + } + xfer->td_start[xfer->flags_int.curr_dma_set] = last_obj; + + if (!xfer->flags_int.curr_dma_set) { + xfer->flags_int.curr_dma_set = 1; + goto alloc_dma_set; + } +} + +static usb_error_t +xhci_configure_reset_endpoint(struct usb_xfer *xfer) +{ + struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); + struct usb_page_search buf_inp; + struct usb_device *udev; + struct xhci_endpoint_ext *pepext; + struct usb_endpoint_descriptor *edesc; + struct usb_page_cache *pcinp; + usb_error_t err; + usb_stream_t stream_id; + uint8_t index; + uint8_t epno; + + pepext = xhci_get_endpoint_ext(xfer->xroot->udev, + xfer->endpoint->edesc); + + udev = xfer->xroot->udev; + index = udev->controller_slot_id; + + pcinp = &sc->sc_hw.devs[index].input_pc; + + usbd_get_page(pcinp, 0, &buf_inp); + + edesc = xfer->endpoint->edesc; + + epno = edesc->bEndpointAddress; + stream_id = xfer->stream_id; + + if ((edesc->bmAttributes & UE_XFERTYPE) == UE_CONTROL) + epno |= UE_DIR_IN; + + epno = XHCI_EPNO2EPID(epno); + + if (epno == 0) + return (USB_ERR_NO_PIPE); /* invalid */ + + XHCI_CMD_LOCK(sc); + + /* configure endpoint */ + + err = xhci_configure_endpoint_by_xfer(xfer); + + if (err != 0) { + XHCI_CMD_UNLOCK(sc); + return (err); + } + + /* + * Get the endpoint into the stopped state according to the + * endpoint context state diagram in the XHCI specification: + */ + + err = xhci_cmd_stop_ep(sc, 0, epno, index); + + if (err != 0) + DPRINTF("Could not stop endpoint %u\n", epno); + + err = xhci_cmd_reset_ep(sc, 0, epno, index); + + if (err != 0) + DPRINTF("Could not reset endpoint %u\n", epno); + + err = xhci_cmd_set_tr_dequeue_ptr(sc, + (pepext->physaddr + (stream_id * sizeof(struct xhci_trb) * + XHCI_MAX_TRANSFERS)) | XHCI_EPCTX_2_DCS_SET(1), + stream_id, epno, index); + + if (err != 0) + DPRINTF("Could not set dequeue ptr for endpoint %u\n", epno); + + /* + * Get the endpoint into the running state according to the + * endpoint context state diagram in the XHCI specification: + */ + + xhci_configure_mask(udev, (1U << epno) | 1U, 0); + + if (epno > 1) + err = xhci_cmd_configure_ep(sc, buf_inp.physaddr, 0, index); + else + err = xhci_cmd_evaluate_ctx(sc, buf_inp.physaddr, index); + + if (err != 0) + DPRINTF("Could not configure endpoint %u\n", epno); + + XHCI_CMD_UNLOCK(sc); + + return (0); +} + +static void +xhci_xfer_unsetup(struct usb_xfer *xfer) +{ + return; +} + +static void 
+xhci_start_dma_delay(struct usb_xfer *xfer) +{ + struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); + + /* put transfer on interrupt queue (again) */ + usbd_transfer_enqueue(&sc->sc_bus.intr_q, xfer); + + (void)usb_proc_msignal(USB_BUS_CONTROL_XFER_PROC(&sc->sc_bus), + &sc->sc_config_msg[0], &sc->sc_config_msg[1]); +} + +static void +xhci_configure_msg(struct usb_proc_msg *pm) +{ + struct xhci_softc *sc; + struct xhci_endpoint_ext *pepext; + struct usb_xfer *xfer; + + sc = XHCI_BUS2SC(((struct usb_bus_msg *)pm)->bus); + +restart: + TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { + + pepext = xhci_get_endpoint_ext(xfer->xroot->udev, + xfer->endpoint->edesc); + + if ((pepext->trb_halted != 0) || + (pepext->trb_running == 0)) { + + uint16_t i; + + /* clear halted and running */ + pepext->trb_halted = 0; + pepext->trb_running = 0; + + /* nuke remaining buffered transfers */ + + for (i = 0; i != (XHCI_MAX_TRANSFERS * + XHCI_MAX_STREAMS); i++) { + /* + * NOTE: We need to use the timeout + * error code here else existing + * isochronous clients can get + * confused: + */ + if (pepext->xfer[i] != NULL) { + xhci_device_done(pepext->xfer[i], + USB_ERR_TIMEOUT); + } + } + + /* + * NOTE: The USB transfer cannot vanish in + * this state! + */ + + USB_BUS_UNLOCK(&sc->sc_bus); + + xhci_configure_reset_endpoint(xfer); + + USB_BUS_LOCK(&sc->sc_bus); + + /* check if halted is still cleared */ + if (pepext->trb_halted == 0) { + pepext->trb_running = 1; + memset(pepext->trb_index, 0, + sizeof(pepext->trb_index)); + } + goto restart; + } + + if (xfer->flags_int.did_dma_delay) { + + /* remove transfer from interrupt queue (again) */ + usbd_transfer_dequeue(xfer); + + /* we are finally done */ + usb_dma_delay_done_cb(xfer); + + /* queue changed - restart */ + goto restart; + } + } + + TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { + + /* try to insert xfer on HW queue */ + xhci_transfer_insert(xfer); + + /* try to multi buffer */ + xhci_device_generic_multi_enter(xfer->endpoint, + xfer->stream_id, NULL); + } +} + +static void +xhci_ep_init(struct usb_device *udev, struct usb_endpoint_descriptor *edesc, + struct usb_endpoint *ep) +{ + struct xhci_endpoint_ext *pepext; + + DPRINTFN(2, "endpoint=%p, addr=%d, endpt=%d, mode=%d\n", + ep, udev->address, edesc->bEndpointAddress, udev->flags.usb_mode); + + if (udev->parent_hub == NULL) { + /* root HUB has special endpoint handling */ + return; + } + + ep->methods = &xhci_device_generic_methods; + + pepext = xhci_get_endpoint_ext(udev, edesc); + + USB_BUS_LOCK(udev->bus); + pepext->trb_halted = 1; + pepext->trb_running = 0; + USB_BUS_UNLOCK(udev->bus); +} + +static void +xhci_ep_uninit(struct usb_device *udev, struct usb_endpoint *ep) +{ + +} + +static void +xhci_ep_clear_stall(struct usb_device *udev, struct usb_endpoint *ep) +{ + struct xhci_endpoint_ext *pepext; + + DPRINTF("\n"); + + if (udev->flags.usb_mode != USB_MODE_HOST) { + /* not supported */ + return; + } + if (udev->parent_hub == NULL) { + /* root HUB has special endpoint handling */ + return; + } + + pepext = xhci_get_endpoint_ext(udev, ep->edesc); + + USB_BUS_LOCK(udev->bus); + pepext->trb_halted = 1; + pepext->trb_running = 0; + USB_BUS_UNLOCK(udev->bus); +} + +static usb_error_t +xhci_device_init(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + usb_error_t err; + uint8_t temp; + + /* no init for root HUB */ + if (udev->parent_hub == NULL) + return (0); + + XHCI_CMD_LOCK(sc); + + /* set invalid default */ + + udev->controller_slot_id = 
sc->sc_noslot + 1; + + /* try to get a new slot ID from the XHCI */ + + err = xhci_cmd_enable_slot(sc, &temp); + + if (err) { + XHCI_CMD_UNLOCK(sc); + return (err); + } + + if (temp > sc->sc_noslot) { + XHCI_CMD_UNLOCK(sc); + return (USB_ERR_BAD_ADDRESS); + } + + if (sc->sc_hw.devs[temp].state != XHCI_ST_DISABLED) { + DPRINTF("slot %u already allocated.\n", temp); + XHCI_CMD_UNLOCK(sc); + return (USB_ERR_BAD_ADDRESS); + } + + /* store slot ID for later reference */ + + udev->controller_slot_id = temp; + + /* reset data structure */ + + memset(&sc->sc_hw.devs[temp], 0, sizeof(sc->sc_hw.devs[0])); + + /* set mark slot allocated */ + + sc->sc_hw.devs[temp].state = XHCI_ST_ENABLED; + + err = xhci_alloc_device_ext(udev); + + XHCI_CMD_UNLOCK(sc); + + /* get device into default state */ + + if (err == 0) + err = xhci_set_address(udev, NULL, 0); + + return (err); +} + +static void +xhci_device_uninit(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + uint8_t index; + + /* no init for root HUB */ + if (udev->parent_hub == NULL) + return; + + XHCI_CMD_LOCK(sc); + + index = udev->controller_slot_id; + + if (index <= sc->sc_noslot) { + xhci_cmd_disable_slot(sc, index); + sc->sc_hw.devs[index].state = XHCI_ST_DISABLED; + + /* free device extension */ + xhci_free_device_ext(udev); + } + + XHCI_CMD_UNLOCK(sc); +} + +static void +xhci_get_dma_delay(struct usb_device *udev, uint32_t *pus) +{ + /* + * Wait until the hardware has finished any possible use of + * the transfer descriptor(s) + */ + *pus = 2048; /* microseconds */ +} + +static void +xhci_device_resume(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + uint8_t index; + uint8_t n; + uint8_t p; + + DPRINTF("\n"); + + /* check for root HUB */ + if (udev->parent_hub == NULL) + return; + + index = udev->controller_slot_id; + + XHCI_CMD_LOCK(sc); + + /* blindly resume all endpoints */ + + USB_BUS_LOCK(udev->bus); + + for (n = 1; n != XHCI_MAX_ENDPOINTS; n++) { + for (p = 0; p != XHCI_MAX_STREAMS; p++) { + XWRITE4(sc, door, XHCI_DOORBELL(index), + n | XHCI_DB_SID_SET(p)); + } + } + + USB_BUS_UNLOCK(udev->bus); + + XHCI_CMD_UNLOCK(sc); +} + +static void +xhci_device_suspend(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + uint8_t index; + uint8_t n; + usb_error_t err; + + DPRINTF("\n"); + + /* check for root HUB */ + if (udev->parent_hub == NULL) + return; + + index = udev->controller_slot_id; + + XHCI_CMD_LOCK(sc); + + /* blindly suspend all endpoints */ + + for (n = 1; n != XHCI_MAX_ENDPOINTS; n++) { + err = xhci_cmd_stop_ep(sc, 1, n, index); + if (err != 0) { + DPRINTF("Failed to suspend endpoint " + "%u on slot %u (ignored).\n", n, index); + } + } + + XHCI_CMD_UNLOCK(sc); +} + +static void +xhci_set_hw_power(struct usb_bus *bus) +{ + DPRINTF("\n"); +} + +static void +xhci_device_state_change(struct usb_device *udev) +{ + struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); + struct usb_page_search buf_inp; + usb_error_t err; + uint8_t index; + + /* check for root HUB */ + if (udev->parent_hub == NULL) + return; + + index = udev->controller_slot_id; + + DPRINTF("\n"); + + if (usb_get_device_state(udev) == USB_STATE_CONFIGURED) { + err = uhub_query_info(udev, &sc->sc_hw.devs[index].nports, + &sc->sc_hw.devs[index].tt); + if (err != 0) + sc->sc_hw.devs[index].nports = 0; + } + + XHCI_CMD_LOCK(sc); + + switch (usb_get_device_state(udev)) { + case USB_STATE_POWERED: + if (sc->sc_hw.devs[index].state == XHCI_ST_DEFAULT) + break; + + /* set default state */ + 
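+		/*
+		 * (Editor's note: the slot now walks the XHCI_ST_* states
+		 * declared in xhci.h, i.e. DISABLED -> ENABLED -> DEFAULT ->
+		 * ADDRESSED -> CONFIGURED, mirroring the slot state diagram
+		 * of the xHCI specification.)
+		 */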
sc->sc_hw.devs[index].state = XHCI_ST_DEFAULT; + + /* reset number of contexts */ + sc->sc_hw.devs[index].context_num = 0; + + err = xhci_cmd_reset_dev(sc, index); + + if (err != 0) { + DPRINTF("Device reset failed " + "for slot %u.\n", index); + } + break; + + case USB_STATE_ADDRESSED: + if (sc->sc_hw.devs[index].state == XHCI_ST_ADDRESSED) + break; + + sc->sc_hw.devs[index].state = XHCI_ST_ADDRESSED; + + /* set configure mask to slot only */ + xhci_configure_mask(udev, 1, 0); + + /* deconfigure all endpoints, except EP0 */ + err = xhci_cmd_configure_ep(sc, 0, 1, index); + + if (err) { + DPRINTF("Failed to deconfigure " + "slot %u.\n", index); + } + break; + + case USB_STATE_CONFIGURED: + if (sc->sc_hw.devs[index].state == XHCI_ST_CONFIGURED) + break; + + /* set configured state */ + sc->sc_hw.devs[index].state = XHCI_ST_CONFIGURED; + + /* reset number of contexts */ + sc->sc_hw.devs[index].context_num = 0; + + usbd_get_page(&sc->sc_hw.devs[index].input_pc, 0, &buf_inp); + + xhci_configure_mask(udev, 3, 0); + + err = xhci_configure_device(udev); + if (err != 0) { + DPRINTF("Could not configure device " + "at slot %u.\n", index); + } + + err = xhci_cmd_evaluate_ctx(sc, buf_inp.physaddr, index); + if (err != 0) { + DPRINTF("Could not evaluate device " + "context at slot %u.\n", index); + } + break; + + default: + break; + } + XHCI_CMD_UNLOCK(sc); +} + +static usb_error_t +xhci_set_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep, + uint8_t ep_mode) +{ + switch (ep_mode) { + case USB_EP_MODE_DEFAULT: + return (0); + case USB_EP_MODE_STREAMS: + if (xhcistreams == 0 || + (ep->edesc->bmAttributes & UE_XFERTYPE) != UE_BULK || + udev->speed != USB_SPEED_SUPER) + return (USB_ERR_INVAL); + return (0); + default: + return (USB_ERR_INVAL); + } +} + +static const struct usb_bus_methods xhci_bus_methods = { + .endpoint_init = xhci_ep_init, + .endpoint_uninit = xhci_ep_uninit, + .xfer_setup = xhci_xfer_setup, + .xfer_unsetup = xhci_xfer_unsetup, + .get_dma_delay = xhci_get_dma_delay, + .device_init = xhci_device_init, + .device_uninit = xhci_device_uninit, + .device_resume = xhci_device_resume, + .device_suspend = xhci_device_suspend, + .set_hw_power = xhci_set_hw_power, + .roothub_exec = xhci_roothub_exec, + .xfer_poll = xhci_do_poll, + .start_dma_delay = xhci_start_dma_delay, + .set_address = xhci_set_address, + .clear_stall = xhci_ep_clear_stall, + .device_state_change = xhci_device_state_change, + .set_hw_power_sleep = xhci_set_hw_power_sleep, + .set_endpoint_mode = xhci_set_endpoint_mode, +}; diff --git a/freebsd/sys/dev/usb/controller/xhci.h b/freebsd/sys/dev/usb/controller/xhci.h new file mode 100644 index 00000000..4b7118d2 --- /dev/null +++ b/freebsd/sys/dev/usb/controller/xhci.h @@ -0,0 +1,538 @@ +/* $FreeBSD$ */ + +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2010 Hans Petter Selasky. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _XHCI_H_ +#define _XHCI_H_ + +#define XHCI_MAX_DEVICES MIN(USB_MAX_DEVICES, 128) +#define XHCI_MAX_ENDPOINTS 32 /* hardcoded - do not change */ +#define XHCI_MAX_SCRATCHPADS 256 /* theoretical max is 1023 */ +#define XHCI_MAX_EVENTS (16 * 13) +#define XHCI_MAX_COMMANDS (16 * 1) +#define XHCI_MAX_RSEG 1 +#define XHCI_MAX_TRANSFERS 4 +#if USB_MAX_EP_STREAMS == 8 +#define XHCI_MAX_STREAMS 8 +#define XHCI_MAX_STREAMS_LOG 3 +#elif USB_MAX_EP_STREAMS == 1 +#define XHCI_MAX_STREAMS 1 +#define XHCI_MAX_STREAMS_LOG 0 +#else +#error "The USB_MAX_EP_STREAMS value is not supported." +#endif +#define XHCI_DEV_CTX_ADDR_ALIGN 64 /* bytes */ +#define XHCI_DEV_CTX_ALIGN 64 /* bytes */ +#define XHCI_INPUT_CTX_ALIGN 64 /* bytes */ +#define XHCI_SLOT_CTX_ALIGN 32 /* bytes */ +#define XHCI_ENDP_CTX_ALIGN 32 /* bytes */ +#define XHCI_STREAM_CTX_ALIGN 16 /* bytes */ +#define XHCI_TRANS_RING_SEG_ALIGN 16 /* bytes */ +#define XHCI_CMD_RING_SEG_ALIGN 64 /* bytes */ +#define XHCI_EVENT_RING_SEG_ALIGN 64 /* bytes */ +#define XHCI_SCRATCH_BUF_ARRAY_ALIGN 64 /* bytes */ +#define XHCI_SCRATCH_BUFFER_ALIGN USB_PAGE_SIZE +#define XHCI_TRB_ALIGN 16 /* bytes */ +#define XHCI_TD_ALIGN 64 /* bytes */ +#define XHCI_PAGE_SIZE 4096 /* bytes */ + +struct xhci_dev_ctx_addr { + volatile uint64_t qwBaaDevCtxAddr[USB_MAX_DEVICES + 1]; + struct { + volatile uint64_t dummy; + } __aligned(64) padding; + volatile uint64_t qwSpBufPtr[XHCI_MAX_SCRATCHPADS]; +}; + +#define XHCI_EPNO2EPID(x) \ + ((((x) & UE_DIR_IN) ? 
1 : 0) | (2 * ((x) & UE_ADDR))) + +struct xhci_slot_ctx { + volatile uint32_t dwSctx0; +#define XHCI_SCTX_0_ROUTE_SET(x) ((x) & 0xFFFFF) +#define XHCI_SCTX_0_ROUTE_GET(x) ((x) & 0xFFFFF) +#define XHCI_SCTX_0_SPEED_SET(x) (((x) & 0xF) << 20) +#define XHCI_SCTX_0_SPEED_GET(x) (((x) >> 20) & 0xF) +#define XHCI_SCTX_0_MTT_SET(x) (((x) & 0x1) << 25) +#define XHCI_SCTX_0_MTT_GET(x) (((x) >> 25) & 0x1) +#define XHCI_SCTX_0_HUB_SET(x) (((x) & 0x1) << 26) +#define XHCI_SCTX_0_HUB_GET(x) (((x) >> 26) & 0x1) +#define XHCI_SCTX_0_CTX_NUM_SET(x) (((x) & 0x1F) << 27) +#define XHCI_SCTX_0_CTX_NUM_GET(x) (((x) >> 27) & 0x1F) + volatile uint32_t dwSctx1; +#define XHCI_SCTX_1_MAX_EL_SET(x) ((x) & 0xFFFF) +#define XHCI_SCTX_1_MAX_EL_GET(x) ((x) & 0xFFFF) +#define XHCI_SCTX_1_RH_PORT_SET(x) (((x) & 0xFF) << 16) +#define XHCI_SCTX_1_RH_PORT_GET(x) (((x) >> 16) & 0xFF) +#define XHCI_SCTX_1_NUM_PORTS_SET(x) (((x) & 0xFF) << 24) +#define XHCI_SCTX_1_NUM_PORTS_GET(x) (((x) >> 24) & 0xFF) + volatile uint32_t dwSctx2; +#define XHCI_SCTX_2_TT_HUB_SID_SET(x) ((x) & 0xFF) +#define XHCI_SCTX_2_TT_HUB_SID_GET(x) ((x) & 0xFF) +#define XHCI_SCTX_2_TT_PORT_NUM_SET(x) (((x) & 0xFF) << 8) +#define XHCI_SCTX_2_TT_PORT_NUM_GET(x) (((x) >> 8) & 0xFF) +#define XHCI_SCTX_2_TT_THINK_TIME_SET(x) (((x) & 0x3) << 16) +#define XHCI_SCTX_2_TT_THINK_TIME_GET(x) (((x) >> 16) & 0x3) +#define XHCI_SCTX_2_IRQ_TARGET_SET(x) (((x) & 0x3FF) << 22) +#define XHCI_SCTX_2_IRQ_TARGET_GET(x) (((x) >> 22) & 0x3FF) + volatile uint32_t dwSctx3; +#define XHCI_SCTX_3_DEV_ADDR_SET(x) ((x) & 0xFF) +#define XHCI_SCTX_3_DEV_ADDR_GET(x) ((x) & 0xFF) +#define XHCI_SCTX_3_SLOT_STATE_SET(x) (((x) & 0x1F) << 27) +#define XHCI_SCTX_3_SLOT_STATE_GET(x) (((x) >> 27) & 0x1F) + volatile uint32_t dwSctx4; + volatile uint32_t dwSctx5; + volatile uint32_t dwSctx6; + volatile uint32_t dwSctx7; +}; + +struct xhci_endp_ctx { + volatile uint32_t dwEpCtx0; +#define XHCI_EPCTX_0_EPSTATE_SET(x) ((x) & 0x7) +#define XHCI_EPCTX_0_EPSTATE_GET(x) ((x) & 0x7) +#define XHCI_EPCTX_0_MULT_SET(x) (((x) & 0x3) << 8) +#define XHCI_EPCTX_0_MULT_GET(x) (((x) >> 8) & 0x3) +#define XHCI_EPCTX_0_MAXP_STREAMS_SET(x) (((x) & 0x1F) << 10) +#define XHCI_EPCTX_0_MAXP_STREAMS_GET(x) (((x) >> 10) & 0x1F) +#define XHCI_EPCTX_0_LSA_SET(x) (((x) & 0x1) << 15) +#define XHCI_EPCTX_0_LSA_GET(x) (((x) >> 15) & 0x1) +#define XHCI_EPCTX_0_IVAL_SET(x) (((x) & 0xFF) << 16) +#define XHCI_EPCTX_0_IVAL_GET(x) (((x) >> 16) & 0xFF) + volatile uint32_t dwEpCtx1; +#define XHCI_EPCTX_1_CERR_SET(x) (((x) & 0x3) << 1) +#define XHCI_EPCTX_1_CERR_GET(x) (((x) >> 1) & 0x3) +#define XHCI_EPCTX_1_EPTYPE_SET(x) (((x) & 0x7) << 3) +#define XHCI_EPCTX_1_EPTYPE_GET(x) (((x) >> 3) & 0x7) +#define XHCI_EPCTX_1_HID_SET(x) (((x) & 0x1) << 7) +#define XHCI_EPCTX_1_HID_GET(x) (((x) >> 7) & 0x1) +#define XHCI_EPCTX_1_MAXB_SET(x) (((x) & 0xFF) << 8) +#define XHCI_EPCTX_1_MAXB_GET(x) (((x) >> 8) & 0xFF) +#define XHCI_EPCTX_1_MAXP_SIZE_SET(x) (((x) & 0xFFFF) << 16) +#define XHCI_EPCTX_1_MAXP_SIZE_GET(x) (((x) >> 16) & 0xFFFF) + volatile uint64_t qwEpCtx2; +#define XHCI_EPCTX_2_DCS_SET(x) ((x) & 0x1) +#define XHCI_EPCTX_2_DCS_GET(x) ((x) & 0x1) +#define XHCI_EPCTX_2_TR_DQ_PTR_MASK 0xFFFFFFFFFFFFFFF0U + volatile uint32_t dwEpCtx4; +#define XHCI_EPCTX_4_AVG_TRB_LEN_SET(x) ((x) & 0xFFFF) +#define XHCI_EPCTX_4_AVG_TRB_LEN_GET(x) ((x) & 0xFFFF) +#define XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x) (((x) & 0xFFFF) << 16) +#define XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(x) (((x) >> 16) & 0xFFFF) + volatile uint32_t dwEpCtx5; + volatile uint32_t dwEpCtx6; + 
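+	/*
+	 * (Editor's note: dwEpCtx5..dwEpCtx7 carry no driver-visible
+	 * fields; they pad the endpoint context to the architected
+	 * 32-byte size, XHCI_ENDP_CTX_ALIGN.)
+	 */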
volatile uint32_t dwEpCtx7; +}; + +struct xhci_input_ctx { +#define XHCI_INCTX_NON_CTRL_MASK 0xFFFFFFFCU + volatile uint32_t dwInCtx0; +#define XHCI_INCTX_0_DROP_MASK(n) (1U << (n)) + volatile uint32_t dwInCtx1; +#define XHCI_INCTX_1_ADD_MASK(n) (1U << (n)) + volatile uint32_t dwInCtx2; + volatile uint32_t dwInCtx3; + volatile uint32_t dwInCtx4; + volatile uint32_t dwInCtx5; + volatile uint32_t dwInCtx6; + volatile uint32_t dwInCtx7; +}; + +struct xhci_input_dev_ctx { + struct xhci_input_ctx ctx_input; + struct xhci_slot_ctx ctx_slot; + struct xhci_endp_ctx ctx_ep[XHCI_MAX_ENDPOINTS - 1]; +}; + +struct xhci_dev_ctx { + struct xhci_slot_ctx ctx_slot; + struct xhci_endp_ctx ctx_ep[XHCI_MAX_ENDPOINTS - 1]; +} __aligned(XHCI_DEV_CTX_ALIGN); + +struct xhci_stream_ctx { + volatile uint64_t qwSctx0; +#define XHCI_SCTX_0_DCS_GET(x) ((x) & 0x1) +#define XHCI_SCTX_0_DCS_SET(x) ((x) & 0x1) +#define XHCI_SCTX_0_SCT_SET(x) (((x) & 0x7) << 1) +#define XHCI_SCTX_0_SCT_GET(x) (((x) >> 1) & 0x7) +#define XHCI_SCTX_0_SCT_SEC_TR_RING 0x0 +#define XHCI_SCTX_0_SCT_PRIM_TR_RING 0x1 +#define XHCI_SCTX_0_SCT_PRIM_SSA_8 0x2 +#define XHCI_SCTX_0_SCT_PRIM_SSA_16 0x3 +#define XHCI_SCTX_0_SCT_PRIM_SSA_32 0x4 +#define XHCI_SCTX_0_SCT_PRIM_SSA_64 0x5 +#define XHCI_SCTX_0_SCT_PRIM_SSA_128 0x6 +#define XHCI_SCTX_0_SCT_PRIM_SSA_256 0x7 +#define XHCI_SCTX_0_TR_DQ_PTR_MASK 0xFFFFFFFFFFFFFFF0U + volatile uint32_t dwSctx2; + volatile uint32_t dwSctx3; +}; + +struct xhci_trb { + volatile uint64_t qwTrb0; +#define XHCI_TRB_0_DIR_IN_MASK (0x80ULL << 0) +#define XHCI_TRB_0_WLENGTH_MASK (0xFFFFULL << 48) + volatile uint32_t dwTrb2; +#define XHCI_TRB_2_ERROR_GET(x) (((x) >> 24) & 0xFF) +#define XHCI_TRB_2_ERROR_SET(x) (((x) & 0xFF) << 24) +#define XHCI_TRB_2_TDSZ_GET(x) (((x) >> 17) & 0x1F) +#define XHCI_TRB_2_TDSZ_SET(x) (((x) & 0x1F) << 17) +#define XHCI_TRB_2_REM_GET(x) ((x) & 0xFFFFFF) +#define XHCI_TRB_2_REM_SET(x) ((x) & 0xFFFFFF) +#define XHCI_TRB_2_BYTES_GET(x) ((x) & 0x1FFFF) +#define XHCI_TRB_2_BYTES_SET(x) ((x) & 0x1FFFF) +#define XHCI_TRB_2_IRQ_GET(x) (((x) >> 22) & 0x3FF) +#define XHCI_TRB_2_IRQ_SET(x) (((x) & 0x3FF) << 22) +#define XHCI_TRB_2_STREAM_GET(x) (((x) >> 16) & 0xFFFF) +#define XHCI_TRB_2_STREAM_SET(x) (((x) & 0xFFFF) << 16) + + volatile uint32_t dwTrb3; +#define XHCI_TRB_3_TYPE_GET(x) (((x) >> 10) & 0x3F) +#define XHCI_TRB_3_TYPE_SET(x) (((x) & 0x3F) << 10) +#define XHCI_TRB_3_CYCLE_BIT (1U << 0) +#define XHCI_TRB_3_TC_BIT (1U << 1) /* command ring only */ +#define XHCI_TRB_3_ENT_BIT (1U << 1) /* transfer ring only */ +#define XHCI_TRB_3_ISP_BIT (1U << 2) +#define XHCI_TRB_3_NSNOOP_BIT (1U << 3) +#define XHCI_TRB_3_CHAIN_BIT (1U << 4) +#define XHCI_TRB_3_IOC_BIT (1U << 5) +#define XHCI_TRB_3_IDT_BIT (1U << 6) +#define XHCI_TRB_3_TBC_GET(x) (((x) >> 7) & 3) +#define XHCI_TRB_3_TBC_SET(x) (((x) & 3) << 7) +#define XHCI_TRB_3_BEI_BIT (1U << 9) +#define XHCI_TRB_3_DCEP_BIT (1U << 9) +#define XHCI_TRB_3_PRSV_BIT (1U << 9) +#define XHCI_TRB_3_BSR_BIT (1U << 9) +#define XHCI_TRB_3_TRT_MASK (3U << 16) +#define XHCI_TRB_3_TRT_NONE (0U << 16) +#define XHCI_TRB_3_TRT_OUT (2U << 16) +#define XHCI_TRB_3_TRT_IN (3U << 16) +#define XHCI_TRB_3_DIR_IN (1U << 16) +#define XHCI_TRB_3_TLBPC_GET(x) (((x) >> 16) & 0xF) +#define XHCI_TRB_3_TLBPC_SET(x) (((x) & 0xF) << 16) +#define XHCI_TRB_3_EP_GET(x) (((x) >> 16) & 0x1F) +#define XHCI_TRB_3_EP_SET(x) (((x) & 0x1F) << 16) +#define XHCI_TRB_3_FRID_GET(x) (((x) >> 20) & 0x7FF) +#define XHCI_TRB_3_FRID_SET(x) (((x) & 0x7FF) << 20) +#define XHCI_TRB_3_ISO_SIA_BIT (1U << 31) +#define 
XHCI_TRB_3_SUSP_EP_BIT (1U << 23) +#define XHCI_TRB_3_SLOT_GET(x) (((x) >> 24) & 0xFF) +#define XHCI_TRB_3_SLOT_SET(x) (((x) & 0xFF) << 24) + +/* Commands */ +#define XHCI_TRB_TYPE_RESERVED 0x00 +#define XHCI_TRB_TYPE_NORMAL 0x01 +#define XHCI_TRB_TYPE_SETUP_STAGE 0x02 +#define XHCI_TRB_TYPE_DATA_STAGE 0x03 +#define XHCI_TRB_TYPE_STATUS_STAGE 0x04 +#define XHCI_TRB_TYPE_ISOCH 0x05 +#define XHCI_TRB_TYPE_LINK 0x06 +#define XHCI_TRB_TYPE_EVENT_DATA 0x07 +#define XHCI_TRB_TYPE_NOOP 0x08 +#define XHCI_TRB_TYPE_ENABLE_SLOT 0x09 +#define XHCI_TRB_TYPE_DISABLE_SLOT 0x0A +#define XHCI_TRB_TYPE_ADDRESS_DEVICE 0x0B +#define XHCI_TRB_TYPE_CONFIGURE_EP 0x0C +#define XHCI_TRB_TYPE_EVALUATE_CTX 0x0D +#define XHCI_TRB_TYPE_RESET_EP 0x0E +#define XHCI_TRB_TYPE_STOP_EP 0x0F +#define XHCI_TRB_TYPE_SET_TR_DEQUEUE 0x10 +#define XHCI_TRB_TYPE_RESET_DEVICE 0x11 +#define XHCI_TRB_TYPE_FORCE_EVENT 0x12 +#define XHCI_TRB_TYPE_NEGOTIATE_BW 0x13 +#define XHCI_TRB_TYPE_SET_LATENCY_TOL 0x14 +#define XHCI_TRB_TYPE_GET_PORT_BW 0x15 +#define XHCI_TRB_TYPE_FORCE_HEADER 0x16 +#define XHCI_TRB_TYPE_NOOP_CMD 0x17 + +/* Events */ +#define XHCI_TRB_EVENT_TRANSFER 0x20 +#define XHCI_TRB_EVENT_CMD_COMPLETE 0x21 +#define XHCI_TRB_EVENT_PORT_STS_CHANGE 0x22 +#define XHCI_TRB_EVENT_BW_REQUEST 0x23 +#define XHCI_TRB_EVENT_DOORBELL 0x24 +#define XHCI_TRB_EVENT_HOST_CTRL 0x25 +#define XHCI_TRB_EVENT_DEVICE_NOTIFY 0x26 +#define XHCI_TRB_EVENT_MFINDEX_WRAP 0x27 + +/* Error codes */ +#define XHCI_TRB_ERROR_INVALID 0x00 +#define XHCI_TRB_ERROR_SUCCESS 0x01 +#define XHCI_TRB_ERROR_DATA_BUF 0x02 +#define XHCI_TRB_ERROR_BABBLE 0x03 +#define XHCI_TRB_ERROR_XACT 0x04 +#define XHCI_TRB_ERROR_TRB 0x05 +#define XHCI_TRB_ERROR_STALL 0x06 +#define XHCI_TRB_ERROR_RESOURCE 0x07 +#define XHCI_TRB_ERROR_BANDWIDTH 0x08 +#define XHCI_TRB_ERROR_NO_SLOTS 0x09 +#define XHCI_TRB_ERROR_STREAM_TYPE 0x0A +#define XHCI_TRB_ERROR_SLOT_NOT_ON 0x0B +#define XHCI_TRB_ERROR_ENDP_NOT_ON 0x0C +#define XHCI_TRB_ERROR_SHORT_PKT 0x0D +#define XHCI_TRB_ERROR_RING_UNDERRUN 0x0E +#define XHCI_TRB_ERROR_RING_OVERRUN 0x0F +#define XHCI_TRB_ERROR_VF_RING_FULL 0x10 +#define XHCI_TRB_ERROR_PARAMETER 0x11 +#define XHCI_TRB_ERROR_BW_OVERRUN 0x12 +#define XHCI_TRB_ERROR_CONTEXT_STATE 0x13 +#define XHCI_TRB_ERROR_NO_PING_RESP 0x14 +#define XHCI_TRB_ERROR_EV_RING_FULL 0x15 +#define XHCI_TRB_ERROR_INCOMPAT_DEV 0x16 +#define XHCI_TRB_ERROR_MISSED_SERVICE 0x17 +#define XHCI_TRB_ERROR_CMD_RING_STOP 0x18 +#define XHCI_TRB_ERROR_CMD_ABORTED 0x19 +#define XHCI_TRB_ERROR_STOPPED 0x1A +#define XHCI_TRB_ERROR_LENGTH 0x1B +#define XHCI_TRB_ERROR_BAD_MELAT 0x1D +#define XHCI_TRB_ERROR_ISOC_OVERRUN 0x1F +#define XHCI_TRB_ERROR_EVENT_LOST 0x20 +#define XHCI_TRB_ERROR_UNDEFINED 0x21 +#define XHCI_TRB_ERROR_INVALID_SID 0x22 +#define XHCI_TRB_ERROR_SEC_BW 0x23 +#define XHCI_TRB_ERROR_SPLIT_XACT 0x24 +} __aligned(4); + +struct xhci_dev_endpoint_trbs { + struct xhci_trb trb[(XHCI_MAX_STREAMS * + XHCI_MAX_TRANSFERS) + XHCI_MAX_STREAMS]; +}; + +#if (USB_PAGE_SIZE < 4096) +#error "The XHCI driver needs a pagesize above or equal to 4K" +#endif + +/* Define the maximum payload which we will handle in a single TRB */ +#define XHCI_TD_PAYLOAD_MAX 65536 /* bytes */ + +/* Define the maximum payload of a single scatter-gather list element */ +#define XHCI_TD_PAGE_SIZE \ + ((USB_PAGE_SIZE < XHCI_TD_PAYLOAD_MAX) ? 
USB_PAGE_SIZE : XHCI_TD_PAYLOAD_MAX) + +/* Define the maximum length of the scatter-gather list */ +#define XHCI_TD_PAGE_NBUF \ + (((XHCI_TD_PAYLOAD_MAX + XHCI_TD_PAGE_SIZE - 1) / XHCI_TD_PAGE_SIZE) + 1) + +struct xhci_td { + /* one LINK TRB has been added to the TRB array */ + struct xhci_trb td_trb[XHCI_TD_PAGE_NBUF + 1]; + +/* + * Extra information needed: + */ + uint64_t td_self; + struct xhci_td *next; + struct xhci_td *alt_next; + struct xhci_td *obj_next; + struct usb_page_cache *page_cache; + uint32_t len; + uint32_t remainder; + uint8_t ntrb; + uint8_t status; +} __aligned(XHCI_TRB_ALIGN); + +struct xhci_command { + struct xhci_trb trb; + TAILQ_ENTRY(xhci_command) entry; +}; + +struct xhci_event_ring_seg { + volatile uint64_t qwEvrsTablePtr; + volatile uint32_t dwEvrsTableSize; + volatile uint32_t dwEvrsReserved; +}; + +struct xhci_hw_root { + struct xhci_event_ring_seg hwr_ring_seg[XHCI_MAX_RSEG]; + struct { + volatile uint64_t dummy; + } __aligned(64) padding; + struct xhci_trb hwr_events[XHCI_MAX_EVENTS]; + struct xhci_trb hwr_commands[XHCI_MAX_COMMANDS]; +}; + +struct xhci_endpoint_ext { + struct xhci_trb *trb; + struct usb_xfer *xfer[XHCI_MAX_TRANSFERS * XHCI_MAX_STREAMS]; + struct usb_page_cache *page_cache; + uint64_t physaddr; + uint8_t trb_used[XHCI_MAX_STREAMS]; + uint8_t trb_index[XHCI_MAX_STREAMS]; + uint8_t trb_halted; + uint8_t trb_running; + uint8_t trb_ep_mode; + uint8_t trb_ep_maxp; +}; + +enum { + XHCI_ST_DISABLED, + XHCI_ST_ENABLED, + XHCI_ST_DEFAULT, + XHCI_ST_ADDRESSED, + XHCI_ST_CONFIGURED, + XHCI_ST_MAX +}; + +struct xhci_hw_dev { + struct usb_page_cache device_pc; + struct usb_page_cache input_pc; + struct usb_page_cache endpoint_pc[XHCI_MAX_ENDPOINTS]; + + struct usb_page device_pg; + struct usb_page input_pg; + struct usb_page endpoint_pg[XHCI_MAX_ENDPOINTS]; + + struct xhci_endpoint_ext endp[XHCI_MAX_ENDPOINTS]; + + uint8_t state; + uint8_t nports; + uint8_t tt; + uint8_t context_num; +}; + +struct xhci_hw_softc { + struct usb_page_cache root_pc; + struct usb_page_cache ctx_pc; + struct usb_page_cache scratch_pc[XHCI_MAX_SCRATCHPADS]; + + struct usb_page root_pg; + struct usb_page ctx_pg; + struct usb_page scratch_pg[XHCI_MAX_SCRATCHPADS]; + + struct xhci_hw_dev devs[XHCI_MAX_DEVICES + 1]; +}; + +struct xhci_config_desc { + struct usb_config_descriptor confd; + struct usb_interface_descriptor ifcd; + struct usb_endpoint_descriptor endpd; + struct usb_endpoint_ss_comp_descriptor endpcd; +} __packed; + +struct xhci_bos_desc { + struct usb_bos_descriptor bosd; + struct usb_devcap_usb2ext_descriptor usb2extd; + struct usb_devcap_ss_descriptor usbdcd; + struct usb_devcap_container_id_descriptor cidd; +} __packed; + +union xhci_hub_desc { + struct usb_status stat; + struct usb_port_status ps; + struct usb_hub_ss_descriptor hubd; + uint8_t temp[128]; +}; + +typedef int (xhci_port_route_t)(device_t, uint32_t, uint32_t); + +struct xhci_softc { + struct xhci_hw_softc sc_hw; + /* base device */ + struct usb_bus sc_bus; + /* configure message */ + struct usb_bus_msg sc_config_msg[2]; + + struct usb_callout sc_callout; + + xhci_port_route_t *sc_port_route; + + union xhci_hub_desc sc_hub_desc; + + struct cv sc_cmd_cv; + struct sx sc_cmd_sx; + + struct usb_device *sc_devices[XHCI_MAX_DEVICES]; + struct resource *sc_io_res; + struct resource *sc_irq_res; + struct resource *sc_msix_res; + + void *sc_intr_hdl; + bus_size_t sc_io_size; + bus_space_tag_t sc_io_tag; + bus_space_handle_t sc_io_hdl; + /* last pending command address */ + uint64_t sc_cmd_addr; + /* result of 
command */ + uint32_t sc_cmd_result[2]; + /* copy of cmd register */ + uint32_t sc_cmd; + /* worst case exit latency */ + uint32_t sc_exit_lat_max; + + /* offset to operational registers */ + uint32_t sc_oper_off; + /* offset to capability registers */ + uint32_t sc_capa_off; + /* offset to runtime registers */ + uint32_t sc_runt_off; + /* offset to doorbell registers */ + uint32_t sc_door_off; + + /* chip specific */ + uint16_t sc_erst_max; + uint16_t sc_event_idx; + uint16_t sc_command_idx; + uint16_t sc_imod_default; + + /* number of scratch pages */ + uint16_t sc_noscratch; + + uint8_t sc_event_ccs; + uint8_t sc_command_ccs; + /* number of XHCI device slots */ + uint8_t sc_noslot; + /* number of ports on root HUB */ + uint8_t sc_noport; + /* root HUB device configuration */ + uint8_t sc_conf; + /* step status stage of all control transfers */ + uint8_t sc_ctlstep; + /* root HUB port event bitmap, max 256 ports */ + uint8_t sc_hub_idata[32]; + + /* size of context */ + uint8_t sc_ctx_is_64_byte; + + /* vendor string for root HUB */ + char sc_vendor[16]; +}; + +#define XHCI_CMD_LOCK(sc) sx_xlock(&(sc)->sc_cmd_sx) +#define XHCI_CMD_UNLOCK(sc) sx_xunlock(&(sc)->sc_cmd_sx) +#define XHCI_CMD_ASSERT_LOCKED(sc) sx_assert(&(sc)->sc_cmd_sx, SA_LOCKED) + +/* prototypes */ + +uint8_t xhci_use_polling(void); +usb_error_t xhci_halt_controller(struct xhci_softc *); +usb_error_t xhci_reset_controller(struct xhci_softc *); +usb_error_t xhci_init(struct xhci_softc *, device_t, uint8_t); +usb_error_t xhci_start_controller(struct xhci_softc *); +void xhci_interrupt(struct xhci_softc *); +void xhci_uninit(struct xhci_softc *); + +#endif /* _XHCI_H_ */ diff --git a/freebsd/sys/dev/usb/controller/xhci_pci.c b/freebsd/sys/dev/usb/controller/xhci_pci.c new file mode 100644 index 00000000..5c56b652 --- /dev/null +++ b/freebsd/sys/dev/usb/controller/xhci_pci.c @@ -0,0 +1,473 @@ +#include + +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2010 Hans Petter Selasky. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef __rtems__ +#include +#endif + +static device_probe_t xhci_pci_probe; +static device_attach_t xhci_pci_attach; +static device_detach_t xhci_pci_detach; +static usb_take_controller_t xhci_pci_take_controller; + +static device_method_t xhci_device_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, xhci_pci_probe), + DEVMETHOD(device_attach, xhci_pci_attach), + DEVMETHOD(device_detach, xhci_pci_detach), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(usb_take_controller, xhci_pci_take_controller), + + DEVMETHOD_END +}; + +static driver_t xhci_driver = { + .name = "xhci", + .methods = xhci_device_methods, + .size = sizeof(struct xhci_softc), +}; + +static devclass_t xhci_devclass; + +DRIVER_MODULE(xhci, pci, xhci_driver, xhci_devclass, NULL, NULL); +MODULE_DEPEND(xhci, usb, 1, 1, 1); + +static const char * +xhci_pci_match(device_t self) +{ + uint32_t device_id = pci_get_devid(self); + + switch (device_id) { + case 0x145c1022: + return ("AMD KERNCZ USB 3.0 controller"); + case 0x43ba1022: + return ("AMD X399 USB 3.0 controller"); + case 0x43b91022: /* X370 */ + case 0x43bb1022: /* B350 */ + return ("AMD 300 Series USB 3.0 controller"); + case 0x78141022: + return ("AMD FCH USB 3.0 controller"); + + case 0x01941033: + return ("NEC uPD720200 USB 3.0 controller"); + case 0x00151912: + return ("NEC uPD720202 USB 3.0 controller"); + + case 0x10001b73: + return ("Fresco Logic FL1000G USB 3.0 controller"); + case 0x11001b73: + return ("Fresco Logic FL1100 USB 3.0 controller"); + + case 0x10421b21: + return ("ASMedia ASM1042 USB 3.0 controller"); + case 0x11421b21: + return ("ASMedia ASM1042A USB 3.0 controller"); + + case 0x0f358086: + return ("Intel BayTrail USB 3.0 controller"); + case 0x19d08086: + return ("Intel Denverton USB 3.0 controller"); + case 0x9c318086: + case 0x1e318086: + return ("Intel Panther Point USB 3.0 controller"); + case 0x22b58086: + return ("Intel Braswell USB 3.0 controller"); + case 0x5aa88086: + return ("Intel Apollo Lake USB 3.0 controller"); + case 0x8c318086: + return ("Intel Lynx Point USB 3.0 controller"); + case 0x8cb18086: + return ("Intel Wildcat Point USB 3.0 controller"); + case 0x8d318086: + return ("Intel Wellsburg USB 3.0 controller"); + case 0x9cb18086: + return ("Broadwell Integrated PCH-LP chipset USB 3.0 controller"); + case 0x9d2f8086: + return ("Intel Sunrise Point-LP USB 3.0 controller"); + case 0xa12f8086: + return ("Intel Sunrise Point USB 3.0 controller"); + case 0xa1af8086: + return ("Intel Lewisburg USB 3.0 controller"); + case 0xa2af8086: + return ("Intel Union Point USB 3.0 controller"); + + case 0xa01b177d: + return ("Cavium ThunderX USB 3.0 controller"); + + default: + break; + } + + if ((pci_get_class(self) == PCIC_SERIALBUS) + && (pci_get_subclass(self) == PCIS_SERIALBUS_USB) + && (pci_get_progif(self) == PCIP_SERIALBUS_USB_XHCI)) { + return ("XHCI (generic) USB 3.0 controller"); + } + return (NULL); /* dunno */ +} + +static int +xhci_pci_probe(device_t self) +{ + const char *desc = xhci_pci_match(self); + + if (desc) { + device_set_desc(self, desc); + return 
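+	/* (Editor's note: BUS_PROBE_DEFAULT leaves room for a more
+	 * specific driver to outbid this generic match.) */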
(BUS_PROBE_DEFAULT); + } else { + return (ENXIO); + } +} + +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) +static int xhci_use_msi = 1; +#else +static int xhci_use_msi = 0; +#endif +TUNABLE_INT("hw.usb.xhci.msi", &xhci_use_msi); +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) +static int xhci_use_msix = 1; +#else +static int xhci_use_msix = 0; +#endif +TUNABLE_INT("hw.usb.xhci.msix", &xhci_use_msix); + +static void +xhci_interrupt_poll(void *_sc) +{ + struct xhci_softc *sc = _sc; + USB_BUS_UNLOCK(&sc->sc_bus); + xhci_interrupt(sc); + USB_BUS_LOCK(&sc->sc_bus); + usb_callout_reset(&sc->sc_callout, 1, (void *)&xhci_interrupt_poll, sc); +} + +static int +xhci_pci_port_route(device_t self, uint32_t set, uint32_t clear) +{ + uint32_t temp; + uint32_t usb3_mask; + uint32_t usb2_mask; + + temp = pci_read_config(self, PCI_XHCI_INTEL_USB3_PSSEN, 4) | + pci_read_config(self, PCI_XHCI_INTEL_XUSB2PR, 4); + + temp |= set; + temp &= ~clear; + + /* Don't set bits which the hardware doesn't support */ + usb3_mask = pci_read_config(self, PCI_XHCI_INTEL_USB3PRM, 4); + usb2_mask = pci_read_config(self, PCI_XHCI_INTEL_USB2PRM, 4); + + pci_write_config(self, PCI_XHCI_INTEL_USB3_PSSEN, temp & usb3_mask, 4); + pci_write_config(self, PCI_XHCI_INTEL_XUSB2PR, temp & usb2_mask, 4); + + device_printf(self, "Port routing mask set to 0x%08x\n", temp); + + return (0); +} + +static int +xhci_pci_attach(device_t self) +{ + struct xhci_softc *sc = device_get_softc(self); + int count, err, msix_table, rid; + uint8_t usemsi = 1; + uint8_t usedma32 = 0; + + rid = PCI_XHCI_CBMEM; + sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, + RF_ACTIVE); + if (!sc->sc_io_res) { + device_printf(self, "Could not map memory\n"); + return (ENOMEM); + } + sc->sc_io_tag = rman_get_bustag(sc->sc_io_res); + sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res); + sc->sc_io_size = rman_get_size(sc->sc_io_res); + + switch (pci_get_devid(self)) { + case 0x01941033: /* NEC uPD720200 USB 3.0 controller */ + case 0x00141912: /* NEC uPD720201 USB 3.0 controller */ + /* Don't use 64-bit DMA on these controllers. */ + usedma32 = 1; + break; + case 0x10001b73: /* FL1000G */ + /* Fresco Logic host doesn't support MSI. */ + usemsi = 0; + break; + case 0x0f358086: /* BayTrail */ + case 0x9c318086: /* Panther Point */ + case 0x1e318086: /* Panther Point */ + case 0x8c318086: /* Lynx Point */ + case 0x8cb18086: /* Wildcat Point */ + case 0x9cb18086: /* Broadwell Mobile Integrated */ + /* + * On Intel chipsets, reroute ports from EHCI to XHCI + * controller and use a different IMOD value. 
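+		 * (Editor's note: the rerouting is performed by
+		 * xhci_pci_port_route() above, which merges the set/clear
+		 * masks into the Intel PSSEN and XUSB2PR registers, limited
+		 * to the bits advertised by USB3PRM/USB2PRM.)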
+		 */
+		sc->sc_port_route = &xhci_pci_port_route;
+		sc->sc_imod_default = XHCI_IMOD_DEFAULT_LP;
+		sc->sc_ctlstep = 1;
+		break;
+	}
+
+	if (xhci_init(sc, self, usedma32)) {
+		device_printf(self, "Could not initialize softc\n");
+		bus_release_resource(self, SYS_RES_MEMORY, PCI_XHCI_CBMEM,
+		    sc->sc_io_res);
+		return (ENXIO);
+	}
+
+	pci_enable_busmaster(self);
+
+	usb_callout_init_mtx(&sc->sc_callout, &sc->sc_bus.bus_mtx, 0);
+
+	rid = 0;
+	if (xhci_use_msix && (msix_table = pci_msix_table_bar(self)) >= 0) {
+		sc->sc_msix_res = bus_alloc_resource_any(self, SYS_RES_MEMORY,
+		    &msix_table, RF_ACTIVE);
+		if (sc->sc_msix_res == NULL) {
+			/* May not be enabled */
+			device_printf(self,
+			    "Unable to map MSI-X table\n");
+		} else {
+			count = 1;
+			if (pci_alloc_msix(self, &count) == 0) {
+				if (bootverbose)
+					device_printf(self, "MSI-X enabled\n");
+				rid = 1;
+			} else {
+				bus_release_resource(self, SYS_RES_MEMORY,
+				    msix_table, sc->sc_msix_res);
+				sc->sc_msix_res = NULL;
+			}
+		}
+	}
+	if (rid == 0 && xhci_use_msi && usemsi) {
+		count = 1;
+		if (pci_alloc_msi(self, &count) == 0) {
+			if (bootverbose)
+				device_printf(self, "MSI enabled\n");
+			rid = 1;
+		}
+	}
+#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H)
+	sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
+	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
+	if (sc->sc_irq_res == NULL) {
+		pci_release_msi(self);
+		device_printf(self, "Could not allocate IRQ\n");
+		/* goto error; FALLTHROUGH - use polling */
+	}
+#endif /* The Phytium BSP supports INTx only; INTA is reserved for NVMe, so the PCI xHCI uses polling. */
+	sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+	if (sc->sc_bus.bdev == NULL) {
+		device_printf(self, "Could not add USB device\n");
+		goto error;
+	}
+	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);
+
+	sprintf(sc->sc_vendor, "0x%04x", pci_get_vendor(self));
+
+#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H)
+	if (sc->sc_irq_res != NULL) {
+		err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
+		    NULL, (driver_intr_t *)xhci_interrupt, sc, &sc->sc_intr_hdl);
+		if (err != 0) {
+			bus_release_resource(self, SYS_RES_IRQ,
+			    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
+			sc->sc_irq_res = NULL;
+			pci_release_msi(self);
+			device_printf(self, "Could not setup IRQ, err=%d\n", err);
+			sc->sc_intr_hdl = NULL;
+		}
+	}
+#endif
+
+	if (sc->sc_irq_res == NULL || sc->sc_intr_hdl == NULL) {
+#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H)
+		if (xhci_use_polling() != 0) {
+#else
+		if (1) { /* The platform (FDT) xHCI uses its interrupt; the PCIe xHCI always polls. */
+#endif
+			device_printf(self, "Interrupt polling at %dHz\n", hz);
+			USB_BUS_LOCK(&sc->sc_bus);
+			xhci_interrupt_poll(sc);
+			USB_BUS_UNLOCK(&sc->sc_bus);
+		} else
+			goto error;
+	}
+
+	xhci_pci_take_controller(self);
+
+	err = xhci_halt_controller(sc);
+
+	if (err == 0)
+		err = xhci_start_controller(sc);
+
+	if (err == 0)
+		err = device_probe_and_attach(sc->sc_bus.bdev);
+
+	if (err) {
+		device_printf(self, "XHCI halt/start/probe failed err=%d\n", err);
+		goto error;
+	}
+	return (0);
+
+error:
+	xhci_pci_detach(self);
+	return (ENXIO);
+}
+
+static int
+xhci_pci_detach(device_t self)
+{
+	struct xhci_softc *sc = device_get_softc(self);
+
+	/* during module unload there are lots of children leftover */
+	device_delete_children(self);
+
+	usb_callout_drain(&sc->sc_callout);
+	xhci_halt_controller(sc);
+	xhci_reset_controller(sc);
+
+	pci_disable_busmaster(self);
+
+	if (sc->sc_irq_res && sc->sc_intr_hdl) {
+		bus_teardown_intr(self, sc->sc_irq_res, sc->sc_intr_hdl);
+		sc->sc_intr_hdl = NULL;
+	}
+	if
(sc->sc_irq_res) { + bus_release_resource(self, SYS_RES_IRQ, + rman_get_rid(sc->sc_irq_res), sc->sc_irq_res); + sc->sc_irq_res = NULL; + pci_release_msi(self); + } + if (sc->sc_io_res) { + bus_release_resource(self, SYS_RES_MEMORY, PCI_XHCI_CBMEM, + sc->sc_io_res); + sc->sc_io_res = NULL; + } + if (sc->sc_msix_res) { + bus_release_resource(self, SYS_RES_MEMORY, + rman_get_rid(sc->sc_msix_res), sc->sc_msix_res); + sc->sc_msix_res = NULL; + } + + xhci_uninit(sc); + + return (0); +} + +static int +xhci_pci_take_controller(device_t self) +{ + struct xhci_softc *sc = device_get_softc(self); + uint32_t cparams; + uint32_t eecp; + uint32_t eec; + uint16_t to; + uint8_t bios_sem; + + cparams = XREAD4(sc, capa, XHCI_HCSPARAMS0); + + eec = -1; + + /* Synchronise with the BIOS if it owns the controller. */ + for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); + eecp += XHCI_XECP_NEXT(eec) << 2) { + eec = XREAD4(sc, capa, eecp); + + if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) + continue; + bios_sem = XREAD1(sc, capa, eecp + + XHCI_XECP_BIOS_SEM); + if (bios_sem == 0) + continue; + device_printf(sc->sc_bus.bdev, "waiting for BIOS " + "to give up control\n"); + XWRITE1(sc, capa, eecp + + XHCI_XECP_OS_SEM, 1); + to = 500; + while (1) { + bios_sem = XREAD1(sc, capa, eecp + + XHCI_XECP_BIOS_SEM); + if (bios_sem == 0) + break; + + if (--to == 0) { + device_printf(sc->sc_bus.bdev, + "timed out waiting for BIOS\n"); + break; + } + usb_pause_mtx(NULL, hz / 100); /* wait 10ms */ + } + } + return (0); +} diff --git a/freebsd/sys/dev/usb/controller/xhci_plat.c b/freebsd/sys/dev/usb/controller/xhci_plat.c new file mode 100644 index 00000000..ae8a2825 --- /dev/null +++ b/freebsd/sys/dev/usb/controller/xhci_plat.c @@ -0,0 +1,238 @@ +#include + +/*- + * Copyright (c) 2015 Semihalf. + * Copyright (c) 2015 Stormshield. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */
+
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#ifdef __rtems__
+#include
+#include
+#endif /* __rtems__ */
+
+#define	XHCI_HC_DEVSTR	"Phytium Integrated USB 3.0 controller"
+#define	XHCI_HC_VENDOR	"Phytium"
+
+#define	IS_DMA_32B	0
+
+static device_attach_t xhci_attach;
+static device_detach_t xhci_detach;
+
+static struct ofw_compat_data compat_data[] = {
+	{"marvell,armada-380-xhci", true},
+	{"marvell,armada3700-xhci", true},
+	{"marvell,armada-8k-xhci", true},
+	{"phytium,xhci", true},
+	{NULL, false}
+};
+
+static int
+xhci_probe(device_t dev)
+{
+
+	if (!ofw_bus_status_okay(dev))
+		return (ENXIO);
+
+	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
+		return (ENXIO);
+
+	device_set_desc(dev, XHCI_HC_DEVSTR);
+
+	return (BUS_PROBE_DEFAULT);
+}
+
+static int
+xhci_attach(device_t dev)
+{
+	struct xhci_softc *sc = device_get_softc(dev);
+	int err = 0, rid = 0;
+
+	sc->sc_bus.parent = dev;
+	sc->sc_bus.devices = sc->sc_devices;
+	sc->sc_bus.devices_max = XHCI_MAX_DEVICES;
+
+	sc->sc_io_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+	    RF_ACTIVE);
+	if (sc->sc_io_res == NULL) {
+		device_printf(dev, "Failed to map memory\n");
+		xhci_detach(dev);
+		return (ENXIO);
+	}
+
+	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
+	sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
+	sc->sc_io_size = rman_get_size(sc->sc_io_res);
+
+	device_printf(dev, "xhci@0x%lx-0x%lx attaching...\n",
+	    (u_long)sc->sc_io_hdl, (u_long)(sc->sc_io_hdl + sc->sc_io_size));
+
+	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+	    RF_SHAREABLE | RF_ACTIVE);
+	if (sc->sc_irq_res == NULL) {
+		device_printf(dev, "Failed to allocate IRQ\n");
+		xhci_detach(dev);
+		return (ENXIO);
+	}
+
+	sc->sc_bus.bdev = device_add_child(dev, "usbus", -1);
+	if (sc->sc_bus.bdev == NULL) {
+		device_printf(dev, "Failed to add USB device\n");
+		xhci_detach(dev);
+		return (ENXIO);
+	}
+
+	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);
+
+	sprintf(sc->sc_vendor, XHCI_HC_VENDOR);
+	device_set_desc(sc->sc_bus.bdev, XHCI_HC_DEVSTR);
+
+	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
+	    NULL, (driver_intr_t *)xhci_interrupt, sc, &sc->sc_intr_hdl);
+	if (err != 0) {
+		device_printf(dev, "Failed to set up IRQ, err=%d\n", err);
+		sc->sc_intr_hdl = NULL;
+		xhci_detach(dev);
+		return (err);
+	}
+
+	err = xhci_init(sc, dev, IS_DMA_32B);
+	if (err != 0) {
+		device_printf(dev, "Failed to init XHCI, err=%d\n", err);
+		xhci_detach(dev);
+		return (ENXIO);
+	}
+
+	err = xhci_start_controller(sc);
+	if (err != 0) {
+		device_printf(dev, "Failed to start XHCI controller, err=%d\n", err);
+		xhci_detach(dev);
+		return (ENXIO);
+	}
+
+	err = device_probe_and_attach(sc->sc_bus.bdev);
+	if (err != 0) {
+		device_printf(dev, "Failed to initialize USB, err=%d\n", err);
+		xhci_detach(dev);
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+static int
+xhci_detach(device_t dev)
+{
+	struct xhci_softc *sc = device_get_softc(dev);
+	int err;
+
+	/* during module unload there are lots of children leftover */
+	device_delete_children(dev);
+
+	if (sc->sc_irq_res != NULL && sc->sc_intr_hdl != NULL) {
+		err = bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intr_hdl);
+		if (err != 0)
+			device_printf(dev, "Could not tear down 
irq, %d\n", + err); + sc->sc_intr_hdl = NULL; + } + + if (sc->sc_irq_res != NULL) { + bus_release_resource(dev, SYS_RES_IRQ, + rman_get_rid(sc->sc_irq_res), sc->sc_irq_res); + sc->sc_irq_res = NULL; + } + + if (sc->sc_io_res != NULL) { + bus_release_resource(dev, SYS_RES_MEMORY, + rman_get_rid(sc->sc_io_res), sc->sc_io_res); + sc->sc_io_res = NULL; + } + + xhci_uninit(sc); + + return (0); +} + +static device_method_t xhci_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, xhci_probe), + DEVMETHOD(device_attach, xhci_attach), + DEVMETHOD(device_detach, xhci_detach), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + + DEVMETHOD_END +}; + +static driver_t xhci_driver = { + "xhci", + xhci_methods, + sizeof(struct xhci_softc), +}; + +static devclass_t xhci_devclass; + +DRIVER_MODULE(xhci, simplebus, xhci_driver, xhci_devclass, 0, 0); +MODULE_DEPEND(xhci, usb, 1, 1, 1); diff --git a/freebsd/sys/dev/usb/controller/xhcireg.h b/freebsd/sys/dev/usb/controller/xhcireg.h index b49bc09a..0f451be4 100644 --- a/freebsd/sys/dev/usb/controller/xhcireg.h +++ b/freebsd/sys/dev/usb/controller/xhcireg.h @@ -207,20 +207,38 @@ #define XREAD1(sc, what, a) \ bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, \ (a) + (sc)->sc_##what##_off) +#ifndef __rtems__ #define XREAD2(sc, what, a) \ bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, \ (a) + (sc)->sc_##what##_off) #define XREAD4(sc, what, a) \ bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, \ (a) + (sc)->sc_##what##_off) +#else +#define XREAD2(sc, what, a) \ + le16toh(bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, \ + (a) + (sc)->sc_##what##_off)) +#define XREAD4(sc, what, a) \ + le32toh(bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, \ + (a) + (sc)->sc_##what##_off)) +#endif #define XWRITE1(sc, what, a, x) \ bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, \ (a) + (sc)->sc_##what##_off, (x)) +#ifndef __rtems__ #define XWRITE2(sc, what, a, x) \ bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, \ (a) + (sc)->sc_##what##_off, (x)) #define XWRITE4(sc, what, a, x) \ bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, \ (a) + (sc)->sc_##what##_off, (x)) +#else +#define XWRITE2(sc, what, a, x) \ + bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, \ + (a) + (sc)->sc_##what##_off, htole16(x)) +#define XWRITE4(sc, what, a, x) \ + bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, \ + (a) + (sc)->sc_##what##_off, htole32(x)) +#endif #endif /* _XHCIREG_H_ */ diff --git a/libbsd.py b/libbsd.py index db390df0..9c7890dd 100644 --- a/libbsd.py +++ b/libbsd.py @@ -718,6 +718,42 @@ class mmc_ti(builder.Module): mm.generator['source']() ) +# +# MMC (Phytium SDIF) +# +class mmc_phytium_sdif(builder.Module): + def __init__(self, manager): + super(mmc_phytium_sdif, self).__init__(manager, type(self).__name__) + + def generate(self): + mm = self.manager + print("add phytium sdif source code") + + self.addKernelSpaceSourceFiles( + [ + 'sys/dev/phytium/phytium_sdif.c', + ], + mm.generator['source']() + ) + +# +# MMC (Phytium SDMMC) +# +class mmc_phytium_sdmmc(builder.Module): + def __init__(self, manager): + super(mmc_phytium_sdmmc, self).__init__(manager, type(self).__name__) + + def generate(self): + mm = self.manager + print("add phytium sdmmc source code") + + self.addKernelSpaceSourceFiles( + [ + 'sys/dev/phytium/phytium_sdmmc.c', + ], + mm.generator['source']() + ) + # # Input # @@ -1217,6 +1253,31 @@ class dev_usb_controller_bbb(builder.Module): 
mm.generator['source']() ) +# +# XHCI USB +# +class dev_usb_controller_xhci(builder.Module): + def __init__(self, manager): + super(dev_usb_controller_xhci, self).__init__(manager, type(self).__name__) + + def generate(self): + mm = self.manager + self.addDependency('dev_usb') + self.addKernelSpaceHeaderFiles( + [ + 'sys/dev/usb/controller/xhci.h', + 'sys/dev/usb/controller/xhcireg.h', + ] + ) + self.addKernelSpaceSourceFiles( + [ + 'sys/dev/usb/controller/xhci.c', + 'sys/dev/usb/controller/xhci_plat.c', + 'sys/dev/usb/controller/xhci_pci.c', + ], + mm.generator['source']() + ) + # # USB WLAN # @@ -1513,7 +1574,6 @@ class dev_net(builder.Module): 'sys/arm/ti/cpsw/if_cpswreg.h', 'sys/arm/ti/cpsw/if_cpswvar.h', 'sys/arm/xilinx/zy7_slcr.h', - 'sys/dev/cadence/if_cgem_hw.h', 'sys/dev/dwc/if_dwc.h', 'sys/dev/dwc/if_dwcvar.h', 'sys/dev/ffec/if_ffecreg.h', @@ -1562,7 +1622,6 @@ class dev_net(builder.Module): 'sys/dev/mii/ukphy_subr.c', 'sys/dev/tsec/if_tsec.c', 'sys/dev/tsec/if_tsec_fdt.c', - 'sys/dev/cadence/if_cgem.c', 'sys/dev/dwc/if_dwc.c', 'sys/arm/xilinx/zy7_slcr.c', ], @@ -1575,6 +1634,51 @@ class dev_net(builder.Module): mm.generator['source']() ) +# +# Networking Devices (CGEM32) +# +class dev_cgem32(builder.Module): + + def __init__(self, manager): + super(dev_cgem32, self).__init__(manager, type(self).__name__) + + def generate(self): + mm = self.manager + self.addKernelSpaceHeaderFiles( + [ + 'sys/dev/cadence/if_cgem_hw.h', + ] + ) + self.addKernelSpaceSourceFiles( + [ + 'sys/dev/cadence/if_cgem.c', + ], + mm.generator['source']() + ) + +# +# Networking Devices (CGEM64) +# +class dev_cgem64(builder.Module): + + def __init__(self, manager): + super(dev_cgem64, self).__init__(manager, type(self).__name__) + + def generate(self): + mm = self.manager + self.addKernelSpaceHeaderFiles( + [ + 'sys/dev/cadence64/if_cgem_hw.h', + ] + ) + self.addKernelSpaceSourceFiles( + [ + 'sys/dev/cadence64/if_cgem.c', + ], + mm.generator['source']() + ) + + # # Network Interface Controllers (NIC) # @@ -2967,6 +3071,30 @@ class pci(builder.Module): ) +# +# PCI Host +# +class pci_host(builder.Module): + + def __init__(self, manager): + super(pci_host, self).__init__(manager, type(self).__name__) + + def generate(self): + mm = self.manager + self.addKernelSpaceSourceFiles( + [ + 'sys/dev/pci/pci_host_generic.c', + 'sys/dev/pci/pci_host_generic_fdt.c', + ], + mm.generator['source']() + ) + self.addKernelSpaceHeaderFiles( + [ + 'sys/dev/pci/pci_host_generic.h', + 'sys/dev/pci/pci_host_generic_fdt.h', + ] + ) + # # User space # @@ -5487,6 +5615,8 @@ def load(mm): mm.addModule(tty(mm)) mm.addModule(mmc(mm)) mm.addModule(mmc_ti(mm)) + mm.addModule(mmc_phytium_sdif(mm)) + mm.addModule(mmc_phytium_sdmmc(mm)) mm.addModule(dev_input(mm)) mm.addModule(evdev(mm)) mm.addModule(iic(mm)) @@ -5507,8 +5637,11 @@ def load(mm): mm.addModule(cam(mm)) mm.addModule(dev_usb_storage(mm)) mm.addModule(dev_usb_controller_bbb(mm)) + mm.addModule(dev_usb_controller_xhci(mm)) mm.addModule(net(mm)) + mm.addModule(dev_cgem32(mm)) + mm.addModule(dev_cgem64(mm)) mm.addModule(netinet(mm)) mm.addModule(netinet6(mm)) mm.addModule(netipsec(mm)) @@ -5524,6 +5657,7 @@ def load(mm): # Add PCI mm.addModule(pci(mm)) + mm.addModule(pci_host(mm)) # Add NIC devices mm.addModule(dev_nic(mm)) diff --git a/rtemsbsd/include/bsp/nexus-devices.h b/rtemsbsd/include/bsp/nexus-devices.h index 37008cc6..0c2b5a80 100644 --- a/rtemsbsd/include/bsp/nexus-devices.h +++ b/rtemsbsd/include/bsp/nexus-devices.h @@ -122,6 +122,59 @@ RTEMS_BSD_DRIVER_E1000PHY; 
RTEMS_BSD_DRIVER_XILINX_VERSAL_GEM0(VERSAL_IRQ_ETHERNET_0); RTEMS_BSD_DRIVER_E1000PHY; +#elif defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) + +#include + +RTEMS_BSD_DEFINE_NEXUS_DEVICE(ofwbus, 0, 0, NULL); +SYSINIT_DRIVER_REFERENCE(simplebus, ofwbus); + +#if defined(RTEMS_BSD_MODULE_MMC_PHYTIUM_SDIF) && (RTEMS_BSD_MODULE_MMC_PHYTIUM_SDIF) +SYSINIT_DRIVER_REFERENCE(sdif_phytium, simplebus); +SYSINIT_DRIVER_REFERENCE(mmcsd, mmc); +#endif + +#if defined(RTEMS_BSD_MODULE_DEV_CGEM64) && (RTEMS_BSD_MODULE_DEV_CGEM64) +#if !defined(RTEMS_BSD_DRIVER_PHYTIUM_CGEM) + #define RTEMS_BSD_DRIVER_PHYTIUM_CGEM(_num, _base, _irq) \ + static const rtems_bsd_device_resource cgem ## _num ## _res[] = { \ + { \ + .type = RTEMS_BSD_RES_MEMORY, \ + .start_request = 0, \ + .start_actual = (_base) \ + }, { \ + .type = RTEMS_BSD_RES_IRQ, \ + .start_request = 0, \ + .start_actual = (_irq) \ + } \ + }; \ + RTEMS_BSD_DEFINE_NEXUS_DEVICE(cgem, _num, \ + RTEMS_ARRAY_SIZE(cgem ## _num ## _res), \ + &cgem ## _num ## _res[0]) +#endif + +RTEMS_BSD_DRIVER_PHYTIUM_CGEM(0, BSP_PHYTIUM_XMAC0_BASE, BSP_PHYTIUM_XMAC0_IRQ); +#if defined(PHYTIUM_BSP_TYPE_PHYTIUM_PI) && (PHYTIUM_BSP_TYPE_PHYTIUM_PI) +RTEMS_BSD_DRIVER_PHYTIUM_CGEM(1, BSP_PHYTIUM_XMAC1_BASE, BSP_PHYTIUM_XMAC1_IRQ); +#endif +RTEMS_BSD_DRIVER_REPHY; +#endif + +#if defined(RTEMS_BSD_MODULE_PCI_HOST) && (RTEMS_BSD_MODULE_PCI_HOST) +SYSINIT_DRIVER_REFERENCE(pcib, simplebus); +SYSINIT_DRIVER_REFERENCE(pci, pcib); +SYSINIT_DRIVER_REFERENCE(nvme, pci); +SYSINIT_MODULE_REFERENCE(nvd); +#endif + +#if defined(RTEMS_BSD_MODULE_DEV_USB_CONTROLLER_XHCI) && (RTEMS_BSD_MODULE_DEV_USB_CONTROLLER_XHCI) +#if defined(RTEMS_BSD_MODULE_PCI_HOST) && (RTEMS_BSD_MODULE_PCI_HOST) +SYSINIT_DRIVER_REFERENCE(xhci, pci); +#endif +SYSINIT_DRIVER_REFERENCE(xhci, simplebus); +SYSINIT_DRIVER_REFERENCE(usbus, xhci); +#endif + #elif defined(LIBBSP_ARM_ATSAM_BSP_H) RTEMS_BSD_DRIVER_USB; diff --git a/rtemsbsd/sys/dev/nvd/nvd.c b/rtemsbsd/sys/dev/nvd/nvd.c index 4a8e8ac1..628d5b3e 100644 --- a/rtemsbsd/sys/dev/nvd/nvd.c +++ b/rtemsbsd/sys/dev/nvd/nvd.c @@ -44,6 +44,7 @@ #include #include #include +#include #define NVD_STR "nvd" @@ -96,7 +97,11 @@ moduledata_t nvd_mod = { 0 }; +#ifdef __rtems__ +DECLARE_MODULE(nvd, nvd_mod, SI_SUB_KPROF, SI_ORDER_ANY); +#else DECLARE_MODULE(nvd, nvd_mod, SI_SUB_DRIVERS, SI_ORDER_ANY); +#endif MODULE_VERSION(nvd, 1); MODULE_DEPEND(nvd, nvme, 1, 1, 1); @@ -261,6 +266,20 @@ nvd_ioctl(rtems_disk_device *dd, uint32_t req, void *arg) return (rtems_blkdev_ioctl(dd, req, arg)); } +static rtems_status_code +rtems_bsd_nvd_attach_worker(rtems_media_state state, const char *src, char **dest, void *arg) +{ + struct nvd_disk *ndisk = arg; + char disk[16]; + + snprintf(disk, sizeof(disk), "/dev/nvd%i", ndisk->unit); + if (state == RTEMS_MEDIA_STATE_READY) { + *dest = strdup(disk, M_RTEMS_HEAP); + } + + return RTEMS_SUCCESSFUL; +} + static void * nvd_new_disk(struct nvme_namespace *ns, void *arg) { @@ -314,12 +333,25 @@ nvd_new_disk(struct nvme_namespace *ns, void *arg) panic("nvd_new_disk"); } + sc = rtems_media_server_disk_attach(path, rtems_bsd_nvd_attach_worker, ndisk); + if (sc != RTEMS_SUCCESSFUL) { + panic("nvd_attach_disk"); + } + return (ndisk); } static void nvd_gone(struct nvd_disk *ndisk) { + rtems_status_code sc; + char path[16]; + + snprintf(path, sizeof(path), "/dev/nvd%i", ndisk->unit); + sc = rtems_media_server_disk_detach(path); + if (sc != RTEMS_SUCCESSFUL) { + panic("nvd_deattach_disk"); + } panic("nvd_gone"); } -- Gitee From 59eb5a3b8d7f5e42c2f77bbc485ff877064f933f Mon Sep 17 
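The RTEMS_BSD_DRIVER_PHYTIUM_CGEM() macro introduced in the nexus-devices.h hunk above follows the usual nexus pattern: one memory resource and one interrupt per instance, with the BSP supplying the actual base address and IRQ number. A usage sketch for an additional instance, with deliberately made-up placeholder values (the real ones come from the BSP's headers):

/* Hypothetical third GEM instance; base and IRQ are placeholders. */
#define MY_XMAC2_BASE	0x32012000UL
#define MY_XMAC2_IRQ	62

RTEMS_BSD_DRIVER_PHYTIUM_CGEM(2, MY_XMAC2_BASE, MY_XMAC2_IRQ);

Each expansion creates a cgemN nexus entry whose start_actual fields carry the physical base and interrupt vector, so the cgem driver's probe path finds its registers without an FDT lookup.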
00:00:00 2001 From: =?UTF-8?q?=E6=9C=B1=E8=80=BF=E5=AE=87?= Date: Tue, 10 Sep 2024 01:41:40 +0000 Subject: [PATCH 2/5] =?UTF-8?q?=E6=B6=88=E9=99=A4=E4=B8=8D=E6=8F=92=20SD?= =?UTF-8?q?=20=E5=8D=A1=E6=97=B6=E5=A4=B1=E8=B4=A5=E7=9A=84=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- freebsd/sys/dev/phytium/phytium_sdif.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/freebsd/sys/dev/phytium/phytium_sdif.c b/freebsd/sys/dev/phytium/phytium_sdif.c index 2b47af0b..23f1fdad 100644 --- a/freebsd/sys/dev/phytium/phytium_sdif.c +++ b/freebsd/sys/dev/phytium/phytium_sdif.c @@ -440,6 +440,7 @@ static int phytium_sdif_attach(device_t dev) if (err != 0) { device_printf(dev, "Failed to init Sdif host\n"); phytium_sdif_detach(dev); + return (err); } /* install IRQ handle */ @@ -933,4 +934,4 @@ static driver_t phytium_sdif_driver = { }; DRIVER_MODULE(sdif_phytium, simplebus, phytium_sdif_driver, phytium_sdif_devclass, NULL, NULL); -MMC_DECLARE_BRIDGE(sdif_phytium); \ No newline at end of file +MMC_DECLARE_BRIDGE(sdif_phytium); -- Gitee From 335eaa272645c01ab061e2852a6cdc753272269c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=99=93=E4=B8=9C?= Date: Tue, 10 Sep 2024 08:22:55 +0000 Subject: [PATCH 3/5] =?UTF-8?q?=E9=80=82=E9=85=8Dgmac?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- freebsd/sys/dev/dwc/if_dwc.c | 12 ++++++++++-- rtemsbsd/include/bsp/nexus-devices.h | 7 ++++++- rtemsbsd/local/if_dwc_if.c | 6 +++++- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/freebsd/sys/dev/dwc/if_dwc.c b/freebsd/sys/dev/dwc/if_dwc.c index 895fdfe5..57bcc162 100644 --- a/freebsd/sys/dev/dwc/if_dwc.c +++ b/freebsd/sys/dev/dwc/if_dwc.c @@ -80,7 +80,7 @@ __FBSDID("$FreeBSD$"); #include #include #include - +#include #define READ4(_sc, _reg) \ bus_read_4((_sc)->res[0], _reg) #define WRITE4(_sc, _reg, _val) \ @@ -149,8 +149,13 @@ struct dwc_hwdesc */ #define DWC_DESC_RING_ALIGN 2048 +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) #define DWC_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \ CSUM_TCP_IPV6 | CSUM_UDP_IPV6) +#else +#define DWC_CKSUM_ASSIST (CSUM_TCP | CSUM_UDP | \ + CSUM_TCP_IPV6 | CSUM_UDP_IPV6) +#endif static struct resource_spec dwc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, @@ -220,8 +225,11 @@ dwc_setup_txdesc(struct dwc_softc *sc, int csum_flags, int idx, if (i == nsegs - 1) flags |= DDESC_CNTL_TXLAST; - + #if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) sc->txdesc_ring[idx].tdes0 = 0; + #else + sc->txdesc_ring[idx].tdes0 = DDESC_TDES0_OWN; + #endif sc->txdesc_ring[idx].tdes1 = flags | len; } else { flags = DDESC_TDES0_TXCHAIN | DDESC_TDES0_TXINT | diff --git a/rtemsbsd/include/bsp/nexus-devices.h b/rtemsbsd/include/bsp/nexus-devices.h index 0c2b5a80..cc329b51 100644 --- a/rtemsbsd/include/bsp/nexus-devices.h +++ b/rtemsbsd/include/bsp/nexus-devices.h @@ -154,12 +154,17 @@ SYSINIT_DRIVER_REFERENCE(mmcsd, mmc); #endif RTEMS_BSD_DRIVER_PHYTIUM_CGEM(0, BSP_PHYTIUM_XMAC0_BASE, BSP_PHYTIUM_XMAC0_IRQ); -#if defined(PHYTIUM_BSP_TYPE_PHYTIUM_PI) && (PHYTIUM_BSP_TYPE_PHYTIUM_PI) +#if defined(PHYTIUM_BSP_TYPE_PHYTIUM_PI) RTEMS_BSD_DRIVER_PHYTIUM_CGEM(1, BSP_PHYTIUM_XMAC1_BASE, BSP_PHYTIUM_XMAC1_IRQ); #endif RTEMS_BSD_DRIVER_REPHY; #endif +#if defined(PHYTIUM_BSP_TYPE_D2000_TEST) || defined(PHYTIUM_BSP_TYPE_FT2004_DSK) +RTEMS_BSD_DRIVER_DW_ETH; +RTEMS_BSD_DRIVER_REPHY; +#endif + #if defined(RTEMS_BSD_MODULE_PCI_HOST) && (RTEMS_BSD_MODULE_PCI_HOST) 
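The tdes0 change in patch 3 touches the descriptor ownership handshake: in the alternate (non-extended) DesignWare descriptor layout that DWC_GMAC_ALT_DESC selects, the OWN bit lives in tdes0, and setting it hands the descriptor to the DMA engine. A simplified sketch of that handover, with illustrative names rather than the driver's actual ones:

#include <stdint.h>

#define TX_OWN	(1u << 31)	/* descriptor owned by the DMA engine */

struct alt_tx_desc {
	volatile uint32_t tdes0;	/* OWN plus status bits */
	uint32_t tdes1;			/* control flags and buffer length */
	uint32_t buf;			/* DMA address of the packet data */
	uint32_t next;			/* next descriptor in the chain */
};

static void
post_tx(struct alt_tx_desc *d, uint32_t buf, uint32_t flags_len)
{
	d->buf = buf;
	d->tdes1 = flags_len;
	/* Make all fields visible before the hardware can claim them. */
	__atomic_thread_fence(__ATOMIC_RELEASE);
	d->tdes0 = TX_OWN;
}

Once OWN is set, software must not touch the descriptor again until the engine clears the bit on completion.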
SYSINIT_DRIVER_REFERENCE(pcib, simplebus); SYSINIT_DRIVER_REFERENCE(pci, pcib); diff --git a/rtemsbsd/local/if_dwc_if.c b/rtemsbsd/local/if_dwc_if.c index 67b3ea5c..fb9acf6d 100644 --- a/rtemsbsd/local/if_dwc_if.c +++ b/rtemsbsd/local/if_dwc_if.c @@ -19,7 +19,7 @@ #include #include - +#include static int if_dwc_default_init(device_t dev) { @@ -29,7 +29,11 @@ if_dwc_default_init(device_t dev) static int if_dwc_default_mac_type(device_t dev) { +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) return (DWC_GMAC); +#else + return (DWC_GMAC_ALT_DESC); +#endif } static int -- Gitee From f5ca4c75f008487ac6b5c12dc00c6721c684eff8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=99=93=E4=B8=9C?= Date: Tue, 24 Sep 2024 05:51:33 +0000 Subject: [PATCH 4/5] adapt libbsd testsuite --- freebsd/sys/dev/dwc/if_dwc.c | 9 ++++----- rtemsbsd/ftpd/ftpd-service.c | 2 +- rtemsbsd/rtems/rtems-bsd-rc-conf.c | 1 + rtemsbsd/rtems/rtems-routes.c | 2 +- testsuite/rcconf02/test_main.c | 3 +++ 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/freebsd/sys/dev/dwc/if_dwc.c b/freebsd/sys/dev/dwc/if_dwc.c index 57bcc162..25a2fde6 100644 --- a/freebsd/sys/dev/dwc/if_dwc.c +++ b/freebsd/sys/dev/dwc/if_dwc.c @@ -149,13 +149,8 @@ struct dwc_hwdesc */ #define DWC_DESC_RING_ALIGN 2048 -#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) #define DWC_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \ CSUM_TCP_IPV6 | CSUM_UDP_IPV6) -#else -#define DWC_CKSUM_ASSIST (CSUM_TCP | CSUM_UDP | \ - CSUM_TCP_IPV6 | CSUM_UDP_IPV6) -#endif static struct resource_spec dwc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, @@ -1408,7 +1403,11 @@ dwc_attach(device_t dev) ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; ifp->if_capenable = ifp->if_capabilities; +#if !defined(LIBBSP_AARCH64_PHYTIUM_BSP_H) ifp->if_hwassist = DWC_CKSUM_ASSIST; +#else + ifp->if_hwassist &= (~DWC_CKSUM_ASSIST); +#endif ifp->if_start = dwc_txstart; ifp->if_ioctl = dwc_ioctl; ifp->if_init = dwc_init; diff --git a/rtemsbsd/ftpd/ftpd-service.c b/rtemsbsd/ftpd/ftpd-service.c index e2ee73d0..f29db69f 100644 --- a/rtemsbsd/ftpd/ftpd-service.c +++ b/rtemsbsd/ftpd/ftpd-service.c @@ -45,7 +45,7 @@ #include #include #include - +#include #define __need_getopt_newlib #include diff --git a/rtemsbsd/rtems/rtems-bsd-rc-conf.c b/rtemsbsd/rtems/rtems-bsd-rc-conf.c index 88d98c3e..2dc3e408 100644 --- a/rtemsbsd/rtems/rtems-bsd-rc-conf.c +++ b/rtemsbsd/rtems/rtems-bsd-rc-conf.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include diff --git a/rtemsbsd/rtems/rtems-routes.c b/rtemsbsd/rtems/rtems-routes.c index 0b5250f0..42b54d3c 100644 --- a/rtemsbsd/rtems/rtems-routes.c +++ b/rtemsbsd/rtems/rtems-routes.c @@ -30,7 +30,7 @@ #include #include - +#include #include #include #include diff --git a/testsuite/rcconf02/test_main.c b/testsuite/rcconf02/test_main.c index c40392de..46eb00d8 100644 --- a/testsuite/rcconf02/test_main.c +++ b/testsuite/rcconf02/test_main.c @@ -84,6 +84,7 @@ "ifconfig_" # iface "_alias1=\"inet 10.1.1.111 netmask 0xffffffff\"\n" #define RC_CONF_IFACES_IPV4 \ + IFACE_IPV4(dwc0) \ IFACE_IPV4(dmc0) \ IFACE_IPV4(sm0) \ IFACE_IPV4(cgem0) \ @@ -92,6 +93,7 @@ IFACE_IPV4(re0) #define RC_CONF_IFACES_ALIAS \ + IFACE_ALIAS(dwc0) \ IFACE_ALIAS(dmc0) \ IFACE_ALIAS(sm0) \ IFACE_ALIAS(cgem0) \ @@ -109,6 +111,7 @@ "ifconfig_" # iface "_102=\"DHCP\"\n" #define RC_CONF_VLANS \ + IFACE_VLAN(dwc0) \ IFACE_VLAN(dmc0) \ IFACE_VLAN(sm0) \ IFACE_VLAN(cgem0) \ -- Gitee From c4f458de4e6e376254e98337ddd7cc1c6ebced94 Mon Sep 17 00:00:00 2001 
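The rcconf02 additions in patch 4 rely on the test's preprocessor trick for generating rc.conf text: the iface argument is stringized with # and spliced into configuration lines at compile time via string-literal concatenation. A self-contained illustration using the IFACE_ALIAS() body visible in the hunk above:

#include <stdio.h>

#define IFACE_ALIAS(iface) \
  "ifconfig_" # iface "_alias1=\"inet 10.1.1.111 netmask 0xffffffff\"\n"

int
main(void)
{
	/* Prints: ifconfig_dwc0_alias1="inet 10.1.1.111 netmask 0xffffffff" */
	fputs(IFACE_ALIAS(dwc0), stdout);
	return (0);
}

Adding IFACE_IPV4(dwc0), IFACE_ALIAS(dwc0), and IFACE_VLAN(dwc0) therefore gives the new DesignWare interface the same rc.conf coverage the other NICs already have.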
From: =?UTF-8?q?=E7=8E=8B=E6=99=93=E4=B8=9C?= Date: Fri, 27 Sep 2024 06:13:39 +0000 Subject: [PATCH 5/5] Refine symbol expansion and sdif driver --- freebsd/sys/dev/phytium/phytium_sdif.c | 6 +++--- rtemsbsd/rtems/rtems-routes.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/freebsd/sys/dev/phytium/phytium_sdif.c b/freebsd/sys/dev/phytium/phytium_sdif.c index 23f1fdad..40952827 100644 --- a/freebsd/sys/dev/phytium/phytium_sdif.c +++ b/freebsd/sys/dev/phytium/phytium_sdif.c @@ -824,7 +824,7 @@ static int phytium_sdif_request(device_t brdev, device_t reqdev, struct mmc_requ if (FSDIF_SUCCESS != FSdifPollWaitDMAEnd(&sc->hc, cmd_data)) { device_printf(brdev, "Failed to wait DMA transfer timeout\n"); err = (ENXIO); - goto err_exit; + sc->err_occur = 1; } #endif @@ -868,10 +868,10 @@ static int phytium_sdif_request(device_t brdev, device_t reqdev, struct mmc_requ goto err_exit; } #endif - - (*req->done)(req); } + (*req->done)(req); + err_exit: PHYTIUM_MMC_UNLOCK(sc); return err; diff --git a/rtemsbsd/rtems/rtems-routes.c b/rtemsbsd/rtems/rtems-routes.c index 42b54d3c..bab54b34 100644 --- a/rtemsbsd/rtems/rtems-routes.c +++ b/rtemsbsd/rtems/rtems-routes.c @@ -27,7 +27,7 @@ * Useful functions to access the routing tables. Based on unpv13e from * Stevens. */ - +#include #include #include #include -- Gitee
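The phytium_sdif_request() rework in patch 5 fixes a missed completion callback: previously a DMA-wait failure jumped straight to err_exit and (*req->done)(req) never ran, leaving the MMC layer blocked on a request that would never complete. The patch records the failure and still signals completion. A minimal sketch of the invariant, with simplified, hypothetical types that are not the driver's own:

#include <errno.h>

struct mmc_request;			/* opaque for this sketch */
typedef void (*mmc_done_fn)(struct mmc_request *);

static int
finish_request(struct mmc_request *req, mmc_done_fn done, int transfer_err)
{
	/*
	 * The transfer itself happened earlier; transfer_err records any
	 * failure (for example ENXIO on a DMA timeout).
	 */
	done(req);		/* always fire, success or failure */
	return (transfer_err);	/* the caller still sees the error */
}

Calling done() exactly once per request, on every path, is what lets the MMC stack retry or report the error instead of hanging.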