| Updated: October 28, 2024 |
This is the sample code discussed in the "Writing Network Drivers for io-sock" guide.
/*
io-sock ofwbus sample driver code
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <machine/bus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <qnx/qnx_modload.h>
#include "sample.h"
#include "miibus_if.h"
/* Convenience wrappers for the softc's main (init/ioctl/rx) mutex. */
#define SAMPLE_LOCK(sc) mtx_lock(&(sc)->mtx)
#define SAMPLE_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
#define SAMPLE_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
#define SAMPLE_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)
/* Driver version, exported read-only under the qnx.driver sysctl tree. */
int drvr_ver = IOSOCK_VERSION_CUR;
SYSCTL_INT(_qnx_driver, OID_AUTO, sam, CTLFLAG_RD, &drvr_ver, 0,
"Version");
/* Bus resources this driver claims: one memory window and one IRQ line. */
static struct resource_spec sample_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
/*
 * Reap completed Tx transfers: sync/unload each finished DMA map and free
 * its mbuf.  Called with tx_mtx held.
 */
static void
sample_txfinish_locked(struct sample_softc *sc)
{
    struct sample_bufmap *bmap = NULL;

    mtx_assert(&(sc)->tx_mtx, MA_OWNED);
    bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTREAD);
    /*
     * Hardware-specific code here: You may want to clear internal
     * buffers, update memory pointers, etc.
     */
    while (1) {
        /*
         * Hardware-specific code here: point bmap at the bufmap of the
         * next completed transfer; exit the loop when all transferred
         * packets have been cleared.
         */
        if (bmap == NULL) {
            /*
             * No completed transfer to reap.  Without this guard the
             * scaffold dereferenced the NULL bmap below and never
             * left the loop.
             */
            break;
        }
        if (bmap->mbuf) {
            /* Give the buffer back to the CPU and drop the mapping. */
            bus_dmamap_sync(sc->txbuf_tag, bmap->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->txbuf_tag, bmap->map);
            m_freem(bmap->mbuf);
            bmap->mbuf = NULL;
            /*
             * If we use software packet counters, we can increase them here.
             * If we use hardware counters, we can use the sample_harvest_stats() function.
             * For example: if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
             */
        }
        bmap = NULL;    /* Consumed; the hardware code above must refill it. */
    }
    /*
     * Hardware-specific code here: if there are no additional uncompleted
     * transfers, we don't need a watchdog timer
     */
    sc->tx_watchdog_count = 0;
}
/*
 * Scratch segment array for bus_dmamap_load_mbuf_sg() in
 * sample_txstart_locked().  File scope keeps it off the stack; the only
 * visible user runs with tx_mtx held, so a single static instance suffices.
 */
static struct bus_dma_segment seg[TX_DMA_MFUF_CHUNK];
/*
 * Drain the drbr ring onto the hardware: DMA-map each mbuf chain (collapsing
 * or defragmenting it if it has too many segments), tap it for BPF, and hand
 * it to the device.  Called with tx_mtx held.  Arms the Tx watchdog when
 * anything was enqueued and re-schedules itself via the taskqueue if packets
 * remain in the ring.
 */
static void
sample_txstart_locked(struct sample_softc *sc)
{
struct ifnet *ifp;
struct mbuf *m0;
struct mbuf *m_temp;
/* NOTE(review): placeholder byte counter — never read in this scaffold. */
int tq_pkt_xbytes;
int error;
int nsegs;
int i;
int enqueued = 0;
mtx_assert(&(sc)->tx_mtx, MA_OWNED);
/* Nothing to do unless the interface is running and the link is up. */
if (((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
!sc->link_is_up) {
return;
}
ifp = sc->ifp;
while(1) {
/*
* Hardware-specific code here: execute this if you don't have
* enough memory on the hardware
*/
sample_txfinish_locked(sc);
/*
* Hardware-specific code here: check if hardware can transfer,
* return if not
*/
/* Peek (not dequeue): on mapping failure we can put the mbuf back. */
m0 = drbr_peek(sc->ifp, sc->br);
tq_pkt_xbytes = 0;
if (m0 == NULL) {
break;
}
error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
sc->txbuf_map[sc->txbuf_idx_head].map,
m0, seg, &nsegs, 0);
/*
* If bus_dmamap_load_mbuf_sg() wasn't successfull in mapping all
* elements, we will first try to "collapse" the list,
* and if collapse didn't help, we will try to defragment it.
*/
if (error == EFBIG) {
m_temp = m_collapse(m0, M_NOWAIT, TX_DMA_MFUF_CHUNK);
if (m_temp != NULL) {
m0 = m_temp;
}
error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
sc->txbuf_map[sc->txbuf_idx_head].map,
m0, seg, &nsegs, 0);
if (error == EFBIG) {
m_temp = m_defrag(m0, M_NOWAIT);
if (m_temp == NULL) {
device_printf(sc->dev,
"%s Defrag failed\n",
__FUNCTION__);
/* Keep the packet in the ring and retry later. */
drbr_putback(sc->ifp, sc->br, m0);
break;
}
m0 = m_temp;
error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
sc->txbuf_map[sc->txbuf_idx_head].map,
m0, seg, &nsegs, 0);
/*
* We try to remap only once.
* If we fail, we try defrag
* (which is time consuming).
* After that, bus_dmamap_load_mbuf_sg()
* should not return EFBIG error
*/
KASSERT((error != EFBIG),
("%s: Too many segments after defrag",
__func__));
}
}
if (error != 0) {
device_printf(sc->dev,
"%s an attempt to load mbuf failed\n",
__FUNCTION__);
drbr_putback(sc->ifp, sc->br, m0);
break;
}
/* Flush CPU writes before the device reads the buffer. */
bus_dmamap_sync(sc->txbuf_tag,
sc->txbuf_map[sc->txbuf_idx_head].map,
BUS_DMASYNC_PREWRITE);
sc->txbuf_map[sc->txbuf_idx_head].mbuf = m0;
/* Hand a copy of the frame to any attached BPF listeners. */
BPF_MTAP(ifp, m0);
++enqueued;
/* Commit the earlier peek: the packet is now owned by the driver. */
drbr_advance(sc->ifp, sc->br);
m_temp = m0;
for (i=0; i < nsegs; i++) {
/*
* Hardware-specific code here: Prepare memory
* for the transfer from the hardware
*/
}
/* Hardware-specific code here: start transfer on device */
}
if (enqueued != 0) {
/* Expect a txdone within this many ticks; see sample_tick(). */
sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
}
if (!drbr_empty(sc->ifp, sc->br)) {
/* Leftover packets: let the taskqueue try again. */
taskqueue_enqueue(sc->tq, &sc->tx);
}
}
/*
 * if_transmit entry point: queue the mbuf on our drbr ring and kick the
 * transmitter — directly when the Tx lock is free, otherwise via the
 * taskqueue so we never block the caller.
 */
static int
sample_transmit(struct ifnet *ifp, struct mbuf *m)
{
    struct sample_softc *sc = ifp->if_softc;
    int rc;

    rc = drbr_enqueue(ifp, sc->br, m);
    if (rc == 0) {
        if (mtx_trylock(&sc->tx_mtx)) {
            sample_txstart_locked(sc);
            mtx_unlock(&sc->tx_mtx);
        } else {
            /* Someone else holds the lock; wake up the task queue. */
            taskqueue_enqueue(sc->tq, &sc->tx);
        }
    }
    return (rc);
}
/*
 * Taskqueue handler: deferred transmit start for packets that could not be
 * pushed out directly from sample_transmit().
 */
static void
sample_tq_tx(void *arg, int pending)
{
    struct sample_softc *sc = arg;

    mtx_lock(&sc->tx_mtx);
    if (!drbr_empty(sc->ifp, sc->br))
        sample_txstart_locked(sc);
    mtx_unlock(&sc->tx_mtx);
}
/*
 * if_qflush entry point: discard every packet waiting in our private
 * buf_ring, then let the stack flush its own send queue.
 */
static void
sample_qflush(struct ifnet *ifp)
{
    struct sample_softc *sc = ifp->if_softc;
    struct mbuf *pkt;

    mtx_lock(&sc->tx_mtx);
    for (pkt = buf_ring_dequeue_sc(sc->br); pkt != NULL;
        pkt = buf_ring_dequeue_sc(sc->br)) {
        m_freem(pkt);
    }
    mtx_unlock(&sc->tx_mtx);
    if_qflush(ifp);
}
/*
 * Bring the interface down: clear RUNNING, disarm the Tx watchdog and the
 * stats counter, and cancel the periodic tick.  Called with the main softc
 * lock held.
 */
static void
sample_stop_locked(struct sample_softc *sc)
{
    SAMPLE_ASSERT_LOCKED(sc);

    sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    sc->tx_watchdog_count = 0;
    sc->stats_harvest_count = 0;
    callout_stop(&sc->sample_callout);
    /* Hardware-specific code here: stop hardware */
}
static void
sample_harvest_stats(struct sample_softc *sc)
{
struct ifnet *ifp = sc->ifp;
int value_for_example = 0;
if_inc_counter(ifp, IFCOUNTER_IPACKETS,
value_for_example /* internal or hardware counter */);
if_inc_counter(ifp, IFCOUNTER_IMCASTS,
value_for_example /* internal or hardware counter */);
if_inc_counter(ifp, IFCOUNTER_IERRORS,
value_for_example /* internal or hardware counter */);
if_inc_counter(ifp, IFCOUNTER_OPACKETS,
value_for_example /* internal or hardware counter */);
if_inc_counter(ifp, IFCOUNTER_OMCASTS,
value_for_example /* internal or hardware counter */);
if_inc_counter(ifp, IFCOUNTER_OERRORS,
value_for_example /* internal or hardware counter */);
if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
value_for_example/* internal or hardware counter */);
}
/*
 * bus_dmamap_load() callback for single-segment loads: store the segment's
 * physical address into the bus_addr_t the caller passed as arg.  Leaves
 * the destination untouched on error.
 */
static void
sample_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    bus_addr_t *paddr = arg;

    if (error == 0)
        *paddr = segs[0].ds_addr;
}
/*
 * Once-per-second housekeeping, run from the callout with the softc lock
 * held: service the Tx watchdog, harvest statistics, poll the PHY, and
 * re-arm ourselves.
 */
static void
sample_tick(void *arg)
{
    struct sample_softc *sc = arg;
    int had_link;

    SAMPLE_ASSERT_LOCKED(sc);
    if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
        return;
    /*
     * Typical Tx watchdog. If this fires, it indicates that we enqueued
     * packets for output and never got a txdone interrupt for them.
     */
    if (sc->tx_watchdog_count > 0 && --sc->tx_watchdog_count == 0)
        taskqueue_enqueue(sc->tq, &sc->tx);
    /*
     * Gather statistics from hardware counters. Depending on the frequency of
     * sample_tick, the developer may consider reducing how often this function
     * executes.
     */
    sample_harvest_stats(sc);
    /* Poll the PHY; restart transmit if the link just came up. */
    had_link = sc->link_is_up;
    mii_tick(sc->mii_softc);
    if (!had_link && sc->link_is_up)
        taskqueue_enqueue(sc->tq, &sc->tx);
    /* Schedule another check one second from now. */
    callout_reset(&sc->sample_callout, hz, sample_tick, sc);
}
/*
 * Program the hardware receive filter from the current interface state.
 * Called from init and from the SIOCSIFFLAGS/SIOC[ADD|DEL]MULTI ioctls when
 * PROMISC/ALLMULTI or the multicast list changes.  Stub in this sample.
 */
static void
sample_setup_rxfilter(struct sample_softc *sc)
{
/* Hardware-specific code here: enable hardware filtering */
}
/*
 * Bring the interface up (softc lock held): mark it RUNNING, program the
 * Rx filter, enable the hardware, renegotiate media, and start the
 * periodic tick.  No-op if the interface is already running.
 */
static void
sample_init_locked(struct sample_softc *sc)
{
    struct ifnet *ifp = sc->ifp;

    SAMPLE_ASSERT_LOCKED(sc);
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
        return;

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    sample_setup_rxfilter(sc);
    /* Hardware-specific code here: enable the DMA Rx/Tx interrupts */
    /* Hardware-specific code here: enable transmitters */
    /*
     * mii_mediachg() calls back into sample_miibus_statchg(), which sets
     * up the remaining configuration registers based on current media.
     */
    mii_mediachg(sc->mii_softc);
    callout_reset(&sc->sample_callout, hz, sample_tick, sc);
}
/*
 * if_init entry point: acquire the softc lock and delegate to
 * sample_init_locked().
 */
static void
sample_init(void *arg)
{
    struct sample_softc *sc = arg;

    SAMPLE_LOCK(sc);
    sample_init_locked(sc);
    SAMPLE_UNLOCK(sc);
}
/*
 * Allocate a packet-header mbuf with an attached cluster for receive use,
 * with its length fields spanning the whole cluster.  Returns NULL when no
 * mbuf/cluster is available (M_NOWAIT).
 */
static struct mbuf *
sample_alloc_mbufcl(struct sample_softc *sc)
{
    struct mbuf *m;

    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (NULL);

    m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
    return (m);
}
/*
 * Reap one received packet: allocate a replacement cluster, unmap the
 * filled buffer, and pass the packet up the stack (dropping the softc lock
 * around if_input).  Returns the fresh replacement mbuf, or NULL if none
 * could be allocated (in which case the old buffer should be recycled).
 */
static struct mbuf *
sample_rxfinish_one(struct sample_softc *sc, struct sample_hwdesc *desc,
struct sample_bufmap *map)
{
struct ifnet *ifp;
struct mbuf *m, *m0;
/*
 * NOTE(review): len stays 0 in this scaffold; the hardware-specific code
 * must set the real frame length before if_input, or the stack sees a
 * zero-length packet.
 */
int len = 0;
m = map->mbuf;
ifp = sc->ifp;
/*
* Hardware-specific code here: check length, return if length doesn't
* make sense
*/
/* Allocate new buffer */
m0 = sample_alloc_mbufcl(sc);
if (m0 == NULL) {
/* no new mbuf available, recycle old */
if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
return (NULL);
}
/* Synchronize DMA for newly received packet */
bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxbuf_tag, map->map);
/* Received packet is valid, process it */
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = len;
m->m_len = len;
/*
* If we use software packet counters, we can increase them here.
* If we use hardware counters, we can use sample_harvest_stats().
* For example: if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
*/
/* if_input may sleep or re-enter the driver: drop the lock around it. */
SAMPLE_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
SAMPLE_LOCK(sc);
/*
 * NOTE(review): the caller is expected to install m0 as the new Rx
 * buffer for this slot (e.g. via sample_setup_rxbuf()) — confirm when
 * filling in the hardware-specific code.
 */
return (m0);
}
/*
 * ifmedia status callback: refresh PHY state under the softc lock and
 * report the active media and link status to the caller.
 */
static void
sample_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
{
    struct sample_softc *sc = ifp->if_softc;
    struct mii_data *mii = sc->mii_softc;

    SAMPLE_LOCK(sc);
    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
    SAMPLE_UNLOCK(sc);
}
/*
 * ifmedia change callback: renegotiate media via the MII layer under the
 * softc lock and return its status.
 */
static int
sample_media_change(struct ifnet * ifp)
{
    struct sample_softc *sc = ifp->if_softc;
    int rc;

    SAMPLE_LOCK(sc);
    rc = mii_mediachg(sc->mii_softc);
    SAMPLE_UNLOCK(sc);
    return (rc);
}
/*
 * if_ioctl entry point.  Handles interface flags, multicast list changes,
 * media selection, and capability toggles; everything else is delegated to
 * ether_ioctl().
 */
static int
sample_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct sample_softc *sc;
struct mii_data *mii;
struct ifreq *ifr;
int mask, error;
sc = ifp->if_softc;
ifr = (struct ifreq *)data;
error = 0;
switch (cmd) {
case SIOCSIFFLAGS:
SAMPLE_LOCK(sc);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Only reprogram the filter when PROMISC/ALLMULTI changed. */
if ((ifp->if_flags ^ sc->if_flags) &
(IFF_PROMISC | IFF_ALLMULTI))
sample_setup_rxfilter(sc);
} else {
/* Up requested but not running: start, unless detaching. */
if (!sc->is_detaching)
sample_init_locked(sc);
}
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
sample_stop_locked(sc);
}
/* Remember the flags so the next ioctl can detect changes. */
sc->if_flags = ifp->if_flags;
SAMPLE_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
SAMPLE_LOCK(sc);
sample_setup_rxfilter(sc);
SAMPLE_UNLOCK(sc);
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
/* Media get/set is handled entirely by the MII layer. */
mii = sc->mii_softc;
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
break;
case SIOCSIFCAP:
/* mask holds the capability bits the caller wants toggled. */
mask = ifp->if_capenable ^ ifr->ifr_reqcap;
if (mask & IFCAP_VLAN_MTU) {
/*
* Hardware-specific code here: Set capabilities,
* such as hardware offload features.
* In this example we just acknowledge the change.
*/
ifp->if_capenable ^= IFCAP_VLAN_MTU;
}
break;
default:
error = ether_ioctl(ifp, cmd, data);
break;
}
return (error);
}
/*
 * Walk the hardware Rx ring (softc lock held) and hand each completed
 * packet to sample_rxfinish_one().  Skeleton: the descriptor loop is
 * hardware-specific and left to the implementer.
 */
static void
sample_rxfinish_locked(struct sample_softc *sc)
{
struct sample_hwdesc *desc = NULL;
struct mbuf *m;
int idx = 0;
SAMPLE_ASSERT_LOCKED(sc);
/* Hardware-specific code here: loop on hardware buffers, assign value to desc */
bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_POSTREAD);
/*
 * NOTE(review): m receives the replacement mbuf but is never installed
 * into the ring here — the hardware-specific code must set it up as the
 * slot's new buffer (see sample_setup_rxbuf()), or it leaks.
 */
m = sample_rxfinish_one(sc, desc, &(sc->rxbuf_map[idx]));
/*
* Hardware-specific code here: let the hardware know that you finished
* the current transfer
*/
}
/*
 * Interrupt service routine.  In this sample it only runs the Rx
 * completion path under the softc lock; a real driver would decode the
 * interrupt status register here first.
 */
static void
sample_intr(void *arg)
{
    struct sample_softc *sc = arg;

    /*
     * Hardware-specific code here: your ISR. Typically, the Rx routine will
     * be called from here
     */
    SAMPLE_LOCK(sc);
    sample_rxfinish_locked(sc);
    SAMPLE_UNLOCK(sc);
}
static int
sample_setup_rxbuf(struct sample_softc *sc, int idx, struct mbuf *m)
{
struct bus_dma_segment seg;
int error, nsegs;
m_adj(m, ETHER_ALIGN);
error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
m, &seg, &nsegs, 0);
if (error != 0)
return (error);
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
BUS_DMASYNC_PREREAD);
sc->rxbuf_map[idx].mbuf = m;
/*
* Hardware-specific code here: set hardware pointers for the buffer.
* Use seg.ds_addr as the physical address pointer
*/
return (0);
}
/*
 * Create all DMA resources: Tx/Rx descriptor rings (tag + memory + load)
 * and Tx/Rx per-buffer tags and maps, and pre-populate the Rx ring with
 * mbuf clusters.  Returns 0 on success or ENXIO on any failure; partially
 * created resources are left for sample_detach() to release (attach calls
 * it on the failure path).
 */
static int
setup_dma(struct sample_softc *sc)
{
struct mbuf *m;
int error;
int idx;
/*
* Set up Tx descriptor ring, descriptors, and DMA maps
*/
/* Please see the next call to bus_dma_tag_create() for more comments */
error = bus_dma_tag_create(
bus_get_dma_tag(sc->dev), /* Parent tag */
SAMPLE_DESC_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
TX_DESC_SIZE, 1, /* maxsize, nsegments */
TX_DESC_SIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->txdesc_tag);
if (error != 0) {
device_printf(sc->dev,
"could not create TX ring DMA tag.\n");
goto out;
}
error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
&sc->txdesc_map);
if (error != 0) {
device_printf(sc->dev,
"could not allocate TX descriptor ring.\n");
goto out;
}
/* Resolve the ring's physical address via the single-segment callback. */
error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
sc->txdesc_ring, TX_DESC_SIZE, sample_get1paddr,
&sc->txdesc_ring_paddr, 0);
if (error != 0) {
device_printf(sc->dev,
"could not load TX descriptor ring map.\n");
goto out;
}
/*
* reference:
* https://www.freebsd.org/cgi/man.cgi?query=bus_dma_tag_create
*/
error = bus_dma_tag_create(
bus_get_dma_tag(sc->dev), /* Parent tag */
/*
* Hardware that allows DMA transfers to start at
* any address would specify 1 for byte alignment.
* Hardware that requires DMA transfers
* to start on a multiple of 4K would specify 4096.
*/
1,
/*
* The boundary indicates the set of
* addresses, all multiples of the boundary argument,
* that cannot be crossed by a single bus_dma_segment_t.
* The boundary must be a power of 2 and must be no smaller
* than the maximum segment size. '0' indicates that there
* are no boundary restrictions.
*/
0, /* alignment, boundary */
/*
* Bounds of the window of bus address space that
* cannot be directly accessed by the device
*/
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MCLBYTES, /* max size */
/*
* If the hardware supports the DMA scatter/gather mechanism,
* it is very convenient to work with segments.
* You can map a segmented mbuf into a number of segments, without
* copying the data
*/
TX_DMA_MFUF_CHUNK, /* nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->txbuf_tag);
if (error != 0) {
device_printf(sc->dev,
"could not create TX ring DMA tag.\n");
goto out;
}
/* One reusable DMA map per Tx buffer slot. */
for (idx = 0; idx < TX_MAP_BUFFER_LEN; idx++) {
error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
&sc->txbuf_map[idx].map);
if (error != 0) {
device_printf(sc->dev,
"could not create TX buffer DMA map.\n");
goto out;
}
sc->txbuf_map[idx].last_desc = -1;
}
memset(sc->txdesc_ring, 0, TX_DESC_SIZE);
/*
* Set up Rx descriptor ring, descriptors, DMA maps, and mbufs.
*/
error = bus_dma_tag_create(
bus_get_dma_tag(sc->dev), /* Parent tag. */
SAMPLE_DESC_RING_ALIGN, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
RX_DESC_SIZE, 1, /* maxsize, nsegments */
RX_DESC_SIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->rxdesc_tag);
if (error != 0) {
device_printf(sc->dev,
"could not create RX ring DMA tag.\n");
goto out;
}
error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
BUS_DMA_COHERENT | BUS_DMA_WAITOK |
BUS_DMA_ZERO | BUS_DMA_NOCACHE,
&sc->rxdesc_map);
if (error != 0) {
device_printf(sc->dev,
"could not allocate RX descriptor ring.\n");
goto out;
}
error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
sc->rxdesc_ring, RX_DESC_SIZE, sample_get1paddr,
&sc->rxdesc_ring_paddr, 0);
if (error != 0) {
device_printf(sc->dev,
"could not load RX descriptor ring map.\n");
goto out;
}
error = bus_dma_tag_create(
bus_get_dma_tag(sc->dev), /* Parent tag. */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MCLBYTES, 1, /* maxsize, nsegments */
MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->rxbuf_tag);
if (error != 0) {
device_printf(sc->dev,
"could not create RX buf DMA tag.\n");
goto out;
}
/* Create a map for each Rx slot and park a fresh cluster in it. */
for (idx = 0; idx < RX_DESC_COUNT; idx++) {
error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
&sc->rxbuf_map[idx].map);
if (error != 0) {
device_printf(sc->dev,
"could not create RX buffer DMA map.\n");
goto out;
}
if ((m = sample_alloc_mbufcl(sc)) == NULL) {
device_printf(sc->dev, "Could not alloc mbuf\n");
error = ENOMEM;
goto out;
}
if ((error = sample_setup_rxbuf(sc, idx, m)) != 0) {
device_printf(sc->dev,
"could not create new RX buffer.\n");
goto out;
}
}
out:
if (error != 0)
return (ENXIO);
return (0);
}
/*
 * Reset the controller/PHY before first use.  Stub in this sample; always
 * reports success.
 */
static int
sample_reset(device_t dev)
{
/* Hardware-specific code here: reset the chip */
return (0);
}
/*
 * device_probe: claim only enabled device-tree nodes whose "compatible"
 * string matches ours.
 */
static int
sample_probe(device_t dev)
{
    if (!ofw_bus_status_okay(dev) ||
        !ofw_bus_is_compatible(dev, "Hardware Descriptor string from DTB file")) {
        return (ENXIO);
    }

    device_set_desc(dev, "Sample Controller");
    return (BUS_PROBE_DEFAULT);
}
static int
sample_shutdown(device_t dev)
{
/*
* Hardware-specific code here: shutdown is called when io-sock is
* terminated or crashes. It is not intended to be a graceful stop
* (that is what detach is for). Therefore, the code should disable hardware
* DMA using the simplest method possible.
*/
return 0;
}
/*
 * device_detach: stop the interface, detach miibus and the ifnet, release
 * all DMA resources, tear down the Tx taskqueue/ring, the interrupt and the
 * locks.  Also used by attach as its failure-path cleanup, so every step
 * tolerates resources that were never created.
 */
static int
sample_detach(device_t dev)
{
    struct sample_softc *sc;
    bus_dmamap_t map;
    int idx;

    sc = device_get_softc(dev);
    if (sc->is_attached) {
        SAMPLE_LOCK(sc);
        sc->is_detaching = true;
        sample_stop_locked(sc);
        SAMPLE_UNLOCK(sc);
        callout_drain(&sc->sample_callout);
        ether_ifdetach(sc->ifp);
    }
    /* Detach the miibus */
    if (sc->miibus) {
        device_delete_child(dev, sc->miibus);
    }
    bus_generic_detach(dev);
    if (sc->ifp != NULL) {
        if_free(sc->ifp);
    }
    /* Clean up Rx DMA resources and free mbufs. */
    for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
        if ((map = sc->rxbuf_map[idx].map) != NULL) {
            bus_dmamap_unload(sc->rxbuf_tag, map);
            bus_dmamap_destroy(sc->rxbuf_tag, map);
            m_freem(sc->rxbuf_map[idx].mbuf);
        }
    }
    if (sc->rxbuf_tag != NULL) {
        bus_dma_tag_destroy(sc->rxbuf_tag);
    }
    if (sc->rxdesc_map != NULL) {
        bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
        bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
            sc->rxdesc_map);
    }
    if (sc->rxdesc_tag != NULL) {
        bus_dma_tag_destroy(sc->rxdesc_tag);
    }
    /* Clean up Tx DMA resources. */
    for (idx = 0; idx < TX_MAP_BUFFER_LEN; ++idx) {
        if ((map = sc->txbuf_map[idx].map) != NULL) {
            /* TX maps are already unloaded. */
            bus_dmamap_destroy(sc->txbuf_tag, map);
        }
    }
    if (sc->txbuf_tag != NULL) {
        bus_dma_tag_destroy(sc->txbuf_tag);
    }
    if (sc->txdesc_map != NULL) {
        bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
        bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
            sc->txdesc_map);
    }
    if (sc->txdesc_tag != NULL)
        bus_dma_tag_destroy(sc->txdesc_tag);
    /* Release bus resources. */
    bus_release_resources(dev, sample_spec, sc->res);
    if (sc->tx_mtx_is_up) {
        /*
         * Drain the Tx task WITHOUT holding tx_mtx: sample_tq_tx()
         * acquires tx_mtx itself, so draining it with the lock held
         * (as the original code did) can deadlock.
         */
        if (sc->tq) {
            taskqueue_drain(sc->tq, &sc->tx);
            taskqueue_free(sc->tq);
            sc->tq = NULL;
        }
        mtx_lock(&sc->tx_mtx);
        if (sc->br) {
            buf_ring_free(sc->br, M_DEVBUF);
            sc->br = NULL;
        }
        /* Release the lock before destroying it. */
        mtx_unlock(&sc->tx_mtx);
        mtx_destroy(&sc->tx_mtx);
    }
    if (sc->intr_cookie != NULL) {
        bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
        sc->intr_cookie = NULL;
    }
    /*
     * NOTE(review): sc->mtx is destroyed unconditionally; if attach failed
     * before mtx_init(&sc->mtx, ...) this destroys an uninitialized mutex —
     * confirm against the attach failure paths.
     */
    mtx_destroy(&sc->mtx);
    return (0);
}
/*
 * device_attach: read device-tree properties, allocate bus resources,
 * create the Tx ring/taskqueue, set up DMA, the interrupt, the ifnet and
 * the MII bus, then attach the Ethernet interface.  On any failure the
 * partially initialized state is torn down via sample_detach().
 */
static int
sample_attach(device_t dev)
{
    uint8_t macaddr[ETHER_ADDR_LEN] = { 0 };
    struct sample_softc *sc;
    struct ifnet *ifp;
    int error, phynum;
    char *phy_mode;
    phandle_t node;
    void *dummy;
    char td_name[32];

    error = 0;
    sc = device_get_softc(dev);
    /*
     * Record our device_t first: setup_dma() and mtx_init() below read
     * sc->dev, which the original code left NULL.
     */
    sc->dev = dev;
    if ((node = ofw_bus_get_node(dev)) == -1) {
        device_printf(dev, "Impossible: Can't find ofw bus node\n");
        error = ENXIO;
        goto fail;
    }
    /*
     * Hardware-specific code here: an example of how to read some
     * property from a DTB file
     */
    if (OF_getprop_alloc(node, "phy-mode", (void **)&phy_mode)) {
        if (strcmp(phy_mode, "rgmii") == 0) {
            sc->phy_mode = PHY_MODE_RGMII;
        }
        if (strcmp(phy_mode, "rmii") == 0) {
            sc->phy_mode = PHY_MODE_RMII;
        }
        OF_prop_free(phy_mode);
    }
    if (bus_alloc_resources(dev, sample_spec, sc->res)) {
        device_printf(dev, "could not allocate resources\n");
        error = ENXIO;
        goto fail;
    }
    /* Reset the PHY, if needed */
    if (sample_reset(dev) != 0) {
        device_printf(dev, "Can't reset the PHY\n");
        error = ENXIO;
        goto fail;
    }
    /*
     * Give the taskqueue a real name; the original code used td_name
     * uninitialized.  The mutex name must outlive the mutex, so use the
     * persistent nameunit string rather than this stack buffer.
     */
    snprintf(td_name, sizeof(td_name), "%s txq", device_get_nameunit(dev));
    mtx_init(&sc->tx_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
    /* Let sample_detach() know the Tx lock/ring/taskqueue need teardown. */
    sc->tx_mtx_is_up = true;
    sc->br = buf_ring_alloc(TX_QUEUE_LEN, M_DEVBUF, M_NOWAIT, &sc->tx_mtx);
    if (sc->br == NULL) {
        device_printf(dev, "buf_ring_alloc failed");
        error = ENOMEM;
        goto fail;
    }
    TASK_INIT(&sc->tx, 0, sample_tq_tx, sc);
    sc->tq = taskqueue_create(td_name, M_NOWAIT, taskqueue_thread_enqueue,
        &sc->tq);
    if (sc->tq == NULL) {
        device_printf(dev, "taskqueue_create failed");
        error = ENOMEM;
        goto fail;
    }
    /* Pass the thread name as data, never as the format string itself. */
    taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s", td_name);
    if (setup_dma(sc)) {
        error = ENXIO;
        goto fail;
    }
    mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
        MTX_NETWORK_LOCK, MTX_DEF);
    callout_init_mtx(&sc->sample_callout, &sc->mtx, 0);
    /* Setup interrupt handler. */
    error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
        NULL, sample_intr, sc, &sc->intr_cookie);
    if (error != 0) {
        device_printf(dev, "could not setup interrupt handler.\n");
        error = ENXIO;
        goto fail;
    }
    /* Set up the Ethernet interface. */
    sc->ifp = ifp = if_alloc(IFT_ETHER);
    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_capabilities = IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;
    ifp->if_transmit = sample_transmit;
    ifp->if_qflush = sample_qflush;
    ifp->if_ioctl = sample_ioctl;
    ifp->if_init = sample_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
    ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
    IFQ_SET_READY(&ifp->if_snd);
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);
    /* Attach the mii driver. */
    if (fdt_get_phyaddr(node, dev, &phynum, &dummy) != 0) {
        phynum = MII_PHY_ANY;
    }
    error = mii_attach(dev, &sc->miibus, ifp, sample_media_change,
        sample_media_status, BMSR_DEFCAPMASK, phynum,
        MII_OFFSET_ANY, MIIF_FORCEANEG);
    if (error != 0) {
        device_printf(dev, "PHY attach failed\n");
        error = ENXIO;
        goto fail;
    }
    sc->mii_softc = device_get_softc(sc->miibus);
    /*
     * All ready to run, attach the Ethernet interface.
     * Hardware-specific code here: read the real MAC address from the
     * hardware or the DTB.  macaddr is zero-initialized above (the
     * original code passed uninitialized stack bytes to ether_ifattach).
     */
    ether_ifattach(ifp, macaddr);
    sc->is_attached = true;
fail:
    if (error != 0) {
        device_printf(dev, "%s failed with error %d\n",
            __FUNCTION__, error);
        sample_detach(dev);
    }
    return error;
}
/*
 * miibus_readreg method: return the value of PHY register reg on PHY phy.
 * Stub in this sample — returns a recognizable placeholder value.
 */
static int
sample_miibus_read_reg(device_t dev, int phy, int reg)
{
/* Hardware-specific code here: read PHY register here */
return 0xdeadbeef;
}
/*
 * miibus_writereg method: write val to PHY register reg on PHY phy.
 * Stub in this sample — always reports success.
 */
static int
sample_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
/* Hardware-specific code here: write PHY register here */
return 0;
}
static void
sample_miibus_statchg(device_t dev)
{
struct sample_softc *sc;
struct mii_data *mii;
/*
* Called by the MII bus driver when the PHY establishes
* link to set the MAC interface registers.
*/
sc = device_get_softc(dev);
SAMPLE_ASSERT_LOCKED(sc);
mii = sc->mii_softc;
/* Track link state for the transmit path (see sample_txstart_locked). */
if (mii->mii_media_status & IFM_ACTIVE) {
sc->link_is_up = true;
}
else {
sc->link_is_up = false;
}
/* All supported speeds share the hardware-specific setup below. */
switch (IFM_SUBTYPE(mii->mii_media_active)) {
case IFM_1000_T:
case IFM_1000_SX:
case IFM_100_TX:
case IFM_100_T2:
case IFM_10_T:
case IFM_100_T:
/* Hardware-specific code here: set speed */
break;
case IFM_NONE:
sc->link_is_up = false;
return;
default:
/* Unknown media: force link down and skip duplex programming. */
sc->link_is_up = false;
device_printf(dev, "Unsupported media %u\n",
IFM_SUBTYPE(mii->mii_media_active));
return;
}
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
/* Hardware-specific code here: set full duplex */
}
else {
/* Hardware-specific code here: clear full duplex */
}
/* Hardware-specific code here: implement media-specific hardware initialization */
}
/* newbus method table: device lifecycle plus the MII bus accessors. */
static device_method_t sample_methods[] = {
DEVMETHOD(device_probe, sample_probe),
DEVMETHOD(device_attach, sample_attach),
DEVMETHOD(device_detach, sample_detach),
DEVMETHOD(device_shutdown, sample_shutdown),
/* MII Interface */
DEVMETHOD(miibus_readreg, sample_miibus_read_reg),
DEVMETHOD(miibus_writereg, sample_miibus_write_reg),
DEVMETHOD(miibus_statchg, sample_miibus_statchg),
DEVMETHOD_END
};
/* Driver declaration: name, method table, and per-instance softc size. */
driver_t sample_driver = {
"sample",
sample_methods,
sizeof(struct sample_softc),
};
static devclass_t sample_devclass;
/* Register the driver on simplebus and hang a miibus instance off it. */
DRIVER_MODULE(sample, simplebus, sample_driver, sample_devclass, 0, 0);
DRIVER_MODULE(miibus, sample, miibus_driver, miibus_devclass, 0, 0);
/* Module dependencies: the Ethernet framework and the MII bus code. */
MODULE_DEPEND(sample, ether, 1, 1, 1);
MODULE_DEPEND(sample, miibus, 1, 1, 1);
/* io-sock module version record consumed by the loader. */
struct _iosock_module_version iosock_module_version = IOSOCK_MODULE_VER_SYM_INIT;
/*
 * Module unload hook registered via SYSUNINIT.  Empty in this sample;
 * per-device cleanup happens in sample_detach().
 */
static void
sample_uninit(void *arg)
{
}
SYSUNINIT(sample_uninit, SI_SUB_DUMMY, SI_ORDER_ANY, sample_uninit, NULL);