Commit 153b386d authored by Jeffrey Lee

Incorporate patch to add support for scheduling of transfers on a per-microframe basis

Detail:
  Makefile, dwc/driver/c/dwc_otg_hcd, dwc/driver/c/dwc_otg_hcd_intr, dwc/driver/c/dwc_otg_hcd_queue, dwc/driver/h/dwc_otg_hcd - Adapted the microframe scheduling patch at http://git.denx.de/?p=linux-denx.git;a=commit;h=9796e39e7a513d8a4acde759ec5d0023645143d8 to work with our newer version of the DWC driver. Enabled by SCHEDULE_PATCH #define.
  dwc/driver/c/dwc_otg_hcd_ddma - Fix up descriptor DMA support to compile (but not run properly) when the scheduling patch is active
  dwc/dwc_common_port/h/dwc_os - Provide local_irq_save() and local_irq_restore() implementations, as used by the scheduling patch
Admin:
  Tested on Raspberry Pi with high processor vectors
  Appears to fix problems with devices becoming unresponsive once too many periodic transfers are active (e.g. too many interrupt pipes open)
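
For readers unfamiliar with the patch, the sketch below is an illustrative, standalone model of the idea (it is not part of the commit, and all names in it are invented for the example): each 1ms frame is divided into eight microframes with a fixed microsecond budget; a periodic transfer claims the first microframe with enough spare time and donates the time back when its QH is released. In the driver the budgets live in hcd->frame_usecs and the per-QH claims in qh->frame_usecs, and full-speed transfers may span several microframes via find_multi_uframe, which this sketch does not model.

/*
 * Illustrative sketch only - a minimal model of per-microframe budgeting.
 * All identifiers here are invented for the example.
 */
#include <stdio.h>

#define UFRAMES_PER_FRAME 8

/* Spare usecs per microframe; the last slots are reserved, as in the patch */
static unsigned short frame_usecs[UFRAMES_PER_FRAME] =
    { 100, 100, 100, 100, 100, 100, 30, 0 };

/* Claim 'usecs' from the first microframe that can hold it; -1 if none fits */
static int claim_uframe(unsigned short usecs)
{
    int i;
    for (i = 0; i < UFRAMES_PER_FRAME; i++) {
        if (usecs <= frame_usecs[i]) {
            frame_usecs[i] -= usecs;
            return i;
        }
    }
    return -1;
}

/* Donate the time back when the transfer is descheduled */
static void release_uframe(int uframe, unsigned short usecs)
{
    if (uframe >= 0 && uframe < UFRAMES_PER_FRAME)
        frame_usecs[uframe] += usecs;
}

int main(void)
{
    int slot = claim_uframe(40);                  /* e.g. one interrupt pipe */
    printf("scheduled in microframe %d\n", slot);
    release_uframe(slot, 40);
    return 0;
}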


Version 0.06. Tagged as 'DWCDriver-0_06'
parent c0223aea
......@@ -46,13 +46,19 @@ LDFLAGS = -Symbols rm.sym${TARGET}
VPATH = ^.^.build ^.^.dev.usb dwc.driver dwc.dwc_common_port
# DWC bits
CFLAGS += -DDWC_EN_ISOC -Idwc.dwc_common_port -DDWC_HOST_ONLY -DBCM2835
CFLAGS += -DDWC_EN_ISOC -Idwc.dwc_common_port -DDWC_HOST_ONLY
OBJS += dwc_otg_cil dwc_otg_cil_intr dwc_otg_hcd dwc_otg_hcd_intr dwc_otg_hcd_queue dwc_otg_hcd_ddma
OBJS += dwc_cc dwc_modpow dwc_notifier dwc_mem
# Use this flag to enable Broadcom SoC/BCM2835-specific changes
CFLAGS += -DBCM2835
# Use this flag to reduce SOF interrupt service overhead
#CFLAGS += -DSOF_FIX
# Use this flag to enable the microframe scheduling patch. Note - will break controllers that use descriptor DMA
CFLAGS += -DSCHEDULE_PATCH
include CModule
${DIRS}::
......
/* (0.05)
/* (0.06)
*
* This file is automatically maintained by srccommit, do not edit manually.
* Last processed by srccommit version: 1.1.
*
*/
#define Module_MajorVersion_CMHG 0.05
#define Module_MajorVersion_CMHG 0.06
#define Module_MinorVersion_CMHG
#define Module_Date_CMHG 21 Jul 2012
#define Module_Date_CMHG 22 Jul 2012
#define Module_MajorVersion "0.05"
#define Module_Version 5
#define Module_MajorVersion "0.06"
#define Module_Version 6
#define Module_MinorVersion ""
#define Module_Date "21 Jul 2012"
#define Module_Date "22 Jul 2012"
#define Module_ApplicationDate "21-Jul-12"
#define Module_ApplicationDate "22-Jul-12"
#define Module_ComponentName "DWCDriver"
#define Module_ComponentPath "mixed/RiscOS/Sources/HWSupport/USB/Controllers/DWCDriver"
#define Module_FullVersion "0.05"
#define Module_HelpVersion "0.05 (21 Jul 2012)"
#define Module_LibraryVersionInfo "0:5"
#define Module_FullVersion "0.06"
#define Module_HelpVersion "0.06 (22 Jul 2012)"
#define Module_LibraryVersionInfo "0:6"
......@@ -764,6 +764,10 @@ static void dwc_otg_hcd_free(dwc_otg_hcd_t * dwc_otg_hcd)
dwc_free(dwc_otg_hcd);
}
#ifdef SCHEDULE_PATCH
extern int init_hcd_usecs(dwc_otg_hcd_t *hcd);
#endif
int dwc_otg_hcd_init(dwc_otg_hcd_t * hcd, dwc_otg_core_if_t * core_if)
{
int retval = 0;
......@@ -832,6 +836,10 @@ int dwc_otg_hcd_init(dwc_otg_hcd_t * hcd, dwc_otg_core_if_t * core_if)
/* Initialize reset tasklet. */
hcd->reset_tasklet = DWC_TASK_ALLOC(reset_tasklet_func, hcd);
#ifdef SCHEDULE_PATCH
init_hcd_usecs(hcd);
#endif
/*
* Allocate space for storing data on status transactions. Normally no
* data is sent, but this space acts as a bit bucket. This must be
......@@ -886,8 +894,12 @@ static void dwc_otg_hcd_reinit(dwc_otg_hcd_t * hcd)
hcd->flags.d32 = 0;
hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
#ifdef SCHEDULE_PATCH
hcd->available_host_channels = hcd->core_if->core_params->host_channels;
#else
hcd->non_periodic_channels = 0;
hcd->periodic_channels = 0;
#endif
/*
* Put all channels in the free channel list and clean up channel
......@@ -1329,6 +1341,22 @@ int dwc_otg_hcd_update_transaction_mode(dwc_otg_hcd_t * hcd)
#endif /* HW2937_WORKAROUND */
#ifdef SCHEDULE_PATCH
#ifdef DEBUG_HOST_CHANNELS
static int last_sel_trans_num_per_scheduled = 0;
module_param(last_sel_trans_num_per_scheduled, int, 0444);
static int last_sel_trans_num_nonper_scheduled = 0;
module_param(last_sel_trans_num_nonper_scheduled, int, 0444);
static int last_sel_trans_num_avail_hc_at_start = 0;
module_param(last_sel_trans_num_avail_hc_at_start, int, 0444);
static int last_sel_trans_num_avail_hc_at_end = 0;
module_param(last_sel_trans_num_avail_hc_at_end, int, 0444);
#endif /* DEBUG_HOST_CHANNELS */
#endif /* SCHEDULE_PATCH */
/**
* This function selects transactions from the HCD transfer schedule and
* assigns them to available host channels. It is called from HCD interrupt
......@@ -1343,6 +1371,9 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
dwc_list_link_t *qh_ptr;
dwc_otg_qh_t *qh;
int num_channels;
#ifdef SCHEDULE_PATCH
unsigned long flags;
#endif
dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
#ifdef SOF_FIX
dwc_otg_core_if_t *core_if = hcd->core_if;
......@@ -1353,6 +1384,12 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
#ifdef DEBUG_SOF
DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
#endif
#ifdef DEBUG_HOST_CHANNELS
last_sel_trans_num_per_scheduled = 0;
last_sel_trans_num_nonper_scheduled = 0;
last_sel_trans_num_avail_hc_at_start = hcd->available_host_channels;
#endif /* DEBUG_HOST_CHANNELS */
#ifdef HW2937_WORKAROUND
if (!dwc_otg_hcd_update_transaction_mode(hcd))
......@@ -1362,10 +1399,28 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
#endif
/* Process entries in the periodic ready list. */
#ifdef SCHEDULE_PATCH
num_channels = hcd->core_if->core_params->host_channels;
#endif
qh_ptr = DWC_LIST_FIRST(&hcd->periodic_sched_ready);
while (qh_ptr != &hcd->periodic_sched_ready &&
!DWC_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
#ifdef SCHEDULE_PATCH
// Make sure we leave one channel for non periodic transactions.
local_irq_save(flags);
if (hcd->available_host_channels <= 1) {
local_irq_restore(flags);
break;
}
hcd->available_host_channels--;
local_irq_restore(flags);
#ifdef DEBUG_HOST_CHANNELS
last_sel_trans_num_per_scheduled++;
#endif /* DEBUG_HOST_CHANNELS */
#endif
qh = DWC_LIST_ENTRY(qh_ptr, dwc_otg_qh_t, qh_list_entry);
#ifdef HW2937_WORKAROUND
if (assign_and_init_hc(hcd, qh)) {
......@@ -1378,9 +1433,14 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
* periodic assigned schedule.
*/
qh_ptr = DWC_LIST_NEXT(qh_ptr);
#ifdef SCHEDULE_PATCH
local_irq_save(flags);
#endif
DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
&qh->qh_list_entry);
#ifdef SCHEDULE_PATCH
local_irq_restore(flags);
#endif
ret_val = DWC_OTG_TRANSACTION_PERIODIC;
#ifdef HW2937_WORKAROUND
} else {
......@@ -1410,10 +1470,25 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
qh_ptr = hcd->non_periodic_sched_inactive.next;
num_channels = hcd->core_if->core_params->host_channels;
while (qh_ptr != &hcd->non_periodic_sched_inactive &&
#ifndef SCHEDULE_PATCH
(hcd->non_periodic_channels <
num_channels - hcd->periodic_channels) &&
#endif
!DWC_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
#ifdef SCHEDULE_PATCH
local_irq_save(flags);
if (hcd->available_host_channels < 1) {
local_irq_restore(flags);
break;
}
hcd->available_host_channels--;
local_irq_restore(flags);
#ifdef DEBUG_HOST_CHANNELS
last_sel_trans_num_nonper_scheduled++;
#endif /* DEBUG_HOST_CHANNELS */
#endif
qh = DWC_LIST_ENTRY(qh_ptr, dwc_otg_qh_t, qh_list_entry);
#ifdef HW2937_WORKAROUND
......@@ -1427,16 +1502,23 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
* non-periodic active schedule.
*/
qh_ptr = DWC_LIST_NEXT(qh_ptr);
#ifdef SCHEDULE_PATCH
local_irq_save(flags);
#endif
DWC_LIST_MOVE_HEAD(&hcd->non_periodic_sched_active,
&qh->qh_list_entry);
#ifdef SCHEDULE_PATCH
local_irq_restore(flags);
#endif
if (ret_val == DWC_OTG_TRANSACTION_NONE) {
ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
} else {
ret_val = DWC_OTG_TRANSACTION_ALL;
}
#ifndef SCHEDULE_PATCH
hcd->non_periodic_channels++;
#endif
#ifdef HW2937_WORKAROUND
} else {
qh_ptr = DWC_LIST_NEXT(qh_ptr);
......@@ -1444,6 +1526,12 @@ dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
#endif
}
#ifdef SCHEDULE_PATCH
#ifdef DEBUG_HOST_CHANNELS
last_sel_trans_num_avail_hc_at_end = hcd->available_host_channels;
#endif /* DEBUG_HOST_CHANNELS */
#endif
return ret_val;
}
/**
......@@ -3244,8 +3332,10 @@ void dwc_otg_hcd_dump_state(dwc_otg_hcd_t * hcd)
}
}
}
#ifndef SCHEDULE_PATCH
DWC_PRINTF(" non_periodic_channels: %d\n", hcd->non_periodic_channels);
DWC_PRINTF(" periodic_channels: %d\n", hcd->periodic_channels);
#endif
DWC_PRINTF(" periodic_usecs: %d\n", hcd->periodic_usecs);
np_tx_status.d32 =
dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts);
......
......@@ -256,7 +256,11 @@ static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
dwc_hc_t *hc = qh->channel;
if (dwc_qh_is_non_per(qh)) {
hcd->non_periodic_channels--;
#ifdef SCHEDULE_PATCH
dwc_assert(0,"Schedule patch incompatible with descriptor DMA\n");
#else
hcd->non_periodic_channels--;
#endif
}
else {
update_frame_list(hcd, qh, 0);
......@@ -338,12 +342,16 @@ void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
if (qh->channel)
release_channel_ddma(hcd, qh);
if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
#ifdef SCHEDULE_PATCH
dwc_assert(0,"Schedule patch incompatible with descriptor DMA\n");
#else
if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
&& !hcd->periodic_channels && hcd->frame_list) {
per_sched_disable(hcd);
frame_list_free(hcd);
}
#endif
}
static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
......
......@@ -849,6 +849,9 @@ static void release_channel(dwc_otg_hcd_t * hcd,
{
dwc_otg_transaction_type_e tr_type;
int free_qtd;
#ifdef SCHEDULE_PATCH
unsigned long flags;
#endif
DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d, xfer_len %d\n",
__func__, hc->hc_num, halt_status, hc->xfer_len);
......@@ -913,6 +916,11 @@ static void release_channel(dwc_otg_hcd_t * hcd,
dwc_otg_hc_cleanup(hcd->core_if, hc);
DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
#ifdef SCHEDULE_PATCH
local_irq_save(flags);
hcd->available_host_channels++;
local_irq_restore(flags);
#else
switch (hc->ep_type) {
case DWC_OTG_EP_TYPE_CONTROL:
case DWC_OTG_EP_TYPE_BULK:
......@@ -927,6 +935,7 @@ static void release_channel(dwc_otg_hcd_t * hcd,
*/
break;
}
#endif
/* Try to queue more transfers now that there's a free channel. */
tr_type = dwc_otg_hcd_select_transactions(hcd);
......
......@@ -182,6 +182,10 @@ void qh_init(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
dev_speed = hcd->fops->speed(hcd, urb->priv);
hcd->fops->hub_info(hcd, urb->priv, &hub_addr, &hub_port);
qh->do_split = 0;
#ifdef SCHEDULE_PATCH
qh->speed = dev_speed;
#endif
if (((dev_speed == USB_SPEED_LOW) ||
(dev_speed == USB_SPEED_FULL)) &&
(hub_addr != 0 && hub_addr != 1)) {
......@@ -315,6 +319,161 @@ dwc_otg_qh_t *dwc_otg_hcd_qh_create(dwc_otg_hcd_t * hcd,
return qh;
}
#ifdef SCHEDULE_PATCH
/**
* Microframe scheduler
* track the total use in hcd->frame_usecs
* keep each qh use in qh->frame_usecs
* when surrendering the qh then donate the time back
*/
const unsigned short max_uframe_usecs[]={ 100, 100, 100, 100, 100, 100, 30, 0 };
/*
* called from dwc_otg_hcd.c:dwc_otg_hcd_init
*/
int init_hcd_usecs(dwc_otg_hcd_t *_hcd)
{
int i;
for (i=0; i<8; i++) {
_hcd->frame_usecs[i] = max_uframe_usecs[i];
}
return 0;
}
static int find_single_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
int i;
unsigned short utime;
int t_left;
int ret;
int done;
ret = -1;
utime = _qh->usecs;
t_left = utime;
i = 0;
done = 0;
while (done == 0) {
/* At the start _hcd->frame_usecs[i] = max_uframe_usecs[i]; */
if (utime <= _hcd->frame_usecs[i]) {
_hcd->frame_usecs[i] -= utime;
_qh->frame_usecs[i] += utime;
t_left -= utime;
ret = i;
done = 1;
return ret;
} else {
i++;
if (i == 8) {
done = 1;
ret = -1;
}
}
}
return ret;
}
/*
* use this for FS apps that can span multiple uframes
*/
static int find_multi_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
int i;
int j;
unsigned short utime;
int t_left;
int ret;
int done;
unsigned short xtime;
ret = -1;
utime = _qh->usecs;
t_left = utime;
i = 0;
done = 0;
loop:
while (done == 0) {
if(_hcd->frame_usecs[i] <= 0) {
i++;
if (i == 8) {
done = 1;
ret = -1;
}
goto loop;
}
/*
* we need n consecutive slots
* so use j as a start slot j plus j+1 must be enough time (for now)
*/
xtime= _hcd->frame_usecs[i];
for (j = i+1 ; j < 8 ; j++ ) {
/*
* if we add this frame remaining time to xtime we may
* be OK, if not we need to test j for a complete frame
*/
if ((xtime+_hcd->frame_usecs[j]) < utime) {
if (_hcd->frame_usecs[j] < max_uframe_usecs[j]) {
j = 8;
ret = -1;
continue;
}
}
if (xtime >= utime) {
ret = i;
j = 8; /* stop loop with a good value ret */
continue;
}
/* add the frame time to x time */
xtime += _hcd->frame_usecs[j];
/* we must have a fully available next frame or break */
if ((xtime < utime)
&& (_hcd->frame_usecs[j] == max_uframe_usecs[j])) {
ret = -1;
j = 8; /* stop loop with a bad value ret */
continue;
}
}
if (ret >= 0) {
t_left = utime;
for (j = i; (t_left>0) && (j < 8); j++ ) {
t_left -= _hcd->frame_usecs[j];
if ( t_left <= 0 ) {
_qh->frame_usecs[j] += _hcd->frame_usecs[j] + t_left;
_hcd->frame_usecs[j]= -t_left;
ret = i;
done = 1;
} else {
_qh->frame_usecs[j] += _hcd->frame_usecs[j];
_hcd->frame_usecs[j] = 0;
}
}
} else {
i++;
if (i == 8) {
done = 1;
ret = -1;
}
}
}
return ret;
}
static int find_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
int ret;
ret = -1;
if (_qh->speed == USB_SPEED_HIGH) {
/* if this is a hs transaction we need a full frame */
ret = find_single_uframe(_hcd, _qh);
} else {
/* if this is a fs transaction we may need a sequence of frames */
ret = find_multi_uframe(_hcd, _qh);
}
return ret;
}
#else
/**
* Checks that a channel is available for a periodic transfer.
*
......@@ -382,6 +541,7 @@ static int check_periodic_bandwidth(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
return status;
}
#endif
/**
* Checks that the max transfer size allowed in a host channel is large enough
......@@ -426,6 +586,26 @@ static int schedule_periodic(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
int status = 0;
#ifdef SCHEDULE_PATCH
int frame;
status = find_uframe(hcd, qh);
frame = -1;
if (status == 0) {
frame = 7;
} else {
if (status > 0 )
frame = status-1;
}
/* Set the new frame up */
if (frame > -1) {
qh->sched_frame &= ~0x7;
qh->sched_frame |= (frame & 7);
}
if (status != -1 )
status = 0;
#else
status = periodic_channel_available(hcd);
if (status) {
DWC_INFO("%s: No host channel available for periodic " "transfer.\n", __func__); //NOTICE
......@@ -433,6 +613,7 @@ static int schedule_periodic(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
}
status = check_periodic_bandwidth(hcd, qh);
#endif
if (status) {
DWC_INFO("%s: Insufficient periodic bandwidth for " "periodic transfer.\n", __func__); //NOTICE
return status;
......@@ -453,8 +634,10 @@ static int schedule_periodic(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
DWC_LIST_INSERT_TAIL(&hcd->periodic_sched_inactive, &qh->qh_list_entry);
}
#ifndef SCHEDULE_PATCH
/* Reserve the periodic channel. */
hcd->periodic_channels++;
#endif
/* Update claimed usecs per (micro)frame. */
hcd->periodic_usecs += qh->usecs;
......@@ -506,11 +689,20 @@ static void deschedule_periodic(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
DWC_LIST_REMOVE_INIT(&qh->qh_list_entry);
#ifndef SCHEDULE_PATCH
/* Release the periodic channel reservation. */
hcd->periodic_channels--;
#endif
/* Update claimed usecs per (micro)frame. */
hcd->periodic_usecs -= qh->usecs;
#ifdef SCHEDULE_PATCH
for (int i = 0; i < 8; i++) {
hcd->frame_usecs[i] += qh->frame_usecs[i];
qh->frame_usecs[i] = 0;
}
#endif
}
/**
......@@ -619,7 +811,11 @@ void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
* Remove from periodic_sched_queued and move to
* appropriate queue.
*/
#ifdef SCHEDULE_PATCH
if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
#else
if (qh->sched_frame == frame_number) {
#endif
DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
&qh->qh_list_entry);
} else {
......
......@@ -339,6 +339,11 @@ typedef struct dwc_otg_qh {
*/
uint8_t *dw_align_buf;
dwc_dma_t dw_align_buf_dma;
#ifdef SCHEDULE_PATCH
uint16_t speed;
uint16_t frame_usecs[8];
#endif
/** Entry for QH in either the periodic or non-periodic schedule. */
dwc_list_link_t qh_list_entry;
......@@ -481,6 +486,20 @@ struct dwc_otg_hcd {
*/
uint16_t periodic_usecs;
#ifdef SCHEDULE_PATCH
/**
* Total bandwidth claimed so far for all periodic transfers
* in a frame.
* This will include a mixture of HS and FS transfers.
* Units are microseconds per (micro)frame.
* We have a budget per frame and have to schedule
* transactions accordingly.
* Watch out for the fact that things are actually scheduled for the
* "next frame".
*/
uint16_t frame_usecs[8];
#endif
/**
* Frame number read from the core at SOF. The value ranges from 0 to
* DWC_HFNUM_MAX_FRNUM.
......@@ -492,6 +511,13 @@ struct dwc_otg_hcd {
* dwc_hc_t items.
*/
struct hc_list free_hc_list;
#ifdef SCHEDULE_PATCH
/**
* Number of available host channels.
*/
int available_host_channels;
#else
/**
* Number of host channels assigned to periodic transfers. Currently
* assuming that there is a dedicated host channel for each periodic
......@@ -504,6 +530,7 @@ struct dwc_otg_hcd {
* Number of host channels assigned to non-periodic transfers.
*/
int non_periodic_channels;
#endif
/**
* Array of pointers to the host channel descriptors. Allows accessing
......
......@@ -76,6 +76,12 @@ typedef uint8_t __u8;
typedef uint32_t dma_addr_t;
typedef uint32_t __le32;
#ifdef SCHEDULE_PATCH
#include "AsmUtils/irqs.h"
#define local_irq_save(flags) flags = ensure_irqs_off()
#define local_irq_restore restore_irqs
#endif
#endif
......
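Usage note (illustrative, not part of the commit): the scheduler hunks above bracket updates to available_host_channels with these macros, which on RISC OS expand to the ensure_irqs_off()/restore_irqs() calls from AsmUtils/irqs.h shown in the hunk above, e.g.:

unsigned long flags;

local_irq_save(flags);           /* flags = ensure_irqs_off(): IRQs now disabled  */
hcd->available_host_channels++;  /* update shared scheduling state atomically     */
local_irq_restore(flags);        /* restore_irqs(flags): previous IRQ state back  */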