Commits (5)
    Give a little extra grace to mechanical drives · 404dada8
    Robert Sprowson authored
    Some mechanical drives don't reply until they have spun up after COMRESET. Give them 50% longer to complete.
    Solves an issue where a Seagate ST250DM000 drive was missing at power on because initialisation was too fast.
    
    Version 0.04. Tagged as 'SATADriver-0_04'
    Adapt the capacity calculation based on reported sector size · efedfdce
    Robert Sprowson authored
    Read the sector size from ATA Identify, so that the capacity reported is correct when the sector size != 512 bytes.
    Increase the COMRESET recovery time a little more to account for mechanical drives with long spin-up times - the timeout is only used if a drive is present, so it is harmless on fast (e.g. SSD) drives, which recover well before the timeout.
    
    Tested on Titanium with an assortment of manufacturer drives, capacities, and sector sizes.
    
    Version 0.05. Tagged as 'SATADriver-0_05'
    Don't punish controllers that report DET_DEV_NE · eaf81c83
    Robert Sprowson authored
    For controllers that rely on the retry loop to get the top-level port to detect, the change in SATADriver 0.05 made the retries take 25s.
    Roll back COMRESET_RECOVERY_TIME to 50cs, as it was in revision 1.1.1.1 of op.c.
    Then pull the subsequent PM rescan code out into a function and wrap it in a retry loop, so the net result is the same as SATADriver 0.05 achieved. With this arrangement, however, it is possible to escape the loop early rather than just idling.
    
    Version 0.06. Tagged as 'SATADriver-0_06'
    Add support for using OS_Memory 19 for DMA prep · ebbdfd26
    Jeffrey Lee authored
    Detail:
      Makefile - Add OSMEM19 option to control whether OS_Memory 0 or OS_Memory 19 is used. Defaults to OS_Memory 0, since OS_Memory 19 is currently only available on the SMP kernel branch.
      c/op, c/osmem0, h/dmaprep - Move OS_Memory 0 related code out into its own file, creating the dmaprep interface to abstract over whether OS_Memory 0 or OS_Memory 19 is in use
      c/osmem19, cmhg/SATADriverHdr - Code for performing DMA prep using OS_Memory 19
      h/globals - Adjust what variables ahciop_t stores, depending on DMA prep approach being used
    Admin:
      Tested on IGEPv5
      OS_Memory 19 version (for use with SMP kernel/module) can be enabled via components file, e.g. '-options OSMEM19=TRUE'
    
    
    Version 0.07. Tagged as 'SATADriver-0_07'
    Add braces for clarity · 131a63c1
    Robert Sprowson authored
    Static analysis highlighted a do/while as a potential error in SDIODriver
      https://www.riscosopen.org/forum/forums/4/topics/9503#posts-72623
    and, by pattern matching, the same code appears in SATADriver.
    Clarify the original intent with some braces.
    Retagged as SATADriver-0_07; binary unchanged.
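As a hypothetical illustration of the pattern the final commit refers to (not the actual SDIODriver or SATADriver code): without braces it is easy to misread how much of the code forms the do/while body, which is the sort of thing the static analyser flagged; adding braces keeps the behaviour identical but makes the intent explicit.

static int ready;
static int retries = 4;
static int check_status(void) { return 1; }  /* stub for whatever is being polled */

static void wait_unbraced(void)
{
    /* Legal C, but the extent of the loop body is easy to misread */
    do
        if (retries-- > 0)
            ready = check_status();
        else
            ready = 1;
    while (!ready);
}

static void wait_braced(void)
{
    /* Same behaviour; the braces make the original intent clear */
    do
    {
        if (retries-- > 0)
            ready = check_status();
        else
            ready = 1;
    }
    while (!ready);
}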
@@ -16,6 +16,18 @@ ROMCDEFINES = -DROM_MODULE
#CFLAGS += -DDEBUG_ENABLED -DDEBUGLIB -DDEBUGLIB_NOBRACKETS
#LIBS += ${DEBUGLIBS} ${NET5LIBS}
# Decide whether to use OS_Memory 0 or OS_Memory 19 for DMA prep
OSMEM19 ?= FALSE
ifeq ($(OSMEM19),TRUE)
CFLAGS += -DOSMEM19
CMHGFLAGS += -DOSMEM19
OBJS += osmem19
CMHGDEPENDS += osmem19
else
OBJS += osmem0
endif
include CModule
expasmc.AHCIDevice: hdr.AHCIDevice h.AHCIDevice
/* (0.03)
/* (0.07)
*
* This file is automatically maintained by srccommit, do not edit manually.
* Last processed by srccommit version: 1.1.
*
*/
#define Module_MajorVersion_CMHG 0.03
#define Module_MajorVersion_CMHG 0.07
#define Module_MinorVersion_CMHG
#define Module_Date_CMHG 24 Mar 2016
#define Module_Date_CMHG 22 Aug 2017
#define Module_MajorVersion "0.03"
#define Module_Version 3
#define Module_MajorVersion "0.07"
#define Module_Version 7
#define Module_MinorVersion ""
#define Module_Date "24 Mar 2016"
#define Module_Date "22 Aug 2017"
#define Module_ApplicationDate "24-Mar-16"
#define Module_ApplicationDate "22-Aug-17"
#define Module_ComponentName "SATADriver"
#define Module_ComponentPath "cddl/RiscOS/Sources/HWSupport/ATA/SATADriver"
#define Module_FullVersion "0.03"
#define Module_HelpVersion "0.03 (24 Mar 2016)"
#define Module_LibraryVersionInfo "0:3"
#define Module_FullVersion "0.07"
#define Module_HelpVersion "0.07 (22 Aug 2017)"
#define Module_LibraryVersionInfo "0:7"
@@ -38,8 +38,6 @@
#include "message.h"
#include "op.h"
#define SECTOR_SIZE (512) // for now
#define RETRIES (4)
#define OFFSET_MODEL_NUMBER (27)
@@ -47,11 +45,20 @@
#define OFFSET_MAX_LBA (60)
#define OFFSET_COMMAND_SET (83)
#define OFFSET_MAX_LBA48 (100)
#define OFFSET_PHY_LOG_SSZ (106)
#define OFFSET_SSZ_LO (117)
#define OFFSET_SSZ_HI (118)
#define COMMAND_SET_MBZ (1u<<15)
#define COMMAND_SET_MBO (1u<<14)
#define COMMAND_SET_LBA48 (1u<<10)
#define PHY_LOG_SSZ_MBZ (1u<<15)
#define PHY_LOG_SSZ_MBO (1u<<14)
#define PHY_LOG_SSZ_GT1_LOG_PER_PHY (1u<<13)
#define PHY_LOG_SSZ_SIZE_NOT_512 (1u<<12) /* In which case see 117-118 */
#define PHY_LOG_SSZ_COUNT_SHIFT 0
#define PHY_LOG_SSZ_COUNT_MASK (0xF<<PHY_LOG_SSZ_COUNT_SHIFT)
static void extract_model_number(const uint16_t *restrict identify_block, char *restrict buffer)
{
@@ -128,10 +135,18 @@ _kernel_oserror *command_satadevices(void)
bool lba48 = (identify_block[OFFSET_COMMAND_SET] &
(COMMAND_SET_MBZ | COMMAND_SET_MBO | COMMAND_SET_LBA48)) ==
(COMMAND_SET_MBO | COMMAND_SET_LBA48);
uint64_t capacity = lba48 ?
SECTOR_SIZE * *(uint64_t *)&identify_block[OFFSET_MAX_LBA48] :
SECTOR_SIZE * (uint64_t) *(uint32_t *)&identify_block[OFFSET_MAX_LBA];
char capacity_string[12] = "";
size_t sector_size = 256;
if ((identify_block[OFFSET_PHY_LOG_SSZ] &
(PHY_LOG_SSZ_MBZ | PHY_LOG_SSZ_MBO | PHY_LOG_SSZ_SIZE_NOT_512)) ==
(PHY_LOG_SSZ_MBO | PHY_LOG_SSZ_SIZE_NOT_512))
{
sector_size = identify_block[OFFSET_SSZ_LO] | (identify_block[OFFSET_SSZ_HI] << 16);
}
sector_size = sector_size << 1; /* Expressed in 16b words */
uint64_t capacity = lba48 ?
sector_size * *(uint64_t *)&identify_block[OFFSET_MAX_LBA48] :
sector_size * (uint64_t) *(uint32_t *)&identify_block[OFFSET_MAX_LBA];
_swix(OS_ConvertVariform, _INR(0,4), &capacity, capacity_string, sizeof capacity_string, 8, 8);
printf("%s %s %s\n", message_lookup_direct("SATATypeATA"), name, capacity_string);
}
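As a worked example of the calculation above (hypothetical figures): a 4Kn drive sets bits 14 and 12 of Identify word 106 and reports 2048 in words 117-118, i.e. 2048 16-bit words per logical sector, which the shift above turns into 4096 bytes. With an LBA48 maximum of 268,435,456 sectors the capacity becomes 4096 × 268,435,456 bytes = 1 TiB, where the previous fixed 512-byte assumption would have printed 128 GiB.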
This diff is collapsed.
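The retry arrangement described in the SATADriver 0.06 commit lives in op.c, which is not expanded here; a rough sketch of the idea, with hypothetical names and stubbed helpers rather than the actual code:

#include <stdbool.h>

#define COMRESET_RECOVERY_TIME 50  /* centiseconds, per the 0.06 commit */
#define RESCAN_RETRIES         10  /* illustrative retry count */

static bool detected;
static bool port_detected(void)  { return detected; }  /* stub for the real detection check */
static void rescan_ports(void)   { detected = true; }  /* stub for the extracted PM rescan function */
static void wait_cs(int cs)      { (void) cs; }        /* stub delay in centiseconds */

static void detect_with_retries(void)
{
    for (int attempt = 0; attempt < RESCAN_RETRIES; attempt++)
    {
        wait_cs(COMRESET_RECOVERY_TIME);
        rescan_ports();
        if (port_detected())
        {
            break;  /* a present drive escapes the loop early instead of idling for the full period */
        }
    }
}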
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "Licence").
* You may not use this file except in compliance with the Licence.
*
* You can obtain a copy of the licence at
* cddl/RiscOS/Sources/HWSupport/ATA/SATADriver/LICENCE.
* See the Licence for the specific language governing permissions
* and limitations under the Licence.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the Licence file. If applicable, add the
* following below this CDDL HEADER, with the fields enclosed by
* brackets "[]" replaced with your own identifying information:
* Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Ben Avison. All rights reserved.
* Portions Copyright 2017 Jeffrey Lee
* Use is subject to license terms.
*/
#include <string.h>
#include "kernel.h"
#include "swis.h"
#include "Global/NewErrors.h"
#include "globals.h"
#include "dmaprep.h"
#include "message.h"
#define PAGE_SIZE 4096
/* Circular dependencies requires pre-declaration of the following: */
static _kernel_oserror *for_scatter_list(ahciop_t *op, size_t limit, _kernel_oserror *(*callback)(ahciop_t *, size_t, size_t *, void *), void *context);
static _kernel_oserror *mark_uncacheable(void *log, paddr_t *phys)
{
uint32_t buffer[3];
buffer[1] = (uintptr_t) log;
_kernel_oserror *e = _swix(OS_Memory, _INR(0,2), (1u<<9) | (1u<<13) | (2u<<14), buffer, 1);
*phys = (paddr_t) buffer[2];
return e;
}
static void mark_cacheable(void *log)
{
uint32_t buffer[3];
buffer[1] = (uintptr_t) log;
_swix(OS_Memory, _INR(0,2), (1u<<9) | (3u<<14), buffer, 1);
}
static _kernel_oserror *postprocess_bounce(ahciop_t *restrict op, size_t byte_index, size_t *restrict advance_by, void *context)
{
/* A bounced operation has completed */
/* Assume we're only called in the read case - nothing to do otherwise */
uint8_t *bounce_buffer = context;
memcpy(op->list->address, bounce_buffer + byte_index, *advance_by);
return NULL;
}
static _kernel_oserror *preprocess_bounce(ahciop_t *restrict op, size_t byte_index, size_t *restrict advance_by, void *context)
{
/* A bounced operation has completed */
/* Assume we're only called in the write case - nothing to do otherwise */
uint8_t *bounce_buffer = context;
memcpy(bounce_buffer + byte_index, op->list->address, *advance_by);
return NULL;
}
static _kernel_oserror *postprocess_direct(ahciop_t *restrict op, size_t byte_index, size_t *restrict advance_by, void *context)
{
/* We've finished a direct operation (or it never started) so restore page state */
(void) byte_index;
(void) context;
size_t bytes_to_page_boundary = PAGE_SIZE - (((uintptr_t) op->list->address + op->scatter_offset) & (PAGE_SIZE - 1));
*advance_by = MIN(*advance_by, bytes_to_page_boundary);
mark_cacheable(op->list->address + op->scatter_offset);
return NULL;
}
typedef struct
{
ahci_prdt_entry_t *prdt_entry;
ataop_block_t *b;
scatter_t *list_start;
}
preprocess_direct_context_t;
/** Magic error pointer used to halt iteration through a scatter list once we've
* determined that we have to use a bounce buffer */
#define ESCAPE_FROM_SCATTER_LIST_ITERATOR ((_kernel_oserror *) 1)
static _kernel_oserror *preprocess_direct(ahciop_t *restrict op, size_t byte_index, size_t *restrict advance_by, void *context)
{
_kernel_oserror *e = NULL;
ahci_prdt_entry_t *p;
size_t bytes_to_page_boundary;
paddr_t paddr;
if (((uintptr_t) op->list->address & 1) != 0 ||
(*advance_by & 1) != 0)
{
/* DMA system can only cope with 16-bit transfers of an even number of bytes */
goto failed;
}
/* Mark pages as temporarily uncached and build the PRDT at the same time */
p = ((preprocess_direct_context_t *)context)->prdt_entry;
bytes_to_page_boundary = PAGE_SIZE - (((uintptr_t) op->list->address + op->scatter_offset) & (PAGE_SIZE - 1));
*advance_by = MIN(*advance_by, bytes_to_page_boundary);
if ((e = mark_uncacheable(op->list->address + op->scatter_offset, &paddr)) != NULL)
{
/* Considering FileSwitch has already validated the addresses, the most
* likely reason for OS_Memory 0 to have failed is that the memory block
* is ROM or IO (e.g. screen buffers on some platforms). We have to handle
* these using the bounce buffer, just as for unaligned RAM transfers.
* Some earlier scatter list entries may have been OK, so we'll have to
* restore their cacheability before returning even for error-free case */
goto failed;
}
/* Look to see if we can merge this into the previous PRDT entry */
if (p != op->prdt &&
paddr == (paddr_t) p[-1].dba + p[-1].dbc + 1 &&
*advance_by < AHCI_DBC_MAX - (p[-1].dbc + 1))
{
p[-1].dbc += *advance_by;
return NULL;
}
/* Check for PRDT overflow */
if (p == op->prdt + AHCI_MAX_PRDT_ENTRIES)
{
goto failed;
}
/* Extend PRDT by one entry */
p->dba = paddr;
p->dbc = *advance_by - 1;
((preprocess_direct_context_t *)context)->prdt_entry = ++p;
return NULL;
failed:
/* Unless the transfer is already for 4K or less, we'll need the caller to
* truncate the transfer to the next-smallest multiple of 4K (to achieve a
* wholly aligned transfer to/from RAM) or to 4K (to achieve an unaligned/ROM/IO
* transfer that uses the bounce buffer only once). It has to be the caller's
* responsibility to do this because we don't understand the contents of the
* parameter block at this level. */
if (op->total_length <= BOUNCE_BUFFER_SIZE)
{
op->use_bounce_buffer = true;
e = ESCAPE_FROM_SCATTER_LIST_ITERATOR;
}
else
{
ataop_block_t *b = ((preprocess_direct_context_t *)context)->b;
b->data_len = byte_index &~ (BOUNCE_BUFFER_SIZE - 1);
if (b->data_len == 0)
b->data_len = BOUNCE_BUFFER_SIZE;
e = MESSAGE_ERRORLOOKUP(true, TooComplex, 0);
}
/* Need to undo any cacheability changes up to this point */
op->list = ((preprocess_direct_context_t *)context)->list_start;
for_scatter_list(op, byte_index, postprocess_direct, NULL);
return e;
}
static void advance_scatter_pointer(ahciop_t *op)
{
do
{
if (op->list->length == 0 && (uintptr_t) op->list->address >= SCATTER_THRESHOLD)
op->list = (scatter_t *) ((uintptr_t) op->list + (uintptr_t) op->list->address);
else
op->list++;
op->scatter_offset = 0;
}
while (op->scatter_offset >= op->list->length);
}
static _kernel_oserror *for_scatter_list(ahciop_t *op, size_t limit, _kernel_oserror *(*callback)(ahciop_t *, size_t, size_t *, void *), void *context)
{
_kernel_oserror *e = NULL;
size_t byte_index = 0;
size_t advance_by;
scatter_t *list_start = op->list; // so we can restore it on exit
op->scatter_offset = 0;
while (byte_index < limit)
{
if (op->scatter_offset >= op->list->length)
advance_scatter_pointer(op);
advance_by = MIN(op->list->length - op->scatter_offset, limit - byte_index);
if ((e = callback(op, byte_index, &advance_by, context)) != NULL)
{
op->list = list_start;
return e;
}
op->scatter_offset += advance_by;
byte_index += advance_by;
}
op->list = list_start;
return NULL;
}
dmaprep_result_t dmaprep_prep(ahciop_t *op, ataop_block_t *b, scatter_t *scat, ahciport_t *port)
{
dmaprep_result_t res = {0};
op->use_bounce_buffer = false; // until we determine otherwise
op->list = scat;
if (op->total_length > 0)
{
/* Check to see whether the transfer can be achieved or if it needs to be
* truncated. If it can be achieved, determine whether we need to use the
* bounce buffer or not. If not using the bounce buffer, mark pages as
* temporarily uncacheable and build the PRDT. */
preprocess_direct_context_t context = { op->prdt, b, op->list };
res.e = for_scatter_list(op, op->total_length, preprocess_direct, &context);
/* By now we know for sure whether we're using the bounce buffer or not */
if (op->use_bounce_buffer) // implies that the transfer doesn't need truncating
{
op->prdt[0].dba = port->bounce_buffer_phy;
op->prdt[0].dbc = (op->total_length - 1) | AHCI_DBC_I;
res.e = NULL;
res.prdt_len = 1;
}
else if (res.e == NULL)
{
context.prdt_entry[-1].dbc |= AHCI_DBC_I;
res.prdt_len = context.prdt_entry - op->prdt;
}
}
return res;
}
void dmaprep_on_queue(ahciop_t *op, ahciport_t *port)
{
if (op->use_bounce_buffer && (op->description_info & COMMANDHDR_W) != 0)
for_scatter_list(op, op->total_length, preprocess_bounce, port->bounce_buffer); /* can't error */
memcpy(&port->command_table->prdt, &op->prdt,
sizeof (ahci_prdt_entry_t) * ((op->description_info & COMMANDHDR_PRDTL_MASK) >> COMMANDHDR_PRDTL_SHIFT));
}
void dmaprep_on_abort(ahciop_t *op)
{
if (!op->use_bounce_buffer)
for_scatter_list(op, op->total_length, postprocess_direct, NULL);
}
void dmaprep_on_complete(ahciop_t *op, ahciport_t *port)
{
if (op->use_bounce_buffer && (op->description_info & COMMANDHDR_W) == 0)
for_scatter_list(op, op->length_done, postprocess_bounce, port->bounce_buffer); /* can't error */
else if (!op->use_bounce_buffer)
for_scatter_list(op, op->length_done + op->total_length, postprocess_direct, NULL);
}
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "Licence").
* You may not use this file except in compliance with the Licence.
*
* You can obtain a copy of the licence at
* cddl/RiscOS/Sources/HWSupport/ATA/SATADriver/LICENCE.
* See the Licence for the specific language governing permissions
* and limitations under the Licence.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the Licence file. If applicable, add the
* following below this CDDL HEADER, with the fields enclosed by
* brackets "[]" replaced with your own identifying information:
* Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Ben Avison. All rights reserved.
* Portions Copyright 2017 Jeffrey Lee
* Use is subject to license terms.
*/
#include <string.h>
#include "kernel.h"
#include "swis.h"
#include "Global/OSMem.h"
#include "Global/NewErrors.h"
#include "Interface/ATA.h"
#include "globals.h"
#include "dmaprep.h"
#include "message.h"
#include "SATADriverHdr.h"
typedef struct
{
size_t length; /**< Transfer length to use */
size_t offset; /**< How much of xfer has been consumed (out of length) */
scatter_t *scat; /**< Current scatter list block */
size_t scatter_offset; /**< Offset into current scatter list block */
}
scatter_walker_t;
typedef struct
{
ahciop_t *op; /**< Op being prepared */
int prdt_index; /**< Current PRDT index we're writing to, -1 on init */
scatter_walker_t walker; /**< Current progress through input scatter list */
size_t bounce_offset; /**< Current offset into bounce buffer */
paddr_t bounce_addr; /**< Current bounce buffer phys address */
bool bouncing; /**< True if current PRDT entry is in bounce buffer */
size_t out_len; /**< Output length; for when we need the caller to truncate */
}
mem19_ctx_t;
/* Advance to first valid scatter list entry */
static scatter_t *scatter_init(scatter_t *scat)
{
while (scat->length == 0 && (uintptr_t) scat->address >= SCATTER_THRESHOLD)
{
scat = (scatter_t *) ((uintptr_t) scat + (uintptr_t) scat->address);
}
return scat;
}
/* Return current scatter list chunk */
static scatter_t scatter_current_chunk(scatter_walker_t *walker)
{
scatter_t res = *walker->scat;
size_t remain = walker->length - walker->offset;
res.address += walker->scatter_offset;
res.length -= walker->scatter_offset;
if (res.length > remain)
{
res.length = remain;
}
return res;
}
/* Consume bytes from scatter list */
static void scatter_consume(scatter_walker_t *walker, size_t bytes)
{
walker->offset += bytes;
if (walker->offset == walker->length)
{
/* Skip scatter list update if we've reached the end */
return;
}
bytes += walker->scatter_offset;
scatter_t *scat = walker->scat;
while (bytes >= scat->length)
{
bytes -= scat->length;
walker->scat = scatter_init(walker->scat+1);
}
walker->scatter_offset = bytes;
}
/* Return details of the next block of memory in the DMA transfer
*
* In:
* R9 -> scatter walker
* Out:
* R0 = log addr
* R1 = length
* R2 = flags
*/
_kernel_oserror *mem19_in_handler(_kernel_swi_regs *r, void *pw)
{
scatter_walker_t *walker = (scatter_walker_t *) r->r[9];
IGNORE(pw);
/* Work out how much of current chunk we can return */
scatter_t chunk = scatter_current_chunk(walker);
r->r[2] = 0;
if (chunk.length == 0)
{
r->r[1] = 0;
return NULL;
}
/* If the start addr or current offset aren't aligned, we need to use a bounce buffer */
if ((((uintptr_t) chunk.address) & 1) || (walker->offset & 1))
{
/* If they're both offset we can get by with only sending one byte via the bounce buffer */
if ((((uintptr_t) chunk.address) & 1) && (walker->offset & 1))
{
chunk.length = 1;
}
r->r[2] = DMAPrep_UseBounceBuffer;
}
/* If the end isn't aligned, force last byte via bounce buffer */
else if ((((uintptr_t) chunk.address)+chunk.length) & 1)
{
if (chunk.length == 1)
{
r->r[2] = DMAPrep_UseBounceBuffer;
}
else
{
chunk.length--;
}
}
/* Consume bytes */
scatter_consume(walker, chunk.length);
r->r[0] = (int) chunk.address;
r->r[1] = chunk.length;
return NULL;
}
/* Receive address translation + bounce buffer usage information for a block
* of memory
*
* In:
* R0 = log addr
* R1 = phys addr
* R2 = length
* R3 = flags
* R9 -> scatter walker
*/
_kernel_oserror *mem19_out_handler(_kernel_swi_regs *r, void *pw)
{
paddr_t phys = r->r[1];
size_t length = r->r[2];
bool bounce = r->r[3] & DMAPrep_UseBounceBuffer;
mem19_ctx_t *ctx = (mem19_ctx_t *) r->r[9];
ahci_prdt_entry_t *prdt = &ctx->op->prdt[ctx->prdt_index];
IGNORE(pw);
/* Claim some space from the bounce buffer if appropriate */
if (bounce)
{
phys = ctx->bounce_addr;
ctx->bounce_addr += length;
ctx->bounce_offset += length;
if (ctx->bounce_offset > BOUNCE_BUFFER_SIZE)
{
ctx->out_len += length - (ctx->bounce_offset - BOUNCE_BUFFER_SIZE);
return MESSAGE_ERRORLOOKUP(true, TooComplex, 0);
}
}
while (length > 0)
{
/* Start a new PRDT entry if: */
if ((ctx->prdt_index == -1) /* We don't have any entries yet */
|| (bounce != ctx->bouncing) /* We're switching from bounce to non-bounce */
|| (prdt->dbc+1 == AHCI_DBC_MAX) /* Current entry is full */
|| (!bounce && (prdt->dba + prdt->dbc + 1 != phys))) /* Non-contiguous non-bounce */
{
if (ctx->prdt_index+1 == AHCI_MAX_PRDT_ENTRIES)
{
return MESSAGE_ERRORLOOKUP(true, TooComplex, 0);
}
ctx->prdt_index++;
prdt++;
prdt->dba = phys;
prdt->dbc = (uint32_t) -1;
ctx->bouncing = bounce;
}
/* Don't overflow max size */
size_t amt = length;
if (amt + prdt->dbc >= AHCI_DBC_MAX)
{
amt = AHCI_DBC_MAX - (prdt->dbc + 1);
}
prdt->dbc += amt;
length -= amt;
phys += amt;
ctx->out_len += amt;
}
return NULL;
}
dmaprep_result_t dmaprep_prep(ahciop_t *op, ataop_block_t *b, scatter_t *scat, ahciport_t *port)
{
dmaprep_result_t res = {0};
if (!op->total_length)
{
return res;
}
bool dmawrite = !(b->r0.bits.dir == ATAOp_TransWrite >> ATAOp_TransShift);
scat = scatter_init(scat);
op->force_bounce = false;
op->list = scat;
mem19_ctx_t ctx = (mem19_ctx_t) {
.op = op,
.prdt_index = -1,
.bounce_offset = 0,
.bounce_addr = port->bounce_buffer_phy,
};
/* N.B. apparent compiler bug prevents this from being placed directly in the mem19_ctx_t declaration above
https://www.riscosopen.org/tracker/tickets/440
*/
ctx.walker = (scatter_walker_t) {
.length = op->total_length,
.scat = scat,
};
/* If the first scatter entry is a long unaligned section, reject immediately (minor speed optimisation) */
scatter_t first = scatter_current_chunk(&ctx.walker);
if ((((uintptr_t) first.address) & 1) && (first.length > BOUNCE_BUFFER_SIZE)) {
res.e = MESSAGE_ERRORLOOKUP(true, TooComplex, 0);
ctx.out_len = BOUNCE_BUFFER_SIZE;
}
else
{
res.e = _swix(OS_Memory,_INR(0,5),
OSMemReason_DMAPrep | (dmawrite ? DMAPrep_Write : 0),
g_module_pw,
&ctx.walker,
mem19_in_veneer,
&ctx,
mem19_out_veneer);
}
if (res.e != NULL)
{
/* If we got an "Address not recognised" error then we're probably being asked to transfer to/from an area which the kernel doesn't consider to be regular RAM, e.g. ROM or an IO region. */
if (res.e->errnum == ErrorNumber_BadAddress)
{
/* Since the bounce buffer has a fixed size, we just need a single PRDT to contain the transfer */
if (op->total_length > BOUNCE_BUFFER_SIZE)
{
res.e = MESSAGE_ERRORLOOKUP(true, TooComplex, 0);
ctx.out_len = BOUNCE_BUFFER_SIZE;
}
else
{
op->force_bounce = op->use_bounce_buffer = true;
op->prdt[0].dba = port->bounce_buffer_phy;
op->prdt[0].dbc = (op->total_length - 1) | AHCI_DBC_I;
res.e = NULL;
res.prdt_len = 1;
return res;
}
}
if (res.e->errnum == ErrorNumber_TooComplex)
{
/* Suggest a transfer length which will work */
b->data_len = ctx.out_len;
}
return res;
}
op->use_bounce_buffer = (ctx.bounce_offset != 0);
op->prdt[ctx.prdt_index].dbc |= AHCI_DBC_I;
res.prdt_len = ctx.prdt_index+1;
return res;
}
void dmaprep_on_queue(ahciop_t *op, ahciport_t *port)
{
uint32_t prdt_count = (op->description_info & COMMANDHDR_PRDTL_MASK) >> COMMANDHDR_PRDTL_SHIFT;
if (!prdt_count)
{
return;
}
if (op->use_bounce_buffer && (op->description_info & COMMANDHDR_W) != 0)
{
/* Populate the bounce buffer with data */
scatter_walker_t walker = (scatter_walker_t) {
.length = op->total_length,
.scat = op->list,
};
for (uint32_t i = 0; i < prdt_count; i++)
{
paddr_t phys = (paddr_t) (op->prdt[i].dba);
size_t len = (op->prdt[i].dbc & ~AHCI_DBC_I)+1;
/* Rely on the fact that the bounce buffer is physically contiguous to determine whether a chunk is located in the buffer or not */
paddr_t bounce_offset = phys - port->bounce_buffer_phy;
if (bounce_offset < BOUNCE_BUFFER_SIZE)
{
while (len)
{
scatter_t chunk = scatter_current_chunk(&walker);
size_t amt = len;
if (amt > chunk.length)
{
amt = chunk.length;
}
memcpy(port->bounce_buffer + bounce_offset, chunk.address, amt);
len -= amt;
bounce_offset += amt;
scatter_consume(&walker, amt);
}
}
else
{
scatter_consume(&walker, len);
}
}
}
memcpy(&port->command_table->prdt, &op->prdt, sizeof (ahci_prdt_entry_t) * prdt_count);
}
static void complete_internal(ahciop_t *op, uint8_t *bounce_buffer, paddr_t bounce_buffer_phy)
{
uint32_t prdt_count = (op->description_info & COMMANDHDR_PRDTL_MASK) >> COMMANDHDR_PRDTL_SHIFT;
if (!prdt_count)
{
return;
}
/* Do post-op cache maintenance */
scatter_walker_t walker;
if (!op->force_bounce)
{
walker = (scatter_walker_t) {
.length = op->length_done + op->total_length,
.scat = op->list,
};
_swix(OS_Memory,_INR(0,3),
OSMemReason_DMAPrep | ((op->description_info & COMMANDHDR_W) ? 0 : DMAPrep_Write) | DMAPrep_End,
g_module_pw,
&walker,
mem19_in_veneer);
}
/* Copy any data out of the bounce buffer */
if (!bounce_buffer || !op->use_bounce_buffer || (op->description_info & COMMANDHDR_W))
{
return;
}
walker = (scatter_walker_t) {
.length = op->length_done + op->total_length,
.scat = op->list,
};
for (uint32_t i = 0; i < prdt_count; i++)
{
paddr_t phys = (paddr_t) (op->prdt[i].dba);
size_t len = (op->prdt[i].dbc & ~AHCI_DBC_I)+1;
/* Rely on the fact that the bounce buffer is physically contiguous to determine whether a chunk is located in the buffer or not */
paddr_t bounce_offset = phys - bounce_buffer_phy;
if (bounce_offset < BOUNCE_BUFFER_SIZE)
{
while (len)
{
scatter_t chunk = scatter_current_chunk(&walker);
size_t amt = len;
if (amt > chunk.length)
{
amt = chunk.length;
}
memcpy(chunk.address, bounce_buffer + bounce_offset, amt);
len -= amt;
bounce_offset += amt;
scatter_consume(&walker, amt);
}
}
else
{
scatter_consume(&walker, len);
}
}
}
void dmaprep_on_abort(ahciop_t *op)
{
complete_internal(op, NULL, 0);
}
void dmaprep_on_complete(ahciop_t *op, ahciport_t *port)
{
complete_internal(op, (uint8_t *) port->bounce_buffer, port->bounce_buffer_phy);
}
@@ -19,6 +19,7 @@
; CDDL HEADER END
;
; Copyright 2015 Ben Avison. All rights reserved.
; Portions Copyright 2017 Jeffrey Lee
; Use is subject to license terms.
;
@@ -48,6 +49,12 @@ international-help-file:"Resources:$.Resources.SATADriver.Messages"
date-string: Module_Date_CMHG
#ifdef OSMEM19
generic-veneers: module_irq_veneer/module_irq_handler,
mem19_in_veneer/mem19_in_handler,
mem19_out_veneer/mem19_out_handler
#else
generic-veneers: module_irq_veneer/module_irq_handler
#endif
vector-handlers: module_tickerv_veneer/module_tickerv_handler
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "Licence").
* You may not use this file except in compliance with the Licence.
*
* You can obtain a copy of the licence at
* cddl/RiscOS/Sources/HWSupport/ATA/SATADriver/LICENCE.
* See the Licence for the specific language governing permissions
* and limitations under the Licence.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the Licence file. If applicable, add the
* following below this CDDL HEADER, with the fields enclosed by
* brackets "[]" replaced with your own identifying information:
* Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Ben Avison. All rights reserved.
* Portions Copyright 2017 Jeffrey Lee
* Use is subject to license terms.
*/
#ifndef DMAPREP_H
#define DMAPREP_H
#include "op.h"
typedef struct
{
uint32_t prdt_len; /**< Number of entries in PRDT */
_kernel_oserror *e; /**< Error that caused the prep operation to fail (if any) */
}
dmaprep_result_t;
/* Prepare an op
* op->total_length will be length of transfer, guaranteed at least halfword multiple
* op->length_done will be zero
* scat is the scatter list to use
* b->r0.bits.dir gives transfer direction
* port contains the bounce buffer that can be used for this op
If the transfer is too long, set b->data_len to the desired length and return a TooComplex error.
*/
extern dmaprep_result_t dmaprep_prep(ahciop_t *op, ataop_block_t *b, scatter_t *scat, ahciport_t *port);
/* Queue a prepared op
* If this is a DMA read, copy any necessary data to the shared bounce buffer
* Copy the PRDT to the port's command table
*/
extern void dmaprep_on_queue(ahciop_t *op, ahciport_t *port);
/* Abort an op
* op->total_length will be the length of transfer
* op->length_done will be zero
*/
extern void dmaprep_on_abort(ahciop_t *op);
/* Complete an op
* op->length_done represents completed length of transfer
* op->total_length represents remaining length of transfer
*/
extern void dmaprep_on_complete(ahciop_t *op, ahciport_t *port);
#endif
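The call sites for this interface are in op.c, which is not expanded in this diff; a minimal sketch of how a transfer might drive it (the surrounding control flow and the handling of description_info are assumptions, not the actual op.c code):

#include "kernel.h"
#include "globals.h"
#include "dmaprep.h"

static _kernel_oserror *start_transfer(ahciop_t *op, ataop_block_t *b,
                                       scatter_t *scat, ahciport_t *port)
{
    dmaprep_result_t res = dmaprep_prep(op, b, scat, port);
    if (res.e != NULL)
    {
        /* For a TooComplex error, b->data_len has been reduced to a length
         * that will work, so the caller can retry with a shorter transfer */
        return res.e;
    }
    /* Record the PRDT length in the command header word (assumed to be the
     * caller's job), then queue: dmaprep_on_queue copies the PRDT into the
     * port's command table and, for writes, fills the bounce buffer */
    op->description_info |= res.prdt_len << COMMANDHDR_PRDTL_SHIFT;
    dmaprep_on_queue(op, port);
    /* ... issue the command to the hardware ... */
    /* On completion, restore cacheability and copy read data back out of the
     * bounce buffer; on failure, call dmaprep_on_abort(op) instead */
    dmaprep_on_complete(op, port);
    return NULL;
}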
@@ -20,6 +20,7 @@
*/
/*
* Copyright 2015 Ben Avison. All rights reserved.
* Portions Copyright 2017 Jeffrey Lee
* Use is subject to license terms.
*/
@@ -96,6 +97,9 @@
*/
#define MAX_RECEIVED_FIS_SLOTS 1
/** FileCore scatter list threshold */
#define SCATTER_THRESHOLD (0xFFFF0000u)
/* Types */
/** Physical address as defined by RISC OS APIs. */
@@ -162,15 +166,12 @@ enum op_type
typedef struct
{
bool nodrdy; /**< don't wait for DRDY */
bool use_bounce_buffer; /**< go via bounce buffer because AHCI isn't flexible enough */
uint32_t description_info; /**< value to go in first word of command header - also used to store whether it's a read or write operation */
enum op_type op_type; /**< remember this so we know how to update the parameter block afterwards */
bool atapi_errors; /**< interpret task file error register with ATAPI semantics */
volatile uint32_t state; /**< actually holds an enum op_state, forced to uint32 to allow atomic operations */
int32_t timeout; /**< monotonic time at which current state times out */
void *response; /**< pointer to parameter/response buffer */
size_t scatter_offset; /**< current byte index into current block on scatter list */
scatter_t *list; /**< current block in scatter list for data transfer */
scatter_t surrogate; /**< surrogate scatter list when caller didn't provide one */
size_t total_length; /**< length of data transfer remaining, bytes */
size_t length_done; /**< length of data transfer done, bytes */
@@ -181,7 +182,17 @@ typedef struct
uint32_t slot; /**< corresponding slot in command list, or -1 if not yet assigned */
register_fis_t cfis; /**< command FIS */
uint8_t acmd[16]; /**< ATAPI Command */
#ifndef OSMEM19
size_t scatter_offset; /**< current byte index into current block on scatter list */
scatter_t *list; /**< current block in scatter list for data transfer */
bool use_bounce_buffer; /**< go via bounce buffer because AHCI isn't flexible enough */
ahci_prdt_entry_t prdt[AHCI_MAX_PRDT_ENTRIES]; /**< copy of PRDT in cacheable RAM - note once background transfers are implemented, probably want to be a bit smarter about allocating this since it's only needed until it's copied over to uncached memory */
#else
scatter_t *list; /**< original scatter list */
bool use_bounce_buffer; /**< true if bounce buffer has been used */
bool force_bounce; /**< true if we're forcing the entire transfer via the bounce buffer (OS_Memory 19 not used) */
ahci_prdt_entry_t prdt[AHCI_MAX_PRDT_ENTRIES]; /**< copy of PRDT in cacheable RAM - note once background transfers are implemented, probably want to be a bit smarter about allocating this since it's only needed until it's copied over to uncached memory */
#endif
}
ahciop_t;