re-add examples

Sergio R. Caprile 2023-10-24 17:35:41 -03:00
parent 9196aac259
commit 5f1f3d629f
380 changed files with 111 additions and 161446 deletions

File diff suppressed because one or more lines are too long


@@ -1,353 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include "FreeRTOS.h"
#include "task.h"
#include "croutine.h"
/* Remove the whole file if co-routines are not being used. */
#if( configUSE_CO_ROUTINES != 0 )
/*
* Some kernel aware debuggers require data to be viewed to be global, rather
* than file scope.
*/
#ifdef portREMOVE_STATIC_QUALIFIER
#define static
#endif
/* Lists for ready and blocked co-routines. --------------------*/
static List_t pxReadyCoRoutineLists[ configMAX_CO_ROUTINE_PRIORITIES ]; /*< Prioritised ready co-routines. */
static List_t xDelayedCoRoutineList1; /*< Delayed co-routines. */
static List_t xDelayedCoRoutineList2; /*< Delayed co-routines (two lists are used - one for delays that have overflowed the current tick count). */
static List_t * pxDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used. */
static List_t * pxOverflowDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used to hold co-routines that have overflowed the current tick count. */
static List_t xPendingReadyCoRoutineList; /*< Holds co-routines that have been readied by an external event. They cannot be added directly to the ready lists as the ready lists cannot be accessed by interrupts. */
/* Other file private variables. --------------------------------*/
CRCB_t * pxCurrentCoRoutine = NULL;
static UBaseType_t uxTopCoRoutineReadyPriority = 0;
static TickType_t xCoRoutineTickCount = 0, xLastTickCount = 0, xPassedTicks = 0;
/* The initial state of the co-routine when it is created. */
#define corINITIAL_STATE ( 0 )
/*
* Place the co-routine represented by pxCRCB into the appropriate ready queue
* for the priority. It is inserted at the end of the list.
*
* This macro accesses the co-routine ready lists and therefore must not be
* used from within an ISR.
*/
#define prvAddCoRoutineToReadyQueue( pxCRCB ) \
{ \
if( pxCRCB->uxPriority > uxTopCoRoutineReadyPriority ) \
{ \
uxTopCoRoutineReadyPriority = pxCRCB->uxPriority; \
} \
vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ pxCRCB->uxPriority ] ), &( pxCRCB->xGenericListItem ) ); \
}
/*
* Utility to ready all the lists used by the scheduler. This is called
* automatically upon the creation of the first co-routine.
*/
static void prvInitialiseCoRoutineLists( void );
/*
* Co-routines that are readied by an interrupt cannot be placed directly into
* the ready lists (there is no mutual exclusion). Instead they are placed in
* the pending ready list in order that they can later be moved to the ready
* list by the co-routine scheduler.
*/
static void prvCheckPendingReadyList( void );
/*
* Function that looks at the list of co-routines that are currently delayed to
* see if any require waking.
*
* Co-routines are stored in the queue in the order of their wake time -
* meaning once one co-routine has been found whose timer has not expired
* we need not look any further down the list.
*/
static void prvCheckDelayedList( void );
/*-----------------------------------------------------------*/
BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex )
{
BaseType_t xReturn;
CRCB_t *pxCoRoutine;
/* Allocate the memory that will store the co-routine control block. */
pxCoRoutine = ( CRCB_t * ) pvPortMalloc( sizeof( CRCB_t ) );
if( pxCoRoutine )
{
/* If pxCurrentCoRoutine is NULL then this is the first co-routine to
be created and the co-routine data structures need initialising. */
if( pxCurrentCoRoutine == NULL )
{
pxCurrentCoRoutine = pxCoRoutine;
prvInitialiseCoRoutineLists();
}
/* Check the priority is within limits. */
if( uxPriority >= configMAX_CO_ROUTINE_PRIORITIES )
{
uxPriority = configMAX_CO_ROUTINE_PRIORITIES - 1;
}
/* Fill out the co-routine control block from the function parameters. */
pxCoRoutine->uxState = corINITIAL_STATE;
pxCoRoutine->uxPriority = uxPriority;
pxCoRoutine->uxIndex = uxIndex;
pxCoRoutine->pxCoRoutineFunction = pxCoRoutineCode;
/* Initialise all the other co-routine control block parameters. */
vListInitialiseItem( &( pxCoRoutine->xGenericListItem ) );
vListInitialiseItem( &( pxCoRoutine->xEventListItem ) );
/* Set the co-routine control block as a link back from the ListItem_t.
This is so we can get back to the containing CRCB from a generic item
in a list. */
listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xGenericListItem ), pxCoRoutine );
listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xEventListItem ), pxCoRoutine );
/* Event lists are always in priority order. */
listSET_LIST_ITEM_VALUE( &( pxCoRoutine->xEventListItem ), ( ( TickType_t ) configMAX_CO_ROUTINE_PRIORITIES - ( TickType_t ) uxPriority ) );
/* Now the co-routine has been initialised it can be added to the ready
list at the correct priority. */
prvAddCoRoutineToReadyQueue( pxCoRoutine );
xReturn = pdPASS;
}
else
{
xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
}
return xReturn;
}
/*-----------------------------------------------------------*/
void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList )
{
TickType_t xTimeToWake;
/* Calculate the time to wake - this may overflow but this is
not a problem. */
xTimeToWake = xCoRoutineTickCount + xTicksToDelay;
/* We must remove ourselves from the ready list before adding
ourselves to the blocked list as the same list item is used for
both lists. */
( void ) uxListRemove( ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) );
/* The list item will be inserted in wake time order. */
listSET_LIST_ITEM_VALUE( &( pxCurrentCoRoutine->xGenericListItem ), xTimeToWake );
if( xTimeToWake < xCoRoutineTickCount )
{
/* Wake time has overflowed. Place this item in the
overflow list. */
vListInsert( ( List_t * ) pxOverflowDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) );
}
else
{
/* The wake time has not overflowed, so we can use the
current block list. */
vListInsert( ( List_t * ) pxDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) );
}
if( pxEventList )
{
/* Also add the co-routine to an event list. If this is done then the
function must be called with interrupts disabled. */
vListInsert( pxEventList, &( pxCurrentCoRoutine->xEventListItem ) );
}
}
/*-----------------------------------------------------------*/
static void prvCheckPendingReadyList( void )
{
/* Are there any co-routines waiting to get moved to the ready list? These
are co-routines that have been readied by an ISR. The ISR cannot access
the ready lists itself. */
while( listLIST_IS_EMPTY( &xPendingReadyCoRoutineList ) == pdFALSE )
{
CRCB_t *pxUnblockedCRCB;
/* The pending ready list can be accessed by an ISR. */
portDISABLE_INTERRUPTS();
{
pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( (&xPendingReadyCoRoutineList) );
( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) );
}
portENABLE_INTERRUPTS();
( void ) uxListRemove( &( pxUnblockedCRCB->xGenericListItem ) );
prvAddCoRoutineToReadyQueue( pxUnblockedCRCB );
}
}
/*-----------------------------------------------------------*/
static void prvCheckDelayedList( void )
{
CRCB_t *pxCRCB;
xPassedTicks = xTaskGetTickCount() - xLastTickCount;
while( xPassedTicks )
{
xCoRoutineTickCount++;
xPassedTicks--;
/* If the tick count has overflowed we need to swap the ready lists. */
if( xCoRoutineTickCount == 0 )
{
List_t * pxTemp;
/* Tick count has overflowed so we need to swap the delay lists. If there are
any items in pxDelayedCoRoutineList here then there is an error! */
pxTemp = pxDelayedCoRoutineList;
pxDelayedCoRoutineList = pxOverflowDelayedCoRoutineList;
pxOverflowDelayedCoRoutineList = pxTemp;
}
/* See if this tick has made a timeout expire. */
while( listLIST_IS_EMPTY( pxDelayedCoRoutineList ) == pdFALSE )
{
pxCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedCoRoutineList );
if( xCoRoutineTickCount < listGET_LIST_ITEM_VALUE( &( pxCRCB->xGenericListItem ) ) )
{
/* Timeout not yet expired. */
break;
}
portDISABLE_INTERRUPTS();
{
/* The event could have occurred just before this critical
section. If this is the case then the generic list item will
have been moved to the pending ready list and the following
line is still valid. Also the pvContainer parameter will have
been set to NULL so the following lines are also valid. */
( void ) uxListRemove( &( pxCRCB->xGenericListItem ) );
/* Is the co-routine waiting on an event also? */
if( pxCRCB->xEventListItem.pxContainer )
{
( void ) uxListRemove( &( pxCRCB->xEventListItem ) );
}
}
portENABLE_INTERRUPTS();
prvAddCoRoutineToReadyQueue( pxCRCB );
}
}
xLastTickCount = xCoRoutineTickCount;
}
/*-----------------------------------------------------------*/
void vCoRoutineSchedule( void )
{
/* See if any co-routines readied by events need moving to the ready lists. */
prvCheckPendingReadyList();
/* See if any delayed co-routines have timed out. */
prvCheckDelayedList();
/* Find the highest priority queue that contains ready co-routines. */
while( listLIST_IS_EMPTY( &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ) )
{
if( uxTopCoRoutineReadyPriority == 0 )
{
/* No more co-routines to check. */
return;
}
--uxTopCoRoutineReadyPriority;
}
/* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the co-routines
of the same priority get an equal share of the processor time. */
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentCoRoutine, &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) );
/* Call the co-routine. */
( pxCurrentCoRoutine->pxCoRoutineFunction )( pxCurrentCoRoutine, pxCurrentCoRoutine->uxIndex );
return;
}
/*-----------------------------------------------------------*/
static void prvInitialiseCoRoutineLists( void )
{
UBaseType_t uxPriority;
for( uxPriority = 0; uxPriority < configMAX_CO_ROUTINE_PRIORITIES; uxPriority++ )
{
vListInitialise( ( List_t * ) &( pxReadyCoRoutineLists[ uxPriority ] ) );
}
vListInitialise( ( List_t * ) &xDelayedCoRoutineList1 );
vListInitialise( ( List_t * ) &xDelayedCoRoutineList2 );
vListInitialise( ( List_t * ) &xPendingReadyCoRoutineList );
/* Start with pxDelayedCoRoutineList using list1 and the
pxOverflowDelayedCoRoutineList using list2. */
pxDelayedCoRoutineList = &xDelayedCoRoutineList1;
pxOverflowDelayedCoRoutineList = &xDelayedCoRoutineList2;
}
/*-----------------------------------------------------------*/
BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList )
{
CRCB_t *pxUnblockedCRCB;
BaseType_t xReturn;
/* This function is called from within an interrupt. It can only access
event lists and the pending ready list. This function assumes that a
check has already been made to ensure pxEventList is not empty. */
pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) );
vListInsertEnd( ( List_t * ) &( xPendingReadyCoRoutineList ), &( pxUnblockedCRCB->xEventListItem ) );
if( pxUnblockedCRCB->uxPriority >= pxCurrentCoRoutine->uxPriority )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
}
#endif /* configUSE_CO_ROUTINES != 0 */


@@ -1,753 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/* Standard includes. */
#include <stdlib.h>
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers. That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"
#include "event_groups.h"
/* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
for the header files above, but not in this file, in order to generate the
correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021 See comment above. */
/* The following bit fields convey control information in a task's event list
item value. It is important they don't clash with the
taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */
#if configUSE_16_BIT_TICKS == 1
#define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U
#define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U
#define eventWAIT_FOR_ALL_BITS 0x0400U
#define eventEVENT_BITS_CONTROL_BYTES 0xff00U
#else
#define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL
#define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL
#define eventWAIT_FOR_ALL_BITS 0x04000000UL
#define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL
#endif
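/*
 * Example (an illustrative sketch, not part of the kernel): application
 * event bits must avoid the control bytes defined above. With 32-bit ticks
 * only bits 0..23 are available to the application; with 16-bit ticks only
 * bits 0..7. The bit names below are hypothetical and can be checked
 * against the reserved mask:
 *
 *     #define myRX_COMPLETE_BIT ( ( EventBits_t ) 0x01 )
 *     #define myTX_COMPLETE_BIT ( ( EventBits_t ) 0x02 )
 *
 *     configASSERT( ( ( myRX_COMPLETE_BIT | myTX_COMPLETE_BIT )
 *                     & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
 */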
typedef struct EventGroupDef_t
{
EventBits_t uxEventBits;
List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */
#if( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxEventGroupNumber;
#endif
#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif
} EventGroup_t;
/*-----------------------------------------------------------*/
/*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
* pdTRUE then the wait condition is met if all the bits set in uxBitsToWaitFor
* are also set in uxCurrentEventBits. If xWaitForAllBits is pdFALSE then the
* wait condition is met if any of the bits set in uxBitsToWait for are also set
* in uxCurrentEventBits.
*/
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer )
{
EventGroup_t *pxEventBits;
/* A StaticEventGroup_t object must be provided. */
configASSERT( pxEventGroupBuffer );
#if( configASSERT_DEFINED == 1 )
{
/* Sanity check that the size of the structure used to declare a
variable of type StaticEventGroup_t equals the size of the real
event group structure. */
volatile size_t xSize = sizeof( StaticEventGroup_t );
configASSERT( xSize == sizeof( EventGroup_t ) );
} /*lint !e529 xSize is referenced if configASSERT() is defined. */
#endif /* configASSERT_DEFINED */
/* The user has provided a statically allocated event group - use it. */
pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 !e9087 EventGroup_t and StaticEventGroup_t are deliberately aliased for data hiding purposes and guaranteed to have the same size and alignment requirement - checked by configASSERT(). */
if( pxEventBits != NULL )
{
pxEventBits->uxEventBits = 0;
vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
{
/* Both static and dynamic allocation can be used, so note that
this event group was created statically in case the event group
is later deleted. */
pxEventBits->ucStaticallyAllocated = pdTRUE;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
traceEVENT_GROUP_CREATE( pxEventBits );
}
else
{
/* xEventGroupCreateStatic should only ever be called with
pxEventGroupBuffer pointing to a pre-allocated (compile time
allocated) StaticEventGroup_t variable. */
traceEVENT_GROUP_CREATE_FAILED();
}
return pxEventBits;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
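/*
 * Example usage (a minimal sketch; the variable names are hypothetical):
 *
 *     // The event group's data structure is allocated at compile time.
 *     static StaticEventGroup_t xEventGroupBuffer;
 *     EventGroupHandle_t xEvents;
 *
 *     xEvents = xEventGroupCreateStatic( &xEventGroupBuffer );
 *
 *     // The handle is the address of the buffer, so cannot be NULL here.
 *     configASSERT( xEvents );
 */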
/*-----------------------------------------------------------*/
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
EventGroupHandle_t xEventGroupCreate( void )
{
EventGroup_t *pxEventBits;
/* Allocate the event group. Justification for MISRA deviation as
follows: pvPortMalloc() always ensures returned memory blocks are
aligned per the requirements of the MCU stack. In this case
pvPortMalloc() must return a pointer that is guaranteed to meet the
alignment requirements of the EventGroup_t structure - which (if you
follow it through) is the alignment requirements of the TickType_t type
(EventBits_t being a typedef of TickType_t). Therefore, whenever the
stack alignment requirements are greater than or equal to the
TickType_t alignment requirements the cast is safe. In other cases,
where the natural word size of the architecture is less than
sizeof( TickType_t ), the TickType_t variables will be accessed in two
or more read operations, and the alignment requirement is only that
of each individual read. */
pxEventBits = ( EventGroup_t * ) pvPortMalloc( sizeof( EventGroup_t ) ); /*lint !e9087 !e9079 see comment above. */
if( pxEventBits != NULL )
{
pxEventBits->uxEventBits = 0;
vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
{
/* Both static and dynamic allocation can be used, so note this
event group was allocated statically in case the event group is
later deleted. */
pxEventBits->ucStaticallyAllocated = pdFALSE;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
traceEVENT_GROUP_CREATE( pxEventBits );
}
else
{
traceEVENT_GROUP_CREATE_FAILED(); /*lint !e9063 Else branch only exists to allow tracing and does not generate code if trace macros are not defined. */
}
return pxEventBits;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
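/*
 * Example usage (a minimal sketch; the variable name is hypothetical):
 *
 *     EventGroupHandle_t xEvents = xEventGroupCreate();
 *
 *     if( xEvents == NULL )
 *     {
 *         // There was insufficient FreeRTOS heap available for the event
 *         // group to be created.
 *     }
 */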
/*-----------------------------------------------------------*/
EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait )
{
EventBits_t uxOriginalBitValue, uxReturn;
EventGroup_t *pxEventBits = xEventGroup;
BaseType_t xAlreadyYielded;
BaseType_t xTimeoutOccurred = pdFALSE;
configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
configASSERT( uxBitsToWaitFor != 0 );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
vTaskSuspendAll();
{
uxOriginalBitValue = pxEventBits->uxEventBits;
( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet );
if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor )
{
/* All the rendezvous bits are now set - no need to block. */
uxReturn = ( uxOriginalBitValue | uxBitsToSet );
/* A rendezvous always clears the bits. They will have been cleared
already unless this is the only task in the rendezvous. */
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
xTicksToWait = 0;
}
else
{
if( xTicksToWait != ( TickType_t ) 0 )
{
traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor );
/* Store the bits that the calling task is waiting for in the
task's event list item so the kernel knows when a match is
found. Then enter the blocked state. */
vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait );
/* This assignment is obsolete as uxReturn will get set after
the task unblocks, but some compilers mistakenly generate a
warning about uxReturn being returned without being set if the
assignment is omitted. */
uxReturn = 0;
}
else
{
/* The rendezvous bits were not set, but no block time was
specified - just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
xTimeoutOccurred = pdTRUE;
}
}
}
xAlreadyYielded = xTaskResumeAll();
if( xTicksToWait != ( TickType_t ) 0 )
{
if( xAlreadyYielded == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The task blocked to wait for its required bits to be set - at this
point either the required bits were set or the block time expired. If
the required bits were set they will have been stored in the task's
event list item, and they should now be retrieved then cleared. */
uxReturn = uxTaskResetEventItemValue();
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
taskENTER_CRITICAL();
{
uxReturn = pxEventBits->uxEventBits;
/* Although the task got here because it timed out before the
bits it was waiting for were set, it is possible that since it
unblocked another task has set the bits. If this is the case
then it needs to clear the bits before exiting. */
if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor )
{
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
xTimeoutOccurred = pdTRUE;
}
else
{
/* The task unblocked because the bits were set. */
}
/* Control bits that might have been set while the task was blocked
should not be returned. */
uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
}
traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred );
/* Prevent compiler warnings when trace macros are not used. */
( void ) xTimeoutOccurred;
return uxReturn;
}
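/*
 * Example usage (a sketch of a two-task rendezvous; the handle and bit
 * assignments are hypothetical):
 *
 *     #define taskA_ARRIVED_BIT ( ( EventBits_t ) 0x01 )
 *     #define taskB_ARRIVED_BIT ( ( EventBits_t ) 0x02 )
 *     #define ALL_SYNC_BITS ( taskA_ARRIVED_BIT | taskB_ARRIVED_BIT )
 *
 *     // Called from task A: set taskA_ARRIVED_BIT to announce arrival at
 *     // the synchronisation point, then block until task B has also set
 *     // its bit. Task B makes the mirror-image call.
 *     ( void ) xEventGroupSync( xEvents, taskA_ARRIVED_BIT, ALL_SYNC_BITS, portMAX_DELAY );
 */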
/*-----------------------------------------------------------*/
EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait )
{
EventGroup_t *pxEventBits = xEventGroup;
EventBits_t uxReturn, uxControlBits = 0;
BaseType_t xWaitConditionMet, xAlreadyYielded;
BaseType_t xTimeoutOccurred = pdFALSE;
/* Check the user is not attempting to wait on the bits used by the kernel
itself, and that at least one bit is being requested. */
configASSERT( xEventGroup );
configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
configASSERT( uxBitsToWaitFor != 0 );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
vTaskSuspendAll();
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
/* Check to see if the wait condition is already met or not. */
xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits );
if( xWaitConditionMet != pdFALSE )
{
/* The wait condition has already been met so there is no need to
block. */
uxReturn = uxCurrentEventBits;
xTicksToWait = ( TickType_t ) 0;
/* Clear the wait bits if requested to do so. */
if( xClearOnExit != pdFALSE )
{
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xTicksToWait == ( TickType_t ) 0 )
{
/* The wait condition has not been met, but no block time was
specified, so just return the current value. */
uxReturn = uxCurrentEventBits;
xTimeoutOccurred = pdTRUE;
}
else
{
/* The task is going to block to wait for its required bits to be
set. uxControlBits are used to remember the specified behaviour of
this call to xEventGroupWaitBits() - for use when the event bits
unblock the task. */
if( xClearOnExit != pdFALSE )
{
uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xWaitForAllBits != pdFALSE )
{
uxControlBits |= eventWAIT_FOR_ALL_BITS;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Store the bits that the calling task is waiting for in the
task's event list item so the kernel knows when a match is
found. Then enter the blocked state. */
vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait );
/* This is obsolete as it will get set after the task unblocks, but
some compilers mistakenly generate a warning about the variable
being returned without being set if it is not done. */
uxReturn = 0;
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
xAlreadyYielded = xTaskResumeAll();
if( xTicksToWait != ( TickType_t ) 0 )
{
if( xAlreadyYielded == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The task blocked to wait for its required bits to be set - at this
point either the required bits were set or the block time expired. If
the required bits were set they will have been stored in the task's
event list item, and they should now be retrieved then cleared. */
uxReturn = uxTaskResetEventItemValue();
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
taskENTER_CRITICAL();
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
/* It is possible that the event bits were updated between this
task leaving the Blocked state and running again. */
if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE )
{
if( xClearOnExit != pdFALSE )
{
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xTimeoutOccurred = pdTRUE;
}
taskEXIT_CRITICAL();
}
else
{
/* The task unblocked because the bits were set. */
}
/* The task blocked so control bits may have been set. */
uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
}
traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred );
/* Prevent compiler warnings when trace macros are not used. */
( void ) xTimeoutOccurred;
return uxReturn;
}
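/*
 * Example usage (a sketch; the handle and bit values are hypothetical):
 *
 *     EventBits_t uxBits;
 *
 *     // Block for up to 100 ticks until either bit 0 or bit 4 is set,
 *     // clearing both bits before returning.
 *     uxBits = xEventGroupWaitBits( xEvents,
 *                                   ( EventBits_t ) 0x11, // uxBitsToWaitFor
 *                                   pdTRUE,               // xClearOnExit
 *                                   pdFALSE,              // xWaitForAllBits
 *                                   ( TickType_t ) 100 );
 *
 *     if( ( uxBits & ( EventBits_t ) 0x11 ) == 0 )
 *     {
 *         // Neither bit was set before the block time expired.
 *     }
 */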
/*-----------------------------------------------------------*/
EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear )
{
EventGroup_t *pxEventBits = xEventGroup;
EventBits_t uxReturn;
/* Check the user is not attempting to clear the bits used by the kernel
itself. */
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
taskENTER_CRITICAL();
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
/* The value returned is the event group value prior to the bits being
cleared. */
uxReturn = pxEventBits->uxEventBits;
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
taskEXIT_CRITICAL();
return uxReturn;
}
/*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear )
{
BaseType_t xReturn;
traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear );
xReturn = xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */
return xReturn;
}
#endif
/*-----------------------------------------------------------*/
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
UBaseType_t uxSavedInterruptStatus;
EventGroup_t const * const pxEventBits = xEventGroup;
EventBits_t uxReturn;
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
uxReturn = pxEventBits->uxEventBits;
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return uxReturn;
} /*lint !e818 EventGroupHandle_t is a typedef used in other functions too, so it can't be a pointer to const. */
/*-----------------------------------------------------------*/
EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet )
{
ListItem_t *pxListItem, *pxNext;
ListItem_t const *pxListEnd;
List_t const * pxList;
EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits;
EventGroup_t *pxEventBits = xEventGroup;
BaseType_t xMatchFound = pdFALSE;
/* Check the user is not attempting to set the bits used by the kernel
itself. */
configASSERT( xEventGroup );
configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
vTaskSuspendAll();
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
pxListItem = listGET_HEAD_ENTRY( pxList );
/* Set the bits. */
pxEventBits->uxEventBits |= uxBitsToSet;
/* See if the new bit value should unblock any tasks. */
while( pxListItem != pxListEnd )
{
pxNext = listGET_NEXT( pxListItem );
uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem );
xMatchFound = pdFALSE;
/* Split the bits waited for from the control bits. */
uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES;
uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES;
if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 )
{
/* Just looking for single bit being set. */
if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 )
{
xMatchFound = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor )
{
/* All bits are set. */
xMatchFound = pdTRUE;
}
else
{
/* Need all bits to be set, but not all the bits were set. */
}
if( xMatchFound != pdFALSE )
{
/* The bits match. Should the bits be cleared on exit? */
if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 )
{
uxBitsToClear |= uxBitsWaitedFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Store the actual event flag value in the task's event list
item before removing the task from the event list. The
eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows
that it was unblocked due to its required bits matching, rather
than because it timed out. */
vTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET );
}
/* Move on to the next list item. Note pxListItem->pxNext is not
used here as the list item may have been removed from the event list
and inserted into the ready/pending ready list. */
pxListItem = pxNext;
}
/* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT
bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
( void ) xTaskResumeAll();
return pxEventBits->uxEventBits;
}
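/*
 * Example usage (a sketch; the handle and bit value are hypothetical):
 *
 *     // Set bit 0. Any task blocked in xEventGroupWaitBits() whose wait
 *     // condition is now met is unblocked before this call returns.
 *     ( void ) xEventGroupSetBits( xEvents, ( EventBits_t ) 0x01 );
 */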
/*-----------------------------------------------------------*/
void vEventGroupDelete( EventGroupHandle_t xEventGroup )
{
EventGroup_t *pxEventBits = xEventGroup;
const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
vTaskSuspendAll();
{
traceEVENT_GROUP_DELETE( xEventGroup );
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{
/* Unblock the task, returning 0 as the event list is being deleted
and cannot therefore have any bits set. */
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
/* The event group can only have been allocated dynamically - free
it again. */
vPortFree( pxEventBits );
}
#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{
/* The event group could have been allocated statically or
dynamically, so check before attempting to free the memory. */
if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
vPortFree( pxEventBits );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
( void ) xTaskResumeAll();
}
/*-----------------------------------------------------------*/
/* For internal use only - execute a 'set bits' command that was pended from
an interrupt. */
void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet )
{
( void ) xEventGroupSetBits( pvEventGroup, ( EventBits_t ) ulBitsToSet ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */
}
/*-----------------------------------------------------------*/
/* For internal use only - execute a 'clear bits' command that was pended from
an interrupt. */
void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear )
{
( void ) xEventGroupClearBits( pvEventGroup, ( EventBits_t ) ulBitsToClear ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */
}
/*-----------------------------------------------------------*/
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits )
{
BaseType_t xWaitConditionMet = pdFALSE;
if( xWaitForAllBits == pdFALSE )
{
/* Task only has to wait for one bit within uxBitsToWaitFor to be
set. Is one already set? */
if( ( uxCurrentEventBits & uxBitsToWaitFor ) != ( EventBits_t ) 0 )
{
xWaitConditionMet = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Task has to wait for all the bits in uxBitsToWaitFor to be set.
Are they set already? */
if( ( uxCurrentEventBits & uxBitsToWaitFor ) == uxBitsToWaitFor )
{
xWaitConditionMet = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
return xWaitConditionMet;
}
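/*
 * Worked example of the wait condition (illustrative values): with
 * uxCurrentEventBits = 0x05 and uxBitsToWaitFor = 0x03, bit 0 matches but
 * bit 1 does not. With xWaitForAllBits == pdFALSE the condition is met
 * ( 0x05 & 0x03 == 0x01, non-zero ); with xWaitForAllBits == pdTRUE it is
 * not ( 0x01 != 0x03 ).
 */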
/*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet );
xReturn = xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */
return xReturn;
}
#endif
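/*
 * Example usage from an ISR (a sketch; the handler name, handle and bit
 * value are hypothetical). The set operation is deferred to the timer
 * task, so the return value must be checked in case the timer command
 * queue is full:
 *
 *     void vAnInterruptHandler( void )
 *     {
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *         if( xEventGroupSetBitsFromISR( xEvents, ( EventBits_t ) 0x01, &xHigherPriorityTaskWoken ) != pdFAIL )
 *         {
 *             // Request a context switch if pending the callback woke
 *             // the timer task and it has a higher priority.
 *             portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *         }
 *     }
 */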
/*-----------------------------------------------------------*/
#if (configUSE_TRACE_FACILITY == 1)
UBaseType_t uxEventGroupGetNumber( void* xEventGroup )
{
UBaseType_t xReturn;
EventGroup_t const *pxEventBits = ( EventGroup_t * ) xEventGroup; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */
if( xEventGroup == NULL )
{
xReturn = 0;
}
else
{
xReturn = pxEventBits->uxEventGroupNumber;
}
return xReturn;
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )
void vEventGroupSetNumber( void * xEventGroup, UBaseType_t uxEventGroupNumber )
{
( ( EventGroup_t * ) xEventGroup )->uxEventGroupNumber = uxEventGroupNumber; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/


@@ -1,133 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef STACK_MACROS_H
#define STACK_MACROS_H
#ifndef _MSC_VER /* Visual Studio doesn't support #warning. */
#warning The name of this file has changed to stack_macros.h. Please update your code accordingly. This source file (which has the original name) will be removed in a future release.
#endif
/*
* Call the stack overflow hook function if the stack of the task being swapped
* out is currently overflowed, or looks like it might have overflowed in the
* past.
*
* Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check
* the current stack state only - comparing the current top of stack value to
* the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1
* will also cause the last few stack bytes to be checked to ensure the value
* to which the bytes were set when the task was created have not been
* overwritten. Note this second test does not guarantee that an overflowed
* stack will always be recognised.
*/
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) )
/* Only the current stack state is to be checked. */
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
/* Is the currently saved stack pointer within the stack limit? */ \
if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) )
/* Only the current stack state is to be checked. */
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
\
/* Is the currently saved stack pointer within the stack limit? */ \
if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) )
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \
const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \
\
if( ( pulStack[ 0 ] != ulCheckValue ) || \
( pulStack[ 1 ] != ulCheckValue ) || \
( pulStack[ 2 ] != ulCheckValue ) || \
( pulStack[ 3 ] != ulCheckValue ) ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) )
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \
static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \
\
\
pcEndOfStack -= sizeof( ucExpectedStackBytes ); \
\
/* Has the extremity of the task stack ever been written over? */ \
if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
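/*
 * All the taskCHECK_FOR_STACK_OVERFLOW() variants above report failure
 * through vApplicationStackOverflowHook(), which the application must
 * provide when configCHECK_FOR_STACK_OVERFLOW is non-zero. A minimal
 * sketch (the halt strategy is an assumption - recovery is rarely
 * possible once a stack has overflowed):
 *
 *     void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName )
 *     {
 *         ( void ) xTask;
 *         ( void ) pcTaskName;
 *
 *         // Stop here so the failing task can be inspected in a debugger.
 *         taskDISABLE_INTERRUPTS();
 *         for( ;; );
 *     }
 */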
/*-----------------------------------------------------------*/
/* Remove stack overflow macro if not being used. */
#ifndef taskCHECK_FOR_STACK_OVERFLOW
#define taskCHECK_FOR_STACK_OVERFLOW()
#endif
#endif /* STACK_MACROS_H */


@@ -1,720 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef CO_ROUTINE_H
#define CO_ROUTINE_H
#ifndef INC_FREERTOS_H
#error "include FreeRTOS.h must appear in source files before include croutine.h"
#endif
#include "list.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Used to hide the implementation of the co-routine control block. The
control block structure however has to be included in the header due to
the macro implementation of the co-routine functionality. */
typedef void * CoRoutineHandle_t;
/* Defines the prototype to which co-routine functions must conform. */
typedef void (*crCOROUTINE_CODE)( CoRoutineHandle_t, UBaseType_t );
typedef struct corCoRoutineControlBlock
{
crCOROUTINE_CODE pxCoRoutineFunction;
ListItem_t xGenericListItem; /*< List item used to place the CRCB in ready and blocked queues. */
ListItem_t xEventListItem; /*< List item used to place the CRCB in event lists. */
UBaseType_t uxPriority; /*< The priority of the co-routine in relation to other co-routines. */
UBaseType_t uxIndex; /*< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */
uint16_t uxState; /*< Used internally by the co-routine implementation. */
} CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */
/**
* croutine.h
*<pre>
BaseType_t xCoRoutineCreate(
crCOROUTINE_CODE pxCoRoutineCode,
UBaseType_t uxPriority,
UBaseType_t uxIndex
);</pre>
*
* Create a new co-routine and add it to the list of co-routines that are
* ready to run.
*
* @param pxCoRoutineCode Pointer to the co-routine function. Co-routine
* functions require special syntax - see the co-routine section of the WEB
* documentation for more information.
*
* @param uxPriority The priority with respect to other co-routines at which
* the co-routine will run.
*
* @param uxIndex Used to distinguish between different co-routines that
* execute the same function. See the example below and the co-routine section
* of the WEB documentation for further information.
*
* @return pdPASS if the co-routine was successfully created and added to a ready
* list, otherwise an error code defined within ProjDefs.h.
*
* Example usage:
<pre>
// Co-routine to be created.
void vFlashCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
// Variables in co-routines must be declared static if they must maintain value across a blocking call.
// This may not be necessary for const variables.
static const char cLedToFlash[ 2 ] = { 5, 6 };
static const TickType_t uxFlashRates[ 2 ] = { 200, 400 };
// Must start every co-routine with a call to crSTART();
crSTART( xHandle );
for( ;; )
{
// This co-routine just delays for a fixed period, then toggles
// an LED. Two co-routines are created using this function, so
// the uxIndex parameter is used to tell the co-routine which
// LED to flash and how long to delay.
vParTestToggleLED( cLedToFlash[ uxIndex ] );
crDELAY( xHandle, uxFlashRates[ uxIndex ] );
}
// Must end every co-routine with a call to crEND();
crEND();
}
// Function that creates two co-routines.
void vOtherFunction( void )
{
UBaseType_t uxIndex;
// Create two co-routines at priority 0. The first is given index 0
// so (from the code above) toggles LED 5 every 200 ticks. The second
// is given index 1 so toggles LED 6 every 400 ticks.
for( uxIndex = 0; uxIndex < 2; uxIndex++ )
{
xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex );
}
}
</pre>
* \defgroup xCoRoutineCreate xCoRoutineCreate
* \ingroup Tasks
*/
BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex );
/**
* croutine.h
*<pre>
void vCoRoutineSchedule( void );</pre>
*
* Run a co-routine.
*
* vCoRoutineSchedule() executes the highest priority co-routine that is able
* to run. The co-routine will execute until it either blocks, yields or is
* preempted by a task. Co-routines execute cooperatively so one
* co-routine cannot be preempted by another, but can be preempted by a task.
*
* If an application comprises both tasks and co-routines then
* vCoRoutineSchedule should be called from the idle task (in an idle task
* hook).
*
* Example usage:
<pre>
// This idle task hook will schedule a co-routine each time it is called.
// The rest of the idle task will execute between co-routine calls.
void vApplicationIdleHook( void )
{
vCoRoutineSchedule();
}
// Alternatively, if you do not require any other part of the idle task to
// execute, the idle task hook can call vCoRoutineSchedule() within an
// infinite loop.
void vApplicationIdleHook( void )
{
for( ;; )
{
vCoRoutineSchedule();
}
}
</pre>
* \defgroup vCoRoutineSchedule vCoRoutineSchedule
* \ingroup Tasks
*/
void vCoRoutineSchedule( void );
/**
* croutine.h
* <pre>
crSTART( CoRoutineHandle_t xHandle );</pre>
*
* This macro MUST always be called at the start of a co-routine function.
*
* Example usage:
<pre>
// Co-routine to be created.
void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
// Variables in co-routines must be declared static if they must maintain value across a blocking call.
static int32_t ulAVariable;
// Must start every co-routine with a call to crSTART();
crSTART( xHandle );
for( ;; )
{
// Co-routine functionality goes here.
}
// Must end every co-routine with a call to crEND();
crEND();
}</pre>
* \defgroup crSTART crSTART
* \ingroup Tasks
*/
#define crSTART( pxCRCB ) switch( ( ( CRCB_t * )( pxCRCB ) )->uxState ) { case 0:
/**
* croutine.h
* <pre>
crEND();</pre>
*
* This macro MUST always be called at the end of a co-routine function.
*
* Example usage:
<pre>
// Co-routine to be created.
void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
// Variables in co-routines must be declared static if they must maintain value across a blocking call.
static int32_t ulAVariable;
// Must start every co-routine with a call to crSTART();
crSTART( xHandle );
for( ;; )
{
// Co-routine functionality goes here.
}
// Must end every co-routine with a call to crEND();
crEND();
}</pre>
* \defgroup crEND crEND
* \ingroup Tasks
*/
#define crEND() }
/*
* These macros are intended for internal use by the co-routine implementation
* only. The macros should not be used directly by application writers.
*/
#define crSET_STATE0( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = (__LINE__ * 2); return; case (__LINE__ * 2):
#define crSET_STATE1( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = ((__LINE__ * 2)+1); return; case ((__LINE__ * 2)+1):
/**
* croutine.h
*<pre>
crDELAY( CoRoutineHandle_t xHandle, TickType_t xTicksToDelay );</pre>
*
* Delay a co-routine for a fixed period of time.
*
* crDELAY can only be called from the co-routine function itself - not
* from within a function called by the co-routine function. This is because
* co-routines do not maintain their own stack.
*
* @param xHandle The handle of the co-routine to delay. This is the xHandle
* parameter of the co-routine function.
*
* @param xTicksToDelay The number of ticks that the co-routine should delay
* for. The actual amount of time this equates to is defined by
* configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant portTICK_PERIOD_MS
* can be used to convert ticks to milliseconds.
*
* Example usage:
<pre>
// Co-routine to be created.
void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
// Variables in co-routines must be declared static if they must maintain value across a blocking call.
// This may not be necessary for const variables.
// We are to delay for 200ms.
static const TickType_t xDelayTime = 200 / portTICK_PERIOD_MS;
// Must start every co-routine with a call to crSTART();
crSTART( xHandle );
for( ;; )
{
// Delay for 200ms.
crDELAY( xHandle, xDelayTime );
// Do something here.
}
// Must end every co-routine with a call to crEND();
crEND();
}</pre>
* \defgroup crDELAY crDELAY
* \ingroup Tasks
*/
#define crDELAY( xHandle, xTicksToDelay ) \
if( ( xTicksToDelay ) > 0 ) \
{ \
vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \
} \
crSET_STATE0( ( xHandle ) );
/**
* <pre>
crQUEUE_SEND(
CoRoutineHandle_t xHandle,
QueueHandle_t pxQueue,
void *pvItemToQueue,
TickType_t xTicksToWait,
BaseType_t *pxResult
)</pre>
*
* The macros crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine
* equivalents of the xQueueSend() and xQueueReceive() functions used by tasks.
*
* crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas
* xQueueSend() and xQueueReceive() can only be used from tasks.
*
* crQUEUE_SEND can only be called from the co-routine function itself - not
* from within a function called by the co-routine function. This is because
* co-routines do not maintain their own stack.
*
* See the co-routine section of the WEB documentation for information on
* passing data between tasks and co-routines and between ISR's and
* co-routines.
*
* @param xHandle The handle of the calling co-routine. This is the xHandle
* parameter of the co-routine function.
*
* @param pxQueue The handle of the queue on which the data will be posted.
* The handle is obtained as the return value when the queue is created using
* the xQueueCreate() API function.
*
* @param pvItemToQueue A pointer to the data being posted onto the queue.
* The number of bytes of each queued item is specified when the queue is
* created. This number of bytes is copied from pvItemToQueue into the queue
* itself.
*
* @param xTicksToWait The number of ticks that the co-routine should block
* to wait for space to become available on the queue, should space not be
* available immediately. The actual amount of time this equates to is defined
* by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant
* portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see example
* below).
*
* @param pxResult The variable pointed to by pxResult will be set to pdPASS if
* data was successfully posted onto the queue, otherwise it will be set to an
* error defined within ProjDefs.h.
*
* Example usage:
<pre>
// Co-routine function that blocks for a fixed period then posts a number onto
// a queue.
static void prvCoRoutineFlashTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
// Variables in co-routines must be declared static if they must maintain value across a blocking call.
static BaseType_t xNumberToPost = 0;
static BaseType_t xResult;
// Co-routines must begin with a call to crSTART().
crSTART( xHandle );
for( ;; )
{
// This assumes the queue has already been created.
crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, NO_DELAY, &xResult );
if( xResult != pdPASS )
{
// The message was not posted!
}
// Increment the number to be posted onto the queue.
xNumberToPost++;
// Delay for 100 ticks.
crDELAY( xHandle, 100 );
}
// Co-routines must end with a call to crEND().
crEND();
}</pre>
* \defgroup crQUEUE_SEND crQUEUE_SEND
* \ingroup Tasks
*/
#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \
{ \
*( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \
if( *( pxResult ) == errQUEUE_BLOCKED ) \
{ \
crSET_STATE0( ( xHandle ) ); \
*( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), 0 ); \
} \
if( *( pxResult ) == errQUEUE_YIELD ) \
{ \
crSET_STATE1( ( xHandle ) ); \
*( pxResult ) = pdPASS; \
} \
}
/**
* croutine.h
* <pre>
crQUEUE_RECEIVE(
CoRoutineHandle_t xHandle,
QueueHandle_t pxQueue,
void *pvBuffer,
TickType_t xTicksToWait,
BaseType_t *pxResult
)</pre>
*
* The macros crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine
* equivalents of the xQueueSend() and xQueueReceive() functions used by tasks.
*
* crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas
* xQueueSend() and xQueueReceive() can only be used from tasks.
*
* crQUEUE_RECEIVE can only be called from the co-routine function itself - not
* from within a function called by the co-routine function. This is because
* co-routines do not maintain their own stack.
*
* See the co-routine section of the WEB documentation for information on
* passing data between tasks and co-routines and between ISR's and
* co-routines.
*
* @param xHandle The handle of the calling co-routine. This is the xHandle
* parameter of the co-routine function.
*
* @param pxQueue The handle of the queue from which the data will be received.
* The handle is obtained as the return value when the queue is created using
* the xQueueCreate() API function.
*
* @param pvBuffer The buffer into which the received item is to be copied.
* The number of bytes of each queued item is specified when the queue is
* created. This number of bytes is copied into pvBuffer.
*
* @param xTicksToWait The number of ticks that the co-routine should block
* to wait for data to become available from the queue, should data not be
* available immediately. The actual amount of time this equates to is defined
* by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant
* portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see the
* crQUEUE_SEND example).
*
* @param pxResult The variable pointed to by pxResult will be set to pdPASS if
* data was successfully retrieved from the queue, otherwise it will be set to
* an error code as defined within ProjDefs.h.
*
* Example usage:
<pre>
// A co-routine receives the number of an LED to flash from a queue. It
// blocks on the queue until the number is received.
static void prvCoRoutineFlashWorkTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
// Variables in co-routines must be declared static if they must maintain value across a blocking call.
static BaseType_t xResult;
static UBaseType_t uxLEDToFlash;
// All co-routines must start with a call to crSTART().
crSTART( xHandle );
for( ;; )
{
// Wait for data to become available on the queue.
crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxLEDToFlash, portMAX_DELAY, &xResult );
if( xResult == pdPASS )
{
// We received the LED to flash - flash it!
vParTestToggleLED( uxLEDToFlash );
}
}
crEND();
}</pre>
* \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE
* \ingroup Tasks
*/
#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \
{ \
*( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), ( xTicksToWait ) ); \
if( *( pxResult ) == errQUEUE_BLOCKED ) \
{ \
crSET_STATE0( ( xHandle ) ); \
*( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), 0 ); \
} \
if( *( pxResult ) == errQUEUE_YIELD ) \
{ \
crSET_STATE1( ( xHandle ) ); \
*( pxResult ) = pdPASS; \
} \
}
/**
* croutine.h
* <pre>
crQUEUE_SEND_FROM_ISR(
QueueHandle_t pxQueue,
void *pvItemToQueue,
BaseType_t xCoRoutinePreviouslyWoken
)</pre>
*
* The macros crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the
* co-routine equivalents of the xQueueSendFromISR() and xQueueReceiveFromISR()
* functions used by tasks.
*
* crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to
* pass data between a co-routine and an ISR, whereas xQueueSendFromISR() and
* xQueueReceiveFromISR() can only be used to pass data between a task and an
* ISR.
*
* crQUEUE_SEND_FROM_ISR can only be called from an ISR to send data to a queue
* that is being used from within a co-routine.
*
* See the co-routine section of the web documentation for information on
* passing data between tasks and co-routines and between ISRs and
* co-routines.
*
* @param pxQueue The handle to the queue on which the item is to be posted.
*
* @param pvItemToQueue A pointer to the item that is to be placed on the
* queue. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from pvItemToQueue
* into the queue storage area.
*
* @param xCoRoutinePreviouslyWoken This is included so an ISR can post onto
* the same queue multiple times from a single interrupt. The first call
* should always pass in pdFALSE. Subsequent calls should pass in
* the value returned from the previous call.
*
* @return pdTRUE if a co-routine was woken by posting onto the queue. This is
* used by the ISR to determine if a context switch may be required following
* the ISR.
*
* Example usage:
<pre>
// A co-routine that blocks on a queue waiting for characters to be received.
static void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
char cRxedChar;
BaseType_t xResult;
// All co-routines must start with a call to crSTART().
crSTART( xHandle );
for( ;; )
{
// Wait for data to become available on the queue. This assumes the
// queue xCommsRxQueue has already been created!
crQUEUE_RECEIVE( xHandle, xCommsRxQueue, &cRxedChar, portMAX_DELAY, &xResult );
// Was a character received?
if( xResult == pdPASS )
{
// Process the character here.
}
}
// All co-routines must end with a call to crEND().
crEND();
}
// An ISR that uses a queue to send characters received on a serial port to
// a co-routine.
void vUART_ISR( void )
{
char cRxedChar;
BaseType_t xCRWokenByPost = pdFALSE;
// We loop around reading characters until there are none left in the UART.
while( UART_RX_REG_NOT_EMPTY() )
{
// Obtain the character from the UART.
cRxedChar = UART_RX_REG;
// Post the character onto a queue. xCRWokenByPost will be pdFALSE
// the first time around the loop. If the post causes a co-routine
// to be woken (unblocked) then xCRWokenByPost will be set to pdTRUE.
// In this manner we can ensure that if more than one co-routine is
// blocked on the queue only one is woken by this ISR no matter how
// many characters are posted to the queue.
xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );
}
}</pre>
* \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR
* \ingroup Tasks
*/
#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) )
/**
* croutine.h
* <pre>
crQUEUE_RECEIVE_FROM_ISR(
QueueHandle_t pxQueue,
void *pvBuffer,
BaseType_t * pxCoRoutineWoken
)</pre>
*
* The macros crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the
* co-routine equivalents of the xQueueSendFromISR() and xQueueReceiveFromISR()
* functions used by tasks.
*
* crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to
* pass data between a co-routine and an ISR, whereas xQueueSendFromISR() and
* xQueueReceiveFromISR() can only be used to pass data between a task and an
* ISR.
*
* crQUEUE_RECEIVE_FROM_ISR can only be called from an ISR to receive data
* from a queue that is being used from within a co-routine (a co-routine
* posted to the queue).
*
* See the co-routine section of the web documentation for information on
* passing data between tasks and co-routines and between ISRs and
* co-routines.
*
* @param pxQueue The handle to the queue from which the item is to be received.
*
* @param pvBuffer A pointer to a buffer into which the received item will be
* placed. The size of the items the queue will hold was defined when the
* queue was created, so this many bytes will be copied from the queue into
* pvBuffer.
*
* @param pxCoRoutineWoken A co-routine may be blocked waiting for space to become
* available on the queue. If crQUEUE_RECEIVE_FROM_ISR causes such a
* co-routine to unblock *pxCoRoutineWoken will get set to pdTRUE, otherwise
* *pxCoRoutineWoken will remain unchanged.
*
* @return pdTRUE an item was successfully received from the queue, otherwise
* pdFALSE.
*
* Example usage:
<pre>
// A co-routine that posts a character to a queue then blocks for a fixed
// period. The character is incremented each time.
static void vSendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
// cChar holds its value while this co-routine is blocked and must therefore
// be declared static.
static char cCharToTx = 'a';
BaseType_t xResult;
// All co-routines must start with a call to crSTART().
crSTART( xHandle );
for( ;; )
{
// Send the next character to the queue.
crQUEUE_SEND( xHandle, xCoRoutineQueue, &cCharToTx, NO_DELAY, &xResult );
if( xResult == pdPASS )
{
// The character was successfully posted to the queue.
}
else
{
// Could not post the character to the queue.
}
// Enable the UART Tx interrupt to cause an interrupt in this
// hypothetical UART. The interrupt will obtain the character
// from the queue and send it.
ENABLE_TX_INTERRUPT();
// Increment to the next character then block for a fixed period.
// cCharToTx will maintain its value across the delay as it is
// declared static.
cCharToTx++;
if( cCharToTx > 'x' )
{
cCharToTx = 'a';
}
crDELAY( xHandle, 100 );
}
// All co-routines must end with a call to crEND().
crEND();
}
// An ISR that uses a queue to receive characters to send on a UART.
void vUART_ISR( void )
{
char cCharToTx;
BaseType_t xCRWokenByPost = pdFALSE;
while( UART_TX_REG_EMPTY() )
{
// Are there any characters in the queue waiting to be sent?
// xCRWokenByPost will automatically be set to pdTRUE if a co-routine
// is woken by the receive freeing space on the queue - ensuring that
// only a single co-routine is woken no matter how many times we go
// around this loop.
if( crQUEUE_RECEIVE_FROM_ISR( xCoRoutineQueue, &cCharToTx, &xCRWokenByPost ) )
{
SEND_CHARACTER( cCharToTx );
}
}
}</pre>
* \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR
* \ingroup Tasks
*/
#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) )
/*
* This function is intended for internal use by the co-routine macros only.
* The macro nature of the co-routine implementation requires that the
* prototype appears here. The function should not be used by application
* writers.
*
* Removes the current co-routine from its ready list and places it in the
* appropriate delayed list.
*/
void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList );
/*
* This function is intended for internal use by the queue implementation only.
* The function should not be used by application writers.
*
* Removes the highest priority co-routine from the event list and places it in
* the pending ready list.
*/
BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList );
#ifdef __cplusplus
}
#endif
#endif /* CO_ROUTINE_H */


@ -1,279 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef DEPRECATED_DEFINITIONS_H
#define DEPRECATED_DEFINITIONS_H
/* Each FreeRTOS port has a unique portmacro.h header file. Originally a
pre-processor definition was used to ensure the pre-processor found the correct
portmacro.h file for the port being used. That scheme was deprecated in favour
of setting the compiler's include path such that it found the correct
portmacro.h file - removing the need for the constant and allowing the
portmacro.h file to be located anywhere in relation to the port being used. The
definitions below remain in the code for backward compatibility only. New
projects should not use them. */
#ifdef OPEN_WATCOM_INDUSTRIAL_PC_PORT
#include "..\..\Source\portable\owatcom\16bitdos\pc\portmacro.h"
typedef void ( __interrupt __far *pxISR )();
#endif
#ifdef OPEN_WATCOM_FLASH_LITE_186_PORT
#include "..\..\Source\portable\owatcom\16bitdos\flsh186\portmacro.h"
typedef void ( __interrupt __far *pxISR )();
#endif
#ifdef GCC_MEGA_AVR
#include "../portable/GCC/ATMega323/portmacro.h"
#endif
#ifdef IAR_MEGA_AVR
#include "../portable/IAR/ATMega323/portmacro.h"
#endif
#ifdef MPLAB_PIC24_PORT
#include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h"
#endif
#ifdef MPLAB_DSPIC_PORT
#include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h"
#endif
#ifdef MPLAB_PIC18F_PORT
#include "../../Source/portable/MPLAB/PIC18F/portmacro.h"
#endif
#ifdef MPLAB_PIC32MX_PORT
#include "../../Source/portable/MPLAB/PIC32MX/portmacro.h"
#endif
#ifdef _FEDPICC
#include "libFreeRTOS/Include/portmacro.h"
#endif
#ifdef SDCC_CYGNAL
#include "../../Source/portable/SDCC/Cygnal/portmacro.h"
#endif
#ifdef GCC_ARM7
#include "../../Source/portable/GCC/ARM7_LPC2000/portmacro.h"
#endif
#ifdef GCC_ARM7_ECLIPSE
#include "portmacro.h"
#endif
#ifdef ROWLEY_LPC23xx
#include "../../Source/portable/GCC/ARM7_LPC23xx/portmacro.h"
#endif
#ifdef IAR_MSP430
#include "..\..\Source\portable\IAR\MSP430\portmacro.h"
#endif
#ifdef GCC_MSP430
#include "../../Source/portable/GCC/MSP430F449/portmacro.h"
#endif
#ifdef ROWLEY_MSP430
#include "../../Source/portable/Rowley/MSP430F449/portmacro.h"
#endif
#ifdef ARM7_LPC21xx_KEIL_RVDS
#include "..\..\Source\portable\RVDS\ARM7_LPC21xx\portmacro.h"
#endif
#ifdef SAM7_GCC
#include "../../Source/portable/GCC/ARM7_AT91SAM7S/portmacro.h"
#endif
#ifdef SAM7_IAR
#include "..\..\Source\portable\IAR\AtmelSAM7S64\portmacro.h"
#endif
#ifdef SAM9XE_IAR
#include "..\..\Source\portable\IAR\AtmelSAM9XE\portmacro.h"
#endif
#ifdef LPC2000_IAR
#include "..\..\Source\portable\IAR\LPC2000\portmacro.h"
#endif
#ifdef STR71X_IAR
#include "..\..\Source\portable\IAR\STR71x\portmacro.h"
#endif
#ifdef STR75X_IAR
#include "..\..\Source\portable\IAR\STR75x\portmacro.h"
#endif
#ifdef STR75X_GCC
#include "..\..\Source\portable\GCC\STR75x\portmacro.h"
#endif
#ifdef STR91X_IAR
#include "..\..\Source\portable\IAR\STR91x\portmacro.h"
#endif
#ifdef GCC_H8S
#include "../../Source/portable/GCC/H8S2329/portmacro.h"
#endif
#ifdef GCC_AT91FR40008
#include "../../Source/portable/GCC/ARM7_AT91FR40008/portmacro.h"
#endif
#ifdef RVDS_ARMCM3_LM3S102
#include "../../Source/portable/RVDS/ARM_CM3/portmacro.h"
#endif
#ifdef GCC_ARMCM3_LM3S102
#include "../../Source/portable/GCC/ARM_CM3/portmacro.h"
#endif
#ifdef GCC_ARMCM3
#include "../../Source/portable/GCC/ARM_CM3/portmacro.h"
#endif
#ifdef IAR_ARM_CM3
#include "../../Source/portable/IAR/ARM_CM3/portmacro.h"
#endif
#ifdef IAR_ARMCM3_LM
#include "../../Source/portable/IAR/ARM_CM3/portmacro.h"
#endif
#ifdef HCS12_CODE_WARRIOR
#include "../../Source/portable/CodeWarrior/HCS12/portmacro.h"
#endif
#ifdef MICROBLAZE_GCC
#include "../../Source/portable/GCC/MicroBlaze/portmacro.h"
#endif
#ifdef TERN_EE
#include "..\..\Source\portable\Paradigm\Tern_EE\small\portmacro.h"
#endif
#ifdef GCC_HCS12
#include "../../Source/portable/GCC/HCS12/portmacro.h"
#endif
#ifdef GCC_MCF5235
#include "../../Source/portable/GCC/MCF5235/portmacro.h"
#endif
#ifdef COLDFIRE_V2_GCC
#include "../../../Source/portable/GCC/ColdFire_V2/portmacro.h"
#endif
#ifdef COLDFIRE_V2_CODEWARRIOR
#include "../../Source/portable/CodeWarrior/ColdFire_V2/portmacro.h"
#endif
#ifdef GCC_PPC405
#include "../../Source/portable/GCC/PPC405_Xilinx/portmacro.h"
#endif
#ifdef GCC_PPC440
#include "../../Source/portable/GCC/PPC440_Xilinx/portmacro.h"
#endif
#ifdef _16FX_SOFTUNE
#include "..\..\Source\portable\Softune\MB96340\portmacro.h"
#endif
#ifdef BCC_INDUSTRIAL_PC_PORT
/* A short file name has to be used in place of the normal
FreeRTOSConfig.h when using the Borland compiler. */
#include "frconfig.h"
#include "..\portable\BCC\16BitDOS\PC\prtmacro.h"
typedef void ( __interrupt __far *pxISR )();
#endif
#ifdef BCC_FLASH_LITE_186_PORT
/* A short file name has to be used in place of the normal
FreeRTOSConfig.h when using the Borland compiler. */
#include "frconfig.h"
#include "..\portable\BCC\16BitDOS\flsh186\prtmacro.h"
typedef void ( __interrupt __far *pxISR )();
#endif
#ifdef __GNUC__
#ifdef __AVR32_AVR32A__
#include "portmacro.h"
#endif
#endif
#ifdef __ICCAVR32__
#ifdef __CORE__
#if __CORE__ == __AVR32A__
#include "portmacro.h"
#endif
#endif
#endif
#ifdef __91467D
#include "portmacro.h"
#endif
#ifdef __96340
#include "portmacro.h"
#endif
#ifdef __IAR_V850ES_Fx3__
#include "../../Source/portable/IAR/V850ES/portmacro.h"
#endif
#ifdef __IAR_V850ES_Jx3__
#include "../../Source/portable/IAR/V850ES/portmacro.h"
#endif
#ifdef __IAR_V850ES_Jx3_L__
#include "../../Source/portable/IAR/V850ES/portmacro.h"
#endif
#ifdef __IAR_V850ES_Jx2__
#include "../../Source/portable/IAR/V850ES/portmacro.h"
#endif
#ifdef __IAR_V850ES_Hx2__
#include "../../Source/portable/IAR/V850ES/portmacro.h"
#endif
#ifdef __IAR_78K0R_Kx3__
#include "../../Source/portable/IAR/78K0R/portmacro.h"
#endif
#ifdef __IAR_78K0R_Kx3L__
#include "../../Source/portable/IAR/78K0R/portmacro.h"
#endif
#endif /* DEPRECATED_DEFINITIONS_H */


@ -1,757 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef EVENT_GROUPS_H
#define EVENT_GROUPS_H
#ifndef INC_FREERTOS_H
#error "include FreeRTOS.h" must appear in source files before "include event_groups.h"
#endif
/* FreeRTOS includes. */
#include "timers.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* An event group is a collection of bits to which an application can assign a
* meaning. For example, an application may create an event group to convey
* the status of various CAN bus related events in which bit 0 might mean "A CAN
* message has been received and is ready for processing", bit 1 might mean "The
* application has queued a message that is ready for sending onto the CAN
* network", and bit 2 might mean "It is time to send a SYNC message onto the
* CAN network" etc. A task can then test the bit values to see which events
* are active, and optionally enter the Blocked state to wait for a specified
* bit or a group of specified bits to be active. To continue the CAN bus
* example, a CAN controlling task can enter the Blocked state (and therefore
* not consume any processing time) until either bit 0, bit 1 or bit 2 are
* active, at which time the bit that was actually active would inform the task
* which action it had to take (process a received message, send a message, or
* send a SYNC).
*
* The event groups implementation contains intelligence to avoid race
* conditions that would otherwise occur were an application to use a simple
* variable for the same purpose. This is particularly important with respect
* to when a bit within an event group is to be cleared, and when bits have to
* be set and then tested atomically - as is the case where event groups are
* used to create a synchronisation point between multiple tasks (a
* 'rendezvous').
*
* \defgroup EventGroup
*/
/**
* event_groups.h
*
* Type by which event groups are referenced. For example, a call to
* xEventGroupCreate() returns an EventGroupHandle_t variable that can then
* be used as a parameter to other event group functions.
*
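* Example usage (an illustrative sketch only; the containing function is
* hypothetical):
<pre>
void aFunction( void )
{
// Declare a handle, then point it at a newly created event group.
EventGroupHandle_t xCreatedEventGroup = xEventGroupCreate();
}
</pre>
*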
* \defgroup EventGroupHandle_t EventGroupHandle_t
* \ingroup EventGroup
*/
struct EventGroupDef_t;
typedef struct EventGroupDef_t * EventGroupHandle_t;
/*
* The type that holds event bits always matches TickType_t - therefore the
* number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1,
* 32 bits if set to 0).
*
* \defgroup EventBits_t EventBits_t
* \ingroup EventGroup
*/
typedef TickType_t EventBits_t;
/**
* event_groups.h
*<pre>
EventGroupHandle_t xEventGroupCreate( void );
</pre>
*
* Create a new event group.
*
* Internally, within the FreeRTOS implementation, event groups use a [small]
* block of memory, in which the event group's structure is stored. If an event
* group is created using xEventGroupCreate() then the required memory is
* automatically dynamically allocated inside the xEventGroupCreate() function
* (see http://www.freertos.org/a00111.html). If an event group is created
* using xEventGroupCreateStatic() then the application writer must instead
* provide the memory that will get used by the event group.
* xEventGroupCreateStatic() therefore allows an event group to be created
* without using any dynamic memory allocation.
*
* Although event groups are not related to ticks, for internal implementation
* reasons the number of bits available for use in an event group is dependent
* on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If
* configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
* 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has
* 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store
* event bits within an event group.
*
* @return If the event group was created then a handle to the event group is
* returned. If there was insufficient FreeRTOS heap available to create the
* event group then NULL is returned. See http://www.freertos.org/a00111.html
*
* Example usage:
<pre>
// Declare a variable to hold the created event group.
EventGroupHandle_t xCreatedEventGroup;
// Attempt to create the event group.
xCreatedEventGroup = xEventGroupCreate();
// Was the event group created successfully?
if( xCreatedEventGroup == NULL )
{
// The event group was not created because there was insufficient
// FreeRTOS heap available.
}
else
{
// The event group was created.
}
</pre>
* \defgroup xEventGroupCreate xEventGroupCreate
* \ingroup EventGroup
*/
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
EventGroupHandle_t xEventGroupCreate( void ) PRIVILEGED_FUNCTION;
#endif
/**
* event_groups.h
*<pre>
EventGroupHandle_t xEventGroupCreateStatic( EventGroupHandle_t * pxEventGroupBuffer );
</pre>
*
* Create a new event group.
*
* Internally, within the FreeRTOS implementation, event groups use a [small]
* block of memory, in which the event group's structure is stored. If an event
* group is created using xEventGroupCreate() then the required memory is
* automatically dynamically allocated inside the xEventGroupCreate() function
* (see http://www.freertos.org/a00111.html). If an event group is created
* using xEventGroupCreateStatic() then the application writer must instead
* provide the memory that will get used by the event group.
* xEventGroupCreateStatic() therefore allows an event group to be created
* without using any dynamic memory allocation.
*
* Although event groups are not related to ticks, for internal implementation
* reasons the number of bits available for use in an event group is dependent
* on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If
* configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
* 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has
* 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store
* event bits within an event group.
*
* @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type
* StaticEventGroup_t, which will then be used to hold the event group's data
* structures, removing the need for the memory to be allocated dynamically.
*
* @return If the event group was created then a handle to the event group is
* returned. If pxEventGroupBuffer was NULL then NULL is returned.
*
* Example usage:
<pre>
// StaticEventGroup_t is a publicly accessible structure that has the same
// size and alignment requirements as the real event group structure. It is
// provided as a mechanism for applications to know the size of the event
// group (which is dependent on the architecture and configuration file
// settings) without breaking the strict data hiding policy by exposing the
// real event group internals. This StaticEventGroup_t variable is passed
// into the xSemaphoreCreateEventGroupStatic() function and is used to store
// the event group's data structures
StaticEventGroup_t xEventGroupBuffer;
// Create the event group without dynamically allocating any memory.
xEventGroup = xEventGroupCreateStatic( &xEventGroupBuffer );
</pre>
*/
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) PRIVILEGED_FUNCTION;
#endif
/**
* event_groups.h
*<pre>
EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToWaitFor,
const BaseType_t xClearOnExit,
const BaseType_t xWaitForAllBits,
const TickType_t xTicksToWait );
</pre>
*
* [Potentially] block to wait for one or more bits to be set within a
* previously created event group.
*
* This function cannot be called from an interrupt.
*
* @param xEventGroup The event group in which the bits are being tested. The
* event group must have previously been created using a call to
* xEventGroupCreate().
*
* @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test
* inside the event group. For example, to wait for bit 0 and/or bit 2 set
* uxBitsToWaitFor to 0x05. To wait for bits 0 and/or bit 1 and/or bit 2 set
* uxBitsToWaitFor to 0x07. Etc.
*
* @param xClearOnExit If xClearOnExit is set to pdTRUE then any bits within
* uxBitsToWaitFor that are set within the event group will be cleared before
* xEventGroupWaitBits() returns if the wait condition was met (if the function
* returns for a reason other than a timeout). If xClearOnExit is set to
* pdFALSE then the bits set in the event group are not altered when the call to
* xEventGroupWaitBits() returns.
*
* @param xWaitForAllBits If xWaitForAllBits is set to pdTRUE then
* xEventGroupWaitBits() will return when either all the bits in uxBitsToWaitFor
* are set or the specified block time expires. If xWaitForAllBits is set to
* pdFALSE then xEventGroupWaitBits() will return when any one of the bits set
* in uxBitsToWaitFor is set or the specified block time expires. The block
* time is specified by the xTicksToWait parameter.
*
* @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait
* for one/all (depending on the xWaitForAllBits value) of the bits specified by
* uxBitsToWaitFor to become set.
*
* @return The value of the event group at the time either the bits being waited
* for became set, or the block time expired. Test the return value to know
* which bits were set. If xEventGroupWaitBits() returned because its timeout
* expired then not all the bits being waited for will be set. If
* xEventGroupWaitBits() returned because the bits it was waiting for were set
* then the returned value is the event group value before any bits were
* automatically cleared in the case that xClearOnExit parameter was set to
* pdTRUE.
*
* Example usage:
<pre>
#define BIT_0 ( 1 << 0 )
#define BIT_4 ( 1 << 4 )
void aFunction( EventGroupHandle_t xEventGroup )
{
EventBits_t uxBits;
const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
// Wait a maximum of 100ms for either bit 0 or bit 4 to be set within
// the event group. Clear the bits before exiting.
uxBits = xEventGroupWaitBits(
xEventGroup, // The event group being tested.
BIT_0 | BIT_4, // The bits within the event group to wait for.
pdTRUE, // BIT_0 and BIT_4 should be cleared before returning.
pdFALSE, // Don't wait for both bits, either bit will do.
xTicksToWait ); // Wait a maximum of 100ms for either bit to be set.
if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
{
// xEventGroupWaitBits() returned because both bits were set.
}
else if( ( uxBits & BIT_0 ) != 0 )
{
// xEventGroupWaitBits() returned because just BIT_0 was set.
}
else if( ( uxBits & BIT_4 ) != 0 )
{
// xEventGroupWaitBits() returned because just BIT_4 was set.
}
else
{
// xEventGroupWaitBits() returned because xTicksToWait ticks passed
// without either BIT_0 or BIT_4 becoming set.
}
}
</pre>
* \defgroup xEventGroupWaitBits xEventGroupWaitBits
* \ingroup EventGroup
*/
EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
/**
* event_groups.h
*<pre>
EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
</pre>
*
* Clear bits within an event group. This function cannot be called from an
* interrupt.
*
* @param xEventGroup The event group in which the bits are to be cleared.
*
* @param uxBitsToClear A bitwise value that indicates the bit or bits to clear
* in the event group. For example, to clear bit 3 only, set uxBitsToClear to
* 0x08. To clear bit 3 and bit 0 set uxBitsToClear to 0x09.
*
* @return The value of the event group before the specified bits were cleared.
*
* Example usage:
<pre>
#define BIT_0 ( 1 << 0 )
#define BIT_4 ( 1 << 4 )
void aFunction( EventGroupHandle_t xEventGroup )
{
EventBits_t uxBits;
// Clear bit 0 and bit 4 in xEventGroup.
uxBits = xEventGroupClearBits(
xEventGroup, // The event group being updated.
BIT_0 | BIT_4 );// The bits being cleared.
if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
{
// Both bit 0 and bit 4 were set before xEventGroupClearBits() was
// called. Both will now be clear (not set).
}
else if( ( uxBits & BIT_0 ) != 0 )
{
// Bit 0 was set before xEventGroupClearBits() was called. It will
// now be clear.
}
else if( ( uxBits & BIT_4 ) != 0 )
{
// Bit 4 was set before xEventGroupClearBits() was called. It will
// now be clear.
}
else
{
// Neither bit 0 nor bit 4 were set in the first place.
}
}
</pre>
* \defgroup xEventGroupClearBits xEventGroupClearBits
* \ingroup EventGroup
*/
EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;
/**
* event_groups.h
*<pre>
BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
</pre>
*
* A version of xEventGroupClearBits() that can be called from an interrupt.
*
* Setting bits in an event group is not a deterministic operation because there
* are an unknown number of tasks that may be waiting for the bit or bits being
* set. FreeRTOS does not allow nondeterministic operations to be performed
* while interrupts are disabled, so it protects event groups that are accessed
* from tasks by suspending the scheduler rather than disabling interrupts. As
* a result event groups cannot be accessed directly from an interrupt service
* routine. Therefore xEventGroupClearBitsFromISR() sends a message to the
* timer task to have the clear operation performed in the context of the timer
* task.
*
* @param xEventGroup The event group in which the bits are to be cleared.
*
* @param uxBitsToClear A bitwise value that indicates the bit or bits to clear.
* For example, to clear bit 3 only, set uxBitsToClear to 0x08. To clear bit 3
* and bit 0 set uxBitsToClear to 0x09.
*
* @return If the request to execute the function was posted successfully then
* pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned
* if the timer service queue was full.
*
* Example usage:
<pre>
#define BIT_0 ( 1 << 0 )
#define BIT_4 ( 1 << 4 )
// An event group which it is assumed has already been created by a call to
// xEventGroupCreate().
EventGroupHandle_t xEventGroup;
void anInterruptHandler( void )
{
BaseType_t xResult;
// Clear bit 0 and bit 4 in xEventGroup.
xResult = xEventGroupClearBitsFromISR(
xEventGroup, // The event group being updated.
BIT_0 | BIT_4 ); // The bits being cleared.
if( xResult == pdPASS )
{
// The message was posted successfully.
}
}
</pre>
* \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR
* \ingroup EventGroup
*/
#if( configUSE_TRACE_FACILITY == 1 )
BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;
#else
#define xEventGroupClearBitsFromISR( xEventGroup, uxBitsToClear ) xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL )
#endif
/**
* event_groups.h
*<pre>
EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
</pre>
*
* Set bits within an event group.
* This function cannot be called from an interrupt. xEventGroupSetBitsFromISR()
* is a version that can be called from an interrupt.
*
* Setting bits in an event group will automatically unblock tasks that are
* blocked waiting for the bits.
*
* @param xEventGroup The event group in which the bits are to be set.
*
* @param uxBitsToSet A bitwise value that indicates the bit or bits to set.
* For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3
* and bit 0 set uxBitsToSet to 0x09.
*
* @return The value of the event group at the time the call to
* xEventGroupSetBits() returns. There are two reasons why the returned value
* might have the bits specified by the uxBitsToSet parameter cleared. First,
* if setting a bit results in a task that was waiting for the bit leaving the
* blocked state then it is possible the bit will be cleared automatically
* (see the xClearOnExit parameter of xEventGroupWaitBits()). Second, any
* unblocked (or otherwise Ready state) task that has a priority above that of
* the task that called xEventGroupSetBits() will execute and may change the
* event group value before the call to xEventGroupSetBits() returns.
*
* Example usage:
<pre>
#define BIT_0 ( 1 << 0 )
#define BIT_4 ( 1 << 4 )
void aFunction( EventGroupHandle_t xEventGroup )
{
EventBits_t uxBits;
// Set bit 0 and bit 4 in xEventGroup.
uxBits = xEventGroupSetBits(
xEventGroup, // The event group being updated.
BIT_0 | BIT_4 );// The bits being set.
if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
{
// Both bit 0 and bit 4 remained set when the function returned.
}
else if( ( uxBits & BIT_0 ) != 0 )
{
// Bit 0 remained set when the function returned, but bit 4 was
// cleared. It might be that bit 4 was cleared automatically as a
// task that was waiting for bit 4 was removed from the Blocked
// state.
}
else if( ( uxBits & BIT_4 ) != 0 )
{
// Bit 4 remained set when the function returned, but bit 0 was
// cleared. It might be that bit 0 was cleared automatically as a
// task that was waiting for bit 0 was removed from the Blocked
// state.
}
else
{
// Neither bit 0 nor bit 4 remained set. It might be that a task
// was waiting for both of the bits to be set, and the bits were
// cleared as the task left the Blocked state.
}
}
</pre>
* \defgroup xEventGroupSetBits xEventGroupSetBits
* \ingroup EventGroup
*/
EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION;
/**
* event_groups.h
*<pre>
BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* A version of xEventGroupSetBits() that can be called from an interrupt.
*
* Setting bits in an event group is not a deterministic operation because there
* are an unknown number of tasks that may be waiting for the bit or bits being
* set. FreeRTOS does not allow nondeterministic operations to be performed in
* interrupts or from critical sections. Therefore xEventGroupSetBitsFromISR()
* sends a message to the timer task to have the set operation performed in the
* context of the timer task - where a scheduler lock is used in place of a
* critical section.
*
* @param xEventGroup The event group in which the bits are to be set.
*
* @param uxBitsToSet A bitwise value that indicates the bit or bits to set.
* For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3
* and bit 0 set uxBitsToSet to 0x09.
*
* @param pxHigherPriorityTaskWoken As mentioned above, calling this function
* will result in a message being sent to the timer daemon task. If the
* priority of the timer daemon task is higher than the priority of the
* currently running task (the task the interrupt interrupted) then
* *pxHigherPriorityTaskWoken will be set to pdTRUE by
* xEventGroupSetBitsFromISR(), indicating that a context switch should be
* requested before the interrupt exits. For that reason
* *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the
* example code below.
*
* @return If the request to execute the function was posted successfully then
* pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned
* if the timer service queue was full.
*
* Example usage:
<pre>
#define BIT_0 ( 1 << 0 )
#define BIT_4 ( 1 << 4 )
// An event group which it is assumed has already been created by a call to
// xEventGroupCreate().
EventGroupHandle_t xEventGroup;
void anInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken, xResult;
// xHigherPriorityTaskWoken must be initialised to pdFALSE.
xHigherPriorityTaskWoken = pdFALSE;
// Set bit 0 and bit 4 in xEventGroup.
xResult = xEventGroupSetBitsFromISR(
xEventGroup, // The event group being updated.
BIT_0 | BIT_4, // The bits being set.
&xHigherPriorityTaskWoken );
// Was the message posted successfully?
if( xResult == pdPASS )
{
// If xHigherPriorityTaskWoken is now set to pdTRUE then a context
// switch should be requested. The macro used is port specific and
// will be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() -
// refer to the documentation page for the port being used.
portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
}
</pre>
* \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR
* \ingroup EventGroup
*/
#if( configUSE_TRACE_FACILITY == 1 )
BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
#else
#define xEventGroupSetBitsFromISR( xEventGroup, uxBitsToSet, pxHigherPriorityTaskWoken ) xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken )
#endif
/**
* event_groups.h
*<pre>
EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToSet,
const EventBits_t uxBitsToWaitFor,
TickType_t xTicksToWait );
</pre>
*
* Atomically set bits within an event group, then wait for a combination of
* bits to be set within the same event group. This functionality is typically
* used to synchronise multiple tasks, where each task has to wait for the other
* tasks to reach a synchronisation point before proceeding.
*
* This function cannot be used from an interrupt.
*
* The function will return before its block time expires if the bits specified
* by the uxBitsToWaitFor parameter are set, or become set within that time. In
* this case all the bits specified by uxBitsToWaitFor will be automatically
* cleared before the function returns.
*
* @param xEventGroup The event group in which the bits are being tested. The
* event group must have previously been created using a call to
* xEventGroupCreate().
*
* @param uxBitsToSet The bits to set in the event group before determining
* if, and possibly waiting for, all the bits specified by the uxBitsToWait
* parameter are set.
*
* @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test
* inside the event group. For example, to wait for bit 0 and bit 2 set
* uxBitsToWaitFor to 0x05. To wait for bits 0 and bit 1 and bit 2 set
* uxBitsToWaitFor to 0x07. Etc.
*
* @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait
* for all of the bits specified by uxBitsToWaitFor to become set.
*
* @return The value of the event group at the time either the bits being waited
* for became set, or the block time expired. Test the return value to know
* which bits were set. If xEventGroupSync() returned because its timeout
* expired then not all the bits being waited for will be set. If
* xEventGroupSync() returned because all the bits it was waiting for were
* set then the returned value is the event group value before any bits were
* automatically cleared.
*
* Example usage:
<pre>
// Bits used by the three tasks.
#define TASK_0_BIT ( 1 << 0 )
#define TASK_1_BIT ( 1 << 1 )
#define TASK_2_BIT ( 1 << 2 )
#define ALL_SYNC_BITS ( TASK_0_BIT | TASK_1_BIT | TASK_2_BIT )
// Use an event group to synchronise three tasks. It is assumed this event
// group has already been created elsewhere.
EventGroupHandle_t xEventBits;
void vTask0( void *pvParameters )
{
EventBits_t uxReturn;
TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
for( ;; )
{
// Perform task functionality here.
// Set bit 0 in the event flag to note this task has reached the
// sync point. The other two tasks will set the other two bits defined
// by ALL_SYNC_BITS. All three tasks have reached the synchronisation
// point when all the ALL_SYNC_BITS are set. Wait a maximum of 100ms
// for this to happen.
uxReturn = xEventGroupSync( xEventBits, TASK_0_BIT, ALL_SYNC_BITS, xTicksToWait );
if( ( uxReturn & ALL_SYNC_BITS ) == ALL_SYNC_BITS )
{
// All three tasks reached the synchronisation point before the call
// to xEventGroupSync() timed out.
}
}
}
void vTask1( void *pvParameters )
{
for( ;; )
{
// Perform task functionality here.
// Set bit 1 in the event flag to note this task has reached the
// synchronisation point. The other two tasks will set the other two
// bits defined by ALL_SYNC_BITS. All three tasks have reached the
// synchronisation point when all the ALL_SYNC_BITS are set. Wait
// indefinitely for this to happen.
xEventGroupSync( xEventBits, TASK_1_BIT, ALL_SYNC_BITS, portMAX_DELAY );
// xEventGroupSync() was called with an indefinite block time, so
* this task will only reach here if the synchronisation was made by all
// three tasks, so there is no need to test the return value.
}
}
void vTask2( void *pvParameters )
{
for( ;; )
{
// Perform task functionality here.
// Set bit 2 in the event flag to note this task has reached the
// synchronisation point. The other two tasks will set the other two
// bits defined by ALL_SYNC_BITS. All three tasks have reached the
// synchronisation point when all the ALL_SYNC_BITS are set. Wait
// indefinitely for this to happen.
xEventGroupSync( xEventBits, TASK_2_BIT, ALL_SYNC_BITS, portMAX_DELAY );
// xEventGroupSync() was called with an indefinite block time, so
* this task will only reach here if the synchronisation was made by all
// three tasks, so there is no need to test the return value.
}
}
</pre>
* \defgroup xEventGroupSync xEventGroupSync
* \ingroup EventGroup
*/
EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
/**
* event_groups.h
*<pre>
EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup );
</pre>
*
* Returns the current value of the bits in an event group. This function
* cannot be used from an interrupt.
*
* @param xEventGroup The event group being queried.
*
* @return The event group bits at the time xEventGroupGetBits() was called.
*
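* Example usage (an illustrative sketch only; the bit definition and the
* containing function are hypothetical):
<pre>
#define BIT_0 ( 1 << 0 )
void aFunction( EventGroupHandle_t xEventGroup )
{
EventBits_t uxBits;
// Read the current value of the event bits without blocking and
// without altering them.
uxBits = xEventGroupGetBits( xEventGroup );
if( ( uxBits & BIT_0 ) != 0 )
{
// Bit 0 was set at the time the bits were read.
}
}
</pre>
*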
* \defgroup xEventGroupGetBits xEventGroupGetBits
* \ingroup EventGroup
*/
#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 )
/**
* event_groups.h
*<pre>
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup );
</pre>
*
* A version of xEventGroupGetBits() that can be called from an ISR.
*
* @param xEventGroup The event group being queried.
*
* @return The event group bits at the time xEventGroupGetBitsFromISR() was called.
*
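* Example usage (an illustrative sketch only; the interrupt handler name is
* hypothetical and xEventGroup is assumed to have been created already):
<pre>
EventGroupHandle_t xEventGroup;
void anInterruptHandler( void )
{
EventBits_t uxBits;
// Read the current value of the event bits from within the ISR.
uxBits = xEventGroupGetBitsFromISR( xEventGroup );
}
</pre>
*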
* \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR
* \ingroup EventGroup
*/
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
/**
* event_groups.h
*<pre>
void vEventGroupDelete( EventGroupHandle_t xEventGroup );
</pre>
*
* Delete an event group that was previously created by a call to
* xEventGroupCreate(). Tasks that are blocked on the event group will be
* unblocked and obtain 0 as the event group's value.
*
* @param xEventGroup The event group being deleted.
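*
* Example usage (an illustrative sketch only; it is assumed xEventGroup was
* previously created with xEventGroupCreate()):
<pre>
EventGroupHandle_t xEventGroup;
void vTidyUp( void )
{
// Delete the event group. Any tasks blocked on xEventGroup are
// unblocked and obtain 0 as the event group's value.
vEventGroupDelete( xEventGroup );
}
</pre>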
*/
void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
/* For internal use only. */
void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION;
void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION;
#if( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxEventGroupGetNumber( void* xEventGroup ) PRIVILEGED_FUNCTION;
void vEventGroupSetNumber( void* xEventGroup, UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION;
#endif
#ifdef __cplusplus
}
#endif
#endif /* EVENT_GROUPS_H */


@ -1,412 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*
* This is the list implementation used by the scheduler. While it is tailored
* heavily for the scheduler's needs, it is also available for use by
* application code.
*
* list_ts can only store pointers to list_item_ts. Each ListItem_t contains a
* numeric value (xItemValue). Most of the time the lists are sorted in
* ascending item value order.
*
* Lists are created already containing one list item. The value of this
* item is the maximum possible that can be stored, it is therefore always at
* the end of the list and acts as a marker. The list member pxHead always
* points to this marker - even though it is at the tail of the list. This
* is because the tail contains a wrap back pointer to the true head of
* the list.
*
* In addition to its value, each list item contains a pointer to the next
* item in the list (pxNext), a pointer to the list it is in (pxContainer)
* and a pointer back to the object that contains it. These latter two
* pointers are included for efficiency of list manipulation. There is
* effectively a two way link between the object containing the list item and
* the list item itself.
*
*
* \page ListIntroduction List Implementation
* \ingroup FreeRTOSIntro
*/
#ifndef INC_FREERTOS_H
#error FreeRTOS.h must be included before list.h
#endif
#ifndef LIST_H
#define LIST_H
/*
* The list structure members are modified from within interrupts, and therefore
* by rights should be declared volatile. However, they are only modified in a
* functionally atomic way (within critical sections or with the scheduler
* suspended) and are either passed by reference into a function or indexed via
* a volatile variable. Therefore, in all use cases tested so far, the volatile
* qualifier can be omitted in order to provide a moderate performance
* improvement without adversely affecting functional behaviour. The assembly
* instructions generated by the IAR, ARM and GCC compilers when the respective
* compiler's options were set for maximum optimisation has been inspected and
* deemed to be as intended. That said, as compiler technology advances, and
* especially if aggressive cross module optimisation is used (a use case that
* has not been exercised to any great extent) then it is feasible that the
* volatile qualifier will be needed for correct optimisation. It is expected
* that, should a compiler remove essential code because the list structure
* members lack the volatile qualifier under aggressive cross module
* optimisation, the scheduler will fail in a complete and
* obvious way. If this is ever experienced
* then the volatile qualifier can be inserted in the relevant places within the
* list structures by simply defining configLIST_VOLATILE to volatile in
* FreeRTOSConfig.h (as per the example at the bottom of this comment block).
* If configLIST_VOLATILE is not defined then the preprocessor directives below
* will simply #define configLIST_VOLATILE away completely.
*
* To use volatile list structure members, add the following line to
* FreeRTOSConfig.h (without the quotes):
* "#define configLIST_VOLATILE volatile"
*/
#ifndef configLIST_VOLATILE
#define configLIST_VOLATILE
#endif /* configLIST_VOLATILE */
#ifdef __cplusplus
extern "C" {
#endif
/* Macros that can be used to place known values within the list structures,
then check that the known values do not get corrupted during the execution of
the application. These may catch the list data structures being overwritten in
memory. They will not catch data errors caused by incorrect configuration or
use of FreeRTOS.*/
#if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 )
/* Define the macros to do nothing. */
#define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE
#define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE
#define listFIRST_LIST_INTEGRITY_CHECK_VALUE
#define listSECOND_LIST_INTEGRITY_CHECK_VALUE
#define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )
#define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )
#define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList )
#define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList )
#define listTEST_LIST_ITEM_INTEGRITY( pxItem )
#define listTEST_LIST_INTEGRITY( pxList )
#else
/* Define macros that add new members into the list structures. */
#define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue1;
#define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue2;
#define listFIRST_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue1;
#define listSECOND_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue2;
/* Define macros that set the new structure members to known values. */
#define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
#define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
#define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) ( pxList )->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
#define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) ( pxList )->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
/* Define macros that will assert if one of the structure members does not
contain its expected value. */
#define listTEST_LIST_ITEM_INTEGRITY( pxItem ) configASSERT( ( ( pxItem )->xListItemIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxItem )->xListItemIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) )
#define listTEST_LIST_INTEGRITY( pxList ) configASSERT( ( ( pxList )->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxList )->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) )
#endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */
/*
* Definition of the only type of object that a list can contain.
*/
struct xLIST;
struct xLIST_ITEM
{
listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in descending order. */
struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */
struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */
void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */
struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */
listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
};
typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */
struct xMINI_LIST_ITEM
{
listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
configLIST_VOLATILE TickType_t xItemValue;
struct xLIST_ITEM * configLIST_VOLATILE pxNext;
struct xLIST_ITEM * configLIST_VOLATILE pxPrevious;
};
typedef struct xMINI_LIST_ITEM MiniListItem_t;
/*
* Definition of the type of queue used by the scheduler.
*/
typedef struct xLIST
{
listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
volatile UBaseType_t uxNumberOfItems;
ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */
MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */
listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
} List_t;
/*
* Access macro to set the owner of a list item. The owner of a list item
* is the object (usually a TCB) that contains the list item.
*
* \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER
* \ingroup LinkedList
*/
#define listSET_LIST_ITEM_OWNER( pxListItem, pxOwner ) ( ( pxListItem )->pvOwner = ( void * ) ( pxOwner ) )
/*
* Access macro to get the owner of a list item. The owner of a list item
* is the object (usually a TCB) that contains the list item.
*
* \page listGET_LIST_ITEM_OWNER listGET_LIST_ITEM_OWNER
* \ingroup LinkedList
*/
#define listGET_LIST_ITEM_OWNER( pxListItem ) ( ( pxListItem )->pvOwner )
/*
* Access macro to set the value of the list item. In most cases the value is
* used to sort the list in ascending order.
*
* \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE
* \ingroup LinkedList
*/
#define listSET_LIST_ITEM_VALUE( pxListItem, xValue ) ( ( pxListItem )->xItemValue = ( xValue ) )
/*
* Access macro to retrieve the value of the list item. The value can
* represent anything - for example the priority of a task, or the time at
* which a task should be unblocked.
*
* \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE
* \ingroup LinkedList
*/
#define listGET_LIST_ITEM_VALUE( pxListItem ) ( ( pxListItem )->xItemValue )
/*
* Access macro to retrieve the value of the list item at the head of a given
* list.
*
* \page listGET_ITEM_VALUE_OF_HEAD_ENTRY listGET_ITEM_VALUE_OF_HEAD_ENTRY
* \ingroup LinkedList
*/
#define listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext->xItemValue )
/*
* Return the list item at the head of the list.
*
* \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY
* \ingroup LinkedList
*/
#define listGET_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext )
/*
* Return the next list item after the one passed in.
*
* \page listGET_NEXT listGET_NEXT
* \ingroup LinkedList
*/
#define listGET_NEXT( pxListItem ) ( ( pxListItem )->pxNext )
/*
* Return the list item that marks the end of the list
*
* \page listGET_END_MARKER listGET_END_MARKER
* \ingroup LinkedList
*/
#define listGET_END_MARKER( pxList ) ( ( ListItem_t const * ) ( &( ( pxList )->xListEnd ) ) )
/*
* Access macro to determine if a list contains any items. The macro will
* only have the value true if the list is empty.
*
* \page listLIST_IS_EMPTY listLIST_IS_EMPTY
* \ingroup LinkedList
*/
#define listLIST_IS_EMPTY( pxList ) ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ? pdTRUE : pdFALSE )
/*
* Access macro to return the number of items in the list.
*/
#define listCURRENT_LIST_LENGTH( pxList ) ( ( pxList )->uxNumberOfItems )
/*
* Access function to obtain the owner of the next entry in a list.
*
* The list member pxIndex is used to walk through a list. Calling
* listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list
* and returns that entry's pxOwner parameter. Using multiple calls to this
* function it is therefore possible to move through every item contained in
* a list.
*
* The pxOwner parameter of a list item is a pointer to the object that owns
* the list item. In the scheduler this is normally a task control block.
* The pxOwner parameter effectively creates a two way link between the list
* item and its owner.
*
* @param pxTCB pxTCB is set to the address of the owner of the next list item.
* @param pxList The list from which the next item owner is to be returned.
*
* \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY
* \ingroup LinkedList
*/
#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \
{ \
List_t * const pxConstList = ( pxList ); \
/* Increment the index to the next item and return the item, ensuring */ \
/* we don't return the marker used at the end of the list. */ \
( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \
if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \
{ \
( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \
} \
( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \
}
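/*
 * Example use (a sketch, not from the original header; pxSomeList and the
 * TCB_t owner type are hypothetical stand-ins): visit the owner of every
 * item in a list exactly once.
 <pre>
 TCB_t *pxTCB;
 UBaseType_t ux;

 for( ux = 0; ux < listCURRENT_LIST_LENGTH( pxSomeList ); ux++ )
 {
     // Advances pxSomeList->pxIndex, skipping the end marker if necessary,
     // and returns the owner of the item now pointed to.
     listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxSomeList );
 }
 </pre>
 */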
/*
* Access function to obtain the owner of the first entry in a list. Lists
* are normally sorted in ascending item value order.
*
* This function returns the pxOwner member of the first item in the list.
* The pxOwner parameter of a list item is a pointer to the object that owns
* the list item. In the scheduler this is normally a task control block.
* The pxOwner parameter effectively creates a two way link between the list
* item and its owner.
*
* @param pxList The list from which the owner of the head item is to be
* returned.
*
* \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY
* \ingroup LinkedList
*/
#define listGET_OWNER_OF_HEAD_ENTRY( pxList ) ( (&( ( pxList )->xListEnd ))->pxNext->pvOwner )
/*
* Check to see if a list item is within a list. The list item maintains a
* "container" pointer that points to the list it is in. All this macro does
* is check to see if the container and the list match.
*
* @param pxList The list we want to know if the list item is within.
* @param pxListItem The list item we want to know if is in the list.
* @return pdTRUE if the list item is in the list, otherwise pdFALSE.
*/
#define listIS_CONTAINED_WITHIN( pxList, pxListItem ) ( ( ( pxListItem )->pxContainer == ( pxList ) ) ? ( pdTRUE ) : ( pdFALSE ) )
/*
* Return the list a list item is contained within (referenced from).
*
* @param pxListItem The list item being queried.
* @return A pointer to the List_t object that references the pxListItem
*/
#define listLIST_ITEM_CONTAINER( pxListItem ) ( ( pxListItem )->pxContainer )
/*
* This provides a crude means of knowing if a list has been initialised, as
* pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise()
* function.
*/
#define listLIST_IS_INITIALISED( pxList ) ( ( pxList )->xListEnd.xItemValue == portMAX_DELAY )
/*
* Must be called before a list is used! This initialises all the members
* of the list structure and inserts the xListEnd item into the list as a
* marker to the back of the list.
*
* @param pxList Pointer to the list being initialised.
*
* \page vListInitialise vListInitialise
* \ingroup LinkedList
*/
void vListInitialise( List_t * const pxList ) PRIVILEGED_FUNCTION;
/*
* Must be called before a list item is used. This sets the list container to
* null so the item does not think that it is already contained in a list.
*
* @param pxItem Pointer to the list item being initialised.
*
* \page vListInitialiseItem vListInitialiseItem
* \ingroup LinkedList
*/
void vListInitialiseItem( ListItem_t * const pxItem ) PRIVILEGED_FUNCTION;
/*
* Insert a list item into a list. The item will be inserted into the list in
 * a position determined by its item value (ascending item value order).
*
* @param pxList The list into which the item is to be inserted.
*
* @param pxNewListItem The item that is to be placed in the list.
*
* \page vListInsert vListInsert
* \ingroup LinkedList
*/
void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION;
/*
* Insert a list item into a list. The item will be inserted in a position
* such that it will be the last item within the list returned by multiple
* calls to listGET_OWNER_OF_NEXT_ENTRY.
*
* The list member pxIndex is used to walk through a list. Calling
* listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list.
* Placing an item in a list using vListInsertEnd effectively places the item
* in the list position pointed to by pxIndex. This means that every other
* item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before
* the pxIndex parameter again points to the item being inserted.
*
* @param pxList The list into which the item is to be inserted.
*
* @param pxNewListItem The list item to be inserted into the list.
*
* \page vListInsertEnd vListInsertEnd
* \ingroup LinkedList
*/
void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION;
/*
* Remove an item from a list. The list item has a pointer to the list that
* it is in, so only the list item need be passed into the function.
*
 * @param pxItemToRemove The item to be removed. The item will remove itself
 * from the list pointed to by its pxContainer parameter.
*
* @return The number of items that remain in the list after the list item has
* been removed.
*
* \page uxListRemove uxListRemove
* \ingroup LinkedList
*/
UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) PRIVILEGED_FUNCTION;
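/*
 * Example use of the list API as a whole (a sketch, not from the original
 * header; xMyList, xMyItem and pvSomeOwner are hypothetical):
 <pre>
 List_t xMyList;
 ListItem_t xMyItem;

 vListInitialise( &xMyList );
 vListInitialiseItem( &xMyItem );
 listSET_LIST_ITEM_OWNER( &xMyItem, pvSomeOwner ); // pvSomeOwner is assumed.
 listSET_LIST_ITEM_VALUE( &xMyItem, 10 );

 vListInsert( &xMyList, &xMyItem );                // Inserted in item value order.

 if( listIS_CONTAINED_WITHIN( &xMyList, &xMyItem ) == pdTRUE )
 {
     ( void ) uxListRemove( &xMyItem );            // The item knows its container.
 }
 </pre>
 */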
#ifdef __cplusplus
}
#endif
#endif


@@ -1,799 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*
* Message buffers build functionality on top of FreeRTOS stream buffers.
* Whereas stream buffers are used to send a continuous stream of data from one
* task or interrupt to another, message buffers are used to send variable
* length discrete messages from one task or interrupt to another. Their
 * implementation is lightweight, making them particularly suited for interrupt
* to task and core to core communication scenarios.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xMessageBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
 * (such as xMessageBufferReceive()) inside a critical section and set the receive
* timeout to 0.
*
* Message buffers hold variable length messages. To enable that, when a
* message is written to the message buffer an additional sizeof( size_t ) bytes
* are also written to store the message's length (that happens internally, with
* the API function). sizeof( size_t ) is typically 4 bytes on a 32-bit
* architecture, so writing a 10 byte message to a message buffer on a 32-bit
* architecture will actually reduce the available space in the message buffer
 * by 14 bytes (10 bytes are used by the message, and 4 bytes to hold the length
* of the message).
*/
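/*
 * A rough sizing sketch (illustrative only; the symbolic constants are
 * hypothetical): to hold up to N messages of at most L payload bytes at the
 * same time, dimension the buffer for the payload plus one length field per
 * message.
 <pre>
 #define mbMAX_MESSAGES      5
 #define mbMAX_MESSAGE_LEN   32
 const size_t xBufferSizeBytes = mbMAX_MESSAGES * ( mbMAX_MESSAGE_LEN + sizeof( size_t ) );
 </pre>
 */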
#ifndef FREERTOS_MESSAGE_BUFFER_H
#define FREERTOS_MESSAGE_BUFFER_H
/* Message buffers are built on top of stream buffers. */
#include "stream_buffer.h"
#if defined( __cplusplus )
extern "C" {
#endif
/**
* Type by which message buffers are referenced. For example, a call to
 * xMessageBufferCreate() returns a MessageBufferHandle_t variable that can
* then be used as a parameter to xMessageBufferSend(), xMessageBufferReceive(),
* etc.
*/
typedef void * MessageBufferHandle_t;
/*-----------------------------------------------------------*/
/**
* message_buffer.h
*
<pre>
MessageBufferHandle_t xMessageBufferCreate( size_t xBufferSizeBytes );
</pre>
*
* Creates a new message buffer using dynamically allocated memory. See
* xMessageBufferCreateStatic() for a version that uses statically allocated
* memory (memory that is allocated at compile time).
*
* configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in
* FreeRTOSConfig.h for xMessageBufferCreate() to be available.
*
* @param xBufferSizeBytes The total number of bytes (not messages) the message
* buffer will be able to hold at any one time. When a message is written to
* the message buffer an additional sizeof( size_t ) bytes are also written to
* store the message's length. sizeof( size_t ) is typically 4 bytes on a
* 32-bit architecture, so on most 32-bit architectures a 10 byte message will
* take up 14 bytes of message buffer space.
*
* @return If NULL is returned, then the message buffer cannot be created
* because there is insufficient heap memory available for FreeRTOS to allocate
* the message buffer data structures and storage area. A non-NULL value being
* returned indicates that the message buffer has been created successfully -
* the returned value should be stored as the handle to the created message
* buffer.
*
* Example use:
<pre>
void vAFunction( void )
{
MessageBufferHandle_t xMessageBuffer;
const size_t xMessageBufferSizeBytes = 100;
// Create a message buffer that can hold 100 bytes. The memory used to hold
// both the message buffer structure and the messages themselves is allocated
// dynamically. Each message added to the buffer consumes an additional 4
// bytes which are used to hold the length of the message.
xMessageBuffer = xMessageBufferCreate( xMessageBufferSizeBytes );
if( xMessageBuffer == NULL )
{
// There was not enough heap memory space available to create the
// message buffer.
}
else
{
// The message buffer was created successfully and can now be used.
}
</pre>
* \defgroup xMessageBufferCreate xMessageBufferCreate
* \ingroup MessageBufferManagement
*/
#define xMessageBufferCreate( xBufferSizeBytes ) ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE )
/**
* message_buffer.h
*
<pre>
MessageBufferHandle_t xMessageBufferCreateStatic( size_t xBufferSizeBytes,
uint8_t *pucMessageBufferStorageArea,
StaticMessageBuffer_t *pxStaticMessageBuffer );
</pre>
* Creates a new message buffer using statically allocated memory. See
* xMessageBufferCreate() for a version that uses dynamically allocated memory.
*
* @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the
* pucMessageBufferStorageArea parameter. When a message is written to the
* message buffer an additional sizeof( size_t ) bytes are also written to store
* the message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit
 * architecture, so on most 32-bit architectures a 10 byte message will take up
* 14 bytes of message buffer space. The maximum number of bytes that can be
* stored in the message buffer is actually (xBufferSizeBytes - 1).
*
* @param pucMessageBufferStorageArea Must point to a uint8_t array that is at
* least xBufferSizeBytes + 1 big. This is the array to which messages are
* copied when they are written to the message buffer.
*
* @param pxStaticMessageBuffer Must point to a variable of type
* StaticMessageBuffer_t, which will be used to hold the message buffer's data
* structure.
*
* @return If the message buffer is created successfully then a handle to the
* created message buffer is returned. If either pucMessageBufferStorageArea or
 * pxStaticMessageBuffer are NULL then NULL is returned.
*
* Example use:
<pre>
// Used to dimension the array used to hold the messages. The available space
// will actually be one less than this, so 999.
#define STORAGE_SIZE_BYTES 1000
// Defines the memory that will actually hold the messages within the message
// buffer.
static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ];
// The variable used to hold the message buffer structure.
StaticMessageBuffer_t xMessageBufferStruct;
void MyFunction( void )
{
MessageBufferHandle_t xMessageBuffer;
 xMessageBuffer = xMessageBufferCreateStatic( sizeof( ucStorageBuffer ),
                                              ucStorageBuffer,
&xMessageBufferStruct );
// As neither the pucMessageBufferStorageArea or pxStaticMessageBuffer
// parameters were NULL, xMessageBuffer will not be NULL, and can be used to
// reference the created message buffer in other message buffer API calls.
// Other code that uses the message buffer can go here.
}
</pre>
* \defgroup xMessageBufferCreateStatic xMessageBufferCreateStatic
* \ingroup MessageBufferManagement
*/
#define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer )
/**
* message_buffer.h
*
<pre>
size_t xMessageBufferSend( MessageBufferHandle_t xMessageBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
TickType_t xTicksToWait );
</pre>
*
* Sends a discrete message to the message buffer. The message can be any
* length that fits within the buffer's free space, and is copied into the
* buffer.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xMessageBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
 * (such as xMessageBufferReceive()) inside a critical section and set the receive
* block time to 0.
*
* Use xMessageBufferSend() to write to a message buffer from a task. Use
* xMessageBufferSendFromISR() to write to a message buffer from an interrupt
* service routine (ISR).
*
* @param xMessageBuffer The handle of the message buffer to which a message is
* being sent.
*
* @param pvTxData A pointer to the message that is to be copied into the
* message buffer.
*
* @param xDataLengthBytes The length of the message. That is, the number of
* bytes to copy from pvTxData into the message buffer. When a message is
* written to the message buffer an additional sizeof( size_t ) bytes are also
* written to store the message's length. sizeof( size_t ) is typically 4 bytes
 * on a 32-bit architecture, so on most 32-bit architectures setting
* xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
* bytes (20 bytes of message data and 4 bytes to hold the message length).
*
* @param xTicksToWait The maximum amount of time the calling task should remain
* in the Blocked state to wait for enough space to become available in the
* message buffer, should the message buffer have insufficient space when
* xMessageBufferSend() is called. The calling task will never block if
* xTicksToWait is zero. The block time is specified in tick periods, so the
* absolute time it represents is dependent on the tick frequency. The macro
* pdMS_TO_TICKS() can be used to convert a time specified in milliseconds into
* a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will cause
* the task to wait indefinitely (without timing out), provided
* INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. Tasks do not use any
* CPU time when they are in the Blocked state.
*
* @return The number of bytes written to the message buffer. If the call to
* xMessageBufferSend() times out before there was enough space to write the
* message into the message buffer then zero is returned. If the call did not
* time out then xDataLengthBytes is returned.
*
* Example use:
<pre>
void vAFunction( MessageBufferHandle_t xMessageBuffer )
{
size_t xBytesSent;
uint8_t ucArrayToSend[] = { 0, 1, 2, 3 };
char *pcStringToSend = "String to send";
const TickType_t x100ms = pdMS_TO_TICKS( 100 );
// Send an array to the message buffer, blocking for a maximum of 100ms to
// wait for enough space to be available in the message buffer.
xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms );
if( xBytesSent != sizeof( ucArrayToSend ) )
{
    // The call to xMessageBufferSend() timed out before there was enough
// space in the buffer for the data to be written.
}
// Send the string to the message buffer. Return immediately if there is
// not enough space in the buffer.
xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 );
if( xBytesSent != strlen( pcStringToSend ) )
{
// The string could not be added to the message buffer because there was
// not enough free space in the buffer.
}
}
</pre>
* \defgroup xMessageBufferSend xMessageBufferSend
* \ingroup MessageBufferManagement
*/
#define xMessageBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait )
/**
* message_buffer.h
*
<pre>
size_t xMessageBufferSendFromISR( MessageBufferHandle_t xMessageBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* Interrupt safe version of the API function that sends a discrete message to
* the message buffer. The message can be any length that fits within the
* buffer's free space, and is copied into the buffer.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xMessageBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
 * (such as xMessageBufferReceive()) inside a critical section and set the receive
* block time to 0.
*
* Use xMessageBufferSend() to write to a message buffer from a task. Use
* xMessageBufferSendFromISR() to write to a message buffer from an interrupt
* service routine (ISR).
*
* @param xMessageBuffer The handle of the message buffer to which a message is
* being sent.
*
* @param pvTxData A pointer to the message that is to be copied into the
* message buffer.
*
* @param xDataLengthBytes The length of the message. That is, the number of
* bytes to copy from pvTxData into the message buffer. When a message is
* written to the message buffer an additional sizeof( size_t ) bytes are also
* written to store the message's length. sizeof( size_t ) is typically 4 bytes
 * on a 32-bit architecture, so on most 32-bit architectures setting
* xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
* bytes (20 bytes of message data and 4 bytes to hold the message length).
*
* @param pxHigherPriorityTaskWoken It is possible that a message buffer will
* have a task blocked on it waiting for data. Calling
* xMessageBufferSendFromISR() can make data available, and so cause a task that
* was waiting for data to leave the Blocked state. If calling
* xMessageBufferSendFromISR() causes a task to leave the Blocked state, and the
* unblocked task has a priority higher than the currently executing task (the
* task that was interrupted), then, internally, xMessageBufferSendFromISR()
* will set *pxHigherPriorityTaskWoken to pdTRUE. If
* xMessageBufferSendFromISR() sets this value to pdTRUE, then normally a
* context switch should be performed before the interrupt is exited. This will
* ensure that the interrupt returns directly to the highest priority Ready
* state task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it
* is passed into the function. See the code example below for an example.
*
* @return The number of bytes actually written to the message buffer. If the
* message buffer didn't have enough free space for the message to be stored
* then 0 is returned, otherwise xDataLengthBytes is returned.
*
* Example use:
<pre>
// A message buffer that has already been created.
MessageBufferHandle_t xMessageBuffer;
void vAnInterruptServiceRoutine( void )
{
size_t xBytesSent;
char *pcStringToSend = "String to send";
BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
// Attempt to send the string to the message buffer.
xBytesSent = xMessageBufferSendFromISR( xMessageBuffer,
( void * ) pcStringToSend,
strlen( pcStringToSend ),
&xHigherPriorityTaskWoken );
if( xBytesSent != strlen( pcStringToSend ) )
{
// The string could not be added to the message buffer because there was
// not enough free space in the buffer.
}
// If xHigherPriorityTaskWoken was set to pdTRUE inside
// xMessageBufferSendFromISR() then a task that has a priority above the
// priority of the currently executing task was unblocked and a context
// switch should be performed to ensure the ISR returns to the unblocked
// task. In most FreeRTOS ports this is done by simply passing
// xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
    // variable's value, and perform the context switch if necessary.  Check the
// documentation for the port in use for port specific instructions.
taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
</pre>
* \defgroup xMessageBufferSendFromISR xMessageBufferSendFromISR
* \ingroup MessageBufferManagement
*/
#define xMessageBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken )
/**
* message_buffer.h
*
<pre>
size_t xMessageBufferReceive( MessageBufferHandle_t xMessageBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
TickType_t xTicksToWait );
</pre>
*
* Receives a discrete message from a message buffer. Messages can be of
* variable length and are copied out of the buffer.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xMessageBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
 * (such as xMessageBufferReceive()) inside a critical section and set the receive
* block time to 0.
*
* Use xMessageBufferReceive() to read from a message buffer from a task. Use
* xMessageBufferReceiveFromISR() to read from a message buffer from an
* interrupt service routine (ISR).
*
* @param xMessageBuffer The handle of the message buffer from which a message
* is being received.
*
* @param pvRxData A pointer to the buffer into which the received message is
* to be copied.
*
* @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData
* parameter. This sets the maximum length of the message that can be received.
* If xBufferLengthBytes is too small to hold the next message then the message
* will be left in the message buffer and 0 will be returned.
*
* @param xTicksToWait The maximum amount of time the task should remain in the
* Blocked state to wait for a message, should the message buffer be empty.
* xMessageBufferReceive() will return immediately if xTicksToWait is zero and
* the message buffer is empty. The block time is specified in tick periods, so
* the absolute time it represents is dependent on the tick frequency. The
* macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds
* into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will
* cause the task to wait indefinitely (without timing out), provided
* INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. Tasks do not use any
* CPU time when they are in the Blocked state.
*
* @return The length, in bytes, of the message read from the message buffer, if
* any. If xMessageBufferReceive() times out before a message became available
* then zero is returned. If the length of the message is greater than
* xBufferLengthBytes then the message will be left in the message buffer and
* zero is returned.
*
* Example use:
<pre>
 void vAFunction( MessageBufferHandle_t xMessageBuffer )
{
uint8_t ucRxData[ 20 ];
size_t xReceivedBytes;
const TickType_t xBlockTime = pdMS_TO_TICKS( 20 );
// Receive the next message from the message buffer. Wait in the Blocked
    // state (so not using any CPU processing time) for a maximum of 20ms for
// a message to become available.
xReceivedBytes = xMessageBufferReceive( xMessageBuffer,
( void * ) ucRxData,
sizeof( ucRxData ),
xBlockTime );
if( xReceivedBytes > 0 )
{
        // ucRxData contains a message that is xReceivedBytes long.  Process
// the message here....
}
}
</pre>
* \defgroup xMessageBufferReceive xMessageBufferReceive
* \ingroup MessageBufferManagement
*/
#define xMessageBufferReceive( xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) xStreamBufferReceive( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait )
/**
* message_buffer.h
*
<pre>
size_t xMessageBufferReceiveFromISR( MessageBufferHandle_t xMessageBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* An interrupt safe version of the API function that receives a discrete
* message from a message buffer. Messages can be of variable length and are
* copied out of the buffer.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xMessageBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
 * (such as xMessageBufferReceive()) inside a critical section and set the receive
* block time to 0.
*
* Use xMessageBufferReceive() to read from a message buffer from a task. Use
* xMessageBufferReceiveFromISR() to read from a message buffer from an
* interrupt service routine (ISR).
*
* @param xMessageBuffer The handle of the message buffer from which a message
* is being received.
*
* @param pvRxData A pointer to the buffer into which the received message is
* to be copied.
*
* @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData
* parameter. This sets the maximum length of the message that can be received.
* If xBufferLengthBytes is too small to hold the next message then the message
* will be left in the message buffer and 0 will be returned.
*
* @param pxHigherPriorityTaskWoken It is possible that a message buffer will
* have a task blocked on it waiting for space to become available. Calling
* xMessageBufferReceiveFromISR() can make space available, and so cause a task
* that is waiting for space to leave the Blocked state. If calling
* xMessageBufferReceiveFromISR() causes a task to leave the Blocked state, and
* the unblocked task has a priority higher than the currently executing task
* (the task that was interrupted), then, internally,
* xMessageBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE.
* If xMessageBufferReceiveFromISR() sets this value to pdTRUE, then normally a
* context switch should be performed before the interrupt is exited. That will
* ensure the interrupt returns directly to the highest priority Ready state
* task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is
* passed into the function. See the code example below for an example.
*
* @return The length, in bytes, of the message read from the message buffer, if
* any.
*
* Example use:
<pre>
// A message buffer that has already been created.
 MessageBufferHandle_t xMessageBuffer;
void vAnInterruptServiceRoutine( void )
{
uint8_t ucRxData[ 20 ];
size_t xReceivedBytes;
BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
// Receive the next message from the message buffer.
xReceivedBytes = xMessageBufferReceiveFromISR( xMessageBuffer,
( void * ) ucRxData,
sizeof( ucRxData ),
&xHigherPriorityTaskWoken );
if( xReceivedBytes > 0 )
{
        // ucRxData contains a message that is xReceivedBytes long.  Process
// the message here....
}
// If xHigherPriorityTaskWoken was set to pdTRUE inside
// xMessageBufferReceiveFromISR() then a task that has a priority above the
// priority of the currently executing task was unblocked and a context
// switch should be performed to ensure the ISR returns to the unblocked
// task. In most FreeRTOS ports this is done by simply passing
// xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
    // variable's value, and perform the context switch if necessary.  Check the
// documentation for the port in use for port specific instructions.
taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
</pre>
* \defgroup xMessageBufferReceiveFromISR xMessageBufferReceiveFromISR
* \ingroup MessageBufferManagement
*/
#define xMessageBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken )
/**
* message_buffer.h
*
<pre>
void vMessageBufferDelete( MessageBufferHandle_t xMessageBuffer );
</pre>
*
* Deletes a message buffer that was previously created using a call to
* xMessageBufferCreate() or xMessageBufferCreateStatic(). If the message
* buffer was created using dynamic memory (that is, by xMessageBufferCreate()),
* then the allocated memory is freed.
*
* A message buffer handle must not be used after the message buffer has been
* deleted.
*
* @param xMessageBuffer The handle of the message buffer to be deleted.
*
*/
#define vMessageBufferDelete( xMessageBuffer ) vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer )
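/*
 * Example use (a sketch; xMessageBuffer is assumed to have been created
 * earlier with xMessageBufferCreate()):
 <pre>
 vMessageBufferDelete( xMessageBuffer );
 xMessageBuffer = NULL; // The old handle must not be used again.
 </pre>
 */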
/**
* message_buffer.h
<pre>
BaseType_t xMessageBufferIsFull( MessageBufferHandle_t xMessageBuffer );
</pre>
*
* Tests to see if a message buffer is full. A message buffer is full if it
* cannot accept any more messages, of any size, until space is made available
* by a message being removed from the message buffer.
*
* @param xMessageBuffer The handle of the message buffer being queried.
*
* @return If the message buffer referenced by xMessageBuffer is full then
* pdTRUE is returned. Otherwise pdFALSE is returned.
*/
#define xMessageBufferIsFull( xMessageBuffer ) xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer )
/**
* message_buffer.h
<pre>
BaseType_t xMessageBufferIsEmpty( MessageBufferHandle_t xMessageBuffer );
</pre>
*
* Tests to see if a message buffer is empty (does not contain any messages).
*
* @param xMessageBuffer The handle of the message buffer being queried.
*
* @return If the message buffer referenced by xMessageBuffer is empty then
* pdTRUE is returned. Otherwise pdFALSE is returned.
*
*/
#define xMessageBufferIsEmpty( xMessageBuffer ) xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer )
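/*
 * Example use (a sketch; xMessageBuffer and the locals are hypothetical):
 * poll the buffer state to decide whether a non-blocking receive is
 * worthwhile.  This is safe because only one reader may use the buffer, so
 * no other task can drain it in between.
 <pre>
 uint8_t ucRxData[ 20 ];
 size_t xReceivedBytes;

 if( xMessageBufferIsEmpty( xMessageBuffer ) == pdFALSE )
 {
     // At least one complete message is waiting, so a receive with a block
     // time of 0 returns it, provided ucRxData is large enough to hold it.
     xReceivedBytes = xMessageBufferReceive( xMessageBuffer, ( void * ) ucRxData, sizeof( ucRxData ), 0 );
 }
 </pre>
 */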
/**
* message_buffer.h
<pre>
BaseType_t xMessageBufferReset( MessageBufferHandle_t xMessageBuffer );
</pre>
*
* Resets a message buffer to its initial empty state, discarding any message it
* contained.
*
* A message buffer can only be reset if there are no tasks blocked on it.
*
* @param xMessageBuffer The handle of the message buffer being reset.
*
* @return If the message buffer was reset then pdPASS is returned. If the
 * message buffer could not be reset because there was a task blocked on the
 * message buffer, either waiting for space to become available or waiting for
 * a message to become available, then pdFAIL is returned.
*
* \defgroup xMessageBufferReset xMessageBufferReset
* \ingroup MessageBufferManagement
*/
#define xMessageBufferReset( xMessageBuffer ) xStreamBufferReset( ( StreamBufferHandle_t ) xMessageBuffer )
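/*
 * Example use (a sketch; xMessageBuffer assumed already created):
 <pre>
 if( xMessageBufferReset( xMessageBuffer ) != pdPASS )
 {
     // A task was blocked on the message buffer, so it was not reset.
 }
 </pre>
 */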
/**
* message_buffer.h
<pre>
size_t xMessageBufferSpaceAvailable( MessageBufferHandle_t xMessageBuffer );
</pre>
* Returns the number of bytes of free space in the message buffer.
*
* @param xMessageBuffer The handle of the message buffer being queried.
*
* @return The number of bytes that can be written to the message buffer before
* the message buffer would be full. When a message is written to the message
* buffer an additional sizeof( size_t ) bytes are also written to store the
* message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit
 * architecture, so if xMessageBufferSpaceAvailable() returns 10, then the size
* of the largest message that can be written to the message buffer is 6 bytes.
*
* \defgroup xMessageBufferSpaceAvailable xMessageBufferSpaceAvailable
* \ingroup MessageBufferManagement
*/
#define xMessageBufferSpaceAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer )
#define xMessageBufferSpacesAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) /* Corrects typo in original macro name. */
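/*
 * Example use (a sketch; xMessageBuffer assumed already created): work out
 * the largest message that can currently be written, allowing for the
 * sizeof( size_t ) length field.
 <pre>
 const size_t xSpace = xMessageBufferSpaceAvailable( xMessageBuffer );
 const size_t xLargest = ( xSpace > sizeof( size_t ) ) ? ( xSpace - sizeof( size_t ) ) : 0;
 </pre>
 */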
/**
* message_buffer.h
<pre>
size_t xMessageBufferNextLengthBytes( MessageBufferHandle_t xMessageBuffer );
</pre>
* Returns the length (in bytes) of the next message in a message buffer.
* Useful if xMessageBufferReceive() returned 0 because the size of the buffer
* passed into xMessageBufferReceive() was too small to hold the next message.
*
* @param xMessageBuffer The handle of the message buffer being queried.
*
* @return The length (in bytes) of the next message in the message buffer, or 0
* if the message buffer is empty.
*
* \defgroup xMessageBufferNextLengthBytes xMessageBufferNextLengthBytes
* \ingroup MessageBufferManagement
*/
#define xMessageBufferNextLengthBytes( xMessageBuffer ) xStreamBufferNextMessageLengthBytes( ( StreamBufferHandle_t ) xMessageBuffer )
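/*
 * Example use (a sketch; follows an xMessageBufferReceive() call that
 * returned 0 because the assumed ucRxData buffer was too small):
 <pre>
 const size_t xNextLength = xMessageBufferNextLengthBytes( xMessageBuffer );

 if( xNextLength > sizeof( ucRxData ) )
 {
     // The next message cannot fit in ucRxData; it stays in the buffer until
     // it is read into a buffer of at least xNextLength bytes.
 }
 </pre>
 */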
/**
* message_buffer.h
*
<pre>
BaseType_t xMessageBufferSendCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* For advanced users only.
*
* The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when
* data is sent to a message buffer or stream buffer. If there was a task that
* was blocked on the message or stream buffer waiting for data to arrive then
* the sbSEND_COMPLETED() macro sends a notification to the task to remove it
* from the Blocked state. xMessageBufferSendCompletedFromISR() does the same
* thing. It is provided to enable application writers to implement their own
* version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME.
*
* See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for
* additional information.
*
* @param xStreamBuffer The handle of the stream buffer to which data was
* written.
*
* @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be
* initialised to pdFALSE before it is passed into
* xMessageBufferSendCompletedFromISR(). If calling
* xMessageBufferSendCompletedFromISR() removes a task from the Blocked state,
* and the task has a priority above the priority of the currently running task,
* then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a
* context switch should be performed before exiting the ISR.
*
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* \defgroup xMessageBufferSendCompletedFromISR xMessageBufferSendCompletedFromISR
* \ingroup StreamBufferManagement
*/
#define xMessageBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken )
/**
* message_buffer.h
*
<pre>
BaseType_t xMessageBufferReceiveCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* For advanced users only.
*
* The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when
* data is read out of a message buffer or stream buffer. If there was a task
* that was blocked on the message or stream buffer waiting for data to arrive
* then the sbRECEIVE_COMPLETED() macro sends a notification to the task to
* remove it from the Blocked state. xMessageBufferReceiveCompletedFromISR()
* does the same thing. It is provided to enable application writers to
* implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT
* ANY OTHER TIME.
*
* See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for
* additional information.
*
* @param xStreamBuffer The handle of the stream buffer from which data was
* read.
*
* @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be
* initialised to pdFALSE before it is passed into
* xMessageBufferReceiveCompletedFromISR(). If calling
* xMessageBufferReceiveCompletedFromISR() removes a task from the Blocked state,
* and the task has a priority above the priority of the currently running task,
* then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a
* context switch should be performed before exiting the ISR.
*
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* \defgroup xMessageBufferReceiveCompletedFromISR xMessageBufferReceiveCompletedFromISR
* \ingroup StreamBufferManagement
*/
#define xMessageBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferReceiveCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken )
#if defined( __cplusplus )
} /* extern "C" */
#endif
#endif /* !defined( FREERTOS_MESSAGE_BUFFER_H ) */


@@ -1,157 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*
* When the MPU is used the standard (non MPU) API functions are mapped to
* equivalents that start "MPU_", the prototypes for which are defined in this
 * header file.  This will cause the application code to call the MPU_ version
 * which wraps the non-MPU version with privilege promoting then demoting code,
 * so the kernel code always runs with full privileges.
*/
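/*
 * Illustrative sketch (not part of the original header): with
 * portUSING_MPU_WRAPPERS defined, an ordinary application call such as
 <pre>
 xTaskCreate( vTaskCode, "NAME", 128, NULL, 1, NULL );
 </pre>
 * compiles as a call to MPU_xTaskCreate(), which raises the privilege level,
 * calls the real (non-MPU) implementation, then demotes the privilege level
 * again before returning to the application.
 */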
#ifndef MPU_PROTOTYPES_H
#define MPU_PROTOTYPES_H
/* MPU versions of tasks.h API functions. */
BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, const char * const pcName, const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL;
TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL;
char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTaskGetHandle( const char *pcNameToQuery ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL;
TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) FREERTOS_SYSTEM_CALL;
void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL;
TickType_t MPU_xTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskGetRunTimeStats( char *pcWriteBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
uint32_t MPU_ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskNotifyStateClear( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL;
void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL;
/* MPU versions of queue.h API functions. */
BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL;
QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL;
QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL;
QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL;
QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL;
void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL;
/* MPU versions of timers.h API functions. */
TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL;
TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction, StaticTimer_t *pxTimerBuffer ) FREERTOS_SYSTEM_CALL;
void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL;
TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
/* MPU versions of event_group.h API functions. */
EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL;
EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL;
EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL;
UBaseType_t MPU_uxEventGroupGetNumber( void* xEventGroup ) FREERTOS_SYSTEM_CALL;
/* MPU versions of message/stream_buffer.h API functions. */
size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL;
BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL;
StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) FREERTOS_SYSTEM_CALL;
StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer ) FREERTOS_SYSTEM_CALL;
#endif /* MPU_PROTOTYPES_H */


@@ -1,186 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef MPU_WRAPPERS_H
#define MPU_WRAPPERS_H
/* This file redefines API functions to be called through a wrapper macro, but
only for ports that are using the MPU. */
#ifdef portUSING_MPU_WRAPPERS
/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE will be defined when this file is
included from queue.c or task.c to prevent it from having an effect within
those files. */
#ifndef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/*
* Map standard (non MPU) API functions to equivalents that start
* "MPU_". This will cause the application code to call the MPU_
* version, which wraps the non-MPU version with privilege promoting
 * then demoting code, so the kernel code always runs with full
* privileges.
*/
/* Map standard tasks.h API functions to the MPU equivalents. */
#define xTaskCreate MPU_xTaskCreate
#define xTaskCreateStatic MPU_xTaskCreateStatic
#define xTaskCreateRestricted MPU_xTaskCreateRestricted
#define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions
#define vTaskDelete MPU_vTaskDelete
#define vTaskDelay MPU_vTaskDelay
#define vTaskDelayUntil MPU_vTaskDelayUntil
#define xTaskAbortDelay MPU_xTaskAbortDelay
#define uxTaskPriorityGet MPU_uxTaskPriorityGet
#define eTaskGetState MPU_eTaskGetState
#define vTaskGetInfo MPU_vTaskGetInfo
#define vTaskPrioritySet MPU_vTaskPrioritySet
#define vTaskSuspend MPU_vTaskSuspend
#define vTaskResume MPU_vTaskResume
#define vTaskSuspendAll MPU_vTaskSuspendAll
#define xTaskResumeAll MPU_xTaskResumeAll
#define xTaskGetTickCount MPU_xTaskGetTickCount
#define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks
#define pcTaskGetName MPU_pcTaskGetName
#define xTaskGetHandle MPU_xTaskGetHandle
#define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark
#define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2
#define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag
#define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag
#define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer
#define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer
#define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook
#define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle
#define uxTaskGetSystemState MPU_uxTaskGetSystemState
#define vTaskList MPU_vTaskList
#define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats
#define xTaskGetIdleRunTimeCounter MPU_xTaskGetIdleRunTimeCounter
#define xTaskGenericNotify MPU_xTaskGenericNotify
#define xTaskNotifyWait MPU_xTaskNotifyWait
#define ulTaskNotifyTake MPU_ulTaskNotifyTake
#define xTaskNotifyStateClear MPU_xTaskNotifyStateClear
#define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle
#define vTaskSetTimeOutState MPU_vTaskSetTimeOutState
#define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut
#define xTaskGetSchedulerState MPU_xTaskGetSchedulerState
/* Map standard queue.h API functions to the MPU equivalents. */
#define xQueueGenericSend MPU_xQueueGenericSend
#define xQueueReceive MPU_xQueueReceive
#define xQueuePeek MPU_xQueuePeek
#define xQueueSemaphoreTake MPU_xQueueSemaphoreTake
#define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting
#define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable
#define vQueueDelete MPU_vQueueDelete
#define xQueueCreateMutex MPU_xQueueCreateMutex
#define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic
#define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore
#define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic
#define xQueueGetMutexHolder MPU_xQueueGetMutexHolder
#define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive
#define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive
#define xQueueGenericCreate MPU_xQueueGenericCreate
#define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic
#define xQueueCreateSet MPU_xQueueCreateSet
#define xQueueAddToSet MPU_xQueueAddToSet
#define xQueueRemoveFromSet MPU_xQueueRemoveFromSet
#define xQueueSelectFromSet MPU_xQueueSelectFromSet
#define xQueueGenericReset MPU_xQueueGenericReset
#if( configQUEUE_REGISTRY_SIZE > 0 )
#define vQueueAddToRegistry MPU_vQueueAddToRegistry
#define vQueueUnregisterQueue MPU_vQueueUnregisterQueue
#define pcQueueGetName MPU_pcQueueGetName
#endif
/* Map standard timer.h API functions to the MPU equivalents. */
#define xTimerCreate MPU_xTimerCreate
#define xTimerCreateStatic MPU_xTimerCreateStatic
#define pvTimerGetTimerID MPU_pvTimerGetTimerID
#define vTimerSetTimerID MPU_vTimerSetTimerID
#define xTimerIsTimerActive MPU_xTimerIsTimerActive
#define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle
#define xTimerPendFunctionCall MPU_xTimerPendFunctionCall
#define pcTimerGetName MPU_pcTimerGetName
#define vTimerSetReloadMode MPU_vTimerSetReloadMode
#define xTimerGetPeriod MPU_xTimerGetPeriod
#define xTimerGetExpiryTime MPU_xTimerGetExpiryTime
#define xTimerGenericCommand MPU_xTimerGenericCommand
/* Map standard event_group.h API functions to the MPU equivalents. */
#define xEventGroupCreate MPU_xEventGroupCreate
#define xEventGroupCreateStatic MPU_xEventGroupCreateStatic
#define xEventGroupWaitBits MPU_xEventGroupWaitBits
#define xEventGroupClearBits MPU_xEventGroupClearBits
#define xEventGroupSetBits MPU_xEventGroupSetBits
#define xEventGroupSync MPU_xEventGroupSync
#define vEventGroupDelete MPU_vEventGroupDelete
/* Map standard message/stream_buffer.h API functions to the MPU
equivalents. */
#define xStreamBufferSend MPU_xStreamBufferSend
#define xStreamBufferReceive MPU_xStreamBufferReceive
#define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes
#define vStreamBufferDelete MPU_vStreamBufferDelete
#define xStreamBufferIsFull MPU_xStreamBufferIsFull
#define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty
#define xStreamBufferReset MPU_xStreamBufferReset
#define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable
#define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable
#define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel
#define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate
#define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic
/* Remove the privileged function macro, but keep the PRIVILEGED_DATA
macro so applications can place data in privileged access sections
(useful when using statically allocated objects). */
#define PRIVILEGED_FUNCTION
#define PRIVILEGED_DATA __attribute__((section("privileged_data")))
#define FREERTOS_SYSTEM_CALL
#else /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */
/* Ensure API functions go in the privileged execution section. */
#define PRIVILEGED_FUNCTION __attribute__((section("privileged_functions")))
#define PRIVILEGED_DATA __attribute__((section("privileged_data")))
#define FREERTOS_SYSTEM_CALL __attribute__((section( "freertos_system_calls")))
#endif /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */
#else /* portUSING_MPU_WRAPPERS */
#define PRIVILEGED_FUNCTION
#define PRIVILEGED_DATA
#define FREERTOS_SYSTEM_CALL
#define portUSING_MPU_WRAPPERS 0
#endif /* portUSING_MPU_WRAPPERS */
#endif /* MPU_WRAPPERS_H */
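/* Illustrative sketch, not part of the original header: this is roughly the
 * shape the MPU_ wrappers take in mpu_wrappers.c. Each wrapper brackets the
 * real API call with privilege promotion and demotion. The helper names
 * xPortRaisePrivilege()/vPortResetPrivilege() are those used by the ARM MPU
 * ports of this kernel version, but treat the exact names as an assumption. */
BaseType_t MPU_xTaskResumeAll( void ) /* FREERTOS_SYSTEM_CALL */
{
    BaseType_t xReturn;
    BaseType_t xRunningPrivileged = xPortRaisePrivilege(); /* Promote to privileged mode if not already there. */

    xReturn = xTaskResumeAll(); /* Call the real, privileged implementation. */
    vPortResetPrivilege( xRunningPrivileged ); /* Demote back to the caller's original privilege level. */
    return xReturn;
}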

View File

@ -1,181 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*-----------------------------------------------------------
* Portable layer API. Each function must be defined for each port.
*----------------------------------------------------------*/
#ifndef PORTABLE_H
#define PORTABLE_H
/* Each FreeRTOS port has a unique portmacro.h header file. Originally a
pre-processor definition was used to ensure the pre-processor found the correct
portmacro.h file for the port being used. That scheme was deprecated in favour
of setting the compiler's include path such that it found the correct
portmacro.h file - removing the need for the constant and allowing the
portmacro.h file to be located anywhere in relation to the port being used.
Purely for reasons of backward compatibility the old method is still valid, but
to make it clear that new projects should not use it, support for the port
specific constants has been moved into the deprecated_definitions.h header
file. */
#include "deprecated_definitions.h"
/* If portENTER_CRITICAL is not defined then including deprecated_definitions.h
did not result in a portmacro.h header file being included - and it should be
included here. In this case the path to the correct portmacro.h header file
must be set in the compiler's include path. */
#ifndef portENTER_CRITICAL
#include "portmacro.h"
#endif
#if portBYTE_ALIGNMENT == 32
#define portBYTE_ALIGNMENT_MASK ( 0x001f )
#endif
#if portBYTE_ALIGNMENT == 16
#define portBYTE_ALIGNMENT_MASK ( 0x000f )
#endif
#if portBYTE_ALIGNMENT == 8
#define portBYTE_ALIGNMENT_MASK ( 0x0007 )
#endif
#if portBYTE_ALIGNMENT == 4
#define portBYTE_ALIGNMENT_MASK ( 0x0003 )
#endif
#if portBYTE_ALIGNMENT == 2
#define portBYTE_ALIGNMENT_MASK ( 0x0001 )
#endif
#if portBYTE_ALIGNMENT == 1
#define portBYTE_ALIGNMENT_MASK ( 0x0000 )
#endif
#ifndef portBYTE_ALIGNMENT_MASK
#error "Invalid portBYTE_ALIGNMENT definition"
#endif
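/* Illustrative sketch, added for clarity (not in the original file): the heap
 * implementations use portBYTE_ALIGNMENT_MASK to round allocation sizes up to
 * the port's alignment boundary, along these lines (mirroring heap_4.c): */
size_t xAlignedSize( size_t xWantedSize )
{
    if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
    {
        /* Byte alignment required - round up to the next boundary. */
        xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
    }
    return xWantedSize;
}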
#ifndef portNUM_CONFIGURABLE_REGIONS
#define portNUM_CONFIGURABLE_REGIONS 1
#endif
#ifndef portHAS_STACK_OVERFLOW_CHECKING
#define portHAS_STACK_OVERFLOW_CHECKING 0
#endif
#ifndef portARCH_NAME
#define portARCH_NAME NULL
#endif
#ifdef __cplusplus
extern "C" {
#endif
#include "mpu_wrappers.h"
/*
* Set up the stack of a new task so it is ready to be placed under the
* scheduler control. The registers have to be placed on the stack in
* the order that the port expects to find them.
*
*/
#if( portUSING_MPU_WRAPPERS == 1 )
#if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
#else
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION;
#endif
#else
#if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION;
#else
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION;
#endif
#endif
/* Used by heap_5.c. */
typedef struct HeapRegion
{
uint8_t *pucStartAddress;
size_t xSizeInBytes;
} HeapRegion_t;
/*
* Used to define multiple heap regions for use by heap_5.c. This function
* must be called before any calls to pvPortMalloc() - note that creating a
* task, queue, semaphore, mutex, software timer, event group, etc. will
* result in pvPortMalloc() being called.
*
* pxHeapRegions passes in an array of HeapRegion_t structures - each of which
* defines a region of memory that can be used as the heap. The array is
* terminated by a HeapRegion_t structure that has a size of 0. The region
* with the lowest start address must appear first in the array.
*/
void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION;
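/* Illustrative usage sketch (added, not part of the original header): two RAM
 * blocks handed to heap_5.c. The region with the lowest start address must
 * come first, and a zero-sized entry terminates the array. The array sizes
 * and the vSetupHeap() wrapper are made up for the example. */
static uint8_t ucHeap1[ 0x10000 ];
static uint8_t ucHeap2[ 0x8000 ];

static const HeapRegion_t xHeapRegions[] =
{
    { ucHeap1, sizeof( ucHeap1 ) }, /* Assumed to sit at the lower address. */
    { ucHeap2, sizeof( ucHeap2 ) },
    { NULL, 0 }                     /* Marks the end of the array. */
};

void vSetupHeap( void )
{
    /* Must run before the first pvPortMalloc() call - so before any kernel
     * object is created. */
    vPortDefineHeapRegions( xHeapRegions );
}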
/*
* Map to the memory management routines required for the port.
*/
void *pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION;
void vPortFree( void *pv ) PRIVILEGED_FUNCTION;
void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION;
size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION;
size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION;
/*
* Set up the hardware ready for the scheduler to take control. This generally
* sets up a tick interrupt and sets timers for the correct tick frequency.
*/
BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION;
/*
* Undo any hardware/ISR setup that was performed by xPortStartScheduler() so
* the hardware is left in its original condition after the scheduler stops
* executing.
*/
void vPortEndScheduler( void ) PRIVILEGED_FUNCTION;
/*
* The structures and methods of manipulating the MPU are contained within the
* port layer.
*
* Fills the xMPUSettings structure with the memory region information
* contained in xRegions.
*/
#if( portUSING_MPU_WRAPPERS == 1 )
struct xMEMORY_REGION;
void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t ulStackDepth ) PRIVILEGED_FUNCTION;
#endif
#ifdef __cplusplus
}
#endif
#endif /* PORTABLE_H */

View File

@ -1,124 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef PROJDEFS_H
#define PROJDEFS_H
/*
* Defines the prototype to which task functions must conform. Defined in this
* file to ensure the type is known before portable.h is included.
*/
typedef void (*TaskFunction_t)( void * );
/* Converts a time in milliseconds to a time in ticks. This macro can be
overridden by a macro of the same name defined in FreeRTOSConfig.h in case the
definition here is not suitable for your application. */
#ifndef pdMS_TO_TICKS
#define pdMS_TO_TICKS( xTimeInMs ) ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000 ) )
#endif
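/* Illustrative usage sketch (added): expressing a delay in milliseconds keeps
 * application code independent of configTICK_RATE_HZ. */
void vWaitQuarterSecond( void )
{
    const TickType_t xDelay = pdMS_TO_TICKS( 250 ); /* e.g. 250 ticks at a 1 kHz tick rate. */
    vTaskDelay( xDelay );
}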
#define pdFALSE ( ( BaseType_t ) 0 )
#define pdTRUE ( ( BaseType_t ) 1 )
#define pdPASS ( pdTRUE )
#define pdFAIL ( pdFALSE )
#define errQUEUE_EMPTY ( ( BaseType_t ) 0 )
#define errQUEUE_FULL ( ( BaseType_t ) 0 )
/* FreeRTOS error definitions. */
#define errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ( -1 )
#define errQUEUE_BLOCKED ( -4 )
#define errQUEUE_YIELD ( -5 )
/* Macros used for basic data corruption checks. */
#ifndef configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES
#define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0
#endif
#if( configUSE_16_BIT_TICKS == 1 )
#define pdINTEGRITY_CHECK_VALUE 0x5a5a
#else
#define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL
#endif
/* The following errno values are used by FreeRTOS+ components, not FreeRTOS
itself. */
#define pdFREERTOS_ERRNO_NONE 0 /* No errors */
#define pdFREERTOS_ERRNO_ENOENT 2 /* No such file or directory */
#define pdFREERTOS_ERRNO_EINTR 4 /* Interrupted system call */
#define pdFREERTOS_ERRNO_EIO 5 /* I/O error */
#define pdFREERTOS_ERRNO_ENXIO 6 /* No such device or address */
#define pdFREERTOS_ERRNO_EBADF 9 /* Bad file number */
#define pdFREERTOS_ERRNO_EAGAIN 11 /* No more processes */
#define pdFREERTOS_ERRNO_EWOULDBLOCK 11 /* Operation would block */
#define pdFREERTOS_ERRNO_ENOMEM 12 /* Not enough memory */
#define pdFREERTOS_ERRNO_EACCES 13 /* Permission denied */
#define pdFREERTOS_ERRNO_EFAULT 14 /* Bad address */
#define pdFREERTOS_ERRNO_EBUSY 16 /* Mount device busy */
#define pdFREERTOS_ERRNO_EEXIST 17 /* File exists */
#define pdFREERTOS_ERRNO_EXDEV 18 /* Cross-device link */
#define pdFREERTOS_ERRNO_ENODEV 19 /* No such device */
#define pdFREERTOS_ERRNO_ENOTDIR 20 /* Not a directory */
#define pdFREERTOS_ERRNO_EISDIR 21 /* Is a directory */
#define pdFREERTOS_ERRNO_EINVAL 22 /* Invalid argument */
#define pdFREERTOS_ERRNO_ENOSPC 28 /* No space left on device */
#define pdFREERTOS_ERRNO_ESPIPE 29 /* Illegal seek */
#define pdFREERTOS_ERRNO_EROFS 30 /* Read only file system */
#define pdFREERTOS_ERRNO_EUNATCH 42 /* Protocol driver not attached */
#define pdFREERTOS_ERRNO_EBADE 50 /* Invalid exchange */
#define pdFREERTOS_ERRNO_EFTYPE 79 /* Inappropriate file type or format */
#define pdFREERTOS_ERRNO_ENMFILE 89 /* No more files */
#define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */
#define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */
#define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
#define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */
#define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */
#define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */
#define pdFREERTOS_ERRNO_ETIMEDOUT 116 /* Connection timed out */
#define pdFREERTOS_ERRNO_EINPROGRESS 119 /* Connection already in progress */
#define pdFREERTOS_ERRNO_EALREADY 120 /* Socket already connected */
#define pdFREERTOS_ERRNO_EADDRNOTAVAIL 125 /* Address not available */
#define pdFREERTOS_ERRNO_EISCONN 127 /* Socket is already connected */
#define pdFREERTOS_ERRNO_ENOTCONN 128 /* Socket is not connected */
#define pdFREERTOS_ERRNO_ENOMEDIUM 135 /* No medium inserted */
#define pdFREERTOS_ERRNO_EILSEQ 138 /* An invalid UTF-16 sequence was encountered. */
#define pdFREERTOS_ERRNO_ECANCELED 140 /* Operation canceled. */
/* The following endian values are used by FreeRTOS+ components, not FreeRTOS
itself. */
#define pdFREERTOS_LITTLE_ENDIAN 0
#define pdFREERTOS_BIG_ENDIAN 1
/* Re-defining endian values for generic naming. */
#define pdLITTLE_ENDIAN pdFREERTOS_LITTLE_ENDIAN
#define pdBIG_ENDIAN pdFREERTOS_BIG_ENDIAN
#endif /* PROJDEFS_H */

View File

@ -1,129 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef STACK_MACROS_H
#define STACK_MACROS_H
/*
* Call the stack overflow hook function if the stack of the task being swapped
* out is currently overflowed, or looks like it might have overflowed in the
* past.
*
* Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check
* the current stack state only - comparing the current top of stack value to
* the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1
* will also cause the last few stack bytes to be checked to ensure the value
* to which the bytes were set when the task was created have not been
* overwritten. Note this second test does not guarantee that an overflowed
* stack will always be recognised.
*/
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) )
/* Only the current stack state is to be checked. */
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
/* Is the currently saved stack pointer within the stack limit? */ \
if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) )
/* Only the current stack state is to be checked. */
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
\
/* Is the currently saved stack pointer within the stack limit? */ \
if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) )
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \
const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \
\
if( ( pulStack[ 0 ] != ulCheckValue ) || \
( pulStack[ 1 ] != ulCheckValue ) || \
( pulStack[ 2 ] != ulCheckValue ) || \
( pulStack[ 3 ] != ulCheckValue ) ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
/*-----------------------------------------------------------*/
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) )
#define taskCHECK_FOR_STACK_OVERFLOW() \
{ \
int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \
static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \
\
\
pcEndOfStack -= sizeof( ucExpectedStackBytes ); \
\
/* Has the extremity of the task stack ever been written over? */ \
if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \
{ \
vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
} \
}
#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */
/*-----------------------------------------------------------*/
/* Remove stack overflow macro if not being used. */
#ifndef taskCHECK_FOR_STACK_OVERFLOW
#define taskCHECK_FOR_STACK_OVERFLOW()
#endif
#endif /* STACK_MACROS_H */
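/* Illustrative sketch (added): every taskCHECK_FOR_STACK_OVERFLOW() variant
 * above reports through vApplicationStackOverflowHook(), which the application
 * must supply when configCHECK_FOR_STACK_OVERFLOW is non-zero. A minimal hook,
 * using the commonly documented signature for this kernel version: */
void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName )
{
    ( void ) xTask;
    ( void ) pcTaskName; /* A real hook might log the task name first. */

    /* The stack state is unrecoverable - stop here for the debugger. */
    taskDISABLE_INTERRUPTS();
    for( ;; );
}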

View File

@ -1,855 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*
* Stream buffers are used to send a continuous stream of data from one task or
* interrupt to another. Their implementation is light weight, making them
* particularly suited for interrupt to task and core to core communication
* scenarios.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xStreamBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
* (such as xStreamBufferReceive()) inside a critical section and set the
* receive block time to 0.
*
*/
#ifndef STREAM_BUFFER_H
#define STREAM_BUFFER_H
#if defined( __cplusplus )
extern "C" {
#endif
/**
* Type by which stream buffers are referenced. For example, a call to
* xStreamBufferCreate() returns a StreamBufferHandle_t variable that can
* then be used as a parameter to xStreamBufferSend(), xStreamBufferReceive(),
* etc.
*/
struct StreamBufferDef_t;
typedef struct StreamBufferDef_t * StreamBufferHandle_t;
/**
* stream_buffer.h
*
<pre>
StreamBufferHandle_t xStreamBufferCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes );
</pre>
*
* Creates a new stream buffer using dynamically allocated memory. See
* xStreamBufferCreateStatic() for a version that uses statically allocated
* memory (memory that is allocated at compile time).
*
* configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in
* FreeRTOSConfig.h for xStreamBufferCreate() to be available.
*
* @param xBufferSizeBytes The total number of bytes the stream buffer will be
* able to hold at any one time.
*
* @param xTriggerLevelBytes The number of bytes that must be in the stream
* buffer before a task that is blocked on the stream buffer waiting for data is
* moved out of the blocked state. For example, if a task is blocked on a read
* of an empty stream buffer that has a trigger level of 1 then the task will be
* unblocked when a single byte is written to the buffer or the task's block
* time expires. As another example, if a task is blocked on a read of an empty
* stream buffer that has a trigger level of 10 then the task will not be
* unblocked until the stream buffer contains at least 10 bytes or the task's
* block time expires. If a reading task's block time expires before the
* trigger level is reached then the task will still receive however many bytes
* are actually available. Setting a trigger level of 0 will result in a
* trigger level of 1 being used. It is not valid to specify a trigger level
* that is greater than the buffer size.
*
* @return If NULL is returned, then the stream buffer cannot be created
* because there is insufficient heap memory available for FreeRTOS to allocate
* the stream buffer data structures and storage area. A non-NULL value being
* returned indicates that the stream buffer has been created successfully -
* the returned value should be stored as the handle to the created stream
* buffer.
*
* Example use:
<pre>
void vAFunction( void )
{
StreamBufferHandle_t xStreamBuffer;
const size_t xStreamBufferSizeBytes = 100, xTriggerLevel = 10;
// Create a stream buffer that can hold 100 bytes. The memory used to hold
// both the stream buffer structure and the data in the stream buffer is
// allocated dynamically.
xStreamBuffer = xStreamBufferCreate( xStreamBufferSizeBytes, xTriggerLevel );
if( xStreamBuffer == NULL )
{
// There was not enough heap memory space available to create the
// stream buffer.
}
else
{
// The stream buffer was created successfully and can now be used.
}
}
</pre>
* \defgroup xStreamBufferCreate xStreamBufferCreate
* \ingroup StreamBufferManagement
*/
#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE )
/**
* stream_buffer.h
*
<pre>
StreamBufferHandle_t xStreamBufferCreateStatic( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
uint8_t *pucStreamBufferStorageArea,
StaticStreamBuffer_t *pxStaticStreamBuffer );
</pre>
* Creates a new stream buffer using statically allocated memory. See
* xStreamBufferCreate() for a version that uses dynamically allocated memory.
*
* configSUPPORT_STATIC_ALLOCATION must be set to 1 in FreeRTOSConfig.h for
* xStreamBufferCreateStatic() to be available.
*
* @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the
* pucStreamBufferStorageArea parameter.
*
* @param xTriggerLevelBytes The number of bytes that must be in the stream
* buffer before a task that is blocked on the stream buffer waiting for data is
* moved out of the blocked state. For example, if a task is blocked on a read
* of an empty stream buffer that has a trigger level of 1 then the task will be
* unblocked when a single byte is written to the buffer or the task's block
* time expires. As another example, if a task is blocked on a read of an empty
* stream buffer that has a trigger level of 10 then the task will not be
* unblocked until the stream buffer contains at least 10 bytes or the task's
* block time expires. If a reading task's block time expires before the
* trigger level is reached then the task will still receive however many bytes
* are actually available. Setting a trigger level of 0 will result in a
* trigger level of 1 being used. It is not valid to specify a trigger level
* that is greater than the buffer size.
*
* @param pucStreamBufferStorageArea Must point to a uint8_t array that is at
* least xBufferSizeBytes + 1 big. This is the array to which streams are
* copied when they are written to the stream buffer.
*
* @param pxStaticStreamBuffer Must point to a variable of type
* StaticStreamBuffer_t, which will be used to hold the stream buffer's data
* structure.
*
* @return If the stream buffer is created successfully then a handle to the
* created stream buffer is returned. If either pucStreamBufferStorageArea or
* pxStaticStreamBuffer are NULL then NULL is returned.
*
* Example use:
<pre>
// Used to dimension the array used to hold the streams. The available space
// will actually be one less than this, so 999.
#define STORAGE_SIZE_BYTES 1000
// Defines the memory that will actually hold the streams within the stream
// buffer.
static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ];
// The variable used to hold the stream buffer structure.
StaticStreamBuffer_t xStreamBufferStruct;
void MyFunction( void )
{
StreamBufferHandle_t xStreamBuffer;
const size_t xTriggerLevel = 1;
xStreamBuffer = xStreamBufferCreateStatic( sizeof( ucStorageBuffer ),
xTriggerLevel,
ucStorageBuffer,
&xStreamBufferStruct );
// As neither the pucStreamBufferStorageArea or pxStaticStreamBuffer
// parameters were NULL, xStreamBuffer will not be NULL, and can be used to
// reference the created stream buffer in other stream buffer API calls.
// Other code that uses the stream buffer can go here.
}
</pre>
* \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic
* \ingroup StreamBufferManagement
*/
#define xStreamBufferCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer )
/**
* stream_buffer.h
*
<pre>
size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
TickType_t xTicksToWait );
</pre>
*
* Sends bytes to a stream buffer. The bytes are copied into the stream buffer.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xStreamBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
* (such as xStreamBufferReceive()) inside a critical section and set the receive
* block time to 0.
*
* Use xStreamBufferSend() to write to a stream buffer from a task. Use
* xStreamBufferSendFromISR() to write to a stream buffer from an interrupt
* service routine (ISR).
*
* @param xStreamBuffer The handle of the stream buffer to which a stream is
* being sent.
*
* @param pvTxData A pointer to the buffer that holds the bytes to be copied
* into the stream buffer.
*
* @param xDataLengthBytes The maximum number of bytes to copy from pvTxData
* into the stream buffer.
*
* @param xTicksToWait The maximum amount of time the task should remain in the
* Blocked state to wait for enough space to become available in the stream
* buffer, should the stream buffer contain too little space to hold
* another xDataLengthBytes bytes. The block time is specified in tick periods,
* so the absolute time it represents is dependent on the tick frequency. The
* macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds
* into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will
* cause the task to wait indefinitely (without timing out), provided
* INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. If a task times out
* before it can write all xDataLengthBytes into the buffer it will still write
* as many bytes as possible. A task does not use any CPU time when it is in
* the blocked state.
*
* @return The number of bytes written to the stream buffer. If a task times
* out before it can write all xDataLengthBytes into the buffer it will still
* write as many bytes as possible.
*
* Example use:
<pre>
void vAFunction( StreamBufferHandle_t xStreamBuffer )
{
size_t xBytesSent;
uint8_t ucArrayToSend[] = { 0, 1, 2, 3 };
char *pcStringToSend = "String to send";
const TickType_t x100ms = pdMS_TO_TICKS( 100 );
// Send an array to the stream buffer, blocking for a maximum of 100ms to
// wait for enough space to be available in the stream buffer.
xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms );
if( xBytesSent != sizeof( ucArrayToSend ) )
{
// The call to xStreamBufferSend() timed out before there was enough
// space in the buffer for the data to be written, but it did
// successfully write xBytesSent bytes.
}
// Send the string to the stream buffer. Return immediately if there is not
// enough space in the buffer.
xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 );
if( xBytesSent != strlen( pcStringToSend ) )
{
// The entire string could not be added to the stream buffer because
// there was not enough free space in the buffer, but xBytesSent bytes
// were sent. Could try again to send the remaining bytes.
}
}
</pre>
* \defgroup xStreamBufferSend xStreamBufferSend
* \ingroup StreamBufferManagement
*/
size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
/**
* stream_buffer.h
*
<pre>
size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* Interrupt safe version of the API function that sends a stream of bytes to
* the stream buffer.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xStreamBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
* (such as xStreamBufferReceive()) inside a critical section and set the receive
* block time to 0.
*
* Use xStreamBufferSend() to write to a stream buffer from a task. Use
* xStreamBufferSendFromISR() to write to a stream buffer from an interrupt
* service routine (ISR).
*
* @param xStreamBuffer The handle of the stream buffer to which a stream is
* being sent.
*
* @param pvTxData A pointer to the data that is to be copied into the stream
* buffer.
*
* @param xDataLengthBytes The maximum number of bytes to copy from pvTxData
* into the stream buffer.
*
* @param pxHigherPriorityTaskWoken It is possible that a stream buffer will
* have a task blocked on it waiting for data. Calling
* xStreamBufferSendFromISR() can make data available, and so cause a task that
* was waiting for data to leave the Blocked state. If calling
* xStreamBufferSendFromISR() causes a task to leave the Blocked state, and the
* unblocked task has a priority higher than the currently executing task (the
* task that was interrupted), then, internally, xStreamBufferSendFromISR()
* will set *pxHigherPriorityTaskWoken to pdTRUE. If
* xStreamBufferSendFromISR() sets this value to pdTRUE, then normally a
* context switch should be performed before the interrupt is exited. This will
* ensure that the interrupt returns directly to the highest priority Ready
* state task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it
* is passed into the function. See the code example below.
*
* @return The number of bytes actually written to the stream buffer, which will
* be less than xDataLengthBytes if the stream buffer didn't have enough free
* space for all the bytes to be written.
*
* Example use:
<pre>
// A stream buffer that has already been created.
StreamBufferHandle_t xStreamBuffer;
void vAnInterruptServiceRoutine( void )
{
size_t xBytesSent;
char *pcStringToSend = "String to send";
BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
// Attempt to send the string to the stream buffer.
xBytesSent = xStreamBufferSendFromISR( xStreamBuffer,
( void * ) pcStringToSend,
strlen( pcStringToSend ),
&xHigherPriorityTaskWoken );
if( xBytesSent != strlen( pcStringToSend ) )
{
// There was not enough free space in the stream buffer for the entire
// string to be written, but xBytesSent bytes were written.
}
// If xHigherPriorityTaskWoken was set to pdTRUE inside
// xStreamBufferSendFromISR() then a task that has a priority above the
// priority of the currently executing task was unblocked and a context
// switch should be performed to ensure the ISR returns to the unblocked
// task. In most FreeRTOS ports this is done by simply passing
// xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
// variable's value, and perform the context switch if necessary. Check the
// documentation for the port in use for port specific instructions.
taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
</pre>
* \defgroup xStreamBufferSendFromISR xStreamBufferSendFromISR
* \ingroup StreamBufferManagement
*/
size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
/**
* stream_buffer.h
*
<pre>
size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
TickType_t xTicksToWait );
</pre>
*
* Receives bytes from a stream buffer.
*
* ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer
* implementation (so also the message buffer implementation, as message buffers
* are built on top of stream buffers) assumes there is only one task or
* interrupt that will write to the buffer (the writer), and only one task or
* interrupt that will read from the buffer (the reader). It is safe for the
* writer and reader to be different tasks or interrupts, but, unlike other
* FreeRTOS objects, it is not safe to have multiple different writers or
* multiple different readers. If there are to be multiple different writers
* then the application writer must place each call to a writing API function
* (such as xStreamBufferSend()) inside a critical section and set the send
* block time to 0. Likewise, if there are to be multiple different readers
* then the application writer must place each call to a reading API function
* (such as xStreamBufferReceive()) inside a critical section and set the receive
* block time to 0.
*
* Use xStreamBufferReceive() to read from a stream buffer from a task. Use
* xStreamBufferReceiveFromISR() to read from a stream buffer from an
* interrupt service routine (ISR).
*
* @param xStreamBuffer The handle of the stream buffer from which bytes are to
* be received.
*
* @param pvRxData A pointer to the buffer into which the received bytes will be
* copied.
*
* @param xBufferLengthBytes The length of the buffer pointed to by the
* pvRxData parameter. This sets the maximum number of bytes to receive in one
* call. xStreamBufferReceive will return as many bytes as possible up to a
* maximum set by xBufferLengthBytes.
*
* @param xTicksToWait The maximum amount of time the task should remain in the
* Blocked state to wait for data to become available if the stream buffer is
* empty. xStreamBufferReceive() will return immediately if xTicksToWait is
* zero. The block time is specified in tick periods, so the absolute time it
* represents is dependent on the tick frequency. The macro pdMS_TO_TICKS() can
* be used to convert a time specified in milliseconds into a time specified in
* ticks. Setting xTicksToWait to portMAX_DELAY will cause the task to wait
* indefinitely (without timing out), provided INCLUDE_vTaskSuspend is set to 1
* in FreeRTOSConfig.h. A task does not use any CPU time when it is in the
* Blocked state.
*
* @return The number of bytes actually read from the stream buffer, which will
* be less than xBufferLengthBytes if the call to xStreamBufferReceive() timed
* out before xBufferLengthBytes were available.
*
* Example use:
<pre>
void vAFunction( StreamBufferHandle_t xStreamBuffer )
{
uint8_t ucRxData[ 20 ];
size_t xReceivedBytes;
const TickType_t xBlockTime = pdMS_TO_TICKS( 20 );
// Receive up to another sizeof( ucRxData ) bytes from the stream buffer.
// Wait in the Blocked state (so not using any CPU processing time) for a
// maximum of 20ms for the full sizeof( ucRxData ) number of bytes to be
// available.
xReceivedBytes = xStreamBufferReceive( xStreamBuffer,
( void * ) ucRxData,
sizeof( ucRxData ),
xBlockTime );
if( xReceivedBytes > 0 )
{
// ucRxData contains another xReceivedBytes bytes of data, which can
// be processed here....
}
}
</pre>
* \defgroup xStreamBufferReceive xStreamBufferReceive
* \ingroup StreamBufferManagement
*/
size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
/**
* stream_buffer.h
*
<pre>
size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* An interrupt safe version of the API function that receives bytes from a
* stream buffer.
*
* Use xStreamBufferReceive() to read bytes from a stream buffer from a task.
* Use xStreamBufferReceiveFromISR() to read bytes from a stream buffer from an
* interrupt service routine (ISR).
*
* @param xStreamBuffer The handle of the stream buffer from which a stream
* is being received.
*
* @param pvRxData A pointer to the buffer into which the received bytes are
* copied.
*
* @param xBufferLengthBytes The length of the buffer pointed to by the
* pvRxData parameter. This sets the maximum number of bytes to receive in one
* call. xStreamBufferReceive will return as many bytes as possible up to a
* maximum set by xBufferLengthBytes.
*
* @param pxHigherPriorityTaskWoken It is possible that a stream buffer will
* have a task blocked on it waiting for space to become available. Calling
* xStreamBufferReceiveFromISR() can make space available, and so cause a task
* that is waiting for space to leave the Blocked state. If calling
* xStreamBufferReceiveFromISR() causes a task to leave the Blocked state, and
* the unblocked task has a priority higher than the currently executing task
* (the task that was interrupted), then, internally,
* xStreamBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE.
* If xStreamBufferReceiveFromISR() sets this value to pdTRUE, then normally a
* context switch should be performed before the interrupt is exited. That will
* ensure the interrupt returns directly to the highest priority Ready state
* task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is
* passed into the function. See the code example below.
*
* @return The number of bytes read from the stream buffer, if any.
*
* Example use:
<pre>
// A stream buffer that has already been created.
StreamBufferHandle_t xStreamBuffer;
void vAnInterruptServiceRoutine( void )
{
uint8_t ucRxData[ 20 ];
size_t xReceivedBytes;
BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
// Receive the next stream from the stream buffer.
xReceivedBytes = xStreamBufferReceiveFromISR( xStreamBuffer,
( void * ) ucRxData,
sizeof( ucRxData ),
&xHigherPriorityTaskWoken );
if( xReceivedBytes > 0 )
{
// ucRxData contains xReceivedBytes read from the stream buffer.
// Process the stream here....
}
// If xHigherPriorityTaskWoken was set to pdTRUE inside
// xStreamBufferReceiveFromISR() then a task that has a priority above the
// priority of the currently executing task was unblocked and a context
// switch should be performed to ensure the ISR returns to the unblocked
// task. In most FreeRTOS ports this is done by simply passing
// xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
// variable's value, and perform the context switch if necessary. Check the
// documentation for the port in use for port specific instructions.
taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
</pre>
* \defgroup xStreamBufferReceiveFromISR xStreamBufferReceiveFromISR
* \ingroup StreamBufferManagement
*/
size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
/**
* stream_buffer.h
*
<pre>
void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer );
</pre>
*
* Deletes a stream buffer that was previously created using a call to
* xStreamBufferCreate() or xStreamBufferCreateStatic(). If the stream
* buffer was created using dynamic memory (that is, by xStreamBufferCreate()),
* then the allocated memory is freed.
*
* A stream buffer handle must not be used after the stream buffer has been
* deleted.
*
* @param xStreamBuffer The handle of the stream buffer to be deleted.
*
* \defgroup vStreamBufferDelete vStreamBufferDelete
* \ingroup StreamBufferManagement
*/
void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
/**
* stream_buffer.h
*
<pre>
BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer );
</pre>
*
* Queries a stream buffer to see if it is full. A stream buffer is full if it
* does not have any free space, and therefore cannot accept any more data.
*
* @param xStreamBuffer The handle of the stream buffer being queried.
*
* @return If the stream buffer is full then pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* \defgroup xStreamBufferIsFull xStreamBufferIsFull
* \ingroup StreamBufferManagement
*/
BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
/**
* stream_buffer.h
*
<pre>
BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer );
</pre>
*
* Queries a stream buffer to see if it is empty. A stream buffer is empty if
* it does not contain any data.
*
* @param xStreamBuffer The handle of the stream buffer being queried.
*
* @return If the stream buffer is empty then pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* \defgroup xStreamBufferIsEmpty xStreamBufferIsEmpty
* \ingroup StreamBufferManagement
*/
BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
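/* Illustrative usage sketch (added): combining the query functions with a
 * zero block time gives a non-blocking drain. */
void vDrainIfNotEmpty( StreamBufferHandle_t xStreamBuffer, uint8_t *pucOut, size_t xOutLen )
{
    if( xStreamBufferIsEmpty( xStreamBuffer ) == pdFALSE )
    {
        /* Data is available; with a single reader (as the buffer requires) a
         * zero block time receive cannot come back empty here. */
        ( void ) xStreamBufferReceive( xStreamBuffer, ( void * ) pucOut, xOutLen, 0 );
    }
}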
/**
* stream_buffer.h
*
<pre>
BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer );
</pre>
*
* Resets a stream buffer to its initial, empty, state. Any data that was in
* the stream buffer is discarded. A stream buffer can only be reset if there
* are no tasks blocked waiting to either send to or receive from the stream
* buffer.
*
* @param xStreamBuffer The handle of the stream buffer being reset.
*
* @return If the stream buffer is reset then pdPASS is returned. If there was
* a task blocked waiting to send to or read from the stream buffer then the
* stream buffer is not reset and pdFAIL is returned.
*
* \defgroup xStreamBufferReset xStreamBufferReset
* \ingroup StreamBufferManagement
*/
BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
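/* Illustrative usage sketch (added): discarding stale data between protocol
 * sessions; the reset is refused while any task is blocked on the buffer. */
void vStartNewSession( StreamBufferHandle_t xStreamBuffer )
{
    if( xStreamBufferReset( xStreamBuffer ) != pdPASS )
    {
        /* A task was still blocked sending to or receiving from the buffer,
         * so its contents were left untouched. */
    }
}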
/**
* stream_buffer.h
*
<pre>
size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer );
</pre>
*
* Queries a stream buffer to see how much free space it contains, which is
* equal to the amount of data that can be sent to the stream buffer before it
* is full.
*
* @param xStreamBuffer The handle of the stream buffer being queried.
*
* @return The number of bytes that can be written to the stream buffer before
* the stream buffer would be full.
*
* \defgroup xStreamBufferSpacesAvailable xStreamBufferSpacesAvailable
* \ingroup StreamBufferManagement
*/
size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
/**
* stream_buffer.h
*
<pre>
size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer );
</pre>
*
* Queries a stream buffer to see how much data it contains, which is equal to
* the number of bytes that can be read from the stream buffer before the stream
* buffer would be empty.
*
* @param xStreamBuffer The handle of the stream buffer being queried.
*
* @return The number of bytes that can be read from the stream buffer before
* the stream buffer would be empty.
*
* \defgroup xStreamBufferBytesAvailable xStreamBufferBytesAvailable
* \ingroup StreamBufferManagement
*/
size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
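/* Illustrative usage sketch (added): checking free space first makes a
 * zero-block-time send an all-or-nothing operation. */
void vSendOnlyIfRoom( StreamBufferHandle_t xStreamBuffer, const uint8_t *pucData, size_t xLen )
{
    if( xStreamBufferSpacesAvailable( xStreamBuffer ) >= xLen )
    {
        /* Enough free space, so nothing is truncated (valid because stream
         * buffers have a single writer). */
        ( void ) xStreamBufferSend( xStreamBuffer, ( const void * ) pucData, xLen, 0 );
    }
}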
/**
* stream_buffer.h
*
<pre>
BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel );
</pre>
*
* A stream buffer's trigger level is the number of bytes that must be in the
* stream buffer before a task that is blocked on the stream buffer waiting
* for data is moved out of the blocked state. For example, if a task is
* blocked on a read of an empty stream buffer that has a trigger level of 1
* then the task will be unblocked when a single byte is written to the buffer
* or the task's block time expires. As another example, if a task is blocked
* on a read of an empty stream buffer that has a trigger level of 10 then the
* task will not be unblocked until the stream buffer contains at least 10 bytes
* or the task's block time expires. If a reading task's block time expires
* before the trigger level is reached then the task will still receive however
* many bytes are actually available. Setting a trigger level of 0 will result
* in a trigger level of 1 being used. It is not valid to specify a trigger
* level that is greater than the buffer size.
*
* A trigger level is set when the stream buffer is created, and can be modified
* using xStreamBufferSetTriggerLevel().
*
* @param xStreamBuffer The handle of the stream buffer being updated.
*
* @param xTriggerLevel The new trigger level for the stream buffer.
*
* @return If xTriggerLevel was less than or equal to the stream buffer's length
* then the trigger level will be updated and pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* \defgroup xStreamBufferSetTriggerLevel xStreamBufferSetTriggerLevel
* \ingroup StreamBufferManagement
*/
BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) PRIVILEGED_FUNCTION;
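/* Illustrative usage sketch (added): dropping the trigger level to 1 so the
 * reading task wakes on every byte rather than waiting for a burst. */
void vMakeReaderMoreResponsive( StreamBufferHandle_t xStreamBuffer )
{
    if( xStreamBufferSetTriggerLevel( xStreamBuffer, ( size_t ) 1 ) == pdFALSE )
    {
        /* Only fails if the requested level exceeds the buffer size. */
    }
}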
/**
* stream_buffer.h
*
<pre>
BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* For advanced users only.
*
* The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when
* data is sent to a message buffer or stream buffer. If there was a task that
* was blocked on the message or stream buffer waiting for data to arrive then
* the sbSEND_COMPLETED() macro sends a notification to the task to remove it
* from the Blocked state. xStreamBufferSendCompletedFromISR() does the same
* thing. It is provided to enable application writers to implement their own
* version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME.
*
* See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for
* additional information.
*
* @param xStreamBuffer The handle of the stream buffer to which data was
* written.
*
* @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be
* initialised to pdFALSE before it is passed into
* xStreamBufferSendCompletedFromISR(). If calling
* xStreamBufferSendCompletedFromISR() removes a task from the Blocked state,
* and the task has a priority above the priority of the currently running task,
* then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a
* context switch should be performed before exiting the ISR.
*
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* \defgroup xStreamBufferSendCompletedFromISR xStreamBufferSendCompletedFromISR
* \ingroup StreamBufferManagement
*/
BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
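/* Illustrative sketch (added), loosely following the MessageBufferAMP.c demo
 * referenced above: on a dual-core device, a custom sbSEND_COMPLETED() can
 * raise an interrupt on the core running the reader, whose ISR then finishes
 * the notification. The handler name and the mechanism by which it learns
 * which buffer was written are assumptions of this sketch. */
void vRemoteCoreInterruptHandler( StreamBufferHandle_t xUpdatedBuffer )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Unblock any task waiting for data in the buffer that was just written. */
    ( void ) xStreamBufferSendCompletedFromISR( xUpdatedBuffer, &xHigherPriorityTaskWoken );
    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}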
/**
* stream_buffer.h
*
<pre>
BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
</pre>
*
* For advanced users only.
*
* The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when
* data is read out of a message buffer or stream buffer. If there was a task
* that was blocked on the message or stream buffer waiting for data to arrive
* then the sbRECEIVE_COMPLETED() macro sends a notification to the task to
* remove it from the Blocked state. xStreamBufferReceiveCompletedFromISR()
* does the same thing. It is provided to enable application writers to
* implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT
* ANY OTHER TIME.
*
* See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for
* additional information.
*
* @param xStreamBuffer The handle of the stream buffer from which data was
* read.
*
* @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be
* initialised to pdFALSE before it is passed into
* xStreamBufferReceiveCompletedFromISR(). If calling
* xStreamBufferReceiveCompletedFromISR() removes a task from the Blocked state,
* and the task has a priority above the priority of the currently running task,
* then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a
* context switch should be performed before exiting the ISR.
*
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* \defgroup xStreamBufferReceiveCompletedFromISR xStreamBufferReceiveCompletedFromISR
* \ingroup StreamBufferManagement
*/
BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
/* Functions below here are not part of the public API. */
StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION;
StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
BaseType_t xIsMessageBuffer,
uint8_t * const pucStreamBufferStorageArea,
StaticStreamBuffer_t * const pxStaticStreamBuffer ) PRIVILEGED_FUNCTION;
size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
#if( configUSE_TRACE_FACILITY == 1 )
void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) PRIVILEGED_FUNCTION;
UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
#endif
#if defined( __cplusplus )
}
#endif
#endif /* !defined( STREAM_BUFFER_H ) */


@ -1,198 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#include <stdlib.h>
#include "FreeRTOS.h"
#include "list.h"
/*-----------------------------------------------------------
* PUBLIC LIST API documented in list.h
*----------------------------------------------------------*/
void vListInitialise( List_t * const pxList )
{
/* The list structure contains a list item which is used to mark the
end of the list. To initialise the list the list end is inserted
as the only list entry. */
pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
/* The list end value is the highest possible value in the list to
ensure it remains at the end of the list. */
pxList->xListEnd.xItemValue = portMAX_DELAY;
/* The list end next and previous pointers point to itself so we know
when the list is empty. */
pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd );/*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
pxList->uxNumberOfItems = ( UBaseType_t ) 0U;
/* Write known values into the list if
configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList );
listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList );
}
/*-----------------------------------------------------------*/
void vListInitialiseItem( ListItem_t * const pxItem )
{
/* Make sure the list item is not recorded as being on a list. */
pxItem->pxContainer = NULL;
/* Write known values into the list item if
configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem );
listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem );
}
/*-----------------------------------------------------------*/
void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem )
{
ListItem_t * const pxIndex = pxList->pxIndex;
/* Only effective when configASSERT() is also defined, these tests may catch
the list data structures being overwritten in memory. They will not catch
data errors caused by incorrect configuration or use of FreeRTOS. */
listTEST_LIST_INTEGRITY( pxList );
listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
/* Insert a new list item into pxList, but rather than sort the list,
makes the new list item the last item to be removed by a call to
listGET_OWNER_OF_NEXT_ENTRY(). */
pxNewListItem->pxNext = pxIndex;
pxNewListItem->pxPrevious = pxIndex->pxPrevious;
/* Only used during decision coverage testing. */
mtCOVERAGE_TEST_DELAY();
pxIndex->pxPrevious->pxNext = pxNewListItem;
pxIndex->pxPrevious = pxNewListItem;
/* Remember which list the item is in. */
pxNewListItem->pxContainer = pxList;
( pxList->uxNumberOfItems )++;
}
/*-----------------------------------------------------------*/
void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem )
{
ListItem_t *pxIterator;
const TickType_t xValueOfInsertion = pxNewListItem->xItemValue;
/* Only effective when configASSERT() is also defined, these tests may catch
the list data structures being overwritten in memory. They will not catch
data errors caused by incorrect configuration or use of FreeRTOS. */
listTEST_LIST_INTEGRITY( pxList );
listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
/* Insert the new list item into the list, sorted in xItemValue order.
If the list already contains a list item with the same item value then the
new list item should be placed after it. This ensures that TCBs which are
stored in ready lists (all of which have the same xItemValue value) get a
share of the CPU. However, if the xItemValue is the same as the back marker
the iteration loop below will not end. Therefore the value is checked
first, and the algorithm slightly modified if necessary. */
if( xValueOfInsertion == portMAX_DELAY )
{
pxIterator = pxList->xListEnd.pxPrevious;
}
else
{
/* *** NOTE ***********************************************************
If you find your application is crashing here then likely causes are
listed below. In addition see https://www.freertos.org/FAQHelp.html for
more tips, and ensure configASSERT() is defined!
https://www.freertos.org/a00110.html#configASSERT
1) Stack overflow -
see https://www.freertos.org/Stacks-and-stack-overflow-checking.html
2) Incorrect interrupt priority assignment, especially on Cortex-M
parts where numerically high priority values denote low actual
interrupt priorities, which can seem counter intuitive. See
https://www.freertos.org/RTOS-Cortex-M3-M4.html and the definition
of configMAX_SYSCALL_INTERRUPT_PRIORITY on
https://www.freertos.org/a00110.html
3) Calling an API function from within a critical section or when
the scheduler is suspended, or calling an API function that does
not end in "FromISR" from an interrupt.
4) Using a queue or semaphore before it has been initialised or
before the scheduler has been started (are interrupts firing
before vTaskStartScheduler() has been called?).
**********************************************************************/
for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */
{
/* There is nothing to do here, just iterating to the wanted
insertion position. */
}
}
pxNewListItem->pxNext = pxIterator->pxNext;
pxNewListItem->pxNext->pxPrevious = pxNewListItem;
pxNewListItem->pxPrevious = pxIterator;
pxIterator->pxNext = pxNewListItem;
/* Remember which list the item is in. This allows fast removal of the
item later. */
pxNewListItem->pxContainer = pxList;
( pxList->uxNumberOfItems )++;
}
/*-----------------------------------------------------------*/
UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
{
/* The list item knows which list it is in. Obtain the list from the list
item. */
List_t * const pxList = pxItemToRemove->pxContainer;
pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious;
pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext;
/* Only used during decision coverage testing. */
mtCOVERAGE_TEST_DELAY();
/* Make sure the index is left pointing to a valid item. */
if( pxList->pxIndex == pxItemToRemove )
{
pxList->pxIndex = pxItemToRemove->pxPrevious;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxItemToRemove->pxContainer = NULL;
( pxList->uxNumberOfItems )--;
return pxList->uxNumberOfItems;
}
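/* Usage sketch (annotation, not part of the original file): the list API as
the kernel exercises it. The function name below is hypothetical and the
block is compiled out. */
#if 0
static void prvListUsageExample( void )
{
List_t xList;
ListItem_t xItem;

	vListInitialise( &xList );
	vListInitialiseItem( &xItem );

	/* Sort key used by vListInsert(); vListInsertEnd() ignores it. */
	listSET_LIST_ITEM_VALUE( &xItem, 100 );

	/* Insert in xItemValue order, then remove again. uxListRemove() returns
	the number of items remaining on the list. */
	vListInsert( &xList, &xItem );
	configASSERT( uxListRemove( &xItem ) == 0 );
}
#endif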
/*-----------------------------------------------------------*/


@ -1,765 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*-----------------------------------------------------------
* Implementation of functions defined in portable.h for the ARM CM4F port.
*----------------------------------------------------------*/
/* Scheduler includes. */
#include "FreeRTOS.h"
#include "task.h"
#ifndef __VFP_FP__
#error This port can only be used when the project options are configured to enable hardware floating point support.
#endif
#ifndef configSYSTICK_CLOCK_HZ
#define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ
/* Ensure the SysTick is clocked at the same frequency as the core. */
#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
#else
/* The SysTick clock source is left unmodified, as it may not be clocked at
the same frequency as the core. */
#define portNVIC_SYSTICK_CLK_BIT ( 0 )
#endif
/* Constants required to manipulate the core. Registers first... */
#define portNVIC_SYSTICK_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000e010 ) )
#define portNVIC_SYSTICK_LOAD_REG ( * ( ( volatile uint32_t * ) 0xe000e014 ) )
#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( * ( ( volatile uint32_t * ) 0xe000e018 ) )
#define portNVIC_SYSPRI2_REG ( * ( ( volatile uint32_t * ) 0xe000ed20 ) )
/* ...then bits in the registers. */
#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
#define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL )
#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL )
#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL )
/* Constants required to check the validity of an interrupt priority. */
#define portFIRST_USER_INTERRUPT_NUMBER ( 16 )
#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 )
#define portAIRCR_REG ( * ( ( volatile uint32_t * ) 0xE000ED0C ) )
#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff )
#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 )
#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 )
#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL )
#define portPRIGROUP_SHIFT ( 8UL )
/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */
#define portVECTACTIVE_MASK ( 0xFFUL )
/* Constants required to manipulate the VFP. */
#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating point context control register. */
#define portASPEN_AND_LSPEN_BITS ( 0x3UL << 30UL )
/* Constants required to set up the initial stack. */
#define portINITIAL_XPSR ( 0x01000000 )
#define portINITIAL_EXC_RETURN ( 0xfffffffd )
/* The systick is a 24-bit counter. */
#define portMAX_24_BIT_NUMBER ( 0xffffffUL )
/* For strict compliance with the Cortex-M spec the task start address should
have bit-0 clear, as it is loaded into the PC on exit from an ISR. */
#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL )
/* A fiddle factor to estimate the number of SysTick counts that would have
occurred while the SysTick counter is stopped during tickless idle
calculations. */
#define portMISSED_COUNTS_FACTOR ( 45UL )
/* Let the user override the pre-loading of the initial LR with the address of
prvTaskExitError() in case it messes up unwinding of the stack in the
debugger. */
#ifdef configTASK_RETURN_ADDRESS
#define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
#else
#define portTASK_RETURN_ADDRESS prvTaskExitError
#endif
/*
 * Set up the timer to generate the tick interrupts. The implementation in this
* file is weak to allow application writers to change the timer used to
* generate the tick interrupt.
*/
void vPortSetupTimerInterrupt( void );
/*
* Exception handlers.
*/
void xPortPendSVHandler( void ) __attribute__ (( naked ));
void xPortSysTickHandler( void );
void vPortSVCHandler( void ) __attribute__ (( naked ));
/*
 * Starting the first task is done in a separate function so it can be tested in isolation.
*/
static void prvPortStartFirstTask( void ) __attribute__ (( naked ));
/*
* Function to enable the VFP.
*/
static void vPortEnableVFP( void ) __attribute__ (( naked ));
/*
* Used to catch tasks that attempt to return from their implementing function.
*/
static void prvTaskExitError( void );
/*-----------------------------------------------------------*/
/* Each task maintains its own interrupt status in the critical nesting
variable. */
static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
/*
* The number of SysTick increments that make up one tick period.
*/
#if( configUSE_TICKLESS_IDLE == 1 )
static uint32_t ulTimerCountsForOneTick = 0;
#endif /* configUSE_TICKLESS_IDLE */
/*
* The maximum number of tick periods that can be suppressed is limited by the
* 24 bit resolution of the SysTick timer.
*/
#if( configUSE_TICKLESS_IDLE == 1 )
static uint32_t xMaximumPossibleSuppressedTicks = 0;
#endif /* configUSE_TICKLESS_IDLE */
/*
* Compensate for the CPU cycles that pass while the SysTick is stopped (low
 * power functionality only).
*/
#if( configUSE_TICKLESS_IDLE == 1 )
static uint32_t ulStoppedTimerCompensation = 0;
#endif /* configUSE_TICKLESS_IDLE */
/*
* Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
* FreeRTOS API functions are not called from interrupts that have been assigned
* a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
*/
#if( configASSERT_DEFINED == 1 )
static uint8_t ucMaxSysCallPriority = 0;
static uint32_t ulMaxPRIGROUPValue = 0;
static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * const ) portNVIC_IP_REGISTERS_OFFSET_16;
#endif /* configASSERT_DEFINED */
/*-----------------------------------------------------------*/
/*
* See header file for description.
*/
StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
{
/* Simulate the stack frame as it would be created by a context switch
interrupt. */
/* Offset added to account for the way the MCU uses the stack on entry/exit
of interrupts, and to ensure alignment. */
pxTopOfStack--;
*pxTopOfStack = portINITIAL_XPSR; /* xPSR */
pxTopOfStack--;
*pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */
pxTopOfStack--;
*pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
/* Save code space by skipping register initialisation. */
pxTopOfStack -= 5; /* Skip R12, R3, R2 and R1, leaving the pointer at the R0 slot. */
*pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
/* A save method is being used that requires each task to maintain its
own exec return value. */
pxTopOfStack--;
*pxTopOfStack = portINITIAL_EXC_RETURN;
pxTopOfStack -= 8; /* R11, R10, R9, R8, R7, R6, R5 and R4. */
return pxTopOfStack;
}
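/* For reference (annotation, not part of the original file): the simulated
frame built above, from the first value pushed downwards, is xPSR, PC, LR,
R12, R3, R2, R1, R0 - the registers the hardware stacks on exception entry -
followed by the EXC_RETURN value and R11-R4, which this port saves in
software. */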
/*-----------------------------------------------------------*/
static void prvTaskExitError( void )
{
volatile uint32_t ulDummy = 0;
/* A function that implements a task must not exit or attempt to return to
its caller as there is nothing to return to. If a task wants to exit it
should instead call vTaskDelete( NULL ).
Artificially force an assert() to be triggered if configASSERT() is
defined, then stop here so application writers can catch the error. */
configASSERT( uxCriticalNesting == ~0UL );
portDISABLE_INTERRUPTS();
while( ulDummy == 0 )
{
/* This file calls prvTaskExitError() after the scheduler has been
started to remove a compiler warning about the function being defined
but never called. ulDummy is used purely to quieten other warnings
about code appearing after this function is called - making ulDummy
volatile makes the compiler think the function could return and
therefore not output an 'unreachable code' warning for code that appears
after it. */
}
}
/*-----------------------------------------------------------*/
void vPortSVCHandler( void )
{
__asm volatile (
" ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */
" ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */
" ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
" ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */
" msr psp, r0 \n" /* Restore the task stack pointer. */
" isb \n"
" mov r0, #0 \n"
" msr basepri, r0 \n"
" bx r14 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst2: .word pxCurrentTCB \n"
);
}
/*-----------------------------------------------------------*/
static void prvPortStartFirstTask( void )
{
/* Start the first task. This also clears the bit that indicates the FPU is
in use in case the FPU was used before the scheduler was started - which
would otherwise result in the unnecessary leaving of space in the SVC stack
for lazy saving of FPU registers. */
__asm volatile(
" ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
" ldr r0, [r0] \n"
" ldr r0, [r0] \n"
" msr msp, r0 \n" /* Set the msp back to the start of the stack. */
" mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */
" msr control, r0 \n"
" cpsie i \n" /* Globally enable interrupts. */
" cpsie f \n"
" dsb \n"
" isb \n"
" svc 0 \n" /* System call to start first task. */
" nop \n"
);
}
/*-----------------------------------------------------------*/
/*
* See header file for description.
*/
BaseType_t xPortStartScheduler( void )
{
/* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0.
See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY );
#if( configASSERT_DEFINED == 1 )
{
volatile uint32_t ulOriginalPriority;
volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER );
volatile uint8_t ucMaxPriorityValue;
/* Determine the maximum priority from which ISR safe FreeRTOS API
functions can be called. ISR safe functions are those that end in
"FromISR". FreeRTOS maintains separate thread and ISR API functions to
ensure interrupt entry is as fast and simple as possible.
Save the interrupt priority value that is about to be clobbered. */
ulOriginalPriority = *pucFirstUserPriorityRegister;
/* Determine the number of priority bits available. First write to all
possible bits. */
*pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
/* Read the value back to see how many bits stuck. */
ucMaxPriorityValue = *pucFirstUserPriorityRegister;
/* Use the same mask on the maximum system call priority. */
ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
/* Calculate the maximum acceptable priority group value for the number
of bits read back. */
ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
{
ulMaxPRIGROUPValue--;
ucMaxPriorityValue <<= ( uint8_t ) 0x01;
}
#ifdef __NVIC_PRIO_BITS
{
/* Check the CMSIS configuration that defines the number of
priority bits matches the number of priority bits actually queried
from the hardware. */
configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
}
#endif
#ifdef configPRIO_BITS
{
/* Check the FreeRTOS configuration that defines the number of
priority bits matches the number of priority bits actually queried
from the hardware. */
configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
}
#endif
/* Shift the priority group value back to its position within the AIRCR
register. */
ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
/* Restore the clobbered interrupt priority register to its original
value. */
*pucFirstUserPriorityRegister = ulOriginalPriority;
}
#endif /* configASSERT_DEFINED */
/* Make PendSV and SysTick the lowest priority interrupts. */
portNVIC_SYSPRI2_REG |= portNVIC_PENDSV_PRI;
portNVIC_SYSPRI2_REG |= portNVIC_SYSTICK_PRI;
/* Start the timer that generates the tick ISR. Interrupts are disabled
here already. */
vPortSetupTimerInterrupt();
/* Initialise the critical nesting count ready for the first task. */
uxCriticalNesting = 0;
/* Ensure the VFP is enabled - it should be anyway. */
vPortEnableVFP();
/* Lazy save always. */
*( portFPCCR ) |= portASPEN_AND_LSPEN_BITS;
/* Start the first task. */
prvPortStartFirstTask();
/* Should never get here as the tasks will now be executing! Call the task
exit error function to prevent compiler warnings about a static function
not being called in the case that the application writer overrides this
functionality by defining configTASK_RETURN_ADDRESS. Call
vTaskSwitchContext() so link time optimisation does not remove the
symbol. */
vTaskSwitchContext();
prvTaskExitError();
/* Should not get here! */
return 0;
}
/*-----------------------------------------------------------*/
void vPortEndScheduler( void )
{
/* Not implemented in ports where there is nothing to return to.
Artificially force an assert. */
configASSERT( uxCriticalNesting == 1000UL );
}
/*-----------------------------------------------------------*/
void vPortEnterCritical( void )
{
portDISABLE_INTERRUPTS();
uxCriticalNesting++;
/* This is not the interrupt safe version of the enter critical function so
assert() if it is being called from an interrupt context. Only API
functions that end in "FromISR" can be used in an interrupt. Only assert if
the critical nesting count is 1 to protect against recursive calls if the
assert function also uses a critical section. */
if( uxCriticalNesting == 1 )
{
configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );
}
}
/*-----------------------------------------------------------*/
void vPortExitCritical( void )
{
configASSERT( uxCriticalNesting );
uxCriticalNesting--;
if( uxCriticalNesting == 0 )
{
portENABLE_INTERRUPTS();
}
}
/*-----------------------------------------------------------*/
void xPortPendSVHandler( void )
{
/* This is a naked function. */
__asm volatile
(
" mrs r0, psp \n"
" isb \n"
" \n"
" ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */
" ldr r2, [r3] \n"
" \n"
" tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */
" it eq \n"
" vstmdbeq r0!, {s16-s31} \n"
" \n"
" stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */
" str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */
" \n"
" stmdb sp!, {r0, r3} \n"
" mov r0, %0 \n"
" cpsid i \n" /* Errata workaround. */
" msr basepri, r0 \n"
" dsb \n"
" isb \n"
" cpsie i \n" /* Errata workaround. */
" bl vTaskSwitchContext \n"
" mov r0, #0 \n"
" msr basepri, r0 \n"
" ldmia sp!, {r0, r3} \n"
" \n"
" ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
" ldr r0, [r1] \n"
" \n"
" ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */
" \n"
" tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */
" it eq \n"
" vldmiaeq r0!, {s16-s31} \n"
" \n"
" msr psp, r0 \n"
" isb \n"
" \n"
#ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata workaround. */
#if WORKAROUND_PMU_CM001 == 1
" push { r14 } \n"
" pop { pc } \n"
#endif
#endif
" \n"
" bx r14 \n"
" \n"
" .align 4 \n"
"pxCurrentTCBConst: .word pxCurrentTCB \n"
::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY)
);
}
/*-----------------------------------------------------------*/
void xPortSysTickHandler( void )
{
/* The SysTick runs at the lowest interrupt priority, so when this interrupt
executes all interrupts must be unmasked. There is therefore no need to
save and then restore the interrupt mask value as its value is already
known. */
portDISABLE_INTERRUPTS();
{
/* Increment the RTOS tick. */
if( xTaskIncrementTick() != pdFALSE )
{
/* A context switch is required. Context switching is performed in
the PendSV interrupt. Pend the PendSV interrupt. */
portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
}
}
portENABLE_INTERRUPTS();
}
/*-----------------------------------------------------------*/
#if( configUSE_TICKLESS_IDLE == 1 )
__attribute__((weak)) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
{
uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements;
TickType_t xModifiableIdleTime;
/* Make sure the SysTick reload value does not overflow the counter. */
if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
{
xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
}
/* Stop the SysTick momentarily. The time the SysTick is stopped for
is accounted for as best it can be, but using the tickless mode will
inevitably result in some tiny drift of the time maintained by the
kernel with respect to calendar time. */
portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT;
/* Calculate the reload value required to wait xExpectedIdleTime
tick periods. -1 is used because this code will execute part way
through one of the tick periods. */
ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
if( ulReloadValue > ulStoppedTimerCompensation )
{
ulReloadValue -= ulStoppedTimerCompensation;
}
/* Enter a critical section but don't use the taskENTER_CRITICAL()
method as that will mask interrupts that should exit sleep mode. */
__asm volatile( "cpsid i" ::: "memory" );
__asm volatile( "dsb" );
__asm volatile( "isb" );
/* If a context switch is pending or a task is waiting for the scheduler
to be unsuspended then abandon the low power entry. */
if( eTaskConfirmSleepModeStatus() == eAbortSleep )
{
/* Restart from whatever is left in the count register to complete
this tick period. */
portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG;
/* Restart SysTick. */
portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
/* Reset the reload register to the value required for normal tick
periods. */
portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
/* Re-enable interrupts - see the comments above the cpsid instruction
above. */
__asm volatile( "cpsie i" ::: "memory" );
}
else
{
/* Set the new reload value. */
portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
/* Clear the SysTick count flag and set the count value back to
zero. */
portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
/* Restart SysTick. */
portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
/* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
set its parameter to 0 to indicate that its implementation contains
its own wait for interrupt or wait for event instruction, and so wfi
should not be executed again. However, the original expected idle
time variable must remain unmodified, so a copy is taken. */
xModifiableIdleTime = xExpectedIdleTime;
configPRE_SLEEP_PROCESSING( &xModifiableIdleTime );
if( xModifiableIdleTime > 0 )
{
__asm volatile( "dsb" ::: "memory" );
__asm volatile( "wfi" );
__asm volatile( "isb" );
}
configPOST_SLEEP_PROCESSING( &xExpectedIdleTime );
/* Re-enable interrupts to allow the interrupt that brought the MCU
out of sleep mode to execute immediately. See the comments above the
first "cpsid i" instruction above. */
__asm volatile( "cpsie i" ::: "memory" );
__asm volatile( "dsb" );
__asm volatile( "isb" );
/* Disable interrupts again because the clock is about to be stopped
and interrupts that execute while the clock is stopped will increase
any slippage between the time maintained by the RTOS and calendar
time. */
__asm volatile( "cpsid i" ::: "memory" );
__asm volatile( "dsb" );
__asm volatile( "isb" );
/* Disable the SysTick clock without reading the
portNVIC_SYSTICK_CTRL_REG register to ensure the
portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
the time the SysTick is stopped for is accounted for as best it can
be, but using the tickless mode will inevitably result in some tiny
drift of the time maintained by the kernel with respect to calendar
time. */
portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT );
/* Determine if the SysTick clock has already counted to zero and
been set back to the current reload value (the reload back being
correct for the entire expected idle time) or if the SysTick is yet
to count to zero (in which case an interrupt other than the SysTick
must have brought the system out of sleep mode). */
if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
{
uint32_t ulCalculatedLoadValue;
/* The tick interrupt is already pending, and the SysTick count
reloaded with ulReloadValue. Reset the
portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick
period. */
ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
/* Don't allow a tiny value, or values that have somehow
underflowed because the post sleep hook did something
that took too long. */
if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
{
ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
}
portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
/* As the pending tick will be processed as soon as this
function exits, the tick count maintained by the kernel is stepped
forward by one less than the time spent waiting. */
ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
}
else
{
/* Something other than the tick interrupt ended the sleep.
Work out how long the sleep lasted rounded to complete tick
periods (not the ulReload value which accounted for part
ticks). */
ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG;
/* How many complete tick periods passed while the processor
was waiting? */
ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
/* The reload value is set to whatever fraction of a single tick
period remains. */
portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
}
/* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG
again, then set portNVIC_SYSTICK_LOAD_REG back to its standard
value. */
portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
vTaskStepTick( ulCompleteTickPeriods );
portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
/* Exit with interrupts enabled. */
__asm volatile( "cpsie i" ::: "memory" );
}
}
#endif /* #if configUSE_TICKLESS_IDLE */
/*-----------------------------------------------------------*/
/*
 * Set up the SysTick timer to generate the tick interrupts at the required
* frequency.
*/
__attribute__(( weak )) void vPortSetupTimerInterrupt( void )
{
/* Calculate the constants required to configure the tick interrupt. */
#if( configUSE_TICKLESS_IDLE == 1 )
{
ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
}
#endif /* configUSE_TICKLESS_IDLE */
/* Stop and clear the SysTick. */
portNVIC_SYSTICK_CTRL_REG = 0UL;
portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
/* Configure SysTick to interrupt at the requested rate. */
portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT );
}
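/* Worked example (annotation, not part of the original file): with
configSYSTICK_CLOCK_HZ = 72000000 and configTICK_RATE_HZ = 1000 the reload
register is programmed with ( 72000000 / 1000 ) - 1 = 71999, so the counter
wraps - and the tick interrupt fires - exactly once per millisecond. */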
/*-----------------------------------------------------------*/
/* This is a naked function. */
static void vPortEnableVFP( void )
{
__asm volatile
(
" ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */
" ldr r1, [r0] \n"
" \n"
" orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */
" str r1, [r0] \n"
" bx r14 "
);
}
/*-----------------------------------------------------------*/
#if( configASSERT_DEFINED == 1 )
void vPortValidateInterruptPriority( void )
{
uint32_t ulCurrentInterrupt;
uint8_t ucCurrentPriority;
/* Obtain the number of the currently executing interrupt. */
__asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" );
/* Is the interrupt number a user defined interrupt? */
if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
{
/* Look up the interrupt's priority. */
ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
/* The following assertion will fail if a service routine (ISR) for
an interrupt that has been assigned a priority above
configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
function. ISR safe FreeRTOS API functions must *only* be called
from interrupts that have been assigned a priority at or below
configMAX_SYSCALL_INTERRUPT_PRIORITY.
Numerically low interrupt priority numbers represent logically high
interrupt priorities, therefore the priority of the interrupt must
be set to a value equal to or numerically *higher* than
configMAX_SYSCALL_INTERRUPT_PRIORITY.
Interrupts that use the FreeRTOS API must not be left at their
default priority of zero as that is the highest possible priority,
which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
and therefore also guaranteed to be invalid.
FreeRTOS maintains separate thread and ISR API functions to ensure
interrupt entry is as fast and simple as possible.
The following links provide detailed information:
http://www.freertos.org/RTOS-Cortex-M3-M4.html
http://www.freertos.org/FAQHelp.html */
configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
}
/* Priority grouping: The interrupt controller (NVIC) allows the bits
that define each interrupt's priority to be split between bits that
define the interrupt's pre-emption priority bits and bits that define
the interrupt's sub-priority. For simplicity all bits must be defined
to be pre-emption priority bits. The following assertion will fail if
this is not the case (if some bits represent a sub-priority).
If the application only uses CMSIS libraries for interrupt
configuration then the correct setting can be achieved on all Cortex-M
devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
scheduler. Note however that some vendor specific peripheral libraries
assume a non-zero priority group setting, in which cases using a value
of zero will result in unpredictable behaviour. */
configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
}
#endif /* configASSERT_DEFINED */


@ -1,247 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef PORTMACRO_H
#define PORTMACRO_H
#ifdef __cplusplus
extern "C" {
#endif
/*-----------------------------------------------------------
* Port specific definitions.
*
* The settings in this file configure FreeRTOS correctly for the
* given hardware and compiler.
*
* These settings should not be altered.
*-----------------------------------------------------------
*/
/* Type definitions. */
#define portCHAR char
#define portFLOAT float
#define portDOUBLE double
#define portLONG long
#define portSHORT short
#define portSTACK_TYPE uint32_t
#define portBASE_TYPE long
typedef portSTACK_TYPE StackType_t;
typedef long BaseType_t;
typedef unsigned long UBaseType_t;
#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do
not need to be guarded with a critical section. */
#define portTICK_TYPE_IS_ATOMIC 1
#endif
/*-----------------------------------------------------------*/
/* Architecture specifics. */
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
#define portBYTE_ALIGNMENT 8
/*-----------------------------------------------------------*/
/* Scheduler utilities. */
#define portYIELD() \
{ \
/* Set a PendSV to request a context switch. */ \
portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; \
\
/* Barriers are normally not required but do ensure the code is completely \
within the specified behaviour for the architecture. */ \
__asm volatile( "dsb" ::: "memory" ); \
__asm volatile( "isb" ); \
}
#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) portYIELD()
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
/* Critical section management. */
extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );
#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortRaiseBASEPRI()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortSetBASEPRI(x)
#define portDISABLE_INTERRUPTS() vPortRaiseBASEPRI()
#define portENABLE_INTERRUPTS() vPortSetBASEPRI(0)
#define portENTER_CRITICAL() vPortEnterCritical()
#define portEXIT_CRITICAL() vPortExitCritical()
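/* Usage sketch (annotation, not part of the original file): critical
sections on this port mask interrupts at or below
configMAX_SYSCALL_INTERRUPT_PRIORITY and may be nested.
<pre>
portENTER_CRITICAL();
{
    // Access data shared with other tasks or ISRs here.
}
portEXIT_CRITICAL(); // Interrupts are unmasked only when the nesting count reaches zero.
</pre> */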
/*-----------------------------------------------------------*/
/* Task function macros as described on the FreeRTOS.org WEB site. These are
not necessary to use this port. They are defined so the common demo files
(which build with all the ports) will build. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
/*-----------------------------------------------------------*/
/* Tickless idle/low power functionality. */
#ifndef portSUPPRESS_TICKS_AND_SLEEP
extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
#define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
#endif
/*-----------------------------------------------------------*/
/* Architecture specific optimisations. */
#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
#endif
#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
/* Generic helper function. */
__attribute__( ( always_inline ) ) static inline uint8_t ucPortCountLeadingZeros( uint32_t ulBitmap )
{
uint8_t ucReturn;
__asm volatile ( "clz %0, %1" : "=r" ( ucReturn ) : "r" ( ulBitmap ) : "memory" );
return ucReturn;
}
/* Check the configuration. */
#if( configMAX_PRIORITIES > 32 )
#error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice.
#endif
/* Store/clear the ready priorities in a bit map. */
#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
/*-----------------------------------------------------------*/
#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) ucPortCountLeadingZeros( ( uxReadyPriorities ) ) )
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
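/* Worked example (annotation, not part of the original file): if tasks are
ready at priorities 1 and 5 then uxReadyPriorities is 0x22, clz( 0x22 ) is
26, and portGET_HIGHEST_PRIORITY() selects priority 31 - 26 = 5. */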
/*-----------------------------------------------------------*/
#ifdef configASSERT
void vPortValidateInterruptPriority( void );
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority()
#endif
/* portNOP() is not required by this port. */
#define portNOP()
#define portINLINE __inline
#ifndef portFORCE_INLINE
#define portFORCE_INLINE inline __attribute__(( always_inline))
#endif
portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
{
uint32_t ulCurrentInterrupt;
BaseType_t xReturn;
/* Obtain the number of the currently executing interrupt. */
__asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" );
if( ulCurrentInterrupt == 0 )
{
xReturn = pdFALSE;
}
else
{
xReturn = pdTRUE;
}
return xReturn;
}
/*-----------------------------------------------------------*/
portFORCE_INLINE static void vPortRaiseBASEPRI( void )
{
uint32_t ulNewBASEPRI;
__asm volatile
(
" mov %0, %1 \n" \
" cpsid i \n" \
" msr basepri, %0 \n" \
" isb \n" \
" dsb \n" \
" cpsie i \n" \
:"=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
}
/*-----------------------------------------------------------*/
portFORCE_INLINE static uint32_t ulPortRaiseBASEPRI( void )
{
uint32_t ulOriginalBASEPRI, ulNewBASEPRI;
__asm volatile
(
" mrs %0, basepri \n" \
" mov %1, %2 \n" \
" cpsid i \n" \
" msr basepri, %1 \n" \
" isb \n" \
" dsb \n" \
" cpsie i \n" \
:"=r" (ulOriginalBASEPRI), "=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
);
/* Return the BASEPRI value that was in force before it was raised, so that
portCLEAR_INTERRUPT_MASK_FROM_ISR() can later restore it. */
return ulOriginalBASEPRI;
}
/*-----------------------------------------------------------*/
portFORCE_INLINE static void vPortSetBASEPRI( uint32_t ulNewMaskValue )
{
__asm volatile
(
" msr basepri, %0 " :: "r" ( ulNewMaskValue ) : "memory"
);
}
/*-----------------------------------------------------------*/
#define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" )
#ifdef __cplusplus
}
#endif
#endif /* PORTMACRO_H */


@ -1,436 +0,0 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*
* A sample implementation of pvPortMalloc() and vPortFree() that combines
 * (coalesces) adjacent memory blocks as they are freed, and in so doing
* limits memory fragmentation.
*
* See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
* memory management pages of http://www.FreeRTOS.org for more information.
*/
#include <stdlib.h>
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers. That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#include "FreeRTOS.h"
#include "task.h"
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#if( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
#error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
#endif
/* Block sizes must not get too small. */
#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) )
/* Assumes 8-bit bytes! */
#define heapBITS_PER_BYTE ( ( size_t ) 8 )
/* Allocate the memory for the heap. */
#if( configAPPLICATION_ALLOCATED_HEAP == 1 )
/* The application writer has already defined the array used for the RTOS
heap - probably so it can be placed in a special segment or address. */
extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
#else
static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
#endif /* configAPPLICATION_ALLOCATED_HEAP */
/* Define the linked list structure. This is used to link free blocks in order
of their memory address. */
typedef struct A_BLOCK_LINK
{
struct A_BLOCK_LINK *pxNextFreeBlock; /*<< The next free block in the list. */
size_t xBlockSize; /*<< The size of the free block. */
} BlockLink_t;
/*-----------------------------------------------------------*/
/*
* Inserts a block of memory that is being freed into the correct position in
* the list of free memory blocks. The block being freed will be merged with
* the block in front it and/or the block behind it if the memory blocks are
* adjacent to each other.
*/
static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert );
/*
* Called automatically to setup the required heap structures the first time
* pvPortMalloc() is called.
*/
static void prvHeapInit( void );
/*-----------------------------------------------------------*/
/* The size of the structure placed at the beginning of each allocated memory
block must be correctly byte aligned. */
static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
/* Create a couple of list links to mark the start and end of the list. */
static BlockLink_t xStart, *pxEnd = NULL;
/* Keeps track of the number of free bytes remaining, but says nothing about
fragmentation. */
static size_t xFreeBytesRemaining = 0U;
static size_t xMinimumEverFreeBytesRemaining = 0U;
/* Gets set to the top bit of a size_t type. When this bit in the xBlockSize
member of a BlockLink_t structure is set then the block belongs to the
application. When the bit is free the block is still part of the free heap
space. */
static size_t xBlockAllocatedBit = 0;
/*-----------------------------------------------------------*/
void *pvPortMalloc( size_t xWantedSize )
{
BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
void *pvReturn = NULL;
vTaskSuspendAll();
{
/* If this is the first call to malloc then the heap will require
initialisation to setup the list of free blocks. */
if( pxEnd == NULL )
{
prvHeapInit();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Check the requested block size is not so large that the top bit is
set. The top bit of the block size member of the BlockLink_t structure
is used to determine who owns the block - the application or the
kernel, so it must be free. */
if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
{
/* The wanted size is increased so it can contain a BlockLink_t
structure in addition to the requested amount of bytes. */
if( xWantedSize > 0 )
{
xWantedSize += xHeapStructSize;
/* Ensure that blocks are always aligned to the required number
of bytes. */
if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
{
/* Byte alignment required. */
xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
{
/* Traverse the list from the start (lowest address) block until
one of adequate size is found. */
pxPreviousBlock = &xStart;
pxBlock = xStart.pxNextFreeBlock;
while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
{
pxPreviousBlock = pxBlock;
pxBlock = pxBlock->pxNextFreeBlock;
}
/* If the end marker was reached then a block of adequate size
was not found. */
if( pxBlock != pxEnd )
{
/* Return the memory space pointed to - jumping over the
BlockLink_t structure at its start. */
pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
/* This block is being returned for use so must be taken out
of the list of free blocks. */
pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
/* If the block is larger than required it can be split into
two. */
if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
{
/* This block is to be split into two. Create a new
block following the number of bytes requested. The void
cast is used to prevent byte alignment warnings from the
compiler. */
pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );
/* Calculate the sizes of two blocks split from the
single block. */
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
pxBlock->xBlockSize = xWantedSize;
/* Insert the new block into the list of free blocks. */
prvInsertBlockIntoFreeList( pxNewBlockLink );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xFreeBytesRemaining -= pxBlock->xBlockSize;
if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
{
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The block is being returned - it is allocated and owned
by the application and has no "next" block. */
pxBlock->xBlockSize |= xBlockAllocatedBit;
pxBlock->pxNextFreeBlock = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceMALLOC( pvReturn, xWantedSize );
}
( void ) xTaskResumeAll();
#if( configUSE_MALLOC_FAILED_HOOK == 1 )
{
if( pvReturn == NULL )
{
extern void vApplicationMallocFailedHook( void );
vApplicationMallocFailedHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );
return pvReturn;
}
/*-----------------------------------------------------------*/
void vPortFree( void *pv )
{
uint8_t *puc = ( uint8_t * ) pv;
BlockLink_t *pxLink;
if( pv != NULL )
{
/* The memory being freed will have a BlockLink_t structure immediately
before it. */
puc -= xHeapStructSize;
/* This casting is to keep the compiler from issuing warnings. */
pxLink = ( void * ) puc;
/* Check the block is actually allocated. */
configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
configASSERT( pxLink->pxNextFreeBlock == NULL );
if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
{
if( pxLink->pxNextFreeBlock == NULL )
{
/* The block is being returned to the heap - it is no longer
allocated. */
pxLink->xBlockSize &= ~xBlockAllocatedBit;
vTaskSuspendAll();
{
/* Add this block to the list of free blocks. */
xFreeBytesRemaining += pxLink->xBlockSize;
traceFREE( pv, pxLink->xBlockSize );
prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
}
( void ) xTaskResumeAll();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
/*-----------------------------------------------------------*/
size_t xPortGetFreeHeapSize( void )
{
return xFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
size_t xPortGetMinimumEverFreeHeapSize( void )
{
return xMinimumEverFreeBytesRemaining;
}
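/* Usage sketch (annotation, not part of the original file): allocating and
freeing through the FreeRTOS heap and reading the free-byte counters. The
function name below is hypothetical and the block is compiled out. */
#if 0
static void prvHeapUsageExample( void )
{
void *pv = pvPortMalloc( 128 );

	configASSERT( pv != NULL );

	/* 128 bytes, rounded up for alignment and the BlockLink_t header, are
	now in use. */
	( void ) xPortGetFreeHeapSize();

	/* Freeing returns the block to the free list, where it is coalesced
	with any adjacent free neighbours. */
	vPortFree( pv );
}
#endif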
/*-----------------------------------------------------------*/
void vPortInitialiseBlocks( void )
{
/* This just exists to keep the linker quiet. */
}
/*-----------------------------------------------------------*/
static void prvHeapInit( void )
{
BlockLink_t *pxFirstFreeBlock;
uint8_t *pucAlignedHeap;
size_t uxAddress;
size_t xTotalHeapSize = configTOTAL_HEAP_SIZE;
/* Ensure the heap starts on a correctly aligned boundary. */
uxAddress = ( size_t ) ucHeap;
if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 )
{
uxAddress += ( portBYTE_ALIGNMENT - 1 );
uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
}
pucAlignedHeap = ( uint8_t * ) uxAddress;
/* xStart is used to hold a pointer to the first item in the list of free
blocks. The void cast is used to prevent compiler warnings. */
xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
xStart.xBlockSize = ( size_t ) 0;
/* pxEnd is used to mark the end of the list of free blocks and is inserted
at the end of the heap space. */
uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
uxAddress -= xHeapStructSize;
uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
pxEnd = ( void * ) uxAddress;
pxEnd->xBlockSize = 0;
pxEnd->pxNextFreeBlock = NULL;
/* To start with there is a single free block that is sized to take up the
entire heap space, minus the space taken by pxEnd. */
pxFirstFreeBlock = ( void * ) pucAlignedHeap;
pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
/* Only one block exists - and it covers the entire usable heap space. */
xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
/* Work out the position of the top bit in a size_t variable. */
xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 );
}
/*-----------------------------------------------------------*/
static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert )
{
BlockLink_t *pxIterator;
uint8_t *puc;
/* Iterate through the list until a block is found that has a higher address
than the block being inserted. */
for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
{
/* Nothing to do here, just iterate to the right position. */
}
/* Does the block being inserted, together with the block it is being
inserted after, make a contiguous block of memory? */
puc = ( uint8_t * ) pxIterator;
if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
{
pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
pxBlockToInsert = pxIterator;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Does the block being inserted, together with the block it is being
inserted before, make a contiguous block of memory? */
puc = ( uint8_t * ) pxBlockToInsert;
if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
{
if( pxIterator->pxNextFreeBlock != pxEnd )
{
/* Form one big block from the two blocks. */
pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxEnd;
}
}
else
{
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
}
/* If the block being inserted plugged a gap, so was merged with the block
before and the block after, then its pxNextFreeBlock pointer will have
already been set, and should not be set here as that would make it point
to itself. */
if( pxIterator != pxBlockToInsert )
{
pxIterator->pxNextFreeBlock = pxBlockToInsert;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}


@@ -1,115 +0,0 @@
/**
* @file
* Error Management module
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/err.h"
#include "lwip/def.h"
#include "lwip/sys.h"
#include "lwip/errno.h"
#if !NO_SYS
/** Table to quickly map an lwIP error (err_t) to a socket error
* by using -err as an index */
static const int err_to_errno_table[] = {
0, /* ERR_OK 0 No error, everything OK. */
ENOMEM, /* ERR_MEM -1 Out of memory error. */
ENOBUFS, /* ERR_BUF -2 Buffer error. */
EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */
EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */
EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */
EINVAL, /* ERR_VAL -6 Illegal value. */
EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */
EADDRINUSE, /* ERR_USE -8 Address in use. */
EALREADY, /* ERR_ALREADY -9 Already connecting. */
EISCONN, /* ERR_ISCONN -10 Conn already established.*/
ENOTCONN, /* ERR_CONN -11 Not connected. */
-1, /* ERR_IF -12 Low-level netif error */
ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */
ECONNRESET, /* ERR_RST -14 Connection reset. */
ENOTCONN, /* ERR_CLSD -15 Connection closed. */
EIO /* ERR_ARG -16 Illegal argument. */
};
int
err_to_errno(err_t err)
{
if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) {
return EIO;
}
return err_to_errno_table[-err];
}
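/* Usage sketch (hypothetical application code, not part of this file):
 * map an lwIP err_t to a BSD socket errno before reporting it upward.
 *
 *   err_t err = udp_send(pcb, p);   // any lwIP call returning err_t
 *   if (err != ERR_OK) {
 *     errno = err_to_errno(err);    // e.g. ERR_MEM (-1) maps to ENOMEM
 *   }
 */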
#endif /* !NO_SYS */
#ifdef LWIP_DEBUG
static const char *err_strerr[] = {
"Ok.", /* ERR_OK 0 */
"Out of memory error.", /* ERR_MEM -1 */
"Buffer error.", /* ERR_BUF -2 */
"Timeout.", /* ERR_TIMEOUT -3 */
"Routing problem.", /* ERR_RTE -4 */
"Operation in progress.", /* ERR_INPROGRESS -5 */
"Illegal value.", /* ERR_VAL -6 */
"Operation would block.", /* ERR_WOULDBLOCK -7 */
"Address in use.", /* ERR_USE -8 */
"Already connecting.", /* ERR_ALREADY -9 */
"Already connected.", /* ERR_ISCONN -10 */
"Not connected.", /* ERR_CONN -11 */
"Low-level netif error.", /* ERR_IF -12 */
"Connection aborted.", /* ERR_ABRT -13 */
"Connection reset.", /* ERR_RST -14 */
"Connection closed.", /* ERR_CLSD -15 */
"Illegal argument." /* ERR_ARG -16 */
};
/**
* Convert an lwip internal error to a string representation.
*
* @param err an lwip internal err_t
* @return a string representation for err
*/
const char *
lwip_strerr(err_t err)
{
if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) {
return "Unknown error.";
}
return err_strerr[-err];
}
#endif /* LWIP_DEBUG */


@@ -1,102 +0,0 @@
/**
* @file
* Interface Identification APIs from:
* RFC 3493: Basic Socket Interface Extensions for IPv6
* Section 4: Interface Identification
*
* @defgroup if_api Interface Identification API
* @ingroup socket
*/
/*
* Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. <joel.cunningham@garmin.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Joel Cunningham <joel.cunningham@me.com>
*
*/
#include "lwip/opt.h"
#if LWIP_SOCKET
#include "lwip/errno.h"
#include "lwip/if_api.h"
#include "lwip/netifapi.h"
#include "lwip/priv/sockets_priv.h"
/**
* @ingroup if_api
* Maps an interface index to its corresponding name.
* @param ifindex interface index
* @param ifname shall point to a buffer of at least {IF_NAMESIZE} bytes
* @return If ifindex is an interface index, then the function shall return the
* value supplied in ifname, which points to a buffer now containing the interface name.
* Otherwise, the function shall return a NULL pointer.
*/
char *
lwip_if_indextoname(unsigned int ifindex, char *ifname)
{
#if LWIP_NETIF_API
if (ifindex <= 0xff) {
err_t err = netifapi_netif_index_to_name((u8_t)ifindex, ifname);
if (!err && ifname[0] != '\0') {
return ifname;
}
}
#else /* LWIP_NETIF_API */
LWIP_UNUSED_ARG(ifindex);
LWIP_UNUSED_ARG(ifname);
#endif /* LWIP_NETIF_API */
set_errno(ENXIO);
return NULL;
}
/**
* @ingroup if_api
 * Returns the interface index corresponding to name ifname.
* @param ifname Interface name
* @return The corresponding index if ifname is the name of an interface;
* otherwise, zero.
*/
unsigned int
lwip_if_nametoindex(const char *ifname)
{
#if LWIP_NETIF_API
err_t err;
u8_t idx;
err = netifapi_netif_name_to_index(ifname, &idx);
if (!err) {
return idx;
}
#else /* LWIP_NETIF_API */
LWIP_UNUSED_ARG(ifname);
#endif /* LWIP_NETIF_API */
return 0; /* invalid index */
}
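/* Usage sketch (hypothetical application code, not part of this file):
 * round-trip an interface name through the two lookups.
 *
 *   char name[IF_NAMESIZE];
 *   unsigned int idx = lwip_if_nametoindex("en0");   // "en0" is an assumed netif name
 *   if ((idx != 0) && (lwip_if_indextoname(idx, name) != NULL)) {
 *     // name now holds "en0" again
 *   }
 */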
#endif /* LWIP_SOCKET */


@@ -1,250 +0,0 @@
/**
* @file
* Network buffer management
*
* @defgroup netbuf Network buffers
* @ingroup netconn
* Network buffer descriptor for @ref netconn. Based on @ref pbuf internally
* to avoid copying data around.\n
 * Buffers must not be shared across multiple threads; all functions except
 * netbuf_new() and netbuf_delete() are not thread-safe.
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
#include "lwip/netbuf.h"
#include "lwip/memp.h"
#include <string.h>
/**
* @ingroup netbuf
* Create (allocate) and initialize a new netbuf.
* The netbuf doesn't yet contain a packet buffer!
*
* @return a pointer to a new netbuf
* NULL on lack of memory
*/
struct netbuf *
netbuf_new(void)
{
struct netbuf *buf;
buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
if (buf != NULL) {
memset(buf, 0, sizeof(struct netbuf));
}
return buf;
}
/**
* @ingroup netbuf
* Deallocate a netbuf allocated by netbuf_new().
*
* @param buf pointer to a netbuf allocated by netbuf_new()
*/
void
netbuf_delete(struct netbuf *buf)
{
if (buf != NULL) {
if (buf->p != NULL) {
pbuf_free(buf->p);
buf->p = buf->ptr = NULL;
}
memp_free(MEMP_NETBUF, buf);
}
}
/**
* @ingroup netbuf
* Allocate memory for a packet buffer for a given netbuf.
*
* @param buf the netbuf for which to allocate a packet buffer
* @param size the size of the packet buffer to allocate
* @return pointer to the allocated memory
* NULL if no memory could be allocated
*/
void *
netbuf_alloc(struct netbuf *buf, u16_t size)
{
LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;);
/* Deallocate any previously allocated memory. */
if (buf->p != NULL) {
pbuf_free(buf->p);
}
buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM);
if (buf->p == NULL) {
return NULL;
}
LWIP_ASSERT("check that first pbuf can hold size",
(buf->p->len >= size));
buf->ptr = buf->p;
return buf->p->payload;
}
/**
* @ingroup netbuf
* Free the packet buffer included in a netbuf
*
* @param buf pointer to the netbuf which contains the packet buffer to free
*/
void
netbuf_free(struct netbuf *buf)
{
LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;);
if (buf->p != NULL) {
pbuf_free(buf->p);
}
buf->p = buf->ptr = NULL;
#if LWIP_CHECKSUM_ON_COPY
buf->flags = 0;
buf->toport_chksum = 0;
#endif /* LWIP_CHECKSUM_ON_COPY */
}
/**
* @ingroup netbuf
* Let a netbuf reference existing (non-volatile) data.
*
* @param buf netbuf which should reference the data
* @param dataptr pointer to the data to reference
* @param size size of the data
* @return ERR_OK if data is referenced
* ERR_MEM if data couldn't be referenced due to lack of memory
*/
err_t
netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size)
{
LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;);
if (buf->p != NULL) {
pbuf_free(buf->p);
}
buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
if (buf->p == NULL) {
buf->ptr = NULL;
return ERR_MEM;
}
((struct pbuf_rom *)buf->p)->payload = dataptr;
buf->p->len = buf->p->tot_len = size;
buf->ptr = buf->p;
return ERR_OK;
}
/**
* @ingroup netbuf
* Chain one netbuf to another (@see pbuf_chain)
*
* @param head the first netbuf
 * @param tail netbuf to chain after head; freed by this function and must not be referenced after it returns
*/
void
netbuf_chain(struct netbuf *head, struct netbuf *tail)
{
LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;);
LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;);
pbuf_cat(head->p, tail->p);
head->ptr = head->p;
memp_free(MEMP_NETBUF, tail);
}
/**
* @ingroup netbuf
* Get the data pointer and length of the data inside a netbuf.
*
* @param buf netbuf to get the data from
* @param dataptr pointer to a void pointer where to store the data pointer
* @param len pointer to an u16_t where the length of the data is stored
* @return ERR_OK if the information was retrieved,
* ERR_BUF on error.
*/
err_t
netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len)
{
LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;);
LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;);
if (buf->ptr == NULL) {
return ERR_BUF;
}
*dataptr = buf->ptr->payload;
*len = buf->ptr->len;
return ERR_OK;
}
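/* Usage sketch (hypothetical application code, not part of this file):
 * allocate a netbuf, fill it, and read the data pointer back out.
 *
 *   struct netbuf *buf = netbuf_new();
 *   if (buf != NULL) {
 *     void *data = netbuf_alloc(buf, 4);
 *     if (data != NULL) {
 *       void *ptr; u16_t len;
 *       memcpy(data, "ping", 4);
 *       if (netbuf_data(buf, &ptr, &len) == ERR_OK) {
 *         // ptr/len describe the first pbuf of the chain
 *       }
 *     }
 *     netbuf_delete(buf);
 *   }
 */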
/**
* @ingroup netbuf
* Move the current data pointer of a packet buffer contained in a netbuf
* to the next part.
* The packet buffer itself is not modified.
*
* @param buf the netbuf to modify
* @return -1 if there is no next part
* 1 if moved to the next part but now there is no next part
* 0 if moved to the next part and there are still more parts
*/
s8_t
netbuf_next(struct netbuf *buf)
{
LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;);
if (buf->ptr->next == NULL) {
return -1;
}
buf->ptr = buf->ptr->next;
if (buf->ptr->next == NULL) {
return 1;
}
return 0;
}
/**
* @ingroup netbuf
* Move the current data pointer of a packet buffer contained in a netbuf
* to the beginning of the packet.
* The packet buffer itself is not modified.
*
* @param buf the netbuf to modify
*/
void
netbuf_first(struct netbuf *buf)
{
LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;);
buf->ptr = buf->p;
}
#endif /* LWIP_NETCONN */


@@ -1,414 +0,0 @@
/**
* @file
* API functions for name resolving
*
* @defgroup netdbapi NETDB API
* @ingroup socket
*/
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt
*
*/
#include "lwip/netdb.h"
#if LWIP_DNS && LWIP_SOCKET
#include "lwip/err.h"
#include "lwip/mem.h"
#include "lwip/memp.h"
#include "lwip/ip_addr.h"
#include "lwip/api.h"
#include "lwip/dns.h"
#include <string.h> /* memset */
#include <stdlib.h> /* atoi */
/** helper struct for gethostbyname_r to access the char* buffer */
struct gethostbyname_r_helper {
ip_addr_t *addr_list[2];
ip_addr_t addr;
char *aliases;
};
/** h_errno is exported in netdb.h for access by applications. */
#if LWIP_DNS_API_DECLARE_H_ERRNO
int h_errno;
#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */
/** define "hostent" variables storage: 0 if we use a static (but unprotected)
 * set of variables for lwip_gethostbyname, 1 if we use local storage */
#ifndef LWIP_DNS_API_HOSTENT_STORAGE
#define LWIP_DNS_API_HOSTENT_STORAGE 0
#endif
/** define "hostent" variables storage */
#if LWIP_DNS_API_HOSTENT_STORAGE
#define HOSTENT_STORAGE
#else
#define HOSTENT_STORAGE static
#endif /* LWIP_DNS_API_HOSTENT_STORAGE */
/**
* Returns an entry containing addresses of address family AF_INET
* for the host with name name.
* Due to dns_gethostbyname limitations, only one address is returned.
*
* @param name the hostname to resolve
* @return an entry containing addresses of address family AF_INET
* for the host with name name
*/
struct hostent *
lwip_gethostbyname(const char *name)
{
err_t err;
ip_addr_t addr;
/* buffer variables for lwip_gethostbyname() */
HOSTENT_STORAGE struct hostent s_hostent;
HOSTENT_STORAGE char *s_aliases;
HOSTENT_STORAGE ip_addr_t s_hostent_addr;
HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2];
HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1];
/* query host IP address */
err = netconn_gethostbyname(name, &addr);
if (err != ERR_OK) {
LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err));
h_errno = HOST_NOT_FOUND;
return NULL;
}
/* fill hostent */
s_hostent_addr = addr;
s_phostent_addr[0] = &s_hostent_addr;
s_phostent_addr[1] = NULL;
strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH);
s_hostname[DNS_MAX_NAME_LENGTH] = 0;
s_hostent.h_name = s_hostname;
s_aliases = NULL;
s_hostent.h_aliases = &s_aliases;
s_hostent.h_addrtype = AF_INET;
s_hostent.h_length = sizeof(ip_addr_t);
s_hostent.h_addr_list = (char **)&s_phostent_addr;
#if DNS_DEBUG
/* dump hostent */
LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name));
LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void *)s_hostent.h_aliases));
/* h_aliases are always empty */
LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype));
LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length));
LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void *)s_hostent.h_addr_list));
if (s_hostent.h_addr_list != NULL) {
u8_t idx;
for (idx = 0; s_hostent.h_addr_list[idx]; idx++) {
LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx]));
LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t *)s_hostent.h_addr_list[idx])));
}
}
#endif /* DNS_DEBUG */
#if LWIP_DNS_API_HOSTENT_STORAGE
/* this function should return the "per-thread" hostent after copy from s_hostent */
return sys_thread_hostent(&s_hostent);
#else
return &s_hostent;
#endif /* LWIP_DNS_API_HOSTENT_STORAGE */
}
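/* Usage sketch (hypothetical application code, not part of this file):
 * resolve a name and print the single address that is returned.
 *
 *   struct hostent *he = lwip_gethostbyname("example.org");
 *   if ((he != NULL) && (he->h_addr_list[0] != NULL)) {
 *     ip_addr_t *addr = (ip_addr_t *)he->h_addr_list[0];
 *     printf("resolved to %s\n", ipaddr_ntoa(addr));
 *   }
 */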
/**
* Thread-safe variant of lwip_gethostbyname: instead of using a static
* buffer, this function takes buffer and errno pointers as arguments
* and uses these for the result.
*
* @param name the hostname to resolve
* @param ret pre-allocated struct where to store the result
* @param buf pre-allocated buffer where to store additional data
* @param buflen the size of buf
* @param result pointer to a hostent pointer that is set to ret on success
* and set to zero on error
* @param h_errnop pointer to an int where to store errors (instead of modifying
* the global h_errno)
* @return 0 on success, non-zero on error, additional error information
* is stored in *h_errnop instead of h_errno to be thread-safe
*/
int
lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf,
size_t buflen, struct hostent **result, int *h_errnop)
{
err_t err;
struct gethostbyname_r_helper *h;
char *hostname;
size_t namelen;
int lh_errno;
if (h_errnop == NULL) {
/* ensure h_errnop is never NULL */
h_errnop = &lh_errno;
}
if (result == NULL) {
/* not all arguments given */
*h_errnop = EINVAL;
return -1;
}
/* first thing to do: set *result to nothing */
*result = NULL;
if ((name == NULL) || (ret == NULL) || (buf == NULL)) {
/* not all arguments given */
*h_errnop = EINVAL;
return -1;
}
namelen = strlen(name);
if (buflen < (sizeof(struct gethostbyname_r_helper) + LWIP_MEM_ALIGN_BUFFER(namelen + 1))) {
/* buf can't hold the data needed + a copy of name */
*h_errnop = ERANGE;
return -1;
}
h = (struct gethostbyname_r_helper *)LWIP_MEM_ALIGN(buf);
hostname = ((char *)h) + sizeof(struct gethostbyname_r_helper);
/* query host IP address */
err = netconn_gethostbyname(name, &h->addr);
if (err != ERR_OK) {
LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err));
*h_errnop = HOST_NOT_FOUND;
return -1;
}
/* copy the hostname into buf */
MEMCPY(hostname, name, namelen);
hostname[namelen] = 0;
/* fill hostent */
h->addr_list[0] = &h->addr;
h->addr_list[1] = NULL;
h->aliases = NULL;
ret->h_name = hostname;
ret->h_aliases = &h->aliases;
ret->h_addrtype = AF_INET;
ret->h_length = sizeof(ip_addr_t);
ret->h_addr_list = (char **)&h->addr_list;
/* set result != NULL */
*result = ret;
/* return success */
return 0;
}
/**
* Frees one or more addrinfo structures returned by getaddrinfo(), along with
* any additional storage associated with those structures. If the ai_next field
* of the structure is not null, the entire list of structures is freed.
*
* @param ai struct addrinfo to free
*/
void
lwip_freeaddrinfo(struct addrinfo *ai)
{
struct addrinfo *next;
while (ai != NULL) {
next = ai->ai_next;
memp_free(MEMP_NETDB, ai);
ai = next;
}
}
/**
* Translates the name of a service location (for example, a host name) and/or
* a service name and returns a set of socket addresses and associated
* information to be used in creating a socket with which to address the
* specified service.
* Memory for the result is allocated internally and must be freed by calling
* lwip_freeaddrinfo()!
*
* Due to a limitation in dns_gethostbyname, only the first address of a
* host is returned.
* Also, service names are not supported (only port numbers)!
*
* @param nodename descriptive name or address string of the host
* (may be NULL -> local address)
 * @param servname port number as string, or NULL
* @param hints structure containing input values that set socktype and protocol
* @param res pointer to a pointer where to store the result (set to NULL on failure)
* @return 0 on success, non-zero on failure
*
* @todo: implement AI_V4MAPPED, AI_ADDRCONFIG
*/
int
lwip_getaddrinfo(const char *nodename, const char *servname,
const struct addrinfo *hints, struct addrinfo **res)
{
err_t err;
ip_addr_t addr;
struct addrinfo *ai;
struct sockaddr_storage *sa = NULL;
int port_nr = 0;
size_t total_size;
size_t namelen = 0;
int ai_family;
if (res == NULL) {
return EAI_FAIL;
}
*res = NULL;
if ((nodename == NULL) && (servname == NULL)) {
return EAI_NONAME;
}
if (hints != NULL) {
ai_family = hints->ai_family;
if ((ai_family != AF_UNSPEC)
#if LWIP_IPV4
&& (ai_family != AF_INET)
#endif /* LWIP_IPV4 */
#if LWIP_IPV6
&& (ai_family != AF_INET6)
#endif /* LWIP_IPV6 */
) {
return EAI_FAMILY;
}
} else {
ai_family = AF_UNSPEC;
}
if (servname != NULL) {
/* service name specified: convert to port number
* @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */
port_nr = atoi(servname);
if ((port_nr <= 0) || (port_nr > 0xffff)) {
return EAI_SERVICE;
}
}
if (nodename != NULL) {
/* service location specified, try to resolve */
if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) {
/* no DNS lookup, just parse for an address string */
if (!ipaddr_aton(nodename, &addr)) {
return EAI_NONAME;
}
#if LWIP_IPV4 && LWIP_IPV6
if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) ||
(IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) {
return EAI_NONAME;
}
#endif /* LWIP_IPV4 && LWIP_IPV6 */
} else {
#if LWIP_IPV4 && LWIP_IPV6
/* AF_UNSPEC: prefer IPv4 */
u8_t type = NETCONN_DNS_IPV4_IPV6;
if (ai_family == AF_INET) {
type = NETCONN_DNS_IPV4;
} else if (ai_family == AF_INET6) {
type = NETCONN_DNS_IPV6;
}
err = netconn_gethostbyname_addrtype(nodename, &addr, type);
#else /* LWIP_IPV4 && LWIP_IPV6 */
err = netconn_gethostbyname(nodename, &addr);
#endif /* LWIP_IPV4 && LWIP_IPV6 */
if (err != ERR_OK) {
return EAI_FAIL;
}
}
} else {
/* no service location specified, use loopback or any address */
if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) {
ip_addr_set_any_val(ai_family == AF_INET6, addr);
} else {
ip_addr_set_loopback_val(ai_family == AF_INET6, addr);
}
}
total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage);
if (nodename != NULL) {
namelen = strlen(nodename);
if (namelen > DNS_MAX_NAME_LENGTH) {
/* invalid name length */
return EAI_FAIL;
}
LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size);
total_size += namelen + 1;
}
/* If this fails, please report to lwip-devel! :-) */
LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!",
total_size <= NETDB_ELEM_SIZE);
ai = (struct addrinfo *)memp_malloc(MEMP_NETDB);
if (ai == NULL) {
return EAI_MEMORY;
}
memset(ai, 0, total_size);
/* cast through void* to get rid of alignment warnings */
sa = (struct sockaddr_storage *)(void *)((u8_t *)ai + sizeof(struct addrinfo));
if (IP_IS_V6_VAL(addr)) {
#if LWIP_IPV6
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa;
/* set up sockaddr */
inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr));
sa6->sin6_family = AF_INET6;
sa6->sin6_len = sizeof(struct sockaddr_in6);
sa6->sin6_port = lwip_htons((u16_t)port_nr);
sa6->sin6_scope_id = ip6_addr_zone(ip_2_ip6(&addr));
ai->ai_family = AF_INET6;
#endif /* LWIP_IPV6 */
} else {
#if LWIP_IPV4
struct sockaddr_in *sa4 = (struct sockaddr_in *)sa;
/* set up sockaddr */
inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr));
sa4->sin_family = AF_INET;
sa4->sin_len = sizeof(struct sockaddr_in);
sa4->sin_port = lwip_htons((u16_t)port_nr);
ai->ai_family = AF_INET;
#endif /* LWIP_IPV4 */
}
/* set up addrinfo */
if (hints != NULL) {
/* copy socktype & protocol from hints if specified */
ai->ai_socktype = hints->ai_socktype;
ai->ai_protocol = hints->ai_protocol;
}
if (nodename != NULL) {
/* copy nodename to canonname if specified */
ai->ai_canonname = ((char *)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage));
MEMCPY(ai->ai_canonname, nodename, namelen);
ai->ai_canonname[namelen] = 0;
}
ai->ai_addrlen = sizeof(struct sockaddr_storage);
ai->ai_addr = (struct sockaddr *)sa;
*res = ai;
return 0;
}
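/* Usage sketch (hypothetical application code, not part of this file):
 * resolve a host/port pair for a TCP socket, then free the result.
 *
 *   struct addrinfo hints, *res = NULL;
 *   memset(&hints, 0, sizeof(hints));
 *   hints.ai_family = AF_UNSPEC;
 *   hints.ai_socktype = SOCK_STREAM;
 *   if (lwip_getaddrinfo("example.org", "80", &hints, &res) == 0) {
 *     int s = lwip_socket(res->ai_family, res->ai_socktype, res->ai_protocol);
 *     if (s >= 0) {
 *       lwip_connect(s, res->ai_addr, res->ai_addrlen);
 *       lwip_close(s);
 *     }
 *     lwip_freeaddrinfo(res);   // the result is allocated internally and must be freed
 *   }
 */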
#endif /* LWIP_DNS && LWIP_SOCKET */


@@ -1,380 +0,0 @@
/**
* @file
* Network Interface Sequential API module
*
* @defgroup netifapi NETIF API
* @ingroup sequential_api
* Thread-safe functions to be called from non-TCPIP threads
*
* @defgroup netifapi_netif NETIF related
* @ingroup netifapi
* To be called from non-TCPIP threads
*/
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwip/opt.h"
#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */
#include "lwip/etharp.h"
#include "lwip/netifapi.h"
#include "lwip/memp.h"
#include "lwip/priv/tcpip_priv.h"
#include <string.h> /* strncpy */
#define NETIFAPI_VAR_REF(name) API_VAR_REF(name)
#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name)
#define NETIFAPI_VAR_ALLOC(name) API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM)
#define NETIFAPI_VAR_FREE(name) API_VAR_FREE(MEMP_NETIFAPI_MSG, name)
/**
* Call netif_add() inside the tcpip_thread context.
*/
static err_t
netifapi_do_netif_add(struct tcpip_api_call_data *m)
{
/* cast through void* to silence alignment warnings.
* We know it works because the structs have been instantiated as struct netifapi_msg */
struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
if (!netif_add( msg->netif,
#if LWIP_IPV4
API_EXPR_REF(msg->msg.add.ipaddr),
API_EXPR_REF(msg->msg.add.netmask),
API_EXPR_REF(msg->msg.add.gw),
#endif /* LWIP_IPV4 */
msg->msg.add.state,
msg->msg.add.init,
msg->msg.add.input)) {
return ERR_IF;
} else {
return ERR_OK;
}
}
#if LWIP_IPV4
/**
* Call netif_set_addr() inside the tcpip_thread context.
*/
static err_t
netifapi_do_netif_set_addr(struct tcpip_api_call_data *m)
{
/* cast through void* to silence alignment warnings.
* We know it works because the structs have been instantiated as struct netifapi_msg */
struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
netif_set_addr( msg->netif,
API_EXPR_REF(msg->msg.add.ipaddr),
API_EXPR_REF(msg->msg.add.netmask),
API_EXPR_REF(msg->msg.add.gw));
return ERR_OK;
}
#endif /* LWIP_IPV4 */
/**
* Call netif_name_to_index() inside the tcpip_thread context.
*/
static err_t
netifapi_do_name_to_index(struct tcpip_api_call_data *m)
{
/* cast through void* to silence alignment warnings.
* We know it works because the structs have been instantiated as struct netifapi_msg */
struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
msg->msg.ifs.index = netif_name_to_index(msg->msg.ifs.name);
return ERR_OK;
}
/**
* Call netif_index_to_name() inside the tcpip_thread context.
*/
static err_t
netifapi_do_index_to_name(struct tcpip_api_call_data *m)
{
/* cast through void* to silence alignment warnings.
* We know it works because the structs have been instantiated as struct netifapi_msg */
struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
if (!netif_index_to_name(msg->msg.ifs.index, msg->msg.ifs.name)) {
/* return failure via empty name */
msg->msg.ifs.name[0] = '\0';
}
return ERR_OK;
}
/**
* Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the
* tcpip_thread context.
*/
static err_t
netifapi_do_netif_common(struct tcpip_api_call_data *m)
{
/* cast through void* to silence alignment warnings.
* We know it works because the structs have been instantiated as struct netifapi_msg */
struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
if (msg->msg.common.errtfunc != NULL) {
return msg->msg.common.errtfunc(msg->netif);
} else {
msg->msg.common.voidfunc(msg->netif);
return ERR_OK;
}
}
#if LWIP_ARP && LWIP_IPV4
/**
* @ingroup netifapi_arp
* Add or update an entry in the ARP cache.
* For an update, ipaddr is used to find the cache entry.
*
* @param ipaddr IPv4 address of cache entry
* @param ethaddr hardware address mapped to ipaddr
* @param type type of ARP cache entry
* @return ERR_OK: entry added/updated, else error from err_t
*/
err_t
netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type)
{
err_t err;
/* We only support permanent entries currently */
LWIP_UNUSED_ARG(type);
#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING
LOCK_TCPIP_CORE();
err = etharp_add_static_entry(ipaddr, ethaddr);
UNLOCK_TCPIP_CORE();
#else
/* @todo add new vars to struct netifapi_msg and create a 'do' func */
LWIP_UNUSED_ARG(ipaddr);
LWIP_UNUSED_ARG(ethaddr);
err = ERR_VAL;
#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */
return err;
}
/**
* @ingroup netifapi_arp
* Remove an entry in the ARP cache identified by ipaddr
*
* @param ipaddr IPv4 address of cache entry
* @param type type of ARP cache entry
* @return ERR_OK: entry removed, else error from err_t
*/
err_t
netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type)
{
err_t err;
/* We only support permanent entries currently */
LWIP_UNUSED_ARG(type);
#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING
LOCK_TCPIP_CORE();
err = etharp_remove_static_entry(ipaddr);
UNLOCK_TCPIP_CORE();
#else
/* @todo add new vars to struct netifapi_msg and create a 'do' func */
LWIP_UNUSED_ARG(ipaddr);
err = ERR_VAL;
#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */
return err;
}
#endif /* LWIP_ARP && LWIP_IPV4 */
/**
* @ingroup netifapi_netif
* Call netif_add() in a thread-safe way by running that function inside the
* tcpip_thread context.
*
* @note for params @see netif_add()
*/
err_t
netifapi_netif_add(struct netif *netif,
#if LWIP_IPV4
const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw,
#endif /* LWIP_IPV4 */
void *state, netif_init_fn init, netif_input_fn input)
{
err_t err;
NETIFAPI_VAR_DECLARE(msg);
NETIFAPI_VAR_ALLOC(msg);
#if LWIP_IPV4
if (ipaddr == NULL) {
ipaddr = IP4_ADDR_ANY4;
}
if (netmask == NULL) {
netmask = IP4_ADDR_ANY4;
}
if (gw == NULL) {
gw = IP4_ADDR_ANY4;
}
#endif /* LWIP_IPV4 */
NETIFAPI_VAR_REF(msg).netif = netif;
#if LWIP_IPV4
NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr);
NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask);
NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw);
#endif /* LWIP_IPV4 */
NETIFAPI_VAR_REF(msg).msg.add.state = state;
NETIFAPI_VAR_REF(msg).msg.add.init = init;
NETIFAPI_VAR_REF(msg).msg.add.input = input;
err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call);
NETIFAPI_VAR_FREE(msg);
return err;
}
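/* Usage sketch (hypothetical application code, not part of this file; assumes
 * LWIP_IPV4, an assumed driver init function my_netif_init and state pointer
 * my_netif_state, and tcpip_input as the OS-mode input function):
 *
 *   static struct netif my_netif;
 *   ip4_addr_t ip, mask, gw;
 *   IP4_ADDR(&ip,   192, 168, 1, 2);
 *   IP4_ADDR(&mask, 255, 255, 255, 0);
 *   IP4_ADDR(&gw,   192, 168, 1, 1);
 *   netifapi_netif_add(&my_netif, &ip, &mask, &gw,
 *                      my_netif_state, my_netif_init, tcpip_input);
 *   netifapi_netif_common(&my_netif, netif_set_up, NULL);
 */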
#if LWIP_IPV4
/**
* @ingroup netifapi_netif
* Call netif_set_addr() in a thread-safe way by running that function inside the
* tcpip_thread context.
*
* @note for params @see netif_set_addr()
*/
err_t
netifapi_netif_set_addr(struct netif *netif,
const ip4_addr_t *ipaddr,
const ip4_addr_t *netmask,
const ip4_addr_t *gw)
{
err_t err;
NETIFAPI_VAR_DECLARE(msg);
NETIFAPI_VAR_ALLOC(msg);
if (ipaddr == NULL) {
ipaddr = IP4_ADDR_ANY4;
}
if (netmask == NULL) {
netmask = IP4_ADDR_ANY4;
}
if (gw == NULL) {
gw = IP4_ADDR_ANY4;
}
NETIFAPI_VAR_REF(msg).netif = netif;
NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr);
NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask);
NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw);
err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call);
NETIFAPI_VAR_FREE(msg);
return err;
}
#endif /* LWIP_IPV4 */
/**
* call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe
* way by running that function inside the tcpip_thread context.
*
 * @note use only for functions where there is only the "netif" parameter.
*/
err_t
netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc,
netifapi_errt_fn errtfunc)
{
err_t err;
NETIFAPI_VAR_DECLARE(msg);
NETIFAPI_VAR_ALLOC(msg);
NETIFAPI_VAR_REF(msg).netif = netif;
NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc;
NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc;
err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call);
NETIFAPI_VAR_FREE(msg);
return err;
}
/**
* @ingroup netifapi_netif
* Call netif_name_to_index() in a thread-safe way by running that function inside the
* tcpip_thread context.
*
* @param name the interface name of the netif
* @param idx output index of the found netif
*/
err_t
netifapi_netif_name_to_index(const char *name, u8_t *idx)
{
err_t err;
NETIFAPI_VAR_DECLARE(msg);
NETIFAPI_VAR_ALLOC(msg);
*idx = 0;
#if LWIP_MPU_COMPATIBLE
strncpy(NETIFAPI_VAR_REF(msg).msg.ifs.name, name, NETIF_NAMESIZE - 1);
NETIFAPI_VAR_REF(msg).msg.ifs.name[NETIF_NAMESIZE - 1] = '\0';
#else
NETIFAPI_VAR_REF(msg).msg.ifs.name = LWIP_CONST_CAST(char *, name);
#endif /* LWIP_MPU_COMPATIBLE */
err = tcpip_api_call(netifapi_do_name_to_index, &API_VAR_REF(msg).call);
if (!err) {
*idx = NETIFAPI_VAR_REF(msg).msg.ifs.index;
}
NETIFAPI_VAR_FREE(msg);
return err;
}
/**
* @ingroup netifapi_netif
* Call netif_index_to_name() in a thread-safe way by running that function inside the
* tcpip_thread context.
*
* @param idx the interface index of the netif
* @param name output name of the found netif, empty '\0' string if netif not found.
* name should be of at least NETIF_NAMESIZE bytes
*/
err_t
netifapi_netif_index_to_name(u8_t idx, char *name)
{
err_t err;
NETIFAPI_VAR_DECLARE(msg);
NETIFAPI_VAR_ALLOC(msg);
NETIFAPI_VAR_REF(msg).msg.ifs.index = idx;
#if !LWIP_MPU_COMPATIBLE
NETIFAPI_VAR_REF(msg).msg.ifs.name = name;
#endif /* LWIP_MPU_COMPATIBLE */
err = tcpip_api_call(netifapi_do_index_to_name, &API_VAR_REF(msg).call);
#if LWIP_MPU_COMPATIBLE
if (!err) {
strncpy(name, NETIFAPI_VAR_REF(msg).msg.ifs.name, NETIF_NAMESIZE - 1);
name[NETIF_NAMESIZE - 1] = '\0';
}
#endif /* LWIP_MPU_COMPATIBLE */
NETIFAPI_VAR_FREE(msg);
return err;
}
#endif /* LWIP_NETIF_API */


@@ -1,658 +0,0 @@
/**
* @file
* Sequential API Main thread module
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if !NO_SYS /* don't build if not configured for use in lwipopts.h */
#include "lwip/priv/tcpip_priv.h"
#include "lwip/sys.h"
#include "lwip/memp.h"
#include "lwip/mem.h"
#include "lwip/init.h"
#include "lwip/ip.h"
#include "lwip/pbuf.h"
#include "lwip/etharp.h"
#include "netif/ethernet.h"
#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name)
#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name)
#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM)
#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name)
/* global variables */
static tcpip_init_done_fn tcpip_init_done;
static void *tcpip_init_done_arg;
static sys_mbox_t tcpip_mbox;
#if LWIP_TCPIP_CORE_LOCKING
/** The global semaphore to lock the stack. */
sys_mutex_t lock_tcpip_core;
#endif /* LWIP_TCPIP_CORE_LOCKING */
static void tcpip_thread_handle_msg(struct tcpip_msg *msg);
#if !LWIP_TIMERS
/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */
#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg)
#else /* !LWIP_TIMERS */
/* wait for a message, timeouts are processed while waiting */
#define TCPIP_MBOX_FETCH(mbox, msg) tcpip_timeouts_mbox_fetch(mbox, msg)
/**
* Wait (forever) for a message to arrive in an mbox.
* While waiting, timeouts are processed.
*
* @param mbox the mbox to fetch the message from
* @param msg the place to store the message
*/
static void
tcpip_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg)
{
u32_t sleeptime, res;
again:
LWIP_ASSERT_CORE_LOCKED();
sleeptime = sys_timeouts_sleeptime();
if (sleeptime == SYS_TIMEOUTS_SLEEPTIME_INFINITE) {
UNLOCK_TCPIP_CORE();
sys_arch_mbox_fetch(mbox, msg, 0);
LOCK_TCPIP_CORE();
return;
} else if (sleeptime == 0) {
sys_check_timeouts();
/* We try again to fetch a message from the mbox. */
goto again;
}
UNLOCK_TCPIP_CORE();
res = sys_arch_mbox_fetch(mbox, msg, sleeptime);
LOCK_TCPIP_CORE();
if (res == SYS_ARCH_TIMEOUT) {
/* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred
before a message could be fetched. */
sys_check_timeouts();
/* We try again to fetch a message from the mbox. */
goto again;
}
}
#endif /* !LWIP_TIMERS */
/**
* The main lwIP thread. This thread has exclusive access to lwIP core functions
 * (unless other threads share access through core locking). Other threads communicate with this
* thread using message boxes.
*
* It also starts all the timers to make sure they are running in the right
* thread context.
*
* @param arg unused argument
*/
static void
tcpip_thread(void *arg)
{
struct tcpip_msg *msg;
LWIP_UNUSED_ARG(arg);
LWIP_MARK_TCPIP_THREAD();
LOCK_TCPIP_CORE();
if (tcpip_init_done != NULL) {
tcpip_init_done(tcpip_init_done_arg);
}
while (1) { /* MAIN Loop */
LWIP_TCPIP_THREAD_ALIVE();
/* wait for a message, timeouts are processed while waiting */
TCPIP_MBOX_FETCH(&tcpip_mbox, (void **)&msg);
if (msg == NULL) {
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n"));
LWIP_ASSERT("tcpip_thread: invalid message", 0);
continue;
}
tcpip_thread_handle_msg(msg);
}
}
/* Handle a single tcpip_msg
* This is in its own function for access by tests only.
*/
static void
tcpip_thread_handle_msg(struct tcpip_msg *msg)
{
switch (msg->type) {
#if !LWIP_TCPIP_CORE_LOCKING
case TCPIP_MSG_API:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg));
msg->msg.api_msg.function(msg->msg.api_msg.msg);
break;
case TCPIP_MSG_API_CALL:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg));
msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg);
sys_sem_signal(msg->msg.api_call.sem);
break;
#endif /* !LWIP_TCPIP_CORE_LOCKING */
#if !LWIP_TCPIP_CORE_LOCKING_INPUT
case TCPIP_MSG_INPKT:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg));
if (msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif) != ERR_OK) {
pbuf_free(msg->msg.inp.p);
}
memp_free(MEMP_TCPIP_MSG_INPKT, msg);
break;
#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */
#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS
case TCPIP_MSG_TIMEOUT:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg));
sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg);
memp_free(MEMP_TCPIP_MSG_API, msg);
break;
case TCPIP_MSG_UNTIMEOUT:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg));
sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg);
memp_free(MEMP_TCPIP_MSG_API, msg);
break;
#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */
case TCPIP_MSG_CALLBACK:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg));
msg->msg.cb.function(msg->msg.cb.ctx);
memp_free(MEMP_TCPIP_MSG_API, msg);
break;
case TCPIP_MSG_CALLBACK_STATIC:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg));
msg->msg.cb.function(msg->msg.cb.ctx);
break;
default:
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type));
LWIP_ASSERT("tcpip_thread: invalid message", 0);
break;
}
}
#ifdef TCPIP_THREAD_TEST
/** Work on queued items in single-threaded test mode */
int
tcpip_thread_poll_one(void)
{
int ret = 0;
struct tcpip_msg *msg;
if (sys_arch_mbox_tryfetch(&tcpip_mbox, (void **)&msg) != SYS_ARCH_TIMEOUT) {
LOCK_TCPIP_CORE();
if (msg != NULL) {
tcpip_thread_handle_msg(msg);
ret = 1;
}
UNLOCK_TCPIP_CORE();
}
return ret;
}
#endif
/**
* Pass a received packet to tcpip_thread for input processing
*
* @param p the received packet
* @param inp the network interface on which the packet was received
* @param input_fn input function to call
*/
err_t
tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn)
{
#if LWIP_TCPIP_CORE_LOCKING_INPUT
err_t ret;
LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp));
LOCK_TCPIP_CORE();
ret = input_fn(p, inp);
UNLOCK_TCPIP_CORE();
return ret;
#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */
struct tcpip_msg *msg;
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT);
if (msg == NULL) {
return ERR_MEM;
}
msg->type = TCPIP_MSG_INPKT;
msg->msg.inp.p = p;
msg->msg.inp.netif = inp;
msg->msg.inp.input_fn = input_fn;
if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) {
memp_free(MEMP_TCPIP_MSG_INPKT, msg);
return ERR_MEM;
}
return ERR_OK;
#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */
}
/**
* @ingroup lwip_os
* Pass a received packet to tcpip_thread for input processing with
* ethernet_input or ip_input. Don't call directly, pass to netif_add()
* and call netif->input().
*
* @param p the received packet, p->payload pointing to the Ethernet header or
* to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or
* NETIF_FLAG_ETHERNET flags)
* @param inp the network interface on which the packet was received
*/
err_t
tcpip_input(struct pbuf *p, struct netif *inp)
{
#if LWIP_ETHERNET
if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) {
return tcpip_inpkt(p, inp, ethernet_input);
} else
#endif /* LWIP_ETHERNET */
return tcpip_inpkt(p, inp, ip_input);
}
/**
* @ingroup lwip_os
* Call a specific function in the thread context of
* tcpip_thread for easy access synchronization.
* A function called in that way may access lwIP core code
* without fearing concurrent access.
* Blocks until the request is posted.
* Must not be called from interrupt context!
*
* @param function the function to call
 * @param ctx parameter passed to function
* @return ERR_OK if the function was called, another err_t if not
*
* @see tcpip_try_callback
*/
err_t
tcpip_callback(tcpip_callback_fn function, void *ctx)
{
struct tcpip_msg *msg;
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
if (msg == NULL) {
return ERR_MEM;
}
msg->type = TCPIP_MSG_CALLBACK;
msg->msg.cb.function = function;
msg->msg.cb.ctx = ctx;
sys_mbox_post(&tcpip_mbox, msg);
return ERR_OK;
}
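/* Usage sketch (hypothetical application code, not part of this file):
 * hand a function over to tcpip_thread so it can touch lwIP core data
 * without extra locking; my_work is an assumed callback.
 *
 *   static void my_work(void *ctx)
 *   {
 *     // executes in tcpip_thread context
 *   }
 *
 *   tcpip_callback(my_work, NULL);
 */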
/**
* @ingroup lwip_os
* Call a specific function in the thread context of
* tcpip_thread for easy access synchronization.
* A function called in that way may access lwIP core code
* without fearing concurrent access.
* Does NOT block when the request cannot be posted because the
* tcpip_mbox is full, but returns ERR_MEM instead.
* Can be called from interrupt context.
*
* @param function the function to call
 * @param ctx parameter passed to function
* @return ERR_OK if the function was called, another err_t if not
*
* @see tcpip_callback
*/
err_t
tcpip_try_callback(tcpip_callback_fn function, void *ctx)
{
struct tcpip_msg *msg;
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
if (msg == NULL) {
return ERR_MEM;
}
msg->type = TCPIP_MSG_CALLBACK;
msg->msg.cb.function = function;
msg->msg.cb.ctx = ctx;
if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) {
memp_free(MEMP_TCPIP_MSG_API, msg);
return ERR_MEM;
}
return ERR_OK;
}
#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS
/**
* call sys_timeout in tcpip_thread
*
* @param msecs time in milliseconds for timeout
* @param h function to be called on timeout
* @param arg argument to pass to timeout function h
* @return ERR_MEM on memory error, ERR_OK otherwise
*/
err_t
tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg)
{
struct tcpip_msg *msg;
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
if (msg == NULL) {
return ERR_MEM;
}
msg->type = TCPIP_MSG_TIMEOUT;
msg->msg.tmo.msecs = msecs;
msg->msg.tmo.h = h;
msg->msg.tmo.arg = arg;
sys_mbox_post(&tcpip_mbox, msg);
return ERR_OK;
}
/**
* call sys_untimeout in tcpip_thread
*
* @param h function to be called on timeout
* @param arg argument to pass to timeout function h
* @return ERR_MEM on memory error, ERR_OK otherwise
*/
err_t
tcpip_untimeout(sys_timeout_handler h, void *arg)
{
struct tcpip_msg *msg;
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
if (msg == NULL) {
return ERR_MEM;
}
msg->type = TCPIP_MSG_UNTIMEOUT;
msg->msg.tmo.h = h;
msg->msg.tmo.arg = arg;
sys_mbox_post(&tcpip_mbox, msg);
return ERR_OK;
}
#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */
/**
 * Sends a message to the TCPIP thread to call a function. The caller thread blocks
 * on a provided semaphore, which is NOT automatically signalled by the TCPIP thread;
 * this has to be done by the user.
* It is recommended to use LWIP_TCPIP_CORE_LOCKING since this is the way
* with least runtime overhead.
*
* @param fn function to be called from TCPIP thread
* @param apimsg argument to API function
* @param sem semaphore to wait on
* @return ERR_OK if the function was called, another err_t if not
*/
err_t
tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t *sem)
{
#if LWIP_TCPIP_CORE_LOCKING
LWIP_UNUSED_ARG(sem);
LOCK_TCPIP_CORE();
fn(apimsg);
UNLOCK_TCPIP_CORE();
return ERR_OK;
#else /* LWIP_TCPIP_CORE_LOCKING */
TCPIP_MSG_VAR_DECLARE(msg);
LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem));
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
TCPIP_MSG_VAR_ALLOC(msg);
TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API;
TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn;
TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg;
sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg));
sys_arch_sem_wait(sem, 0);
TCPIP_MSG_VAR_FREE(msg);
return ERR_OK;
#endif /* LWIP_TCPIP_CORE_LOCKING */
}
/**
* Synchronously calls function in TCPIP thread and waits for its completion.
* It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or
* LWIP_NETCONN_SEM_PER_THREAD.
 * If not, a semaphore is created and destroyed on every call, which is usually
* an expensive/slow operation.
* @param fn Function to call
* @param call Call parameters
* @return Return value from tcpip_api_call_fn
*/
err_t
tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call)
{
#if LWIP_TCPIP_CORE_LOCKING
err_t err;
LOCK_TCPIP_CORE();
err = fn(call);
UNLOCK_TCPIP_CORE();
return err;
#else /* LWIP_TCPIP_CORE_LOCKING */
TCPIP_MSG_VAR_DECLARE(msg);
#if !LWIP_NETCONN_SEM_PER_THREAD
err_t err = sys_sem_new(&call->sem, 0);
if (err != ERR_OK) {
return err;
}
#endif /* LWIP_NETCONN_SEM_PER_THREAD */
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
TCPIP_MSG_VAR_ALLOC(msg);
TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL;
TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call;
TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn;
#if LWIP_NETCONN_SEM_PER_THREAD
TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET();
#else /* LWIP_NETCONN_SEM_PER_THREAD */
TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem;
#endif /* LWIP_NETCONN_SEM_PER_THREAD */
sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg));
sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0);
TCPIP_MSG_VAR_FREE(msg);
#if !LWIP_NETCONN_SEM_PER_THREAD
sys_sem_free(&call->sem);
#endif /* LWIP_NETCONN_SEM_PER_THREAD */
return call->err;
#endif /* LWIP_TCPIP_CORE_LOCKING */
}
/**
* @ingroup lwip_os
* Allocate a structure for a static callback message and initialize it.
* The message has a special type such that lwIP never frees it.
* This is intended to be used to send "static" messages from interrupt context,
* e.g. the message is allocated once and posted several times from an IRQ
* using tcpip_callbackmsg_trycallback().
* Example usage: Trigger execution of an ethernet IRQ DPC routine in lwIP thread context.
*
* @param function the function to call
* @param ctx parameter passed to function
* @return a struct pointer to pass to tcpip_callbackmsg_trycallback().
*
* @see tcpip_callbackmsg_trycallback()
* @see tcpip_callbackmsg_delete()
*/
struct tcpip_callback_msg *
tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx)
{
struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API);
if (msg == NULL) {
return NULL;
}
msg->type = TCPIP_MSG_CALLBACK_STATIC;
msg->msg.cb.function = function;
msg->msg.cb.ctx = ctx;
return (struct tcpip_callback_msg *)msg;
}
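/* Usage sketch (hypothetical driver code, not part of this file):
 * allocate the message once at init time, then post it repeatedly from an
 * ISR; my_dpc is an assumed deferred-processing function.
 *
 *   static struct tcpip_callback_msg *dpc_msg;   // created once, never freed
 *
 *   void my_driver_init(void) {
 *     dpc_msg = tcpip_callbackmsg_new(my_dpc, NULL);
 *   }
 *
 *   void my_driver_isr(void) {
 *     tcpip_callbackmsg_trycallback_fromisr(dpc_msg);   // never blocks
 *   }
 */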
/**
* @ingroup lwip_os
* Free a callback message allocated by tcpip_callbackmsg_new().
*
* @param msg the message to free
*
* @see tcpip_callbackmsg_new()
*/
void
tcpip_callbackmsg_delete(struct tcpip_callback_msg *msg)
{
memp_free(MEMP_TCPIP_MSG_API, msg);
}
/**
* @ingroup lwip_os
* Try to post a callback-message to the tcpip_thread tcpip_mbox.
*
* @param msg pointer to the message to post
* @return sys_mbox_trypost() return code
*
* @see tcpip_callbackmsg_new()
*/
err_t
tcpip_callbackmsg_trycallback(struct tcpip_callback_msg *msg)
{
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
return sys_mbox_trypost(&tcpip_mbox, msg);
}
/**
* @ingroup lwip_os
* Try to post a callback-message to the tcpip_thread mbox.
* Same as @ref tcpip_callbackmsg_trycallback but calls sys_mbox_trypost_fromisr(),
* mainly to help FreeRTOS, where calls differ between task level and ISR level.
*
* @param msg pointer to the message to post
* @return sys_mbox_trypost_fromisr() return code (without change, so this
* knowledge can be used to e.g. propagate "bool needs_scheduling")
*
* @see tcpip_callbackmsg_new()
*/
err_t
tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg *msg)
{
LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox));
return sys_mbox_trypost_fromisr(&tcpip_mbox, msg);
}
/**
* @ingroup lwip_os
* Initialize this module:
* - initialize all sub modules
* - start the tcpip_thread
*
* @param initfunc a function to call when tcpip_thread is running and finished initializing
* @param arg argument to pass to initfunc
*/
void
tcpip_init(tcpip_init_done_fn initfunc, void *arg)
{
lwip_init();
tcpip_init_done = initfunc;
tcpip_init_done_arg = arg;
if (sys_mbox_new(&tcpip_mbox, TCPIP_MBOX_SIZE) != ERR_OK) {
LWIP_ASSERT("failed to create tcpip_thread mbox", 0);
}
#if LWIP_TCPIP_CORE_LOCKING
if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) {
LWIP_ASSERT("failed to create lock_tcpip_core", 0);
}
#endif /* LWIP_TCPIP_CORE_LOCKING */
sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO);
}
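/* Usage sketch (hypothetical application startup code, not part of this
 * file): block the main thread until the stack has finished initializing.
 *
 *   static sys_sem_t init_sem;
 *
 *   static void init_done(void *arg)
 *   {
 *     sys_sem_signal((sys_sem_t *)arg);   // runs in tcpip_thread context
 *   }
 *
 *   sys_sem_new(&init_sem, 0);
 *   tcpip_init(init_done, &init_sem);
 *   sys_arch_sem_wait(&init_sem, 0);      // from here on, lwIP is up
 */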
/**
* Simple callback function used with tcpip_callback to free a pbuf
* (pbuf_free has a wrong signature for tcpip_callback)
*
* @param p The pbuf (chain) to be dereferenced.
*/
static void
pbuf_free_int(void *p)
{
struct pbuf *q = (struct pbuf *)p;
pbuf_free(q);
}
/**
* A simple wrapper function that allows you to free a pbuf from interrupt context.
*
* @param p The pbuf (chain) to be dereferenced.
* @return ERR_OK if callback could be enqueued, an err_t if not
*/
err_t
pbuf_free_callback(struct pbuf *p)
{
return tcpip_try_callback(pbuf_free_int, p);
}
/**
* A simple wrapper function that allows you to free heap memory from
* interrupt context.
*
* @param m the heap memory to free
* @return ERR_OK if callback could be enqueued, an err_t if not
*/
err_t
mem_free_callback(void *m)
{
return tcpip_try_callback(mem_free, m);
}
#endif /* !NO_SYS */


@@ -1,681 +0,0 @@
/**
* @file
* @defgroup altcp Application layered TCP Functions
* @ingroup altcp_api
*
* This file contains the common functions for altcp to work.
* For more details see @ref altcp_api.
*/
/**
* @defgroup altcp_api Application layered TCP Introduction
* @ingroup callbackstyle_api
*
* Overview
* --------
* altcp (application layered TCP connection API; to be used from TCPIP thread)
* is an abstraction layer that prevents applications linking hard against the
* @ref tcp.h functions while providing the same functionality. It is used to
* e.g. add SSL/TLS (see LWIP_ALTCP_TLS) or proxy-connect support to an application
* written for the tcp callback API without that application knowing the
* protocol details.
*
* * This interface mimics the tcp callback API to the application while preventing
* direct linking (much like virtual functions).
* * This way, an application can make use of other application layer protocols
* on top of TCP without knowing the details (e.g. TLS, proxy connection).
* * This is achieved by simply including "lwip/altcp.h" instead of "lwip/tcp.h",
* replacing "struct tcp_pcb" with "struct altcp_pcb" and prefixing all functions
* with "altcp_" instead of "tcp_".
*
* With altcp support disabled (LWIP_ALTCP==0), applications written against the
* altcp API can still be compiled but are directly linked against the tcp.h
* callback API and then cannot use layered protocols. To minimize code changes
* in this case, the use of altcp_allocators is strongly suggested.
*
* Usage
* -----
* To make use of this API from an existing tcp raw API application:
* * Include "lwip/altcp.h" instead of "lwip/tcp.h"
* * Replace "struct tcp_pcb" with "struct altcp_pcb"
* * Prefix all called tcp API functions with "altcp_" instead of "tcp_" to link
* against the altcp functions
* * @ref altcp_new (and @ref altcp_new_ip_type/@ref altcp_new_ip6) take
* an @ref altcp_allocator_t as an argument, whereas the original tcp API
* functions take no arguments.
* * An @ref altcp_allocator_t allocator is an object that holds a pointer to an
* allocator object and a corresponding state (e.g. for TLS, the corresponding
* state may hold certificates or keys). This way, the application does not
* even need to know if it uses TLS or pure TCP, this is handled at runtime
* by passing a specific allocator.
* * An application can alternatively bind hard to the altcp_tls API by calling
* @ref altcp_tls_new or @ref altcp_tls_wrap.
* * The TLS layer is not directly implemented by lwIP, but a port to mbedTLS is
* provided.
* * Another altcp layer is proxy-connect to use TLS behind a HTTP proxy (see
* @ref altcp_proxyconnect.h)
*
* altcp_allocator_t
* -----------------
* An altcp allocator is created by the application by combining an allocator
* callback function and a corresponding state, e.g.:\code{.c}
* static const unsigned char cert[] = {0x2D, ... (see mbedTLS doc for how to create this)};
* struct altcp_tls_config * conf = altcp_tls_create_config_client(cert, sizeof(cert));
* altcp_allocator_t tls_allocator = {
* altcp_tls_alloc, conf
* };
* \endcode
*
*
* struct altcp_tls_config
* -----------------------
* The struct altcp_tls_config holds state that is needed to create new TLS client
* or server connections (e.g. certificates and private keys).
*
* It is not defined by lwIP itself but by the TLS port (e.g. altcp_tls to mbedTLS
 * adaptation). However, the parameters used to create it are defined in @ref
* altcp_tls.h (see @ref altcp_tls_create_config_server_privkey_cert for servers
* and @ref altcp_tls_create_config_client/@ref altcp_tls_create_config_client_2wayauth
* for clients).
*
* For mbedTLS, ensure that certificates can be parsed by 'mbedtls_x509_crt_parse()' and
* private keys can be parsed by 'mbedtls_pk_parse_key()'.
*/
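/* Example (illustrative sketch, not part of the original file, assuming the
 * "my_*" callbacks exist in the application): connecting over plain TCP
 * through the altcp API by passing the stock altcp_tcp_alloc allocator.
 * Swapping in altcp_tls_alloc (with a TLS config as the allocator argument)
 * would upgrade the same code to TLS. */
#if 0 /* usage sketch */
static err_t
my_connected(void *arg, struct altcp_pcb *pcb, err_t err)
{
LWIP_UNUSED_ARG(arg);
if (err == ERR_OK) {
err = altcp_write(pcb, "hello", 5, TCP_WRITE_FLAG_COPY);
}
return err;
}
static void
my_connect(const ip_addr_t *addr)
{
altcp_allocator_t tcp_allocator = { altcp_tcp_alloc, NULL };
struct altcp_pcb *pcb = altcp_new(&tcp_allocator);
if (pcb != NULL) {
altcp_connect(pcb, addr, 80, my_connected);
}
}
#endif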
/*
* Copyright (c) 2017 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
#include "lwip/altcp.h"
#include "lwip/priv/altcp_priv.h"
#include "lwip/altcp_tcp.h"
#include "lwip/tcp.h"
#include "lwip/mem.h"
#include <string.h>
extern const struct altcp_functions altcp_tcp_functions;
/**
* For altcp layer implementations only: allocate a new struct altcp_pcb from the pool
* and zero the memory
*/
struct altcp_pcb *
altcp_alloc(void)
{
struct altcp_pcb *ret = (struct altcp_pcb *)memp_malloc(MEMP_ALTCP_PCB);
if (ret != NULL) {
memset(ret, 0, sizeof(struct altcp_pcb));
}
return ret;
}
/**
* For altcp layer implementations only: return a struct altcp_pcb to the pool
*/
void
altcp_free(struct altcp_pcb *conn)
{
if (conn) {
if (conn->fns && conn->fns->dealloc) {
conn->fns->dealloc(conn);
}
memp_free(MEMP_ALTCP_PCB, conn);
}
}
/**
* @ingroup altcp
* altcp_new_ip6: @ref altcp_new for IPv6
*/
struct altcp_pcb *
altcp_new_ip6(altcp_allocator_t *allocator)
{
return altcp_new_ip_type(allocator, IPADDR_TYPE_V6);
}
/**
* @ingroup altcp
* altcp_new: @ref altcp_new for IPv4
*/
struct altcp_pcb *
altcp_new(altcp_allocator_t *allocator)
{
return altcp_new_ip_type(allocator, IPADDR_TYPE_V4);
}
/**
* @ingroup altcp
* altcp_new_ip_type: called by applications to allocate a new pcb with the help of an
* allocator function.
*
* @param allocator allocator function and argument
* @param ip_type IP version of the pcb (@ref lwip_ip_addr_type)
* @return a new altcp_pcb or NULL on error
*/
struct altcp_pcb *
altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type)
{
struct altcp_pcb *conn;
if (allocator == NULL) {
/* no allocator given, create a simple TCP connection */
return altcp_tcp_new_ip_type(ip_type);
}
if (allocator->alloc == NULL) {
/* illegal allocator */
return NULL;
}
conn = allocator->alloc(allocator->arg, ip_type);
if (conn == NULL) {
/* allocation failed */
return NULL;
}
return conn;
}
/**
* @ingroup altcp
* @see tcp_arg()
*/
void
altcp_arg(struct altcp_pcb *conn, void *arg)
{
if (conn) {
conn->arg = arg;
}
}
/**
* @ingroup altcp
* @see tcp_accept()
*/
void
altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept)
{
if (conn != NULL) {
conn->accept = accept;
}
}
/**
* @ingroup altcp
* @see tcp_recv()
*/
void
altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv)
{
if (conn) {
conn->recv = recv;
}
}
/**
* @ingroup altcp
* @see tcp_sent()
*/
void
altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent)
{
if (conn) {
conn->sent = sent;
}
}
/**
* @ingroup altcp
* @see tcp_poll()
*/
void
altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval)
{
if (conn) {
conn->poll = poll;
conn->pollinterval = interval;
if (conn->fns && conn->fns->set_poll) {
conn->fns->set_poll(conn, interval);
}
}
}
/**
* @ingroup altcp
* @see tcp_err()
*/
void
altcp_err(struct altcp_pcb *conn, altcp_err_fn err)
{
if (conn) {
conn->err = err;
}
}
/* Generic functions calling the "virtual" ones */
/**
* @ingroup altcp
* @see tcp_recved()
*/
void
altcp_recved(struct altcp_pcb *conn, u16_t len)
{
if (conn && conn->fns && conn->fns->recved) {
conn->fns->recved(conn, len);
}
}
/**
* @ingroup altcp
* @see tcp_bind()
*/
err_t
altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port)
{
if (conn && conn->fns && conn->fns->bind) {
return conn->fns->bind(conn, ipaddr, port);
}
return ERR_VAL;
}
/**
* @ingroup altcp
* @see tcp_connect()
*/
err_t
altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected)
{
if (conn && conn->fns && conn->fns->connect) {
return conn->fns->connect(conn, ipaddr, port, connected);
}
return ERR_VAL;
}
/**
* @ingroup altcp
* @see tcp_listen_with_backlog_and_err()
*/
struct altcp_pcb *
altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err)
{
if (conn && conn->fns && conn->fns->listen) {
return conn->fns->listen(conn, backlog, err);
}
return NULL;
}
/**
* @ingroup altcp
* @see tcp_abort()
*/
void
altcp_abort(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->abort) {
conn->fns->abort(conn);
}
}
/**
* @ingroup altcp
* @see tcp_close()
*/
err_t
altcp_close(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->close) {
return conn->fns->close(conn);
}
return ERR_VAL;
}
/**
* @ingroup altcp
* @see tcp_shutdown()
*/
err_t
altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx)
{
if (conn && conn->fns && conn->fns->shutdown) {
return conn->fns->shutdown(conn, shut_rx, shut_tx);
}
return ERR_VAL;
}
/**
* @ingroup altcp
* @see tcp_write()
*/
err_t
altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags)
{
if (conn && conn->fns && conn->fns->write) {
return conn->fns->write(conn, dataptr, len, apiflags);
}
return ERR_VAL;
}
/**
* @ingroup altcp
* @see tcp_output()
*/
err_t
altcp_output(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->output) {
return conn->fns->output(conn);
}
return ERR_VAL;
}
/**
* @ingroup altcp
* @see tcp_mss()
*/
u16_t
altcp_mss(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->mss) {
return conn->fns->mss(conn);
}
return 0;
}
/**
* @ingroup altcp
* @see tcp_sndbuf()
*/
u16_t
altcp_sndbuf(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->sndbuf) {
return conn->fns->sndbuf(conn);
}
return 0;
}
/**
* @ingroup altcp
* @see tcp_sndqueuelen()
*/
u16_t
altcp_sndqueuelen(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->sndqueuelen) {
return conn->fns->sndqueuelen(conn);
}
return 0;
}
void
altcp_nagle_disable(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->nagle_disable) {
conn->fns->nagle_disable(conn);
}
}
void
altcp_nagle_enable(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->nagle_enable) {
conn->fns->nagle_enable(conn);
}
}
int
altcp_nagle_disabled(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->nagle_disabled) {
return conn->fns->nagle_disabled(conn);
}
return 0;
}
/**
* @ingroup altcp
* @see tcp_setprio()
*/
void
altcp_setprio(struct altcp_pcb *conn, u8_t prio)
{
if (conn && conn->fns && conn->fns->setprio) {
conn->fns->setprio(conn, prio);
}
}
err_t
altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port)
{
if (conn && conn->fns && conn->fns->addrinfo) {
return conn->fns->addrinfo(conn, local, addr, port);
}
return ERR_VAL;
}
ip_addr_t *
altcp_get_ip(struct altcp_pcb *conn, int local)
{
if (conn && conn->fns && conn->fns->getip) {
return conn->fns->getip(conn, local);
}
return NULL;
}
u16_t
altcp_get_port(struct altcp_pcb *conn, int local)
{
if (conn && conn->fns && conn->fns->getport) {
return conn->fns->getport(conn, local);
}
return 0;
}
#ifdef LWIP_DEBUG
enum tcp_state
altcp_dbg_get_tcp_state(struct altcp_pcb *conn)
{
if (conn && conn->fns && conn->fns->dbg_get_tcp_state) {
return conn->fns->dbg_get_tcp_state(conn);
}
return CLOSED;
}
#endif
/* Default implementations for the "virtual" functions */
void
altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval)
{
if (conn && conn->inner_conn) {
altcp_poll(conn->inner_conn, conn->poll, interval);
}
}
void
altcp_default_recved(struct altcp_pcb *conn, u16_t len)
{
if (conn && conn->inner_conn) {
altcp_recved(conn->inner_conn, len);
}
}
err_t
altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port)
{
if (conn && conn->inner_conn) {
return altcp_bind(conn->inner_conn, ipaddr, port);
}
return ERR_VAL;
}
err_t
altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx)
{
if (conn) {
if (shut_rx && shut_tx && conn->fns && conn->fns->close) {
/* default shutdown for both sides is close */
return conn->fns->close(conn);
}
if (conn->inner_conn) {
return altcp_shutdown(conn->inner_conn, shut_rx, shut_tx);
}
}
return ERR_VAL;
}
err_t
altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags)
{
if (conn && conn->inner_conn) {
return altcp_write(conn->inner_conn, dataptr, len, apiflags);
}
return ERR_VAL;
}
err_t
altcp_default_output(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
return altcp_output(conn->inner_conn);
}
return ERR_VAL;
}
u16_t
altcp_default_mss(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
return altcp_mss(conn->inner_conn);
}
return 0;
}
u16_t
altcp_default_sndbuf(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
return altcp_sndbuf(conn->inner_conn);
}
return 0;
}
u16_t
altcp_default_sndqueuelen(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
return altcp_sndqueuelen(conn->inner_conn);
}
return 0;
}
void
altcp_default_nagle_disable(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
altcp_nagle_disable(conn->inner_conn);
}
}
void
altcp_default_nagle_enable(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
altcp_nagle_enable(conn->inner_conn);
}
}
int
altcp_default_nagle_disabled(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
return altcp_nagle_disabled(conn->inner_conn);
}
return 0;
}
void
altcp_default_setprio(struct altcp_pcb *conn, u8_t prio)
{
if (conn && conn->inner_conn) {
altcp_setprio(conn->inner_conn, prio);
}
}
void
altcp_default_dealloc(struct altcp_pcb *conn)
{
LWIP_UNUSED_ARG(conn);
/* nothing to do */
}
err_t
altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port)
{
if (conn && conn->inner_conn) {
return altcp_get_tcp_addrinfo(conn->inner_conn, local, addr, port);
}
return ERR_VAL;
}
ip_addr_t *
altcp_default_get_ip(struct altcp_pcb *conn, int local)
{
if (conn && conn->inner_conn) {
return altcp_get_ip(conn->inner_conn, local);
}
return NULL;
}
u16_t
altcp_default_get_port(struct altcp_pcb *conn, int local)
{
if (conn && conn->inner_conn) {
return altcp_get_port(conn->inner_conn, local);
}
return 0;
}
#ifdef LWIP_DEBUG
enum tcp_state
altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn)
{
if (conn && conn->inner_conn) {
return altcp_dbg_get_tcp_state(conn->inner_conn);
}
return CLOSED;
}
#endif
#endif /* LWIP_ALTCP */

View File

@ -1,87 +0,0 @@
/**
* @file
* Application layered TCP connection API (to be used from TCPIP thread)\n
* This interface mimics the tcp callback API to the application while preventing
* direct linking (much like virtual functions).
* This way, an application can make use of other application layer protocols
* on top of TCP without knowing the details (e.g. TLS, proxy connection).
*
 * This file contains allocation implementations that combine several layers.
*/
/*
* Copyright (c) 2017 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
#include "lwip/altcp.h"
#include "lwip/altcp_tcp.h"
#include "lwip/altcp_tls.h"
#include "lwip/priv/altcp_priv.h"
#include "lwip/mem.h"
#include <string.h>
#if LWIP_ALTCP_TLS
/** This standard allocator function creates an altcp pcb for
* TLS over TCP */
struct altcp_pcb *
altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type)
{
struct altcp_pcb *inner_conn, *ret;
LWIP_UNUSED_ARG(ip_type);
inner_conn = altcp_tcp_new_ip_type(ip_type);
if (inner_conn == NULL) {
return NULL;
}
ret = altcp_tls_wrap(config, inner_conn);
if (ret == NULL) {
altcp_close(inner_conn);
}
return ret;
}
/** This standard allocator function creates an altcp pcb for
* TLS over TCP */
struct altcp_pcb *
altcp_tls_alloc(void *arg, u8_t ip_type)
{
return altcp_tls_new((struct altcp_tls_config *)arg, ip_type);
}
#endif /* LWIP_ALTCP_TLS */
#endif /* LWIP_ALTCP */

View File

@ -1,543 +0,0 @@
/**
* @file
* Application layered TCP connection API (to be used from TCPIP thread)\n
* This interface mimics the tcp callback API to the application while preventing
* direct linking (much like virtual functions).
* This way, an application can make use of other application layer protocols
* on top of TCP without knowing the details (e.g. TLS, proxy connection).
*
* This file contains the base implementation calling into tcp.
*/
/*
* Copyright (c) 2017 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
#include "lwip/altcp.h"
#include "lwip/altcp_tcp.h"
#include "lwip/priv/altcp_priv.h"
#include "lwip/tcp.h"
#include "lwip/mem.h"
#include <string.h>
#define ALTCP_TCP_ASSERT_CONN(conn) do { \
LWIP_ASSERT("conn->inner_conn == NULL", (conn)->inner_conn == NULL); \
LWIP_UNUSED_ARG(conn); /* for LWIP_NOASSERT */ } while(0)
#define ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb) do { \
LWIP_ASSERT("pcb mismatch", (conn)->state == tpcb); \
LWIP_UNUSED_ARG(tpcb); /* for LWIP_NOASSERT */ \
ALTCP_TCP_ASSERT_CONN(conn); } while(0)
/* Forward declaration of the functions table; the actual definition is at the
end of this file since it contains pointers to static functions declared here */
extern const struct altcp_functions altcp_tcp_functions;
static void altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb);
/* callback functions for TCP */
static err_t
altcp_tcp_accept(void *arg, struct tcp_pcb *new_tpcb, err_t err)
{
struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg;
if (listen_conn && listen_conn->accept) {
/* create a new altcp_conn to pass to the next 'accept' callback */
struct altcp_pcb *new_conn = altcp_alloc();
if (new_conn == NULL) {
return ERR_MEM;
}
altcp_tcp_setup(new_conn, new_tpcb);
return listen_conn->accept(listen_conn->arg, new_conn, err);
}
return ERR_ARG;
}
static err_t
altcp_tcp_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
{
struct altcp_pcb *conn = (struct altcp_pcb *)arg;
if (conn) {
ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
if (conn->connected) {
return conn->connected(conn->arg, conn, err);
}
}
return ERR_OK;
}
static err_t
altcp_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
struct altcp_pcb *conn = (struct altcp_pcb *)arg;
if (conn) {
ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
if (conn->recv) {
return conn->recv(conn->arg, conn, p, err);
}
}
if (p != NULL) {
/* prevent memory leaks */
pbuf_free(p);
}
return ERR_OK;
}
static err_t
altcp_tcp_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
struct altcp_pcb *conn = (struct altcp_pcb *)arg;
if (conn) {
ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
if (conn->sent) {
return conn->sent(conn->arg, conn, len);
}
}
return ERR_OK;
}
static err_t
altcp_tcp_poll(void *arg, struct tcp_pcb *tpcb)
{
struct altcp_pcb *conn = (struct altcp_pcb *)arg;
if (conn) {
ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
if (conn->poll) {
return conn->poll(conn->arg, conn);
}
}
return ERR_OK;
}
static void
altcp_tcp_err(void *arg, err_t err)
{
struct altcp_pcb *conn = (struct altcp_pcb *)arg;
if (conn) {
conn->state = NULL; /* already freed */
if (conn->err) {
conn->err(conn->arg, err);
}
altcp_free(conn);
}
}
/* setup functions */
static void
altcp_tcp_remove_callbacks(struct tcp_pcb *tpcb)
{
tcp_arg(tpcb, NULL);
tcp_recv(tpcb, NULL);
tcp_sent(tpcb, NULL);
tcp_err(tpcb, NULL);
tcp_poll(tpcb, NULL, tpcb->pollinterval);
}
static void
altcp_tcp_setup_callbacks(struct altcp_pcb *conn, struct tcp_pcb *tpcb)
{
tcp_arg(tpcb, conn);
tcp_recv(tpcb, altcp_tcp_recv);
tcp_sent(tpcb, altcp_tcp_sent);
tcp_err(tpcb, altcp_tcp_err);
/* tcp_poll is set when interval is set by application */
/* listen is set totally different :-) */
}
static void
altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb)
{
altcp_tcp_setup_callbacks(conn, tpcb);
conn->state = tpcb;
conn->fns = &altcp_tcp_functions;
}
struct altcp_pcb *
altcp_tcp_new_ip_type(u8_t ip_type)
{
/* Allocate the tcp pcb first to invoke the priority handling code
if we're out of pcbs */
struct tcp_pcb *tpcb = tcp_new_ip_type(ip_type);
if (tpcb != NULL) {
struct altcp_pcb *ret = altcp_alloc();
if (ret != NULL) {
altcp_tcp_setup(ret, tpcb);
return ret;
} else {
/* altcp_pcb allocation failed -> free the tcp_pcb too */
tcp_close(tpcb);
}
}
return NULL;
}
/** altcp_tcp allocator function fitting to @ref altcp_allocator_t / @ref altcp_new.
*
* arg pointer is not used for TCP.
*/
struct altcp_pcb *
altcp_tcp_alloc(void *arg, u8_t ip_type)
{
LWIP_UNUSED_ARG(arg);
return altcp_tcp_new_ip_type(ip_type);
}
struct altcp_pcb *
altcp_tcp_wrap(struct tcp_pcb *tpcb)
{
if (tpcb != NULL) {
struct altcp_pcb *ret = altcp_alloc();
if (ret != NULL) {
altcp_tcp_setup(ret, tpcb);
return ret;
}
}
return NULL;
}
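/* Example (illustrative sketch, not part of the original file): retrofitting
 * an existing raw-API pcb into the altcp world, e.g. to reuse altcp-based
 * application code. The wrapper name is hypothetical. */
#if 0 /* usage sketch */
static struct altcp_pcb *
my_wrap_existing(struct tcp_pcb *tpcb)
{
struct altcp_pcb *conn = altcp_tcp_wrap(tpcb);
if (conn == NULL) {
/* allocation failed; tpcb is untouched and still owned by the caller */
}
return conn;
}
#endif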
/* "virtual" functions calling into tcp */
static void
altcp_tcp_set_poll(struct altcp_pcb *conn, u8_t interval)
{
if (conn != NULL) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
tcp_poll(pcb, altcp_tcp_poll, interval);
}
}
static void
altcp_tcp_recved(struct altcp_pcb *conn, u16_t len)
{
if (conn != NULL) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
tcp_recved(pcb, len);
}
}
static err_t
altcp_tcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return ERR_VAL;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
return tcp_bind(pcb, ipaddr, port);
}
static err_t
altcp_tcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return ERR_VAL;
}
ALTCP_TCP_ASSERT_CONN(conn);
conn->connected = connected;
pcb = (struct tcp_pcb *)conn->state;
return tcp_connect(pcb, ipaddr, port, altcp_tcp_connected);
}
static struct altcp_pcb *
altcp_tcp_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err)
{
struct tcp_pcb *pcb;
struct tcp_pcb *lpcb;
if (conn == NULL) {
return NULL;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
lpcb = tcp_listen_with_backlog_and_err(pcb, backlog, err);
if (lpcb != NULL) {
conn->state = lpcb;
tcp_accept(lpcb, altcp_tcp_accept);
return conn;
}
return NULL;
}
static void
altcp_tcp_abort(struct altcp_pcb *conn)
{
if (conn != NULL) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
if (pcb) {
tcp_abort(pcb);
}
}
}
static err_t
altcp_tcp_close(struct altcp_pcb *conn)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return ERR_VAL;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
if (pcb) {
err_t err;
tcp_poll_fn oldpoll = pcb->poll;
altcp_tcp_remove_callbacks(pcb);
err = tcp_close(pcb);
if (err != ERR_OK) {
/* not closed, set up all callbacks again */
altcp_tcp_setup_callbacks(conn, pcb);
/* poll callback is not included in the above */
tcp_poll(pcb, oldpoll, pcb->pollinterval);
return err;
}
conn->state = NULL; /* unsafe to reference pcb after tcp_close(). */
}
altcp_free(conn);
return ERR_OK;
}
static err_t
altcp_tcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return ERR_VAL;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
return tcp_shutdown(pcb, shut_rx, shut_tx);
}
static err_t
altcp_tcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return ERR_VAL;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
return tcp_write(pcb, dataptr, len, apiflags);
}
static err_t
altcp_tcp_output(struct altcp_pcb *conn)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return ERR_VAL;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
return tcp_output(pcb);
}
static u16_t
altcp_tcp_mss(struct altcp_pcb *conn)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return 0;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
return tcp_mss(pcb);
}
static u16_t
altcp_tcp_sndbuf(struct altcp_pcb *conn)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return 0;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
return tcp_sndbuf(pcb);
}
static u16_t
altcp_tcp_sndqueuelen(struct altcp_pcb *conn)
{
struct tcp_pcb *pcb;
if (conn == NULL) {
return 0;
}
ALTCP_TCP_ASSERT_CONN(conn);
pcb = (struct tcp_pcb *)conn->state;
return tcp_sndqueuelen(pcb);
}
static void
altcp_tcp_nagle_disable(struct altcp_pcb *conn)
{
if (conn && conn->state) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
tcp_nagle_disable(pcb);
}
}
static void
altcp_tcp_nagle_enable(struct altcp_pcb *conn)
{
if (conn && conn->state) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
tcp_nagle_enable(pcb);
}
}
static int
altcp_tcp_nagle_disabled(struct altcp_pcb *conn)
{
if (conn && conn->state) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
return tcp_nagle_disabled(pcb);
}
return 0;
}
static void
altcp_tcp_setprio(struct altcp_pcb *conn, u8_t prio)
{
if (conn != NULL) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
tcp_setprio(pcb, prio);
}
}
static void
altcp_tcp_dealloc(struct altcp_pcb *conn)
{
LWIP_UNUSED_ARG(conn);
ALTCP_TCP_ASSERT_CONN(conn);
/* no private state to clean up */
}
static err_t
altcp_tcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port)
{
if (conn) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
return tcp_tcp_get_tcp_addrinfo(pcb, local, addr, port);
}
return ERR_VAL;
}
static ip_addr_t *
altcp_tcp_get_ip(struct altcp_pcb *conn, int local)
{
if (conn) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
if (pcb) {
if (local) {
return &pcb->local_ip;
} else {
return &pcb->remote_ip;
}
}
}
return NULL;
}
static u16_t
altcp_tcp_get_port(struct altcp_pcb *conn, int local)
{
if (conn) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
if (pcb) {
if (local) {
return pcb->local_port;
} else {
return pcb->remote_port;
}
}
}
return 0;
}
#ifdef LWIP_DEBUG
static enum tcp_state
altcp_tcp_dbg_get_tcp_state(struct altcp_pcb *conn)
{
if (conn) {
struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state;
ALTCP_TCP_ASSERT_CONN(conn);
if (pcb) {
return pcb->state;
}
}
return CLOSED;
}
#endif
const struct altcp_functions altcp_tcp_functions = {
altcp_tcp_set_poll,
altcp_tcp_recved,
altcp_tcp_bind,
altcp_tcp_connect,
altcp_tcp_listen,
altcp_tcp_abort,
altcp_tcp_close,
altcp_tcp_shutdown,
altcp_tcp_write,
altcp_tcp_output,
altcp_tcp_mss,
altcp_tcp_sndbuf,
altcp_tcp_sndqueuelen,
altcp_tcp_nagle_disable,
altcp_tcp_nagle_enable,
altcp_tcp_nagle_disabled,
altcp_tcp_setprio,
altcp_tcp_dealloc,
altcp_tcp_get_tcp_addrinfo,
altcp_tcp_get_ip,
altcp_tcp_get_port
#ifdef LWIP_DEBUG
, altcp_tcp_dbg_get_tcp_state
#endif
};
#endif /* LWIP_ALTCP */

View File

@ -1,240 +0,0 @@
/**
* @file
* Common functions used throughout the stack.
*
* These are reference implementations of the byte swapping functions.
* Again with the aim of being simple, correct and fully portable.
* Byte swapping is the second thing you would want to optimize. You will
* need to port it to your architecture and in your cc.h:
*
* \#define lwip_htons(x) your_htons
* \#define lwip_htonl(x) your_htonl
*
* Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts.
*
* If you \#define them to htons() and htonl(), you should
* \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from
* defining htonx/ntohx compatibility macros.
* @defgroup sys_nonstandard Non-standard functions
* @ingroup sys_layer
* lwIP provides default implementations for non-standard functions.
* These can be mapped to OS functions to reduce code footprint if desired.
* All defines related to this section must not be placed in lwipopts.h,
* but in arch/cc.h!
* These options cannot be \#defined in lwipopts.h since they are not options
* of lwIP itself, but options of the lwIP port to your system.
*/
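/* Example (illustrative sketch, not part of the original file): an arch/cc.h
 * override on a platform with byte-swap intrinsics. The intrinsic names are
 * hypothetical placeholders for whatever the toolchain provides. */
#if 0 /* cc.h sketch */
#define lwip_htons(x) my_arch_swap16(x)
#define lwip_htonl(x) my_arch_swap32(x)
/* only when mapping to the system's htons()/htonl(): */
#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS
#endif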
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt
*
*/
#include "lwip/opt.h"
#include "lwip/def.h"
#include <string.h>
#if BYTE_ORDER == LITTLE_ENDIAN
#if !defined(lwip_htons)
/**
* Convert an u16_t from host- to network byte order.
*
* @param n u16_t in host byte order
* @return n in network byte order
*/
u16_t
lwip_htons(u16_t n)
{
return PP_HTONS(n);
}
#endif /* lwip_htons */
#if !defined(lwip_htonl)
/**
* Convert an u32_t from host- to network byte order.
*
* @param n u32_t in host byte order
* @return n in network byte order
*/
u32_t
lwip_htonl(u32_t n)
{
return PP_HTONL(n);
}
#endif /* lwip_htonl */
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
#ifndef lwip_strnstr
/**
* @ingroup sys_nonstandard
* lwIP default implementation for strnstr() non-standard function.
* This can be \#defined to strnstr() depending on your platform port.
*/
char *
lwip_strnstr(const char *buffer, const char *token, size_t n)
{
const char *p;
size_t tokenlen = strlen(token);
if (tokenlen == 0) {
return LWIP_CONST_CAST(char *, buffer);
}
for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) {
if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) {
return LWIP_CONST_CAST(char *, p);
}
}
return NULL;
}
#endif
#ifndef lwip_stricmp
/**
* @ingroup sys_nonstandard
* lwIP default implementation for stricmp() non-standard function.
* This can be \#defined to stricmp() depending on your platform port.
*/
int
lwip_stricmp(const char *str1, const char *str2)
{
char c1, c2;
do {
c1 = *str1++;
c2 = *str2++;
if (c1 != c2) {
char c1_upc = c1 | 0x20;
if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
/* characters are not equal and one is in the alphabet range:
downcase both chars and check again */
char c2_upc = c2 | 0x20;
if (c1_upc != c2_upc) {
/* still not equal */
/* don't care for < or > */
return 1;
}
} else {
/* characters are not equal but none is in the alphabet range */
return 1;
}
}
} while (c1 != 0);
return 0;
}
#endif
#ifndef lwip_strnicmp
/**
* @ingroup sys_nonstandard
* lwIP default implementation for strnicmp() non-standard function.
* This can be \#defined to strnicmp() depending on your platform port.
*/
int
lwip_strnicmp(const char *str1, const char *str2, size_t len)
{
char c1, c2;
do {
c1 = *str1++;
c2 = *str2++;
if (c1 != c2) {
char c1_upc = c1 | 0x20;
if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
/* characters are not equal and one is in the alphabet range:
downcase both chars and check again */
char c2_upc = c2 | 0x20;
if (c1_upc != c2_upc) {
/* still not equal */
/* don't care for < or > */
return 1;
}
} else {
/* characters are not equal but none is in the alphabet range */
return 1;
}
}
len--;
} while ((len != 0) && (c1 != 0));
return 0;
}
#endif
#ifndef lwip_itoa
/**
* @ingroup sys_nonstandard
* lwIP default implementation for itoa() non-standard function.
* This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port.
*/
void
lwip_itoa(char *result, size_t bufsize, int number)
{
char *res = result;
char *tmp = result + bufsize - 1;
int n = (number >= 0) ? number : -number;
/* handle invalid bufsize */
if (bufsize < 2) {
if (bufsize == 1) {
*result = 0;
}
return;
}
/* First, add sign */
if (number < 0) {
*res++ = '-';
}
/* Then create the string from the end and stop if buffer full,
and ensure output string is zero terminated */
*tmp = 0;
while ((n != 0) && (tmp > res)) {
char val = (char)('0' + (n % 10));
tmp--;
*tmp = val;
n = n / 10;
}
if (n) {
/* buffer is too small */
*result = 0;
return;
}
if (*tmp == 0) {
/* Nothing added? */
*res++ = '0';
*res++ = 0;
return;
}
/* move from temporary buffer to output buffer (sign is not moved) */
memmove(res, tmp, (size_t)((result + bufsize) - tmp));
}
#endif
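/* Example (illustrative sketch, not part of the original file): lwip_itoa()
 * always zero-terminates and empties the buffer when the number does not fit. */
#if 0 /* usage sketch */
void
my_itoa_demo(void)
{
char buf[12]; /* large enough for any 32-bit int including sign */
lwip_itoa(buf, sizeof(buf), -42); /* buf now holds "-42" */
lwip_itoa(buf, 3, 12345);         /* too small: buf is set to "" */
}
#endif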

View File

@ -1,608 +0,0 @@
/**
* @file
* Internet checksum functions.\n
*
* These are some reference implementations of the checksum algorithm, with the
* aim of being simple, correct and fully portable. Checksumming is the
* first thing you would want to optimize for your platform. If you create
* your own version, link it in and in your cc.h put:
*
* \#define LWIP_CHKSUM your_checksum_routine
*
* Or you can select from the implementations below by defining
* LWIP_CHKSUM_ALGORITHM to 1, 2 or 3.
*/
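/* Example (illustrative sketch, not part of the original file): plugging an
 * optimized routine in from arch/cc.h. The routine name is hypothetical and
 * must have the u16_t (const void *, int) shape used in this file. */
#if 0 /* cc.h sketch */
u16_t my_asm_chksum(const void *dataptr, int len);
#define LWIP_CHKSUM my_asm_chksum
/* or select one of the portable implementations below: */
#define LWIP_CHKSUM_ALGORITHM 3
#endif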
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#include "lwip/inet_chksum.h"
#include "lwip/def.h"
#include "lwip/ip_addr.h"
#include <string.h>
#ifndef LWIP_CHKSUM
# define LWIP_CHKSUM lwip_standard_chksum
# ifndef LWIP_CHKSUM_ALGORITHM
# define LWIP_CHKSUM_ALGORITHM 2
# endif
u16_t lwip_standard_chksum(const void *dataptr, int len);
#endif
/* If none set: */
#ifndef LWIP_CHKSUM_ALGORITHM
# define LWIP_CHKSUM_ALGORITHM 0
#endif
#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */
/**
* lwip checksum
*
* @param dataptr points to start of data to be summed at any boundary
* @param len length of data to be summed
* @return host order (!) lwip checksum (non-inverted Internet sum)
*
* @note accumulator size limits summable length to 64k
* @note host endianness is irrelevant (see RFC 1071, p. 3)
*/
u16_t
lwip_standard_chksum(const void *dataptr, int len)
{
u32_t acc;
u16_t src;
const u8_t *octetptr;
acc = 0;
/* dataptr may be at odd or even addresses */
octetptr = (const u8_t *)dataptr;
while (len > 1) {
/* declare first octet as most significant
thus assume network order, ignoring host order */
src = (*octetptr) << 8;
octetptr++;
/* declare second octet as least significant */
src |= (*octetptr);
octetptr++;
acc += src;
len -= 2;
}
if (len > 0) {
/* accumulate remaining octet */
src = (*octetptr) << 8;
acc += src;
}
/* add deferred carry bits */
acc = (acc >> 16) + (acc & 0x0000ffffUL);
if ((acc & 0xffff0000UL) != 0) {
acc = (acc >> 16) + (acc & 0x0000ffffUL);
}
/* This may be a little confusing: reorder sum using lwip_htons()
instead of lwip_ntohs() since it has a little less call overhead.
The caller must invert bits for Internet sum! */
return lwip_htons((u16_t)acc);
}
#endif
#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */
/*
* Curt McDowell
* Broadcom Corp.
* csm@broadcom.com
*
* IP checksum two bytes at a time with support for
* unaligned buffer.
* Works for len up to and including 0x20000.
* by Curt McDowell, Broadcom Corp. 12/08/2005
*
* @param dataptr points to start of data to be summed at any boundary
* @param len length of data to be summed
* @return host order (!) lwip checksum (non-inverted Internet sum)
*/
u16_t
lwip_standard_chksum(const void *dataptr, int len)
{
const u8_t *pb = (const u8_t *)dataptr;
const u16_t *ps;
u16_t t = 0;
u32_t sum = 0;
int odd = ((mem_ptr_t)pb & 1);
/* Get aligned to u16_t */
if (odd && len > 0) {
((u8_t *)&t)[1] = *pb++;
len--;
}
/* Add the bulk of the data */
ps = (const u16_t *)(const void *)pb;
while (len > 1) {
sum += *ps++;
len -= 2;
}
/* Consume left-over byte, if any */
if (len > 0) {
((u8_t *)&t)[0] = *(const u8_t *)ps;
}
/* Add end bytes */
sum += t;
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
sum = FOLD_U32T(sum);
sum = FOLD_U32T(sum);
/* Swap if alignment was odd */
if (odd) {
sum = SWAP_BYTES_IN_WORD(sum);
}
return (u16_t)sum;
}
#endif
#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */
/**
* An optimized checksum routine. Basically, it uses loop-unrolling on
* the checksum loop, treating the head and tail bytes specially, whereas
* the inner loop acts on 8 bytes at a time.
*
* @param dataptr start of buffer to be checksummed. May be an odd byte address.
* @param len number of bytes in the buffer to be checksummed.
* @return host order (!) lwip checksum (non-inverted Internet sum)
*
* by Curt McDowell, Broadcom Corp. December 8th, 2005
*/
u16_t
lwip_standard_chksum(const void *dataptr, int len)
{
const u8_t *pb = (const u8_t *)dataptr;
const u16_t *ps;
u16_t t = 0;
const u32_t *pl;
u32_t sum = 0, tmp;
/* starts at odd byte address? */
int odd = ((mem_ptr_t)pb & 1);
if (odd && len > 0) {
((u8_t *)&t)[1] = *pb++;
len--;
}
ps = (const u16_t *)(const void *)pb;
if (((mem_ptr_t)ps & 3) && len > 1) {
sum += *ps++;
len -= 2;
}
pl = (const u32_t *)(const void *)ps;
while (len > 7) {
tmp = sum + *pl++; /* ping */
if (tmp < sum) {
tmp++; /* add back carry */
}
sum = tmp + *pl++; /* pong */
if (sum < tmp) {
sum++; /* add back carry */
}
len -= 8;
}
/* make room in upper bits */
sum = FOLD_U32T(sum);
ps = (const u16_t *)pl;
/* 16-bit aligned word remaining? */
while (len > 1) {
sum += *ps++;
len -= 2;
}
/* dangling tail byte remaining? */
if (len > 0) { /* include odd byte */
((u8_t *)&t)[0] = *(const u8_t *)ps;
}
sum += t; /* add end bytes */
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
sum = FOLD_U32T(sum);
sum = FOLD_U32T(sum);
if (odd) {
sum = SWAP_BYTES_IN_WORD(sum);
}
return (u16_t)sum;
}
#endif
/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */
static u16_t
inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc)
{
struct pbuf *q;
int swapped = 0;
/* iterate through all pbuf in chain */
for (q = p; q != NULL; q = q->next) {
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n",
(void *)q, (void *)q->next));
acc += LWIP_CHKSUM(q->payload, q->len);
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
/* just executing this next line is probably faster than the if statement needed
to check whether we really need to execute it, and does no harm */
acc = FOLD_U32T(acc);
if (q->len % 2 != 0) {
swapped = !swapped;
acc = SWAP_BYTES_IN_WORD(acc);
}
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
}
if (swapped) {
acc = SWAP_BYTES_IN_WORD(acc);
}
acc += (u32_t)lwip_htons((u16_t)proto);
acc += (u32_t)lwip_htons(proto_len);
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
return (u16_t)~(acc & 0xffffUL);
}
#if LWIP_IPV4
/* inet_chksum_pseudo:
*
* Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
* IP addresses are expected to be in network byte order.
*
* @param p chain of pbufs over that a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
const ip4_addr_t *src, const ip4_addr_t *dest)
{
u32_t acc;
u32_t addr;
addr = ip4_addr_get_u32(src);
acc = (addr & 0xffffUL);
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
addr = ip4_addr_get_u32(dest);
acc = (u32_t)(acc + (addr & 0xffffUL));
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_base(p, proto, proto_len, acc);
}
#endif /* LWIP_IPV4 */
#if LWIP_IPV6
/**
* Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain.
* IPv6 addresses are expected to be in network byte order.
*
* @param p chain of pbufs over that a checksum should be calculated (ip data part)
* @param proto ipv6 protocol/next header (used for checksum of pseudo header)
* @param proto_len length of the ipv6 payload (used for checksum of pseudo header)
* @param src source ipv6 address (used for checksum of pseudo header)
* @param dest destination ipv6 address (used for checksum of pseudo header)
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
const ip6_addr_t *src, const ip6_addr_t *dest)
{
u32_t acc = 0;
u32_t addr;
u8_t addr_part;
for (addr_part = 0; addr_part < 4; addr_part++) {
addr = src->addr[addr_part];
acc = (u32_t)(acc + (addr & 0xffffUL));
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
addr = dest->addr[addr_part];
acc = (u32_t)(acc + (addr & 0xffffUL));
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
}
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_base(p, proto, proto_len, acc);
}
#endif /* LWIP_IPV6 */
/* ip_chksum_pseudo:
*
* Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
* IP addresses are expected to be in network byte order.
*
* @param p chain of pbufs over that a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len,
const ip_addr_t *src, const ip_addr_t *dest)
{
#if LWIP_IPV6
if (IP_IS_V6(dest)) {
return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest));
}
#endif /* LWIP_IPV6 */
#if LWIP_IPV4 && LWIP_IPV6
else
#endif /* LWIP_IPV4 && LWIP_IPV6 */
#if LWIP_IPV4
{
return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest));
}
#endif /* LWIP_IPV4 */
}
/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */
static u16_t
inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, u32_t acc)
{
struct pbuf *q;
int swapped = 0;
u16_t chklen;
/* iterate through all pbuf in chain */
for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) {
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n",
(void *)q, (void *)q->next));
chklen = q->len;
if (chklen > chksum_len) {
chklen = chksum_len;
}
acc += LWIP_CHKSUM(q->payload, chklen);
chksum_len = (u16_t)(chksum_len - chklen);
LWIP_ASSERT("delete me", chksum_len < 0x7fff);
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/
/* fold the upper bit down */
acc = FOLD_U32T(acc);
if (q->len % 2 != 0) {
swapped = !swapped;
acc = SWAP_BYTES_IN_WORD(acc);
}
/*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/
}
if (swapped) {
acc = SWAP_BYTES_IN_WORD(acc);
}
acc += (u32_t)lwip_htons((u16_t)proto);
acc += (u32_t)lwip_htons(proto_len);
/* Fold 32-bit sum to 16 bits
calling this twice is probably faster than if statements... */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc));
return (u16_t)~(acc & 0xffffUL);
}
#if LWIP_IPV4
/* inet_chksum_pseudo_partial:
*
* Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf
* chain, summing only the first chksum_len bytes of the payload.
* IP addresses are expected to be in network byte order.
*
* @param p chain of pbufs over that a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @param chksum_len number of payload bytes used to compute the checksum
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest)
{
u32_t acc;
u32_t addr;
addr = ip4_addr_get_u32(src);
acc = (addr & 0xffffUL);
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
addr = ip4_addr_get_u32(dest);
acc = (u32_t)(acc + (addr & 0xffffUL));
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc);
}
#endif /* LWIP_IPV4 */
#if LWIP_IPV6
/**
* Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain.
* IPv6 addresses are expected to be in network byte order. Will only compute for a
* portion of the payload.
*
* @param p chain of pbufs over that a checksum should be calculated (ip data part)
* @param proto ipv6 protocol/next header (used for checksum of pseudo header)
* @param proto_len length of the ipv6 payload (used for checksum of pseudo header)
* @param chksum_len number of payload bytes used to compute chksum
* @param src source ipv6 address (used for checksum of pseudo header)
* @param dest destination ipv6 address (used for checksum of pseudo header)
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest)
{
u32_t acc = 0;
u32_t addr;
u8_t addr_part;
for (addr_part = 0; addr_part < 4; addr_part++) {
addr = src->addr[addr_part];
acc = (u32_t)(acc + (addr & 0xffffUL));
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
addr = dest->addr[addr_part];
acc = (u32_t)(acc + (addr & 0xffffUL));
acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL));
}
/* fold down to 16 bits */
acc = FOLD_U32T(acc);
acc = FOLD_U32T(acc);
return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc);
}
#endif /* LWIP_IPV6 */
/* ip_chksum_pseudo_partial:
*
* Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain.
*
* @param p chain of pbufs over that a checksum should be calculated (ip data part)
* @param src source ip address (used for checksum of pseudo header)
* @param dest destination ip address (used for checksum of pseudo header)
* @param proto ip protocol (used for checksum of pseudo header)
* @param proto_len length of the ip data part (used for checksum of pseudo header)
* @param chksum_len number of payload bytes used to compute the checksum
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len,
u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest)
{
#if LWIP_IPV6
if (IP_IS_V6(dest)) {
return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest));
}
#endif /* LWIP_IPV6 */
#if LWIP_IPV4 && LWIP_IPV6
else
#endif /* LWIP_IPV4 && LWIP_IPV6 */
#if LWIP_IPV4
{
return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest));
}
#endif /* LWIP_IPV4 */
}
/* inet_chksum:
*
* Calculates the Internet checksum over a portion of memory. Used primarily for IP
* and ICMP.
*
* @param dataptr start of the buffer to calculate the checksum (no alignment needed)
* @param len length of the buffer to calculate the checksum
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum(const void *dataptr, u16_t len)
{
return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len);
}
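/* Example (illustrative sketch, not part of the original file): filling in an
 * IPv4 header checksum, the same zero-then-sum pattern ip4_output_if() uses.
 * inet_chksum() already returns the inverted, ready-to-store value. */
#if 0 /* usage sketch */
static void
my_fill_ip4_chksum(struct ip_hdr *iphdr, u16_t hlen)
{
IPH_CHKSUM_SET(iphdr, 0); /* field must be zero while summing */
IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen));
}
#endif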
/**
* Calculate a checksum over a chain of pbufs (without pseudo-header; much like
* inet_chksum, only pbufs are used).
*
* @param p pbuf chain over that the checksum should be calculated
* @return checksum (as u16_t) to be saved directly in the protocol header
*/
u16_t
inet_chksum_pbuf(struct pbuf *p)
{
u32_t acc;
struct pbuf *q;
int swapped = 0;
acc = 0;
for (q = p; q != NULL; q = q->next) {
acc += LWIP_CHKSUM(q->payload, q->len);
acc = FOLD_U32T(acc);
if (q->len % 2 != 0) {
swapped = !swapped;
acc = SWAP_BYTES_IN_WORD(acc);
}
}
if (swapped) {
acc = SWAP_BYTES_IN_WORD(acc);
}
return (u16_t)~(acc & 0xffffUL);
}
/* These are some implementations for LWIP_CHKSUM_COPY, which copies data
* like MEMCPY but generates a checksum at the same time. Since this is a
* performance-sensitive function, you might want to create your own version
* in assembly targeted at your hardware by defining it in lwipopts.h:
* #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len)
*/
#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */
/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM.
* For architectures with big caches, data might still be in cache when
* generating the checksum after copying.
*/
u16_t
lwip_chksum_copy(void *dst, const void *src, u16_t len)
{
MEMCPY(dst, src, len);
return LWIP_CHKSUM(dst, len);
}
#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */

View File

@ -1,380 +0,0 @@
/**
* @file
* Modules initialization
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*/
#include "lwip/opt.h"
#include "lwip/init.h"
#include "lwip/stats.h"
#include "lwip/sys.h"
#include "lwip/mem.h"
#include "lwip/memp.h"
#include "lwip/pbuf.h"
#include "lwip/netif.h"
#include "lwip/sockets.h"
#include "lwip/ip.h"
#include "lwip/raw.h"
#include "lwip/udp.h"
#include "lwip/priv/tcp_priv.h"
#include "lwip/igmp.h"
#include "lwip/dns.h"
#include "lwip/timeouts.h"
#include "lwip/etharp.h"
#include "lwip/ip6.h"
#include "lwip/nd6.h"
#include "lwip/mld6.h"
#include "lwip/api.h"
#include "netif/ppp/ppp_opts.h"
#include "netif/ppp/ppp_impl.h"
#ifndef LWIP_SKIP_PACKING_CHECK
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct packed_struct_test {
PACK_STRUCT_FLD_8(u8_t dummy1);
PACK_STRUCT_FIELD(u32_t dummy2);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/epstruct.h"
#endif
#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5
#endif
/* Compile-time sanity checks for configuration errors.
* These can be done independently of LWIP_DEBUG, without penalty.
*/
#ifndef BYTE_ORDER
#error "BYTE_ORDER is not defined, you have to define it in your cc.h"
#endif
#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV)
#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && LWIP_UDPLITE)
#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && LWIP_DHCP)
#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && !LWIP_RAW && LWIP_MULTICAST_TX_OPTIONS)
#error "If you want to use LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 and/or LWIP_RAW=1 in your lwipopts.h"
#endif
#if (!LWIP_UDP && LWIP_DNS)
#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h"
#endif
#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */
#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0))
#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h"
#endif
#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0))
#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h"
#endif
#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0))
#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h"
#endif
#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0))
#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h"
#endif
#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1))
#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h"
#endif
#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS)
#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h"
#endif
#if (LWIP_IGMP && !LWIP_IPV4)
#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h"
#endif
#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0))
#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h"
#endif
/* There must be sufficient timeouts, taking into account requirements of the subsystems. */
#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < LWIP_NUM_SYS_TIMEOUT_INTERNAL)
#error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts"
#endif
#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS))
#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!"
#endif
#endif /* !MEMP_MEM_MALLOC */
#if LWIP_WND_SCALE
#if (LWIP_TCP && (TCP_WND > 0xffffffff))
#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h"
#endif
#if (LWIP_TCP && (TCP_RCV_SCALE > 14))
#error "The maximum valid window scale value is 14!"
#endif
#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE)))
#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!"
#endif
#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0))
#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!"
#endif
#else /* LWIP_WND_SCALE */
#if (LWIP_TCP && (TCP_WND > 0xffff))
#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)"
#endif
#endif /* LWIP_WND_SCALE */
#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff))
#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h"
#endif
#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2))
#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work"
#endif
#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12)))
#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h"
#endif
#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff)))
#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t"
#endif
#if (LWIP_TCP && LWIP_TCP_SACK_OUT && !TCP_QUEUE_OOSEQ)
#error "To use LWIP_TCP_SACK_OUT, TCP_QUEUE_OOSEQ needs to be enabled"
#endif
#if (LWIP_TCP && LWIP_TCP_SACK_OUT && (LWIP_TCP_MAX_SACK_NUM < 1))
#error "LWIP_TCP_MAX_SACK_NUM must be greater than 0"
#endif
#if (LWIP_NETIF_API && (NO_SYS==1))
#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h"
#endif
#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1))
#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h"
#endif
#if (LWIP_PPP_API && (NO_SYS==1))
#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h"
#endif
#if (LWIP_PPP_API && (PPP_SUPPORT==0))
#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h"
#endif
#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP)
#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h"
#endif
#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK)
#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h"
#endif
#if (!LWIP_ARP && LWIP_AUTOIP)
#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h"
#endif
#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API)))
#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h"
#endif
#if (LWIP_ALTCP && LWIP_EVENT_API)
#error "The application layered tcp API does not work with LWIP_EVENT_API"
#endif
#if (MEM_LIBC_MALLOC && MEM_USE_POOLS)
#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h"
#endif
#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS)
#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h"
#endif
#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT)
#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf"
#endif
#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT)))
#error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST"
#endif
#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT
#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on"
#endif
#if PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT
#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on"
#endif
#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4
#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on"
#endif
#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6
#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on"
#endif
#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT)
#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT"
#endif
#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING
#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too"
#endif
#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE
#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets"
#endif
#if LWIP_NETCONN && LWIP_TCP
#if NETCONN_COPY != TCP_WRITE_FLAG_COPY
#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY"
#endif
#if NETCONN_MORE != TCP_WRITE_FLAG_MORE
#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE"
#endif
#endif /* LWIP_NETCONN && LWIP_TCP */
#if LWIP_SOCKET
#endif /* LWIP_SOCKET */
/* Compile-time checks for deprecated options.
*/
#ifdef MEMP_NUM_TCPIP_MSG
#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef TCP_REXMIT_DEBUG
#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef RAW_STATS
#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef ETHARP_QUEUE_FIRST
#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h."
#endif
#ifdef ETHARP_ALWAYS_INSERT
#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h."
#endif
#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED)
#error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)"
#endif
#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS
#define LWIP_DISABLE_TCP_SANITY_CHECKS 0
#endif
#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS
#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0
#endif
/* MEMP sanity checks */
#if !LWIP_DISABLE_MEMP_SANITY_CHECKS
#if LWIP_NETCONN || LWIP_SOCKET
#if MEMP_MEM_MALLOC
#if !MEMP_NUM_NETCONN && LWIP_SOCKET
#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!"
#endif
#else /* MEMP_MEM_MALLOC */
#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB)
#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error."
#endif
#endif /* MEMP_MEM_MALLOC */
#endif /* LWIP_NETCONN || LWIP_SOCKET */
#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */
#if MEMP_MEM_MALLOC
#if MEM_USE_POOLS
#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time"
#endif
#ifdef LWIP_HOOK_MEMP_AVAILABLE
#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC"
#endif
#endif /* MEMP_MEM_MALLOC */
/* TCP sanity checks */
#if !LWIP_DISABLE_TCP_SANITY_CHECKS
#if LWIP_TCP
#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN)
#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SND_BUF < (2 * TCP_MSS)
#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS))
#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SNDLOWAT >= TCP_SND_BUF
#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS))
#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!"
#endif
#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN
#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN))
#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN))))
#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#if TCP_WND < TCP_MSS
#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error."
#endif
#endif /* LWIP_TCP */
#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */
/**
* @ingroup lwip_nosys
* Initialize all modules.
* Use this in NO_SYS mode. Use tcpip_init() otherwise.
*/
void
lwip_init(void)
{
#ifndef LWIP_SKIP_CONST_CHECK
int a = 0;
LWIP_UNUSED_ARG(a);
LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void *, &a) == &a);
#endif
#ifndef LWIP_SKIP_PACKING_CHECK
LWIP_ASSERT("Struct packing not implemented correctly. Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE);
#endif
/* Modules initialization */
stats_init();
#if !NO_SYS
sys_init();
#endif /* !NO_SYS */
mem_init();
memp_init();
pbuf_init();
netif_init();
#if LWIP_IPV4
ip_init();
#if LWIP_ARP
etharp_init();
#endif /* LWIP_ARP */
#endif /* LWIP_IPV4 */
#if LWIP_RAW
raw_init();
#endif /* LWIP_RAW */
#if LWIP_UDP
udp_init();
#endif /* LWIP_UDP */
#if LWIP_TCP
tcp_init();
#endif /* LWIP_TCP */
#if LWIP_IGMP
igmp_init();
#endif /* LWIP_IGMP */
#if LWIP_DNS
dns_init();
#endif /* LWIP_DNS */
#if PPP_SUPPORT
ppp_init();
#endif
#if LWIP_TIMERS
sys_timeouts_init();
#endif /* LWIP_TIMERS */
}
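/* Minimal usage sketch (not part of this file): a bare-metal NO_SYS=1 setup
* built on the public API used above. my_netif_init and my_netif_poll are
* hypothetical placeholders for the port's ethernet driver functions.
*/
#if 0 /* example only */
#include "lwip/init.h"
#include "lwip/netif.h"
#include "lwip/timeouts.h"
#include "netif/ethernet.h"

extern err_t my_netif_init(struct netif *netif); /* hypothetical driver init */
extern void my_netif_poll(struct netif *netif);  /* hypothetical RX poll */

static struct netif my_netif;

void my_main(void)
{
  ip4_addr_t ip, mask, gw;
  lwip_init(); /* initialize all modules, NO_SYS mode */
  IP4_ADDR(&ip, 192, 168, 0, 2);
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw, 192, 168, 0, 1);
  netif_add(&my_netif, &ip, &mask, &gw, NULL, my_netif_init, ethernet_input);
  netif_set_default(&my_netif);
  netif_set_up(&my_netif);
  for (;;) {
    my_netif_poll(&my_netif); /* driver feeds received frames to netif->input */
    sys_check_timeouts();     /* drive the lwIP timers */
  }
}
#endif /* example only */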

View File

@ -1,167 +0,0 @@
/**
* @file
* Common IPv4 and IPv6 code
*
* @defgroup ip IP
* @ingroup callbackstyle_api
*
* @defgroup ip4 IPv4
* @ingroup ip
*
* @defgroup ip6 IPv6
* @ingroup ip
*
* @defgroup ipaddr IP address handling
* @ingroup infrastructure
*
* @defgroup ip4addr IPv4 only
* @ingroup ipaddr
*
* @defgroup ip6addr IPv6 only
* @ingroup ipaddr
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_IPV4 || LWIP_IPV6
#include "lwip/ip_addr.h"
#include "lwip/ip.h"
/** Global data for both IPv4 and IPv6 */
struct ip_globals ip_data;
#if LWIP_IPV4 && LWIP_IPV6
const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT;
/**
* @ingroup ipaddr
* Convert numeric IP address (both versions) into ASCII representation.
* returns ptr to static buffer; not reentrant!
*
* @param addr ip address in network order to convert
* @return pointer to a global static (!) buffer that holds the ASCII
* representation of addr
*/
char *ipaddr_ntoa(const ip_addr_t *addr)
{
if (addr == NULL) {
return NULL;
}
if (IP_IS_V6(addr)) {
return ip6addr_ntoa(ip_2_ip6(addr));
} else {
return ip4addr_ntoa(ip_2_ip4(addr));
}
}
/**
* @ingroup ipaddr
* Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used.
*
* @param addr ip address in network order to convert
* @param buf target buffer where the string is stored
* @param buflen length of buf
* @return either pointer to buf which now holds the ASCII
* representation of addr or NULL if buf was too small
*/
char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen)
{
if (addr == NULL) {
return NULL;
}
if (IP_IS_V6(addr)) {
return ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen);
} else {
return ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen);
}
}
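/* Usage sketch: prefer the _r variant whenever the result must survive a
* second conversion (e.g. printing two addresses in one statement). Assuming
* an initialized ip_addr_t named 'addr':
*
*   char buf[IPADDR_STRLEN_MAX];
*   printf("peer: %s\n", ipaddr_ntoa(&addr));          ... static buffer
*   if (ipaddr_ntoa_r(&addr, buf, sizeof(buf)) != NULL) {
*     printf("peer: %s\n", buf);                       ... caller's buffer
*   }
*/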
/**
* @ingroup ipaddr
* Convert IP address string (both versions) to numeric.
* The version is auto-detected from the string.
*
* @param cp IP address string to convert
* @param addr conversion result is stored here
* @return 1 on success, 0 on error
*/
int
ipaddr_aton(const char *cp, ip_addr_t *addr)
{
if (cp != NULL) {
const char *c;
for (c = cp; *c != 0; c++) {
if (*c == ':') {
/* contains a colon: IPv6 address */
if (addr) {
IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6);
}
return ip6addr_aton(cp, ip_2_ip6(addr));
} else if (*c == '.') {
/* contains a dot: IPv4 address */
break;
}
}
/* call ip4addr_aton as fallback or if IPv4 was found */
if (addr) {
IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4);
}
return ip4addr_aton(cp, ip_2_ip4(addr));
}
return 0;
}
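/* Usage sketch showing the version auto-detection (the string literals are
* arbitrary examples):
*
*   ip_addr_t a;
*   if (ipaddr_aton("192.168.0.2", &a)) {
*     ... IP_GET_TYPE(&a) is now IPADDR_TYPE_V4
*   }
*   if (ipaddr_aton("2001:db8::1", &a)) {
*     ... IP_GET_TYPE(&a) is now IPADDR_TYPE_V6
*   }
*/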
/**
* @ingroup lwip_nosys
* If both IP versions are enabled, this function can dispatch packets to the correct one.
* Don't call directly, pass to netif_add() and call netif->input().
*/
err_t
ip_input(struct pbuf *p, struct netif *inp)
{
if (p != NULL) {
if (IP_HDR_GET_VERSION(p->payload) == 6) {
return ip6_input(p, inp);
}
return ip4_input(p, inp);
}
return ERR_VAL;
}
#endif /* LWIP_IPV4 && LWIP_IPV6 */
#endif /* LWIP_IPV4 || LWIP_IPV6 */

View File

@ -1,527 +0,0 @@
/**
* @file
* AutoIP Automatic LinkLocal IP Configuration
*
* This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform
* with RFC 3927.
*
* @defgroup autoip AUTOIP
* @ingroup ip4
* AUTOIP related functions
* USAGE:
*
* define @ref LWIP_AUTOIP 1 in your lwipopts.h
* Options:
* AUTOIP_TMR_INTERVAL msecs,
* I recommend a value of 100. The value must divide 1000 with a remainder of almost 0.
* Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 ....
*
* Without DHCP:
* - Call autoip_start() after netif_add().
*
* With DHCP:
* - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h.
* - Configure your DHCP Client.
*
* @see netifapi_autoip
*/
/*
*
* Copyright (c) 2007 Dominik Spies <kontakt@dspies.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* Author: Dominik Spies <kontakt@dspies.de>
*/
#include "lwip/opt.h"
#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */
#include "lwip/mem.h"
/* #include "lwip/udp.h" */
#include "lwip/ip_addr.h"
#include "lwip/netif.h"
#include "lwip/autoip.h"
#include "lwip/etharp.h"
#include "lwip/prot/autoip.h"
#include <string.h>
/** Pseudo random macro based on netif information.
* You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */
#ifndef LWIP_AUTOIP_RAND
#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \
((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \
((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \
((u32_t)((netif->hwaddr[4]) & 0xff))) + \
(netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0))
#endif /* LWIP_AUTOIP_RAND */
/**
* Macro that generates the initial IP address to be tried by AUTOIP.
* If you want to override this, define it to something else in lwipopts.h.
*/
#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR
#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \
lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \
((u32_t)((u8_t)(netif->hwaddr[5]))) << 8)))
#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */
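/* Override sketch (in lwipopts.h), e.g. to seed from a device serial number;
* my_device_serial32() is a hypothetical function. The macro must yield a
* network-order address, here kept within the 0xFE00 addresses of the range:
*
* #define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \
*   lwip_htonl(AUTOIP_RANGE_START + (my_device_serial32() % 0xFE00))
*/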
/* static functions */
static err_t autoip_arp_announce(struct netif *netif);
static void autoip_start_probing(struct netif *netif);
/**
* @ingroup autoip
* Set a statically allocated struct autoip to work with.
* Using this prevents autoip_start() from allocating it with mem_malloc().
*
* @param netif the netif for which to set the struct autoip
* @param autoip (uninitialised) autoip struct allocated by the application
*/
void
autoip_set_struct(struct netif *netif, struct autoip *autoip)
{
LWIP_ASSERT_CORE_LOCKED();
LWIP_ASSERT("netif != NULL", netif != NULL);
LWIP_ASSERT("autoip != NULL", autoip != NULL);
LWIP_ASSERT("netif already has a struct autoip set",
netif_autoip_data(netif) == NULL);
/* clear data structure */
memset(autoip, 0, sizeof(struct autoip));
/* autoip->state = AUTOIP_STATE_OFF; */
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip);
}
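/* Usage sketch: static allocation plus start, e.g. from the application's
* netif setup code (function and variable names are placeholders):
*/
#if 0 /* example only */
static struct autoip my_autoip;

void my_start_autoip(struct netif *netif)
{
  autoip_set_struct(netif, &my_autoip); /* avoid mem_malloc inside autoip_start */
  autoip_start(netif);                  /* begin probing for a 169.254/16 address */
}
#endif /* example only */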
/** Restart AutoIP client and check the next address (conflict detected)
*
* @param netif The netif under AutoIP control
*/
static void
autoip_restart(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
autoip->tried_llipaddr++;
autoip_start(netif);
}
/**
* Handle an IP address conflict after ARP conflict detection
*/
static void
autoip_handle_arp_conflict(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
/* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where
a) means retreat on the first conflict and
b) allows to keep an already configured address when having only one
conflict in 10 seconds
We use option b) since it helps to improve the chance that one of the two
conflicting hosts may be able to retain its address. */
if (autoip->lastconflict > 0) {
/* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n"));
/* Active TCP sessions are aborted when removing the ip address */
autoip_restart(netif);
} else {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n"));
autoip_arp_announce(netif);
autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND;
}
}
/**
* Create an IP address in the range 169.254.1.0 to 169.254.254.255
*
* @param netif network interface on which to create the IP address
* @param ipaddr ip address to initialize
*/
static void
autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr)
{
struct autoip *autoip = netif_autoip_data(netif);
/* Here we create an IP address in the range 169.254.1.0 to 169.254.254.255
* compliant to RFC 3927 Section 2.1
* We have 254 * 256 possibilities */
u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif));
addr += autoip->tried_llipaddr;
addr = AUTOIP_NET | (addr & 0xffff);
/* Now, 169.254.0.0 <= addr <= 169.254.255.255 */
if (addr < AUTOIP_RANGE_START) {
addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
}
if (addr > AUTOIP_RANGE_END) {
addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1;
}
LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) &&
(addr <= AUTOIP_RANGE_END));
ip4_addr_set_u32(ipaddr, lwip_htonl(addr));
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
(u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr),
ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr)));
}
/**
* Sends an ARP probe from a network interface
*
* @param netif network interface used to send the probe
*/
static err_t
autoip_arp_probe(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
/* this works because netif->ip_addr is ANY */
return etharp_request(netif, &autoip->llipaddr);
}
/**
* Sends an ARP announce from a network interface
*
* @param netif network interface used to send the announce
*/
static err_t
autoip_arp_announce(struct netif *netif)
{
return etharp_gratuitous(netif);
}
/**
* Configure interface for use with current LL IP-Address
*
* @param netif network interface to configure with current LL IP-Address
*/
static err_t
autoip_bind(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
ip4_addr_t sn_mask, gw_addr;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
(void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num,
ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
IP4_ADDR(&sn_mask, 255, 255, 0, 0);
IP4_ADDR(&gw_addr, 0, 0, 0, 0);
netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr);
/* interface is used by routing now that an address is set */
return ERR_OK;
}
/**
* @ingroup autoip
* Start AutoIP client
*
* @param netif network interface on which to start the AutoIP client
*/
err_t
autoip_start(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
err_t result = ERR_OK;
LWIP_ASSERT_CORE_LOCKED();
LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;);
/* Set IP-Address, Netmask and Gateway to 0 to make sure that
* ARP Packets are formed correctly
*/
netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0],
netif->name[1], (u16_t)netif->num));
if (autoip == NULL) {
/* no AutoIP client attached yet? */
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_start(): starting new AUTOIP client\n"));
autoip = (struct autoip *)mem_calloc(1, sizeof(struct autoip));
if (autoip == NULL) {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_start(): could not allocate autoip\n"));
return ERR_MEM;
}
/* store this AutoIP client in the netif */
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip"));
} else {
autoip->state = AUTOIP_STATE_OFF;
autoip->ttw = 0;
autoip->sent_num = 0;
ip4_addr_set_zero(&autoip->llipaddr);
autoip->lastconflict = 0;
}
autoip_create_addr(netif, &(autoip->llipaddr));
autoip_start_probing(netif);
return result;
}
static void
autoip_start_probing(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
autoip->state = AUTOIP_STATE_PROBING;
autoip->sent_num = 0;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
/* time to wait before the first probe; this is randomly
* chosen between 0 and PROBE_WAIT seconds,
* compliant with RFC 3927 Section 2.2.1
*/
autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND));
/*
* if we tried more than MAX_CONFLICTS addresses we must limit our rate of
* acquiring and probing addresses,
* compliant with RFC 3927 Section 2.2.1
*/
if (autoip->tried_llipaddr > MAX_CONFLICTS) {
autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND;
}
}
/**
* Handle a possible change in the network configuration.
*
* If there is an AutoIP address configured, take the interface down
* and begin probing with the same address.
*/
void
autoip_network_changed(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
if (autoip && (autoip->state != AUTOIP_STATE_OFF)) {
autoip_start_probing(netif);
}
}
/**
* @ingroup autoip
* Stop AutoIP client
*
* @param netif network interface on which to stop the AutoIP client
*/
err_t
autoip_stop(struct netif *netif)
{
struct autoip *autoip = netif_autoip_data(netif);
LWIP_ASSERT_CORE_LOCKED();
if (autoip != NULL) {
autoip->state = AUTOIP_STATE_OFF;
if (ip4_addr_islinklocal(netif_ip4_addr(netif))) {
netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4);
}
}
return ERR_OK;
}
/**
* Has to be called in loop every AUTOIP_TMR_INTERVAL milliseconds
*/
void
autoip_tmr(void)
{
struct netif *netif;
/* loop through netif's */
NETIF_FOREACH(netif) {
struct autoip *autoip = netif_autoip_data(netif);
/* only act on AutoIP configured interfaces */
if (autoip != NULL) {
if (autoip->lastconflict > 0) {
autoip->lastconflict--;
}
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE,
("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n",
(u16_t)(autoip->state), autoip->ttw));
if (autoip->ttw > 0) {
autoip->ttw--;
}
switch (autoip->state) {
case AUTOIP_STATE_PROBING:
if (autoip->ttw == 0) {
if (autoip->sent_num >= PROBE_NUM) {
/* Switch to ANNOUNCING: now we can bind to an IP address and use it */
autoip->state = AUTOIP_STATE_ANNOUNCING;
autoip_bind(netif);
/* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP
which counts as an announcement */
autoip->sent_num = 1;
autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
} else {
autoip_arp_probe(netif);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n"));
autoip->sent_num++;
if (autoip->sent_num == PROBE_NUM) {
/* calculate time to wait for the announce */
autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND;
} else {
/* calculate time to wait to next probe */
autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) %
((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) +
PROBE_MIN * AUTOIP_TICKS_PER_SECOND);
}
}
}
break;
case AUTOIP_STATE_ANNOUNCING:
if (autoip->ttw == 0) {
autoip_arp_announce(netif);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n"));
autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND;
autoip->sent_num++;
if (autoip->sent_num >= ANNOUNCE_NUM) {
autoip->state = AUTOIP_STATE_BOUND;
autoip->sent_num = 0;
autoip->ttw = 0;
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE,
("autoip_tmr(): changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr),
ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr)));
}
}
break;
default:
/* nothing to do in other states */
break;
}
}
}
}
/**
* Handles every incoming ARP Packet, called by etharp_input().
*
* @param netif network interface to use for autoip processing
* @param hdr Incoming ARP packet
*/
void
autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr)
{
struct autoip *autoip = netif_autoip_data(netif);
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n"));
if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) {
/* when ip.src == llipaddr && hw.src != netif->hwaddr
*
* when probing ip.dst == llipaddr && hw.src != netif->hwaddr
* we have a conflict and must solve it
*/
ip4_addr_t sipaddr, dipaddr;
struct eth_addr netifaddr;
SMEMCPY(netifaddr.addr, netif->hwaddr, ETH_HWADDR_LEN);
/* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without
* structure packing (not using structure copy which breaks strict-aliasing rules).
*/
IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr);
IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr);
if (autoip->state == AUTOIP_STATE_PROBING) {
/* RFC 3927 Section 2.2.1:
* from beginning to after ANNOUNCE_WAIT
* seconds we have a conflict if
* ip.src == llipaddr OR
* ip.dst == llipaddr && hw.src != own hwaddr
*/
if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) ||
(ip4_addr_isany_val(sipaddr) &&
ip4_addr_cmp(&dipaddr, &autoip->llipaddr) &&
!eth_addr_cmp(&netifaddr, &hdr->shwaddr))) {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING,
("autoip_arp_reply(): Probe Conflict detected\n"));
autoip_restart(netif);
}
} else {
/* RFC 3927 Section 2.5:
* in any state we have a conflict if
* ip.src == llipaddr && hw.src != own hwaddr
*/
if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) &&
!eth_addr_cmp(&netifaddr, &hdr->shwaddr)) {
LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING,
("autoip_arp_reply(): Conflicting ARP-Packet detected\n"));
autoip_handle_arp_conflict(netif);
}
}
}
}
/** check if AutoIP supplied netif->ip_addr
*
* @param netif the netif to check
* @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING),
* 0 otherwise
*/
u8_t
autoip_supplied_address(const struct netif *netif)
{
if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) {
struct autoip *autoip = netif_autoip_data(netif);
return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING);
}
return 0;
}
u8_t
autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr)
{
struct autoip *autoip = netif_autoip_data(netif);
return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr));
}
#endif /* LWIP_IPV4 && LWIP_AUTOIP */

View File

@ -1,404 +0,0 @@
/**
* @file
* ICMP - Internet Control Message Protocol
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
/* Some ICMP messages should be passed to the transport protocols. This
is not implemented. */
#include "lwip/opt.h"
#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */
#include "lwip/icmp.h"
#include "lwip/inet_chksum.h"
#include "lwip/ip.h"
#include "lwip/def.h"
#include "lwip/stats.h"
#include <string.h>
#ifdef LWIP_HOOK_FILENAME
#include LWIP_HOOK_FILENAME
#endif
/** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be
* used to modify and send a response packet (and to 1 if this is not the case,
* e.g. when link header is stripped off when receiving) */
#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN
#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */
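/* For example, a port whose driver always delivers PBUF_POOL pbufs that can
* be reused for the echo reply could put this in its lwipopts.h (a sketch,
* only valid if the condition above really holds for that driver):
* #define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 0
*/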
/* The amount of data from the original packet to return in a dest-unreachable */
#define ICMP_DEST_UNREACH_DATASIZE 8
static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code);
/**
* Processes ICMP input packets, called from ip_input().
*
* Currently only processes icmp echo requests and sends
* out the echo response.
*
* @param p the icmp echo request packet, p->payload pointing to the icmp header
* @param inp the netif on which this packet was received
*/
void
icmp_input(struct pbuf *p, struct netif *inp)
{
u8_t type;
#ifdef LWIP_DEBUG
u8_t code;
#endif /* LWIP_DEBUG */
struct icmp_echo_hdr *iecho;
const struct ip_hdr *iphdr_in;
u16_t hlen;
const ip4_addr_t *src;
ICMP_STATS_INC(icmp.recv);
MIB2_STATS_INC(mib2.icmpinmsgs);
iphdr_in = ip4_current_header();
hlen = IPH_HL_BYTES(iphdr_in);
if (hlen < IP_HLEN) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen));
goto lenerr;
}
if (p->len < sizeof(u16_t) * 2) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len));
goto lenerr;
}
type = *((u8_t *)p->payload);
#ifdef LWIP_DEBUG
code = *(((u8_t *)p->payload) + 1);
/* if debug is enabled but debug statement below is somehow disabled: */
LWIP_UNUSED_ARG(code);
#endif /* LWIP_DEBUG */
switch (type) {
case ICMP_ER:
/* This is OK, echo reply might have been parsed by a raw PCB
(as obviously, an echo request has been sent, too). */
MIB2_STATS_INC(mib2.icmpinechoreps);
break;
case ICMP_ECHO:
MIB2_STATS_INC(mib2.icmpinechos);
src = ip4_current_dest_addr();
/* multicast destination address? */
if (ip4_addr_ismulticast(ip4_current_dest_addr())) {
#if LWIP_MULTICAST_PING
/* For multicast, use address of receiving interface as source address */
src = netif_ip4_addr(inp);
#else /* LWIP_MULTICAST_PING */
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n"));
goto icmperr;
#endif /* LWIP_MULTICAST_PING */
}
/* broadcast destination address? */
if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) {
#if LWIP_BROADCAST_PING
/* For broadcast, use address of receiving interface as source address */
src = netif_ip4_addr(inp);
#else /* LWIP_BROADCAST_PING */
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n"));
goto icmperr;
#endif /* LWIP_BROADCAST_PING */
}
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n"));
if (p->tot_len < sizeof(struct icmp_echo_hdr)) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n"));
goto lenerr;
}
#if CHECKSUM_CHECK_ICMP
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) {
if (inet_chksum_pbuf(p) != 0) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n"));
pbuf_free(p);
ICMP_STATS_INC(icmp.chkerr);
MIB2_STATS_INC(mib2.icmpinerrors);
return;
}
}
#endif
#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN
if (pbuf_add_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) {
/* p is not big enough to contain link headers
* allocate a new one and copy p into it
*/
struct pbuf *r;
u16_t alloc_len = (u16_t)(p->tot_len + hlen);
if (alloc_len < p->tot_len) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed (tot_len overflow)\n"));
goto icmperr;
}
/* allocate new packet buffer with space for link headers */
r = pbuf_alloc(PBUF_LINK, alloc_len, PBUF_RAM);
if (r == NULL) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n"));
goto icmperr;
}
if (r->len < hlen + sizeof(struct icmp_echo_hdr)) {
LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header"));
pbuf_free(r);
goto icmperr;
}
/* copy the ip header */
MEMCPY(r->payload, iphdr_in, hlen);
/* switch r->payload back to icmp header (cannot fail) */
if (pbuf_remove_header(r, hlen)) {
LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0);
pbuf_free(r);
goto icmperr;
}
/* copy the rest of the packet without ip header */
if (pbuf_copy(r, p) != ERR_OK) {
LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed"));
pbuf_free(r);
goto icmperr;
}
/* free the original p */
pbuf_free(p);
/* we now have an identical copy of p that has room for link headers */
p = r;
} else {
/* restore p->payload to point to icmp header (cannot fail) */
if (pbuf_remove_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) {
LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0);
goto icmperr;
}
}
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */
/* At this point, all checks are OK. */
/* We generate an answer by switching the dest and src ip addresses,
* setting the icmp type to ECHO_RESPONSE and updating the checksum. */
iecho = (struct icmp_echo_hdr *)p->payload;
if (pbuf_add_header(p, hlen)) {
LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet"));
} else {
err_t ret;
struct ip_hdr *iphdr = (struct ip_hdr *)p->payload;
ip4_addr_copy(iphdr->src, *src);
ip4_addr_copy(iphdr->dest, *ip4_current_src_addr());
ICMPH_TYPE_SET(iecho, ICMP_ER);
#if CHECKSUM_GEN_ICMP
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) {
/* adjust the checksum */
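/* incremental update in the spirit of RFC 1624: only the type field
* changed, from ICMP_ECHO (8) to ICMP_ER (0), so add ICMP_ECHO << 8 to
* the one's complement checksum instead of recomputing it over the whole
* packet; the branch below handles the end-around carry */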
if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) {
iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1);
} else {
iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS(ICMP_ECHO << 8));
}
}
#if LWIP_CHECKSUM_CTRL_PER_NETIF
else {
iecho->chksum = 0;
}
#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */
#else /* CHECKSUM_GEN_ICMP */
iecho->chksum = 0;
#endif /* CHECKSUM_GEN_ICMP */
/* Set the correct TTL and recalculate the header checksum. */
IPH_TTL_SET(iphdr, ICMP_TTL);
IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) {
IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen));
}
#endif /* CHECKSUM_GEN_IP */
ICMP_STATS_INC(icmp.xmit);
/* increase number of messages attempted to send */
MIB2_STATS_INC(mib2.icmpoutmsgs);
/* increase number of echo replies attempted to send */
MIB2_STATS_INC(mib2.icmpoutechoreps);
/* send an ICMP packet */
ret = ip4_output_if(p, src, LWIP_IP_HDRINCL,
ICMP_TTL, 0, IP_PROTO_ICMP, inp);
if (ret != ERR_OK) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret)));
}
}
break;
default:
if (type == ICMP_DUR) {
MIB2_STATS_INC(mib2.icmpindestunreachs);
} else if (type == ICMP_TE) {
MIB2_STATS_INC(mib2.icmpintimeexcds);
} else if (type == ICMP_PP) {
MIB2_STATS_INC(mib2.icmpinparmprobs);
} else if (type == ICMP_SQ) {
MIB2_STATS_INC(mib2.icmpinsrcquenchs);
} else if (type == ICMP_RD) {
MIB2_STATS_INC(mib2.icmpinredirects);
} else if (type == ICMP_TS) {
MIB2_STATS_INC(mib2.icmpintimestamps);
} else if (type == ICMP_TSR) {
MIB2_STATS_INC(mib2.icmpintimestampreps);
} else if (type == ICMP_AM) {
MIB2_STATS_INC(mib2.icmpinaddrmasks);
} else if (type == ICMP_AMR) {
MIB2_STATS_INC(mib2.icmpinaddrmaskreps);
}
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n",
(s16_t)type, (s16_t)code));
ICMP_STATS_INC(icmp.proterr);
ICMP_STATS_INC(icmp.drop);
}
pbuf_free(p);
return;
lenerr:
pbuf_free(p);
ICMP_STATS_INC(icmp.lenerr);
MIB2_STATS_INC(mib2.icmpinerrors);
return;
#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING
icmperr:
pbuf_free(p);
ICMP_STATS_INC(icmp.err);
MIB2_STATS_INC(mib2.icmpinerrors);
return;
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */
}
/**
* Send an icmp 'destination unreachable' packet, called from ip_input() if
* the transport layer protocol is unknown and from udp_input() if the local
* port is not bound.
*
* @param p the input packet for which the 'unreachable' should be sent,
* p->payload pointing to the IP header
* @param t type of the 'unreachable' packet
*/
void
icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t)
{
MIB2_STATS_INC(mib2.icmpoutdestunreachs);
icmp_send_response(p, ICMP_DUR, t);
}
#if IP_FORWARD || IP_REASSEMBLY
/**
* Send a 'time exceeded' packet, called from ip_forward() if TTL is 0.
*
* @param p the input packet for which the 'time exceeded' should be sent,
* p->payload pointing to the IP header
* @param t type of the 'time exceeded' packet
*/
void
icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t)
{
MIB2_STATS_INC(mib2.icmpouttimeexcds);
icmp_send_response(p, ICMP_TE, t);
}
#endif /* IP_FORWARD || IP_REASSEMBLY */
/**
* Send an icmp packet in response to an incoming packet.
*
* @param p the input packet for which the 'unreachable' should be sent,
* p->payload pointing to the IP header
* @param type Type of the ICMP header
* @param code Code of the ICMP header
*/
static void
icmp_send_response(struct pbuf *p, u8_t type, u8_t code)
{
struct pbuf *q;
struct ip_hdr *iphdr;
/* we can use the echo header here */
struct icmp_echo_hdr *icmphdr;
ip4_addr_t iphdr_src;
struct netif *netif;
/* increase number of messages attempted to send */
MIB2_STATS_INC(mib2.icmpoutmsgs);
/* ICMP header + IP header + 8 bytes of data */
q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE,
PBUF_RAM);
if (q == NULL) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n"));
MIB2_STATS_INC(mib2.icmpouterrors);
return;
}
LWIP_ASSERT("check that first pbuf can hold icmp message",
(q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE)));
iphdr = (struct ip_hdr *)p->payload;
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from "));
ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src);
LWIP_DEBUGF(ICMP_DEBUG, (" to "));
ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest);
LWIP_DEBUGF(ICMP_DEBUG, ("\n"));
icmphdr = (struct icmp_echo_hdr *)q->payload;
icmphdr->type = type;
icmphdr->code = code;
icmphdr->id = 0;
icmphdr->seqno = 0;
/* copy fields from original packet */
SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload,
IP_HLEN + ICMP_DEST_UNREACH_DATASIZE);
ip4_addr_copy(iphdr_src, iphdr->src);
#ifdef LWIP_HOOK_IP4_ROUTE_SRC
{
ip4_addr_t iphdr_dst;
ip4_addr_copy(iphdr_dst, iphdr->dest);
netif = ip4_route_src(&iphdr_dst, &iphdr_src);
}
#else
netif = ip4_route(&iphdr_src);
#endif
if (netif != NULL) {
/* calculate checksum */
icmphdr->chksum = 0;
#if CHECKSUM_GEN_ICMP
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) {
icmphdr->chksum = inet_chksum(icmphdr, q->len);
}
#endif
ICMP_STATS_INC(icmp.xmit);
ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif);
}
pbuf_free(q);
}
#endif /* LWIP_IPV4 && LWIP_ICMP */

View File

@ -1,801 +0,0 @@
/**
* @file
* IGMP - Internet Group Management Protocol
*
* @defgroup igmp IGMP
* @ingroup ip4
* To be called from TCPIP thread
*/
/*
* Copyright (c) 2002 CITEL Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file is a contribution to the lwIP TCP/IP stack.
* The Swedish Institute of Computer Science and Adam Dunkels
* are specifically granted permission to redistribute this
* source code.
*/
/*-------------------------------------------------------------
Note 1)
Although the RFC requires V1 AND V2 capability,
we will only support V2 since V1 is now very old (August 1989).
V1 can be added later if required;
a debug print and a statistic have been implemented to
show this up.
-------------------------------------------------------------
-------------------------------------------------------------
Note 2)
A query for a specific group address (as opposed to ALLHOSTS)
has now been implemented, as I am unsure whether it is required;
a debug print and a statistic have been implemented to
show this up.
-------------------------------------------------------------
-------------------------------------------------------------
Note 3)
The router alert option (RFC 2113) is implemented in outgoing packets,
but is not rigorously checked on incoming packets
-------------------------------------------------------------
Steve Reynolds
------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
* RFC 988 - Host extensions for IP multicasting - V0
* RFC 1054 - Host extensions for IP multicasting -
* RFC 1112 - Host extensions for IP multicasting - V1
* RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard)
* RFC 3376 - Internet Group Management Protocol, Version 3 - V3
* RFC 4604 - Using Internet Group Management Protocol Version 3... - V3+
* RFC 2113 - IP Router Alert Option -
*----------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
* Includes
*----------------------------------------------------------------------------*/
#include "lwip/opt.h"
#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */
#include "lwip/igmp.h"
#include "lwip/debug.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/ip.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/stats.h"
#include "lwip/prot/igmp.h"
#include <string.h>
static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr);
static err_t igmp_remove_group(struct netif *netif, struct igmp_group *group);
static void igmp_timeout(struct netif *netif, struct igmp_group *group);
static void igmp_start_timer(struct igmp_group *group, u8_t max_time);
static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp);
static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif);
static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type);
static ip4_addr_t allsystems;
static ip4_addr_t allrouters;
/**
* Initialize the IGMP module
*/
void
igmp_init(void)
{
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n"));
IP4_ADDR(&allsystems, 224, 0, 0, 1);
IP4_ADDR(&allrouters, 224, 0, 0, 2);
}
/**
* Start IGMP processing on interface
*
* @param netif network interface on which to start IGMP processing
*/
err_t
igmp_start(struct netif *netif)
{
struct igmp_group *group;
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void *)netif));
group = igmp_lookup_group(netif, &allsystems);
if (group != NULL) {
group->group_state = IGMP_GROUP_IDLE_MEMBER;
group->use++;
/* Allow the igmp messages at the MAC level */
if (netif->igmp_mac_filter != NULL) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD "));
ip4_addr_debug_print_val(IGMP_DEBUG, allsystems);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER);
}
return ERR_OK;
}
return ERR_MEM;
}
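/* Sketch of a netif->igmp_mac_filter callback as invoked above. It maps the
* group to an ethernet multicast address (01:00:5e plus the low 23 bits of
* the group IP); my_mac_filter_set is a hypothetical driver function.
*/
#if 0 /* example only */
extern void my_mac_filter_set(struct netif *netif, const u8_t *mac, int add);

static err_t
my_igmp_mac_filter(struct netif *netif, const ip4_addr_t *group,
                   enum netif_mac_filter_action action)
{
  u32_t g = lwip_ntohl(ip4_addr_get_u32(group));
  u8_t mac[6] = { 0x01, 0x00, 0x5e,
    (u8_t)((g >> 16) & 0x7f), (u8_t)(g >> 8), (u8_t)g };
  /* program or clear the hardware multicast filter entry */
  my_mac_filter_set(netif, mac, (action == NETIF_ADD_MAC_FILTER));
  return ERR_OK;
}
#endif /* example only */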
/**
* Stop IGMP processing on interface
*
* @param netif network interface on which to stop IGMP processing
*/
err_t
igmp_stop(struct netif *netif)
{
struct igmp_group *group = netif_igmp_data(netif);
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL);
while (group != NULL) {
struct igmp_group *next = group->next; /* avoid use-after-free below */
/* disable the group at the MAC level */
if (netif->igmp_mac_filter != NULL) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL "));
ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER);
}
/* free group */
memp_free(MEMP_IGMP_GROUP, group);
/* move to "next" */
group = next;
}
return ERR_OK;
}
/**
* Report IGMP memberships for this interface
*
* @param netif network interface on which to report IGMP memberships
*/
void
igmp_report_groups(struct netif *netif)
{
struct igmp_group *group = netif_igmp_data(netif);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void *)netif));
/* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
if (group != NULL) {
group = group->next;
}
while (group != NULL) {
igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
group = group->next;
}
}
/**
* Search for a group in the netif's igmp group list
*
* @param ifp the network interface in which to search
* @param addr the group ip address to search for
* @return a struct igmp_group* if the group has been found,
* NULL if the group wasn't found.
*/
struct igmp_group *
igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr)
{
struct igmp_group *group = netif_igmp_data(ifp);
while (group != NULL) {
if (ip4_addr_cmp(&(group->group_address), addr)) {
return group;
}
group = group->next;
}
/* to be clearer, we return NULL here instead of
* 'group' (which is also NULL at this point).
*/
return NULL;
}
/**
* Search for a specific igmp group and create a new one if not found.
*
* @param ifp the network interface in which to search
* @param addr the group ip address to search for
* @return a struct igmp_group*,
* NULL on memory error.
*/
static struct igmp_group *
igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr)
{
struct igmp_group *group;
struct igmp_group *list_head = netif_igmp_data(ifp);
/* Search if the group already exists */
group = igmp_lookfor_group(ifp, addr);
if (group != NULL) {
/* Group already exists. */
return group;
}
/* Group doesn't exist yet, create a new one */
group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP);
if (group != NULL) {
ip4_addr_set(&(group->group_address), addr);
group->timer = 0; /* Not running */
group->group_state = IGMP_GROUP_NON_MEMBER;
group->last_reporter_flag = 0;
group->use = 0;
/* Ensure allsystems group is always first in list */
if (list_head == NULL) {
/* this is the first entry in linked list */
LWIP_ASSERT("igmp_lookup_group: first group must be allsystems",
(ip4_addr_cmp(addr, &allsystems) != 0));
group->next = NULL;
netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group);
} else {
/* append _after_ first entry */
LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems",
(ip4_addr_cmp(addr, &allsystems) == 0));
group->next = list_head->next;
list_head->next = group;
}
}
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group ? "" : "impossible to ")));
ip4_addr_debug_print(IGMP_DEBUG, addr);
LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)ifp));
return group;
}
/**
* Remove a group from netif's igmp group list, but don't free it yet
*
* @param group the group to remove from the netif's igmp group list
* @return ERR_OK if group was removed from the list, an err_t otherwise
*/
static err_t
igmp_remove_group(struct netif *netif, struct igmp_group *group)
{
err_t err = ERR_OK;
struct igmp_group *tmp_group;
/* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) {
if (tmp_group->next == group) {
tmp_group->next = group->next;
break;
}
}
/* Group not found in netif's igmp group list */
if (tmp_group == NULL) {
err = ERR_ARG;
}
return err;
}
/**
* Called from ip_input() if a new IGMP packet is received.
*
* @param p received igmp packet, p->payload pointing to the igmp header
* @param inp network interface on which the packet was received
* @param dest destination ip address of the igmp packet
*/
void
igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest)
{
struct igmp_msg *igmp;
struct igmp_group *group;
struct igmp_group *groupref;
IGMP_STATS_INC(igmp.recv);
/* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */
if (p->len < IGMP_MINLEN) {
pbuf_free(p);
IGMP_STATS_INC(igmp.lenerr);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n"));
return;
}
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from "));
ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->src);
LWIP_DEBUGF(IGMP_DEBUG, (" to address "));
ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->dest);
LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)inp));
/* Now calculate and check the checksum */
igmp = (struct igmp_msg *)p->payload;
if (inet_chksum(igmp, p->len)) {
pbuf_free(p);
IGMP_STATS_INC(igmp.chkerr);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n"));
return;
}
/* Packet is ok so find an existing group */
group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */
/* If no matching group was found, the frame is not for us */
if (!group) {
pbuf_free(p);
IGMP_STATS_INC(igmp.drop);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n"));
return;
}
/* NOW ACT ON THE INCOMING MESSAGE TYPE... */
switch (igmp->igmp_msgtype) {
case IGMP_MEMB_QUERY:
/* IGMP_MEMB_QUERY to the "all systems" address ? */
if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) {
/* THIS IS THE GENERAL QUERY */
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
if (igmp->igmp_maxresp == 0) {
IGMP_STATS_INC(igmp.rx_v1);
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n"));
igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR;
} else {
IGMP_STATS_INC(igmp.rx_general);
}
groupref = netif_igmp_data(inp);
/* Do not send messages on the all systems group address! */
/* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
if (groupref != NULL) {
groupref = groupref->next;
}
while (groupref) {
igmp_delaying_member(groupref, igmp->igmp_maxresp);
groupref = groupref->next;
}
} else {
/* IGMP_MEMB_QUERY to a specific group ? */
if (!ip4_addr_isany(&igmp->igmp_group_address)) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group "));
ip4_addr_debug_print_val(IGMP_DEBUG, igmp->igmp_group_address);
if (ip4_addr_cmp(dest, &allsystems)) {
ip4_addr_t groupaddr;
LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
/* we first need to re-look for the group since we used dest last time */
ip4_addr_copy(groupaddr, igmp->igmp_group_address);
group = igmp_lookfor_group(inp, &groupaddr);
} else {
LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
}
if (group != NULL) {
IGMP_STATS_INC(igmp.rx_group);
igmp_delaying_member(group, igmp->igmp_maxresp);
} else {
IGMP_STATS_INC(igmp.drop);
}
} else {
IGMP_STATS_INC(igmp.proterr);
}
}
break;
case IGMP_V2_MEMB_REPORT:
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n"));
IGMP_STATS_INC(igmp.rx_report);
if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) {
/* This is on a specific group we have already looked up */
group->timer = 0; /* stopped */
group->group_state = IGMP_GROUP_IDLE_MEMBER;
group->last_reporter_flag = 0;
}
break;
default:
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n",
igmp->igmp_msgtype, group->group_state, (void *)&group, (void *)inp));
IGMP_STATS_INC(igmp.proterr);
break;
}
pbuf_free(p);
return;
}
/**
* @ingroup igmp
 * Join a group on one network interface, or on all IGMP-enabled interfaces if ifaddr is IP4_ADDR_ANY.
*
* @param ifaddr ip address of the network interface which should join a new group
* @param groupaddr the ip address of the group which to join
* @return ERR_OK if group was joined on the netif(s), an err_t otherwise
*/
err_t
igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
LWIP_ASSERT_CORE_LOCKED();
/* make sure it is multicast address */
LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* loop through netif's */
NETIF_FOREACH(netif) {
/* Should we join this interface ? */
if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
err = igmp_joingroup_netif(netif, groupaddr);
if (err != ERR_OK) {
/* Return an error even if some network interfaces are joined */
/** @todo undo any other netif already joined */
return err;
}
}
}
return err;
}
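/* A minimal usage sketch (illustrative only, not part of the original file,
 * kept out of the build with #if 0): joining an administratively scoped group
 * on every IGMP-enabled netif. The group address 239.1.2.3 and the function
 * name are arbitrary examples; like all raw-API calls, this is assumed to run
 * with the lwIP core locked. */
#if 0
static void example_igmp_join(void)
{
  ip4_addr_t group;
  IP4_ADDR(&group, 239, 1, 2, 3);   /* example multicast group */
  /* IP4_ADDR_ANY4 selects all netifs that have NETIF_FLAG_IGMP set */
  if (igmp_joingroup(IP4_ADDR_ANY4, &group) != ERR_OK) {
    LWIP_DEBUGF(IGMP_DEBUG, ("example_igmp_join: join failed\n"));
  }
}
#endif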
/**
* @ingroup igmp
* Join a group on one network interface.
*
* @param netif the network interface which should join a new group
* @param groupaddr the ip address of the group which to join
* @return ERR_OK if group was joined on the netif, an err_t otherwise
*/
err_t
igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr)
{
struct igmp_group *group;
LWIP_ASSERT_CORE_LOCKED();
/* make sure it is multicast address */
LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* make sure it is an igmp-enabled netif */
LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;);
/* find group or create a new one if not found */
group = igmp_lookup_group(netif, groupaddr);
if (group != NULL) {
/* This should create a new group, check the state to make sure */
if (group->group_state != IGMP_GROUP_NON_MEMBER) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n"));
} else {
/* OK - it was new group */
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
/* If first use of the group, allow the group at the MAC level */
if ((group->use == 0) && (netif->igmp_mac_filter != NULL)) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER);
}
IGMP_STATS_INC(igmp.tx_join);
igmp_send(netif, group, IGMP_V2_MEMB_REPORT);
igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
/* Need to work out where this timer comes from */
group->group_state = IGMP_GROUP_DELAYING_MEMBER;
}
/* Increment group use */
group->use++;
/* Join on this interface */
return ERR_OK;
} else {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n"));
return ERR_MEM;
}
}
/**
* @ingroup igmp
 * Leave a group on one network interface, or on all IGMP-enabled interfaces if ifaddr is IP4_ADDR_ANY.
*
* @param ifaddr ip address of the network interface which should leave a group
* @param groupaddr the ip address of the group which to leave
* @return ERR_OK if group was left on the netif(s), an err_t otherwise
*/
err_t
igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
LWIP_ASSERT_CORE_LOCKED();
/* make sure it is multicast address */
LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* loop through netif's */
NETIF_FOREACH(netif) {
/* Should we leave this interface ? */
if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
err_t res = igmp_leavegroup_netif(netif, groupaddr);
if (err != ERR_OK) {
/* Store this result if we have not yet gotten a success */
err = res;
}
}
}
return err;
}
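/* The matching teardown, again only an illustrative sketch under #if 0:
 * leaving the same example group on all interfaces that joined it. */
#if 0
static void example_igmp_leave(void)
{
  ip4_addr_t group;
  IP4_ADDR(&group, 239, 1, 2, 3);   /* must match the joined group */
  if (igmp_leavegroup(IP4_ADDR_ANY4, &group) != ERR_OK) {
    LWIP_DEBUGF(IGMP_DEBUG, ("example_igmp_leave: leave failed\n"));
  }
}
#endif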
/**
* @ingroup igmp
* Leave a group on one network interface.
*
* @param netif the network interface which should leave a group
* @param groupaddr the ip address of the group which to leave
* @return ERR_OK if group was left on the netif, an err_t otherwise
*/
err_t
igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr)
{
struct igmp_group *group;
LWIP_ASSERT_CORE_LOCKED();
/* make sure it is multicast address */
LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
/* make sure it is an igmp-enabled netif */
LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;);
/* find group */
group = igmp_lookfor_group(netif, groupaddr);
if (group != NULL) {
/* Only send a leave if the flag is set according to the state diagram */
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
/* If there is no other use of the group */
if (group->use <= 1) {
/* Remove the group from the list */
igmp_remove_group(netif, group);
/* If we are the last reporter for this group */
if (group->last_reporter_flag) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n"));
IGMP_STATS_INC(igmp.tx_leave);
igmp_send(netif, group, IGMP_LEAVE_GROUP);
}
/* Disable the group at the MAC level */
if (netif->igmp_mac_filter != NULL) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL "));
ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER);
}
/* Free group struct */
memp_free(MEMP_IGMP_GROUP, group);
} else {
/* Decrement group use */
group->use--;
}
return ERR_OK;
} else {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n"));
return ERR_VAL;
}
}
/**
* The igmp timer function (both for NO_SYS=1 and =0)
* Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default).
*/
void
igmp_tmr(void)
{
struct netif *netif;
NETIF_FOREACH(netif) {
struct igmp_group *group = netif_igmp_data(netif);
while (group != NULL) {
if (group->timer > 0) {
group->timer--;
if (group->timer == 0) {
igmp_timeout(netif, group);
}
}
group = group->next;
}
}
}
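/* With NO_SYS==0 the sys_timeouts module already schedules igmp_tmr(); with
 * NO_SYS==1 the application has to call it itself. A rough polling sketch
 * (illustrative only, #if 0'd out), assuming a working sys_now() clock: */
#if 0
static void example_poll_igmp_timer(void)
{
  static u32_t last;
  if ((u32_t)(sys_now() - last) >= IGMP_TMR_INTERVAL) {
    last = sys_now();
    igmp_tmr();           /* decrements all group timers by one tick */
  }
}
#endif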
/**
* Called if a timeout for one group is reached.
* Sends a report for this group.
*
 * @param netif the netif the group belongs to
 * @param group an igmp_group for which a timeout is reached
*/
static void
igmp_timeout(struct netif *netif, struct igmp_group *group)
{
/* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group
(unless it is the allsystems group) */
if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) &&
(!(ip4_addr_cmp(&(group->group_address), &allsystems)))) {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address "));
ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address);
LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)netif));
group->group_state = IGMP_GROUP_IDLE_MEMBER;
IGMP_STATS_INC(igmp.tx_report);
igmp_send(netif, group, IGMP_V2_MEMB_REPORT);
}
}
/**
* Start a timer for an igmp group
*
* @param group the igmp_group for which to start a timer
* @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with
* every call to igmp_tmr())
*/
static void
igmp_start_timer(struct igmp_group *group, u8_t max_time)
{
#ifdef LWIP_RAND
group->timer = (u16_t)(max_time > 2 ? (LWIP_RAND() % max_time) : 1);
#else /* LWIP_RAND */
/* ATTENTION: use this only if absolutely necessary! */
group->timer = max_time / 2;
#endif /* LWIP_RAND */
if (group->timer == 0) {
group->timer = 1;
}
}
/**
 * Delay the membership report for a group if necessary
 *
 * @param group the igmp_group for which to delay the membership report
 * @param maxresp query delay
*/
static void
igmp_delaying_member(struct igmp_group *group, u8_t maxresp)
{
if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) ||
((group->group_state == IGMP_GROUP_DELAYING_MEMBER) &&
((group->timer == 0) || (maxresp < group->timer)))) {
igmp_start_timer(group, maxresp);
group->group_state = IGMP_GROUP_DELAYING_MEMBER;
}
}
/**
* Sends an IP packet on a network interface. This function constructs the IP header
* and calculates the IP header checksum. If the source IP address is NULL,
* the IP address of the outgoing network interface is filled in as source address.
*
* @param p the packet to send (p->payload points to the data, e.g. next
protocol header; if dest == LWIP_IP_HDRINCL, p already includes an
IP header and p->payload points to that IP header)
* @param src the source IP address to send from (if src == IP4_ADDR_ANY, the
* IP address of the netif used to send is used as source address)
* @param dest the destination IP address to send the packet to
* @param netif the netif on which to send this packet
* @return ERR_OK if the packet was sent OK
* ERR_BUF if p doesn't have enough space for IP/LINK headers
* returns errors returned by netif->output
*/
static err_t
igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif)
{
/* This is the "router alert" option */
u16_t ra[2];
ra[0] = PP_HTONS(ROUTER_ALERT);
ra[1] = 0x0000; /* Router shall examine packet */
IGMP_STATS_INC(igmp.xmit);
return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN);
}
/**
* Send an igmp packet to a specific group.
*
 * @param netif the netif on which to send the packet
 * @param group the group to which to send the packet
* @param type the type of igmp packet to send
*/
static void
igmp_send(struct netif *netif, struct igmp_group *group, u8_t type)
{
struct pbuf *p = NULL;
struct igmp_msg *igmp = NULL;
ip4_addr_t src = *IP4_ADDR_ANY4;
ip4_addr_t *dest = NULL;
/* IP header + "router alert" option + IGMP header */
p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM);
if (p) {
igmp = (struct igmp_msg *)p->payload;
LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg",
(p->len >= sizeof(struct igmp_msg)));
ip4_addr_copy(src, *netif_ip4_addr(netif));
if (type == IGMP_V2_MEMB_REPORT) {
dest = &(group->group_address);
ip4_addr_copy(igmp->igmp_group_address, group->group_address);
group->last_reporter_flag = 1; /* Remember we were the last to report */
} else {
if (type == IGMP_LEAVE_GROUP) {
dest = &allrouters;
ip4_addr_copy(igmp->igmp_group_address, group->group_address);
}
}
if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) {
igmp->igmp_msgtype = type;
igmp->igmp_maxresp = 0;
igmp->igmp_checksum = 0;
igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN);
igmp_ip_output_if(p, &src, dest, netif);
}
pbuf_free(p);
} else {
LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n"));
IGMP_STATS_INC(igmp.memerr);
}
}
#endif /* LWIP_IPV4 && LWIP_IGMP */


@ -1,321 +0,0 @@
/**
* @file
* This is the IPv4 address tools implementation.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_IPV4
#include "lwip/ip_addr.h"
#include "lwip/netif.h"
/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */
const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY);
const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST);
/**
* Determine if an address is a broadcast address on a network interface
*
* @param addr address to be checked
* @param netif the network interface against which the address is checked
* @return returns non-zero if the address is a broadcast address
*/
u8_t
ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif)
{
ip4_addr_t ipaddr;
ip4_addr_set_u32(&ipaddr, addr);
/* all ones (broadcast) or all zeroes (old skool broadcast) */
if ((~addr == IPADDR_ANY) ||
(addr == IPADDR_ANY)) {
return 1;
/* no broadcast support on this network interface? */
} else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) {
/* the given address cannot be a broadcast address
* nor can we check against any broadcast addresses */
return 0;
/* address matches network interface address exactly? => no broadcast */
} else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) {
return 0;
/* on the same (sub) network... */
} else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif))
/* ...and host identifier bits are all ones? =>... */
&& ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) ==
(IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) {
/* => network broadcast address */
return 1;
} else {
return 0;
}
}
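/* Callers normally reach this through the ip4_addr_isbroadcast() macro, which
 * extracts the u32_t for them. A small sketch (#if 0, not built): */
#if 0
static int example_is_broadcast(const ip4_addr_t *dst, const struct netif *netif)
{
  /* non-zero for 255.255.255.255, 0.0.0.0 and the netif's subnet broadcast */
  return ip4_addr_isbroadcast(dst, netif);
}
#endif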
/** Checks if a netmask is valid (starting with ones, then only zeros)
*
* @param netmask the IPv4 netmask to check (in network byte order!)
* @return 1 if the netmask is valid, 0 if it is not
*/
u8_t
ip4_addr_netmask_valid(u32_t netmask)
{
u32_t mask;
u32_t nm_hostorder = lwip_htonl(netmask);
/* first, check for the first zero */
for (mask = 1UL << 31 ; mask != 0; mask >>= 1) {
if ((nm_hostorder & mask) == 0) {
break;
}
}
/* then check that there are no more one bits */
for (; mask != 0; mask >>= 1) {
if ((nm_hostorder & mask) != 0) {
/* there is a one after the first zero -> invalid */
return 0;
}
}
/* no one bits after the first zero -> valid */
return 1;
}
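/* What "valid" means in practice, as a sketch (#if 0, not built): contiguous
 * masks pass, masks with a hole fail. PP_HTONL is used because the argument
 * must be in network byte order. */
#if 0
static void example_netmask_checks(void)
{
  LWIP_ASSERT("255.255.255.0 is valid",
              ip4_addr_netmask_valid(PP_HTONL(0xFFFFFF00UL)) == 1);
  LWIP_ASSERT("255.0.255.0 has a hole",
              ip4_addr_netmask_valid(PP_HTONL(0xFF00FF00UL)) == 0);
}
#endif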
/**
* Ascii internet address interpretation routine.
* The value returned is in network order.
*
* @param cp IP address in ascii representation (e.g. "127.0.0.1")
* @return ip address in network order
*/
u32_t
ipaddr_addr(const char *cp)
{
ip4_addr_t val;
if (ip4addr_aton(cp, &val)) {
return ip4_addr_get_u32(&val);
}
return (IPADDR_NONE);
}
/**
* Check whether "cp" is a valid ascii representation
* of an Internet address and convert to a binary address.
* Returns 1 if the address is valid, 0 if not.
* This replaces inet_addr, the return value from which
* cannot distinguish between failure and a local broadcast address.
*
* @param cp IP address in ascii representation (e.g. "127.0.0.1")
* @param addr pointer to which to save the ip address in network order
* @return 1 if cp could be converted to addr, 0 on failure
*/
int
ip4addr_aton(const char *cp, ip4_addr_t *addr)
{
u32_t val;
u8_t base;
char c;
u32_t parts[4];
u32_t *pp = parts;
c = *cp;
for (;;) {
/*
* Collect number up to ``.''.
* Values are specified as for C:
* 0x=hex, 0=octal, 1-9=decimal.
*/
if (!lwip_isdigit(c)) {
return 0;
}
val = 0;
base = 10;
if (c == '0') {
c = *++cp;
if (c == 'x' || c == 'X') {
base = 16;
c = *++cp;
} else {
base = 8;
}
}
for (;;) {
if (lwip_isdigit(c)) {
val = (val * base) + (u32_t)(c - '0');
c = *++cp;
} else if (base == 16 && lwip_isxdigit(c)) {
val = (val << 4) | (u32_t)(c + 10 - (lwip_islower(c) ? 'a' : 'A'));
c = *++cp;
} else {
break;
}
}
if (c == '.') {
/*
* Internet format:
* a.b.c.d
* a.b.c (with c treated as 16 bits)
* a.b (with b treated as 24 bits)
*/
if (pp >= parts + 3) {
return 0;
}
*pp++ = val;
c = *++cp;
} else {
break;
}
}
/*
* Check for trailing characters.
*/
if (c != '\0' && !lwip_isspace(c)) {
return 0;
}
/*
* Concoct the address according to
* the number of parts specified.
*/
switch (pp - parts + 1) {
case 0:
return 0; /* initial nondigit */
case 1: /* a -- 32 bits */
break;
case 2: /* a.b -- 8.24 bits */
if (val > 0xffffffUL) {
return 0;
}
if (parts[0] > 0xff) {
return 0;
}
val |= parts[0] << 24;
break;
case 3: /* a.b.c -- 8.8.16 bits */
if (val > 0xffff) {
return 0;
}
if ((parts[0] > 0xff) || (parts[1] > 0xff)) {
return 0;
}
val |= (parts[0] << 24) | (parts[1] << 16);
break;
case 4: /* a.b.c.d -- 8.8.8.8 bits */
if (val > 0xff) {
return 0;
}
if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) {
return 0;
}
val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8);
break;
default:
LWIP_ASSERT("unhandled", 0);
break;
}
if (addr) {
ip4_addr_set_u32(addr, lwip_htonl(val));
}
return 1;
}
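/* Typical use, sketched under #if 0: the return value, not the output
 * address, signals success, so even 255.255.255.255 parses unambiguously. */
#if 0
static void example_parse_address(void)
{
  ip4_addr_t addr;
  if (ip4addr_aton("192.168.1.10", &addr)) {
    /* addr now holds 192.168.1.10 in network byte order */
  }
}
#endif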
/**
* Convert numeric IP address into decimal dotted ASCII representation.
* returns ptr to static buffer; not reentrant!
*
* @param addr ip address in network order to convert
* @return pointer to a global static (!) buffer that holds the ASCII
* representation of addr
*/
char *
ip4addr_ntoa(const ip4_addr_t *addr)
{
static char str[IP4ADDR_STRLEN_MAX];
return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX);
}
/**
* Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used.
*
* @param addr ip address in network order to convert
* @param buf target buffer where the string is stored
* @param buflen length of buf
* @return either pointer to buf which now holds the ASCII
* representation of addr or NULL if buf was too small
*/
char *
ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen)
{
u32_t s_addr;
char inv[3];
char *rp;
u8_t *ap;
u8_t rem;
u8_t n;
u8_t i;
int len = 0;
s_addr = ip4_addr_get_u32(addr);
rp = buf;
ap = (u8_t *)&s_addr;
for (n = 0; n < 4; n++) {
i = 0;
do {
rem = *ap % (u8_t)10;
*ap /= (u8_t)10;
inv[i++] = (char)('0' + rem);
} while (*ap);
while (i--) {
if (len++ >= buflen) {
return NULL;
}
*rp++ = inv[i];
}
if (len++ >= buflen) {
return NULL;
}
*rp++ = '.';
ap++;
}
*--rp = 0;
return buf;
}
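/* A sketch (#if 0) of the reentrant variant with a caller-owned buffer;
 * IP4ADDR_STRLEN_MAX is large enough for any dotted quad plus NUL. */
#if 0
static void example_format_address(const ip4_addr_t *addr)
{
  char buf[IP4ADDR_STRLEN_MAX];
  if (ip4addr_ntoa_r(addr, buf, sizeof(buf)) != NULL) {
    /* buf now holds e.g. "192.168.1.10" */
  }
}
#endif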
#endif /* LWIP_IPV4 */


@ -1,894 +0,0 @@
/**
* @file
* This is the IPv4 packet segmentation and reassembly implementation.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Jani Monoses <jani@iv.ro>
* Simon Goldschmidt
* original reassembly code by Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_IPV4
#include "lwip/ip4_frag.h"
#include "lwip/def.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/stats.h"
#include "lwip/icmp.h"
#include <string.h>
#if IP_REASSEMBLY
/**
* The IP reassembly code currently has the following limitations:
* - IP header options are not supported
* - fragments must not overlap (e.g. due to different routes),
* currently, overlapping or duplicate fragments are thrown away
* if IP_REASS_CHECK_OVERLAP=1 (the default)!
*
* @todo: work with IP header options
*/
/** Set this to 0 to turn off checking the fragments for overlapping
 * regions. The code gets a little smaller. Only use this if you know that
 * overlapping cannot occur on your network! */
#ifndef IP_REASS_CHECK_OVERLAP
#define IP_REASS_CHECK_OVERLAP 1
#endif /* IP_REASS_CHECK_OVERLAP */
/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
* full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
* Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
 * is set to 1, so that only one datagram can be reassembled at a time. */
#ifndef IP_REASS_FREE_OLDEST
#define IP_REASS_FREE_OLDEST 1
#endif /* IP_REASS_FREE_OLDEST */
#define IP_REASS_FLAG_LASTFRAG 0x01
#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1
#define IP_REASS_VALIDATE_PBUF_QUEUED 0
#define IP_REASS_VALIDATE_PBUF_DROPPED -1
/** This is a helper struct which holds the starting
* offset and the ending offset of this fragment to
* easily chain the fragments.
* It has the same packing requirements as the IP header, since it replaces
* the IP header in memory in incoming fragments (after copying it) to keep
* track of the various fragments. (-> If the IP header doesn't need packing,
 * this struct doesn't need packing either.)
*/
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct ip_reass_helper {
PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
PACK_STRUCT_FIELD(u16_t start);
PACK_STRUCT_FIELD(u16_t end);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/epstruct.h"
#endif
#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \
(ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \
ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \
IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0
/* global variables */
static struct ip_reassdata *reassdatagrams;
static u16_t ip_reass_pbufcount;
/* function prototypes */
static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
/**
* Reassembly timer base function
* for both NO_SYS == 0 and 1 (!).
*
* Should be called every 1000 msec (defined by IP_TMR_INTERVAL).
*/
void
ip_reass_tmr(void)
{
struct ip_reassdata *r, *prev = NULL;
r = reassdatagrams;
while (r != NULL) {
/* Decrement the timer. Once it reaches 0,
* clean up the incomplete fragment assembly */
if (r->timer > 0) {
r->timer--;
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer));
prev = r;
r = r->next;
} else {
/* reassembly timed out */
struct ip_reassdata *tmp;
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n"));
tmp = r;
/* get the next pointer before freeing */
r = r->next;
/* free the helper struct and all enqueued pbufs */
ip_reass_free_complete_datagram(tmp, prev);
}
}
}
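/* As with the other stack timers: with NO_SYS==0 sys_timeouts drives this,
 * with NO_SYS==1 the application calls it. A polling sketch (#if 0) using
 * IP_TMR_INTERVAL (1000 ms) and an assumed sys_now() clock: */
#if 0
static void example_poll_reass_timer(void)
{
  static u32_t last;
  if ((u32_t)(sys_now() - last) >= IP_TMR_INTERVAL) {
    last = sys_now();
    ip_reass_tmr();       /* ages and eventually frees stale fragments */
  }
}
#endif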
/**
* Free a datagram (struct ip_reassdata) and all its pbufs.
* Updates the total count of enqueued pbufs (ip_reass_pbufcount),
* SNMP counters and sends an ICMP time exceeded packet.
*
* @param ipr datagram to free
* @param prev the previous datagram in the linked list
* @return the number of pbufs freed
*/
static int
ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
u16_t pbufs_freed = 0;
u16_t clen;
struct pbuf *p;
struct ip_reass_helper *iprh;
LWIP_ASSERT("prev != ipr", prev != ipr);
if (prev != NULL) {
LWIP_ASSERT("prev->next == ipr", prev->next == ipr);
}
MIB2_STATS_INC(mib2.ipreasmfails);
#if LWIP_ICMP
iprh = (struct ip_reass_helper *)ipr->p->payload;
if (iprh->start == 0) {
/* The first fragment was received, send ICMP time exceeded. */
/* First, de-queue the first pbuf from ipr->p. */
p = ipr->p;
ipr->p = iprh->next_pbuf;
/* Then, copy the original header into it. */
SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN);
icmp_time_exceeded(p, ICMP_TE_FRAG);
clen = pbuf_clen(p);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed = (u16_t)(pbufs_freed + clen);
pbuf_free(p);
}
#endif /* LWIP_ICMP */
/* First, free all received pbufs. The individual pbufs need to be released
separately as they have not yet been chained */
p = ipr->p;
while (p != NULL) {
struct pbuf *pcur;
iprh = (struct ip_reass_helper *)p->payload;
pcur = p;
/* get the next pointer before freeing */
p = iprh->next_pbuf;
clen = pbuf_clen(pcur);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed = (u16_t)(pbufs_freed + clen);
pbuf_free(pcur);
}
/* Then, unchain the struct ip_reassdata from the list and free it. */
ip_reass_dequeue_datagram(ipr, prev);
LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed);
ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed);
return pbufs_freed;
}
#if IP_REASS_FREE_OLDEST
/**
* Free the oldest datagram to make room for enqueueing new fragments.
* The datagram 'fraghdr' belongs to is not freed!
*
* @param fraghdr IP header of the current fragment
* @param pbufs_needed number of pbufs needed to enqueue
* (used for freeing other datagrams if not enough space)
* @return the number of pbufs freed
*/
static int
ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed)
{
/* @todo Can't we simply remove the last datagram in the
* linked list behind reassdatagrams?
*/
struct ip_reassdata *r, *oldest, *prev, *oldest_prev;
int pbufs_freed = 0, pbufs_freed_current;
int other_datagrams;
/* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
* but don't free the datagram that 'fraghdr' belongs to! */
do {
oldest = NULL;
prev = NULL;
oldest_prev = NULL;
other_datagrams = 0;
r = reassdatagrams;
while (r != NULL) {
if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) {
/* Not the same datagram as fraghdr */
other_datagrams++;
if (oldest == NULL) {
oldest = r;
oldest_prev = prev;
} else if (r->timer <= oldest->timer) {
/* older than the previous oldest */
oldest = r;
oldest_prev = prev;
}
}
if (r->next != NULL) {
prev = r;
}
r = r->next;
}
if (oldest != NULL) {
pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev);
pbufs_freed += pbufs_freed_current;
}
} while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1));
return pbufs_freed;
}
#endif /* IP_REASS_FREE_OLDEST */
/**
* Enqueues a new fragment into the fragment queue
 * @param fraghdr points to the new fragment's IP header
* @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space)
* @return A pointer to the queue location into which the fragment was enqueued
*/
static struct ip_reassdata *
ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen)
{
struct ip_reassdata *ipr;
#if ! IP_REASS_FREE_OLDEST
LWIP_UNUSED_ARG(clen);
#endif
/* No matching previous fragment found, allocate a new reassdata struct */
ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) {
ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
}
if (ipr == NULL)
#endif /* IP_REASS_FREE_OLDEST */
{
IPFRAG_STATS_INC(ip_frag.memerr);
LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n"));
return NULL;
}
}
memset(ipr, 0, sizeof(struct ip_reassdata));
ipr->timer = IP_REASS_MAXAGE;
/* enqueue the new structure to the front of the list */
ipr->next = reassdatagrams;
reassdatagrams = ipr;
/* copy the ip header for later tests and input */
/* @todo: no ip options supported? */
SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN);
return ipr;
}
/**
* Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs.
* @param ipr points to the queue entry to dequeue
*/
static void
ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
/* dequeue the reass struct */
if (reassdatagrams == ipr) {
/* it was the first in the list */
reassdatagrams = ipr->next;
} else {
/* it wasn't the first, so it must have a valid 'prev' */
LWIP_ASSERT("sanity check linked list", prev != NULL);
prev->next = ipr->next;
}
/* now we can free the ip_reassdata struct */
memp_free(MEMP_REASSDATA, ipr);
}
/**
* Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list
 * will grow over time as new pbufs are received.
* Also checks that the datagram passes basic continuity checks (if the last
* fragment was received at least once).
* @param ipr points to the reassembly state
* @param new_p points to the pbuf for the current fragment
* @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet)
* @return see IP_REASS_VALIDATE_* defines
*/
static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last)
{
struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL;
struct pbuf *q;
u16_t offset, len;
u8_t hlen;
struct ip_hdr *fraghdr;
int valid = 1;
/* Extract length and fragment offset from current fragment */
fraghdr = (struct ip_hdr *)new_p->payload;
len = lwip_ntohs(IPH_LEN(fraghdr));
hlen = IPH_HL_BYTES(fraghdr);
if (hlen > len) {
/* invalid datagram */
return IP_REASS_VALIDATE_PBUF_DROPPED;
}
len = (u16_t)(len - hlen);
offset = IPH_OFFSET_BYTES(fraghdr);
/* overwrite the fragment's ip header from the pbuf with our helper struct,
* and setup the embedded helper structure. */
/* make sure the struct ip_reass_helper fits into the IP header */
LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
sizeof(struct ip_reass_helper) <= IP_HLEN);
iprh = (struct ip_reass_helper *)new_p->payload;
iprh->next_pbuf = NULL;
iprh->start = offset;
iprh->end = (u16_t)(offset + len);
if (iprh->end < offset) {
/* u16_t overflow, cannot handle this */
return IP_REASS_VALIDATE_PBUF_DROPPED;
}
/* Iterate through until we either get to the end of the list (append),
* or we find one with a larger offset (insert). */
for (q = ipr->p; q != NULL;) {
iprh_tmp = (struct ip_reass_helper *)q->payload;
if (iprh->start < iprh_tmp->start) {
/* the new pbuf should be inserted before this */
iprh->next_pbuf = q;
if (iprh_prev != NULL) {
/* not the fragment with the lowest offset */
#if IP_REASS_CHECK_OVERLAP
if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
/* fragment overlaps with previous or following, throw away */
return IP_REASS_VALIDATE_PBUF_DROPPED;
}
#endif /* IP_REASS_CHECK_OVERLAP */
iprh_prev->next_pbuf = new_p;
if (iprh_prev->end != iprh->start) {
/* There is a fragment missing between the current
* and the previous fragment */
valid = 0;
}
} else {
#if IP_REASS_CHECK_OVERLAP
if (iprh->end > iprh_tmp->start) {
/* fragment overlaps with following, throw away */
return IP_REASS_VALIDATE_PBUF_DROPPED;
}
#endif /* IP_REASS_CHECK_OVERLAP */
/* fragment with the lowest offset */
ipr->p = new_p;
}
break;
} else if (iprh->start == iprh_tmp->start) {
/* received the same datagram twice: no need to keep the datagram */
return IP_REASS_VALIDATE_PBUF_DROPPED;
#if IP_REASS_CHECK_OVERLAP
} else if (iprh->start < iprh_tmp->end) {
/* overlap: no need to keep the new datagram */
return IP_REASS_VALIDATE_PBUF_DROPPED;
#endif /* IP_REASS_CHECK_OVERLAP */
} else {
/* Check if the fragments received so far have no holes. */
if (iprh_prev != NULL) {
if (iprh_prev->end != iprh_tmp->start) {
/* There is a fragment missing between the current
* and the previous fragment */
valid = 0;
}
}
}
q = iprh_tmp->next_pbuf;
iprh_prev = iprh_tmp;
}
/* If q is NULL, then we made it to the end of the list. Determine what to do now */
if (q == NULL) {
if (iprh_prev != NULL) {
/* this is (for now), the fragment with the highest offset:
* chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
iprh_prev->next_pbuf = new_p;
if (iprh_prev->end != iprh->start) {
valid = 0;
}
} else {
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("no previous fragment, this must be the first fragment!",
ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
/* this is the first fragment we ever received for this ip datagram */
ipr->p = new_p;
}
}
/* At this point, the validation part begins: */
/* If we already received the last fragment */
if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) {
/* and had no holes so far */
if (valid) {
/* then check if the rest of the fragments is here */
/* Check if the queue starts with the first datagram */
if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) {
valid = 0;
} else {
/* and check that there are no holes after this datagram */
iprh_prev = iprh;
q = iprh->next_pbuf;
while (q != NULL) {
iprh = (struct ip_reass_helper *)q->payload;
if (iprh_prev->end != iprh->start) {
valid = 0;
break;
}
iprh_prev = iprh;
q = iprh->next_pbuf;
}
/* if still valid, all fragments have been received
 * (because the fragment with MF==0 has already arrived) */
if (valid) {
LWIP_ASSERT("sanity check", ipr->p != NULL);
LWIP_ASSERT("sanity check",
((struct ip_reass_helper *)ipr->p->payload) != iprh);
LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
iprh->next_pbuf == NULL);
}
}
}
/* If valid is 0 here, there are some fragments missing in the middle
* (since MF == 0 has already arrived). Such datagrams simply time out if
* no more fragments are received... */
return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED;
}
/* If we come here, not all fragments were received, yet! */
return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */
}
/**
* Reassembles incoming IP fragments into an IP datagram.
*
* @param p points to a pbuf chain of the fragment
 * @return NULL if reassembly is incomplete, the reassembled pbuf chain otherwise
*/
struct pbuf *
ip4_reass(struct pbuf *p)
{
struct pbuf *r;
struct ip_hdr *fraghdr;
struct ip_reassdata *ipr;
struct ip_reass_helper *iprh;
u16_t offset, len, clen;
u8_t hlen;
int valid;
int is_last;
IPFRAG_STATS_INC(ip_frag.recv);
MIB2_STATS_INC(mib2.ipreasmreqds);
fraghdr = (struct ip_hdr *)p->payload;
if (IPH_HL_BYTES(fraghdr) != IP_HLEN) {
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n"));
IPFRAG_STATS_INC(ip_frag.err);
goto nullreturn;
}
offset = IPH_OFFSET_BYTES(fraghdr);
len = lwip_ntohs(IPH_LEN(fraghdr));
hlen = IPH_HL_BYTES(fraghdr);
if (hlen > len) {
/* invalid datagram */
goto nullreturn;
}
len = (u16_t)(len - hlen);
/* Check if we are allowed to enqueue more datagrams. */
clen = pbuf_clen(p);
if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
#endif /* IP_REASS_FREE_OLDEST */
{
/* No datagram could be freed and still too many pbufs enqueued */
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
IPFRAG_STATS_INC(ip_frag.memerr);
/* @todo: send ICMP time exceeded here? */
/* drop this pbuf */
goto nullreturn;
}
}
/* Look for the datagram the fragment belongs to in the current datagram queue,
* remembering the previous in the queue for later dequeueing. */
for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
/* Check if the incoming fragment matches the one currently present
in the reassembly buffer. If so, we proceed with copying the
fragment into the buffer. */
if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n",
lwip_ntohs(IPH_ID(fraghdr))));
IPFRAG_STATS_INC(ip_frag.cachehit);
break;
}
}
if (ipr == NULL) {
/* Enqueue a new datagram into the datagram queue */
ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
/* Bail if unable to enqueue */
if (ipr == NULL) {
goto nullreturn;
}
} else {
if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) &&
((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
/* ipr->iphdr is not the header from the first fragment, but fraghdr is
* -> copy fraghdr into ipr->iphdr since we want to have the header
* of the first fragment (for ICMP time exceeded and later, for copying
* all options, if supported)*/
SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
}
}
/* At this point, we have either created a new entry or are pointing
* to an existing one */
/* check for 'no more fragments', and update queue entry*/
is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0;
if (is_last) {
u16_t datagram_len = (u16_t)(offset + len);
if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) {
/* u16_t overflow, cannot handle this */
goto nullreturn_ipr;
}
}
/* find the right place to insert this pbuf */
/* @todo: trim pbufs if fragments are overlapping */
valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last);
if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) {
goto nullreturn_ipr;
}
/* if we come here, the pbuf has been enqueued */
/* Track the number of pbufs currently 'in-flight', in order to limit
the number of fragments that may be enqueued at any one time
(overflow checked by testing against IP_REASS_MAX_PBUFS) */
ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen);
if (is_last) {
u16_t datagram_len = (u16_t)(offset + len);
ipr->datagram_len = datagram_len;
ipr->flags |= IP_REASS_FLAG_LASTFRAG;
LWIP_DEBUGF(IP_REASS_DEBUG,
("ip4_reass: last fragment seen, total len %"S16_F"\n",
ipr->datagram_len));
}
if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) {
struct ip_reassdata *ipr_prev;
/* the very last fragment (flag "more fragments" = 0) was received at least
 * once AND all fragments have been received */
u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN);
/* save the second pbuf before copying the header over the pointer */
r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf;
/* copy the original ip header back to the first pbuf */
fraghdr = (struct ip_hdr *)(ipr->p->payload);
SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
IPH_LEN_SET(fraghdr, lwip_htons(datagram_len));
IPH_OFFSET_SET(fraghdr, 0);
IPH_CHKSUM_SET(fraghdr, 0);
/* @todo: do we need to set/calculate the correct checksum? */
#if CHECKSUM_GEN_IP
IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
}
#endif /* CHECKSUM_GEN_IP */
p = ipr->p;
/* chain together the pbufs contained within the reass_data list. */
while (r != NULL) {
iprh = (struct ip_reass_helper *)r->payload;
/* hide the ip header for every succeeding fragment */
pbuf_remove_header(r, IP_HLEN);
pbuf_cat(p, r);
r = iprh->next_pbuf;
}
/* find the previous entry in the linked list */
if (ipr == reassdatagrams) {
ipr_prev = NULL;
} else {
for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
if (ipr_prev->next == ipr) {
break;
}
}
}
/* release the resources allocated for the fragment queue entry */
ip_reass_dequeue_datagram(ipr, ipr_prev);
/* and adjust the number of pbufs currently queued for reassembly. */
clen = pbuf_clen(p);
LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen);
ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen);
MIB2_STATS_INC(mib2.ipreasmoks);
/* Return the pbuf chain */
return p;
}
/* the datagram is not (yet?) reassembled completely */
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
return NULL;
nullreturn_ipr:
LWIP_ASSERT("ipr != NULL", ipr != NULL);
if (ipr->p == NULL) {
/* dropped pbuf after creating a new datagram entry: remove the entry, too */
LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams);
ip_reass_dequeue_datagram(ipr, NULL);
}
nullreturn:
LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n"));
IPFRAG_STATS_INC(ip_frag.drop);
pbuf_free(p);
return NULL;
}
#endif /* IP_REASSEMBLY */
#if IP_FRAG
#if !LWIP_NETIF_TX_SINGLE_PBUF
/** Allocate a new struct pbuf_custom_ref */
static struct pbuf_custom_ref *
ip_frag_alloc_pbuf_custom_ref(void)
{
return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF);
}
/** Free a struct pbuf_custom_ref */
static void
ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p)
{
LWIP_ASSERT("p != NULL", p != NULL);
memp_free(MEMP_FRAG_PBUF, p);
}
/** Free-callback function to free a 'struct pbuf_custom_ref', called by
* pbuf_free. */
static void
ipfrag_free_pbuf_custom(struct pbuf *p)
{
struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p;
LWIP_ASSERT("pcr != NULL", pcr != NULL);
LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p);
if (pcr->original != NULL) {
pbuf_free(pcr->original);
}
ip_frag_free_pbuf_custom_ref(pcr);
}
#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
/**
* Fragment an IP datagram if too large for the netif.
*
* Chop the datagram in MTU sized chunks and send them in order
* by pointing PBUF_REFs into p.
*
* @param p ip packet to send
* @param netif the netif on which to send
* @param dest destination ip address to which to send
*
* @return ERR_OK if sent successfully, err_t otherwise
*/
err_t
ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest)
{
struct pbuf *rambuf;
#if !LWIP_NETIF_TX_SINGLE_PBUF
struct pbuf *newpbuf;
u16_t newpbuflen = 0;
u16_t left_to_copy;
#endif
struct ip_hdr *original_iphdr;
struct ip_hdr *iphdr;
const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8);
u16_t left, fragsize;
u16_t ofo;
int last;
u16_t poff = IP_HLEN;
u16_t tmp;
int mf_set;
original_iphdr = (struct ip_hdr *)p->payload;
iphdr = original_iphdr;
if (IPH_HL_BYTES(iphdr) != IP_HLEN) {
/* ip4_frag() does not support IP options */
return ERR_VAL;
}
LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL);
/* Save original offset */
tmp = lwip_ntohs(IPH_OFFSET(iphdr));
ofo = tmp & IP_OFFMASK;
/* already fragmented? if so, the last fragment we create must have MF, too */
mf_set = tmp & IP_MF;
left = (u16_t)(p->tot_len - IP_HLEN);
while (left) {
/* Fill this fragment */
fragsize = LWIP_MIN(left, (u16_t)(nfb * 8));
#if LWIP_NETIF_TX_SINGLE_PBUF
rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM);
if (rambuf == NULL) {
goto memerr;
}
LWIP_ASSERT("this needs a pbuf in one piece!",
(rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff);
/* make room for the IP header */
if (pbuf_add_header(rambuf, IP_HLEN)) {
pbuf_free(rambuf);
goto memerr;
}
/* fill in the IP header */
SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
iphdr = (struct ip_hdr *)rambuf->payload;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
/* When not using a static buffer, create a chain of pbufs.
* The first will be a PBUF_RAM holding the link and IP header.
* The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
* but limited to the size of an mtu.
*/
rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
if (rambuf == NULL) {
goto memerr;
}
LWIP_ASSERT("this needs a pbuf in one piece!",
(rambuf->len >= (IP_HLEN)));
SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
iphdr = (struct ip_hdr *)rambuf->payload;
left_to_copy = fragsize;
while (left_to_copy) {
struct pbuf_custom_ref *pcr;
u16_t plen = (u16_t)(p->len - poff);
LWIP_ASSERT("p->len >= poff", p->len >= poff);
newpbuflen = LWIP_MIN(left_to_copy, plen);
/* Is this pbuf already empty? */
if (!newpbuflen) {
poff = 0;
p = p->next;
continue;
}
pcr = ip_frag_alloc_pbuf_custom_ref();
if (pcr == NULL) {
pbuf_free(rambuf);
goto memerr;
}
/* Mirror this pbuf, although we might not need all of it. */
newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc,
(u8_t *)p->payload + poff, newpbuflen);
if (newpbuf == NULL) {
ip_frag_free_pbuf_custom_ref(pcr);
pbuf_free(rambuf);
goto memerr;
}
pbuf_ref(p);
pcr->original = p;
pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;
/* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
* so that it is removed when pbuf_dechain is later called on rambuf.
*/
pbuf_cat(rambuf, newpbuf);
left_to_copy = (u16_t)(left_to_copy - newpbuflen);
if (left_to_copy) {
poff = 0;
p = p->next;
}
}
poff = (u16_t)(poff + newpbuflen);
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
/* Correct header */
last = (left <= netif->mtu - IP_HLEN);
/* Set new offset and MF flag */
tmp = (IP_OFFMASK & (ofo));
if (!last || mf_set) {
/* the last fragment has MF set if the input frame had it */
tmp = tmp | IP_MF;
}
IPH_OFFSET_SET(iphdr, lwip_htons(tmp));
IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN)));
IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));
}
#endif /* CHECKSUM_GEN_IP */
/* No need for separate header pbuf - we allowed room for it in rambuf
* when allocated.
*/
netif->output(netif, rambuf, dest);
IPFRAG_STATS_INC(ip_frag.xmit);
/* Unfortunately we can't reuse rambuf - the hardware may still be
* using the buffer. Instead we free it (and the ensuing chain) and
* recreate it next time round the loop. If we're lucky the hardware
* will have already sent the packet, the free will really free, and
* there will be zero memory penalty.
*/
pbuf_free(rambuf);
left = (u16_t)(left - fragsize);
ofo = (u16_t)(ofo + nfb);
}
MIB2_STATS_INC(mib2.ipfragoks);
return ERR_OK;
memerr:
MIB2_STATS_INC(mib2.ipfragfails);
return ERR_MEM;
}
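/* ip4_frag() is normally reached from ip4_output_if() when a packet exceeds
 * the netif MTU; a caller-side sketch of that decision (#if 0, mirroring the
 * pattern the stack itself uses; the function name is an example): */
#if 0
static err_t example_send_maybe_fragmented(struct pbuf *p, struct netif *netif,
                                           const ip4_addr_t *dest)
{
  /* don't fragment if the interface has mtu set to 0 [loopif] */
  if (netif->mtu && (p->tot_len > netif->mtu)) {
    return ip4_frag(p, netif, dest);
  }
  return netif->output(netif, p, dest);
}
#endif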
#endif /* IP_FRAG */
#endif /* LWIP_IPV4 */


@ -1,812 +0,0 @@
/**
* @file
*
* @defgroup dhcp6 DHCPv6
* @ingroup ip6
* DHCPv6 client: IPv6 address autoconfiguration as per
* RFC 3315 (stateful DHCPv6) and
* RFC 3736 (stateless DHCPv6).
*
* For now, only stateless DHCPv6 is implemented!
*
* TODO:
* - enable/disable API to not always start when RA is received
* - stateful DHCPv6 (for now, only stateless DHCPv6 for DNS and NTP servers works)
* - create Client Identifier?
* - only start requests if a valid local address is available on the netif
* - only start information requests if required (not for every RA)
*
* dhcp6_enable_stateful() enables stateful DHCPv6 for a netif (stateless disabled)\n
* dhcp6_enable_stateless() enables stateless DHCPv6 for a netif (stateful disabled)\n
* dhcp6_disable() disable DHCPv6 for a netif
*
* When enabled, requests are only issued after receipt of RA with the
* corresponding bits set.
*/
/*
* Copyright (c) 2018 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */
#include "lwip/dhcp6.h"
#include "lwip/prot/dhcp6.h"
#include "lwip/def.h"
#include "lwip/udp.h"
#include "lwip/dns.h"
#include <string.h>
#ifdef LWIP_HOOK_FILENAME
#include LWIP_HOOK_FILENAME
#endif
#ifndef LWIP_HOOK_DHCP6_APPEND_OPTIONS
#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len)
#endif
#ifndef LWIP_HOOK_DHCP6_PARSE_OPTION
#define LWIP_HOOK_DHCP6_PARSE_OPTION(netif, dhcp6, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0)
#endif
#if LWIP_DNS && LWIP_DHCP6_MAX_DNS_SERVERS
#if DNS_MAX_SERVERS > LWIP_DHCP6_MAX_DNS_SERVERS
#define LWIP_DHCP6_PROVIDE_DNS_SERVERS LWIP_DHCP6_MAX_DNS_SERVERS
#else
#define LWIP_DHCP6_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS
#endif
#else
#define LWIP_DHCP6_PROVIDE_DNS_SERVERS 0
#endif
/** Option handling: options are parsed in dhcp6_parse_reply
* and saved in an array where other functions can load them from.
* This might be moved into the struct dhcp6 (not necessarily since
* lwIP is single-threaded and the array is only used while in recv
* callback). */
enum dhcp6_option_idx {
DHCP6_OPTION_IDX_CLI_ID = 0,
DHCP6_OPTION_IDX_SERVER_ID,
#if LWIP_DHCP6_PROVIDE_DNS_SERVERS
DHCP6_OPTION_IDX_DNS_SERVER,
DHCP6_OPTION_IDX_DOMAIN_LIST,
#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */
#if LWIP_DHCP6_GET_NTP_SRV
DHCP6_OPTION_IDX_NTP_SERVER,
#endif /* LWIP_DHCP6_GET_NTP_SRV */
DHCP6_OPTION_IDX_MAX
};
struct dhcp6_option_info {
u8_t option_given;
u16_t val_start;
u16_t val_length;
};
/** Holds the decoded option info, only valid while in dhcp6_recv. */
struct dhcp6_option_info dhcp6_rx_options[DHCP6_OPTION_IDX_MAX];
#define dhcp6_option_given(dhcp6, idx) (dhcp6_rx_options[idx].option_given != 0)
#define dhcp6_got_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 1)
#define dhcp6_clear_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 0)
#define dhcp6_clear_all_options(dhcp6) (memset(dhcp6_rx_options, 0, sizeof(dhcp6_rx_options)))
#define dhcp6_get_option_start(dhcp6, idx) (dhcp6_rx_options[idx].val_start)
#define dhcp6_get_option_length(dhcp6, idx) (dhcp6_rx_options[idx].val_length)
#define dhcp6_set_option(dhcp6, idx, start, len) do { dhcp6_rx_options[idx].val_start = (start); dhcp6_rx_options[idx].val_length = (len); }while(0)
const ip_addr_t dhcp6_All_DHCP6_Relay_Agents_and_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010002);
const ip_addr_t dhcp6_All_DHCP6_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010003);
static struct udp_pcb *dhcp6_pcb;
static u8_t dhcp6_pcb_refcount;
/* receive, unfold, parse and free incoming messages */
static void dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port);
/** Ensure DHCP PCB is allocated and bound */
static err_t
dhcp6_inc_pcb_refcount(void)
{
if (dhcp6_pcb_refcount == 0) {
LWIP_ASSERT("dhcp6_inc_pcb_refcount(): memory leak", dhcp6_pcb == NULL);
/* allocate UDP PCB */
dhcp6_pcb = udp_new_ip6();
if (dhcp6_pcb == NULL) {
return ERR_MEM;
}
ip_set_option(dhcp6_pcb, SOF_BROADCAST);
/* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */
udp_bind(dhcp6_pcb, IP6_ADDR_ANY, DHCP6_CLIENT_PORT);
udp_recv(dhcp6_pcb, dhcp6_recv, NULL);
}
dhcp6_pcb_refcount++;
return ERR_OK;
}
/** Free DHCP PCB if the last netif stops using it */
static void
dhcp6_dec_pcb_refcount(void)
{
LWIP_ASSERT("dhcp6_pcb_refcount(): refcount error", (dhcp6_pcb_refcount > 0));
dhcp6_pcb_refcount--;
if (dhcp6_pcb_refcount == 0) {
udp_remove(dhcp6_pcb);
dhcp6_pcb = NULL;
}
}
/**
* @ingroup dhcp6
* Set a statically allocated struct dhcp6 to work with.
 * Using this prevents dhcp6_start from allocating it with mem_malloc.
*
 * @param netif the netif for which to set the struct dhcp6
* @param dhcp6 (uninitialised) dhcp6 struct allocated by the application
*/
void
dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6)
{
LWIP_ASSERT("netif != NULL", netif != NULL);
LWIP_ASSERT("dhcp6 != NULL", dhcp6 != NULL);
LWIP_ASSERT("netif already has a struct dhcp6 set", netif_dhcp6_data(netif) == NULL);
/* clear data structure */
memset(dhcp6, 0, sizeof(struct dhcp6));
/* dhcp6_set_state(&dhcp, DHCP6_STATE_OFF); */
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6);
}
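/* Usage sketch (illustrative only, not part of the original file): provide
 * application-owned storage instead of letting the enable functions allocate
 * from the heap. `my_netif` is a placeholder for an already added netif. */
#if 0
static struct dhcp6 dhcp6_data; /* application-owned, must outlive the netif */

void example_dhcp6_static_alloc(struct netif *my_netif)
{
  dhcp6_set_struct(my_netif, &dhcp6_data); /* before enabling DHCPv6 */
  dhcp6_enable_stateless(my_netif);        /* will not call mem_malloc() now */
}
#endif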
/**
* @ingroup dhcp6
* Removes a struct dhcp6 from a netif.
*
* ATTENTION: Only use this when not using dhcp6_set_struct() to allocate the
* struct dhcp6 since the memory is passed back to the heap.
*
 * @param netif the netif from which to remove the struct dhcp6
*/
void dhcp6_cleanup(struct netif *netif)
{
LWIP_ASSERT("netif != NULL", netif != NULL);
if (netif_dhcp6_data(netif) != NULL) {
mem_free(netif_dhcp6_data(netif));
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL);
}
}
static struct dhcp6*
dhcp6_get_struct(struct netif *netif, const char *dbg_requester)
{
struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
if (dhcp6 == NULL) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: mallocing new DHCPv6 client\n", dbg_requester));
dhcp6 = (struct dhcp6 *)mem_malloc(sizeof(struct dhcp6));
if (dhcp6 == NULL) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: could not allocate dhcp6\n", dbg_requester));
return NULL;
}
/* clear data structure, this implies DHCP6_STATE_OFF */
memset(dhcp6, 0, sizeof(struct dhcp6));
/* store this dhcp6 client in the netif */
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6);
} else {
/* already has DHCP6 client attached */
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("%s: using existing DHCPv6 client\n", dbg_requester));
}
if (!dhcp6->pcb_allocated) {
if (dhcp6_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP6 PCB is allocated */
mem_free(dhcp6);
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL);
return NULL;
}
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocated dhcp6", dbg_requester));
dhcp6->pcb_allocated = 1;
}
return dhcp6;
}
/*
* Set the DHCPv6 state
* If the state changed, reset the number of tries.
*/
static void
dhcp6_set_state(struct dhcp6 *dhcp6, u8_t new_state, const char *dbg_caller)
{
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("DHCPv6 state: %d -> %d (%s)\n",
dhcp6->state, new_state, dbg_caller));
if (new_state != dhcp6->state) {
dhcp6->state = new_state;
dhcp6->tries = 0;
dhcp6->request_timeout = 0;
}
}
static int
dhcp6_stateless_enabled(struct dhcp6 *dhcp6)
{
if ((dhcp6->state == DHCP6_STATE_STATELESS_IDLE) ||
(dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG)) {
return 1;
}
return 0;
}
/*static int
dhcp6_stateful_enabled(struct dhcp6 *dhcp6)
{
if (dhcp6->state == DHCP6_STATE_OFF) {
return 0;
}
if (dhcp6_stateless_enabled(dhcp6)) {
return 0;
}
return 1;
}*/
/**
* @ingroup dhcp6
* Enable stateful DHCPv6 on this netif
* Requests are sent on receipt of an RA message with the
* ND6_RA_FLAG_MANAGED_ADDR_CONFIG flag set.
*
* A struct dhcp6 will be allocated for this netif if not
* set via @ref dhcp6_set_struct before.
*
* @todo: stateful DHCPv6 not supported, yet
*/
err_t
dhcp6_enable_stateful(struct netif *netif)
{
LWIP_UNUSED_ARG(netif);
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("stateful dhcp6 not implemented yet"));
return ERR_VAL;
}
/**
* @ingroup dhcp6
* Enable stateless DHCPv6 on this netif
* Requests are sent on receipt of an RA message with the
* ND6_RA_FLAG_OTHER_CONFIG flag set.
*
* A struct dhcp6 will be allocated for this netif if not
* set via @ref dhcp6_set_struct before.
*/
err_t
dhcp6_enable_stateless(struct netif *netif)
{
struct dhcp6 *dhcp6;
LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_enable_stateless(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
dhcp6 = dhcp6_get_struct(netif, "dhcp6_enable_stateless()");
if (dhcp6 == NULL) {
return ERR_MEM;
}
if (dhcp6_stateless_enabled(dhcp6)) {
LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 already enabled"));
return ERR_OK;
} else if (dhcp6->state != DHCP6_STATE_OFF) {
/* stateful running */
/* @todo: stop stateful once it is implemented */
LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): switching from stateful to stateless DHCPv6"));
}
LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 enabled\n"));
dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_enable_stateless");
return ERR_OK;
}
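/* Usage sketch (illustrative only): stateless DHCPv6 is typically enabled once
 * the netif is up and has a link-local address; the Information-request itself
 * is only sent after an RA with the "other config" flag arrives (see
 * dhcp6_nd6_ra_trigger() below). `my_netif` is a placeholder. */
#if 0
void example_enable_stateless(struct netif *my_netif)
{
  netif_create_ip6_linklocal_address(my_netif, 1); /* derive address from MAC */
  netif_set_up(my_netif);
  if (dhcp6_enable_stateless(my_netif) != ERR_OK) {
    /* ERR_MEM: no struct dhcp6 or no UDP PCB available */
  }
}
#endif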
/**
* @ingroup dhcp6
 * Disable stateful or stateless DHCPv6 on this netif.
 * Frees the DHCPv6 PCB again if this netif was its last user.
*/
void
dhcp6_disable(struct netif *netif)
{
struct dhcp6 *dhcp6;
LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_disable(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num));
dhcp6 = netif_dhcp6_data(netif);
if (dhcp6 != NULL) {
if (dhcp6->state != DHCP6_STATE_OFF) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_disable(): DHCPv6 disabled (old state: %s)\n",
(dhcp6_stateless_enabled(dhcp6) ? "stateless" : "stateful")));
dhcp6_set_state(dhcp6, DHCP6_STATE_OFF, "dhcp6_disable");
if (dhcp6->pcb_allocated != 0) {
dhcp6_dec_pcb_refcount(); /* free DHCPv6 PCB if not needed any more */
dhcp6->pcb_allocated = 0;
}
}
}
}
/**
* Create a DHCPv6 request, fill in common headers
*
* @param netif the netif under DHCPv6 control
* @param dhcp6 dhcp6 control struct
* @param message_type message type of the request
* @param opt_len_alloc option length to allocate
* @param options_out_len option length on exit
* @return a pbuf for the message
*/
static struct pbuf *
dhcp6_create_msg(struct netif *netif, struct dhcp6 *dhcp6, u8_t message_type,
u16_t opt_len_alloc, u16_t *options_out_len)
{
struct pbuf *p_out;
struct dhcp6_msg *msg_out;
LWIP_ERROR("dhcp6_create_msg: netif != NULL", (netif != NULL), return NULL;);
LWIP_ERROR("dhcp6_create_msg: dhcp6 != NULL", (dhcp6 != NULL), return NULL;);
p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp6_msg) + opt_len_alloc, PBUF_RAM);
if (p_out == NULL) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
("dhcp6_create_msg(): could not allocate pbuf\n"));
return NULL;
}
LWIP_ASSERT("dhcp6_create_msg: check that first pbuf can hold struct dhcp6_msg",
(p_out->len >= sizeof(struct dhcp6_msg) + opt_len_alloc));
/* @todo: limit new xid for certain message types? */
/* reuse transaction identifier in retransmissions */
if (dhcp6->tries == 0) {
dhcp6->xid = LWIP_RAND() & 0xFFFFFF;
}
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE,
("transaction id xid(%"X32_F")\n", dhcp6->xid));
msg_out = (struct dhcp6_msg *)p_out->payload;
memset(msg_out, 0, sizeof(struct dhcp6_msg) + opt_len_alloc);
msg_out->msgtype = message_type;
msg_out->transaction_id[0] = (u8_t)(dhcp6->xid >> 16);
msg_out->transaction_id[1] = (u8_t)(dhcp6->xid >> 8);
msg_out->transaction_id[2] = (u8_t)dhcp6->xid;
*options_out_len = 0;
return p_out;
}
static u16_t
dhcp6_option_short(u16_t options_out_len, u8_t *options, u16_t value)
{
options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8);
options[options_out_len++] = (u8_t) (value & 0x00ffU);
return options_out_len;
}
static u16_t
dhcp6_option_optionrequest(u16_t options_out_len, u8_t *options, const u16_t *req_options,
u16_t num_req_options, u16_t max_len)
{
size_t i;
u16_t ret;
LWIP_ASSERT("dhcp6_option_optionrequest: options_out_len + sizeof(struct dhcp6_msg) + addlen <= max_len",
sizeof(struct dhcp6_msg) + options_out_len + 4U + (2U * num_req_options) <= max_len);
LWIP_UNUSED_ARG(max_len);
ret = dhcp6_option_short(options_out_len, options, DHCP6_OPTION_ORO);
ret = dhcp6_option_short(ret, options, 2 * num_req_options);
for (i = 0; i < num_req_options; i++) {
ret = dhcp6_option_short(ret, options, req_options[i]);
}
return ret;
}
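/* Worked example (option codes assumed from the RFC 3646 definitions used
 * elsewhere in lwIP): requesting DNS servers (23) and the domain search list
 * (24) appends an eight-byte Option Request Option:
 *
 *   00 06  option code: DHCP6_OPTION_ORO (6)
 *   00 04  option length: 2 * 2 requested options
 *   00 17  DHCP6_OPTION_DNS_SERVERS (23)
 *   00 18  DHCP6_OPTION_DOMAIN_LIST (24)
 */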
/* All options are added, shrink the pbuf to the required size */
static void
dhcp6_msg_finalize(u16_t options_out_len, struct pbuf *p_out)
{
/* shrink the pbuf to the actual content length */
pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp6_msg) + options_out_len));
}
#if LWIP_IPV6_DHCP6_STATELESS
static void
dhcp6_information_request(struct netif *netif, struct dhcp6 *dhcp6)
{
const u16_t requested_options[] = {DHCP6_OPTION_DNS_SERVERS, DHCP6_OPTION_DOMAIN_LIST, DHCP6_OPTION_SNTP_SERVERS};
u16_t msecs;
struct pbuf *p_out;
u16_t options_out_len;
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request()\n"));
/* create and initialize the DHCP message header */
p_out = dhcp6_create_msg(netif, dhcp6, DHCP6_INFOREQUEST, 4 + sizeof(requested_options), &options_out_len);
if (p_out != NULL) {
err_t err;
struct dhcp6_msg *msg_out = (struct dhcp6_msg *)p_out->payload;
u8_t *options = (u8_t *)(msg_out + 1);
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request: making request\n"));
options_out_len = dhcp6_option_optionrequest(options_out_len, options, requested_options,
LWIP_ARRAYSIZE(requested_options), p_out->len);
LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, DHCP6_STATE_REQUESTING_CONFIG, msg_out,
DHCP6_INFOREQUEST, options_out_len, p_out->len);
dhcp6_msg_finalize(options_out_len, p_out);
err = udp_sendto_if(dhcp6_pcb, p_out, &dhcp6_All_DHCP6_Relay_Agents_and_Servers, DHCP6_SERVER_PORT, netif);
pbuf_free(p_out);
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request: INFOREQUESTING -> %d\n", (int)err));
LWIP_UNUSED_ARG(err);
} else {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp6_information_request: could not allocate DHCP6 request\n"));
}
dhcp6_set_state(dhcp6, DHCP6_STATE_REQUESTING_CONFIG, "dhcp6_information_request");
if (dhcp6->tries < 255) {
dhcp6->tries++;
}
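/* Exponential backoff: tries is at least 1 here, so the delay below works out
   to 2s, 4s, 8s, 16s, 32s and then a flat 60s between retransmissions. */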
msecs = (u16_t)((dhcp6->tries < 6 ? 1 << dhcp6->tries : 60) * 1000);
dhcp6->request_timeout = (u16_t)((msecs + DHCP6_TIMER_MSECS - 1) / DHCP6_TIMER_MSECS);
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request(): set request timeout %"U16_F" msecs\n", msecs));
}
static err_t
dhcp6_request_config(struct netif *netif, struct dhcp6 *dhcp6)
{
/* stateless mode enabled and no request running? */
if (dhcp6->state == DHCP6_STATE_STATELESS_IDLE) {
/* send Information-request and wait for answer; setup receive timeout */
dhcp6_information_request(netif, dhcp6);
}
return ERR_OK;
}
static void
dhcp6_abort_config_request(struct dhcp6 *dhcp6)
{
if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) {
/* abort running request */
dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_abort_config_request");
}
}
/* Handle a REPLY to INFOREQUEST
* This parses DNS and NTP server addresses from the reply.
*/
static void
dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in)
{
struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
LWIP_UNUSED_ARG(dhcp6);
LWIP_UNUSED_ARG(p_msg_in);
#if LWIP_DHCP6_PROVIDE_DNS_SERVERS
if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) {
ip_addr_t dns_addr;
ip6_addr_t *dns_addr6;
u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
u16_t idx;
u8_t n;
memset(&dns_addr, 0, sizeof(dns_addr));
dns_addr6 = ip_2_ip6(&dns_addr);
for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_PROVIDE_DNS_SERVERS);
n++, idx += sizeof(struct ip6_addr_packed)) {
u16_t copied = pbuf_copy_partial(p_msg_in, dns_addr6, sizeof(struct ip6_addr_packed), idx);
if (copied != sizeof(struct ip6_addr_packed)) {
/* pbuf length mismatch */
return;
}
ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif);
/* @todo: do we need a different offset than DHCP(v4)? */
dns_setserver(n, &dns_addr);
}
}
/* @todo: parse and set Domain Search List */
#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */
#if LWIP_DHCP6_GET_NTP_SRV
if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER)) {
ip_addr_t ntp_server_addrs[LWIP_DHCP6_MAX_NTP_SERVERS];
u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER);
u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER);
u16_t idx;
u8_t n;
for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_MAX_NTP_SERVERS);
n++, idx += sizeof(struct ip6_addr_packed)) {
u16_t copied;
ip6_addr_t *ntp_addr6 = ip_2_ip6(&ntp_server_addrs[n]);
ip_addr_set_zero_ip6(&ntp_server_addrs[n]);
copied = pbuf_copy_partial(p_msg_in, ntp_addr6, sizeof(struct ip6_addr_packed), idx);
if (copied != sizeof(struct ip6_addr_packed)) {
/* pbuf length mismatch */
return;
}
ip6_addr_assign_zone(ntp_addr6, IP6_UNKNOWN, netif);
}
dhcp6_set_ntp_servers(n, ntp_server_addrs);
}
#endif /* LWIP_DHCP6_GET_NTP_SRV */
}
#endif /* LWIP_IPV6_DHCP6_STATELESS */
/** This function is called from the nd6 module when an RA message is received.
* It triggers DHCPv6 requests (if enabled).
*/
void
dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config)
{
struct dhcp6 *dhcp6;
LWIP_ASSERT("netif != NULL", netif != NULL);
dhcp6 = netif_dhcp6_data(netif);
LWIP_UNUSED_ARG(managed_addr_config);
LWIP_UNUSED_ARG(other_config);
LWIP_UNUSED_ARG(dhcp6);
#if LWIP_IPV6_DHCP6_STATELESS
if (dhcp6 != NULL) {
if (dhcp6_stateless_enabled(dhcp6)) {
if (other_config) {
dhcp6_request_config(netif, dhcp6);
} else {
dhcp6_abort_config_request(dhcp6);
}
}
}
#endif /* LWIP_IPV6_DHCP6_STATELESS */
}
/**
* Parse the DHCPv6 message and extract the DHCPv6 options.
*
* Extract the DHCPv6 options (offset + length) so that we can later easily
* check for them or extract the contents.
*/
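/* On the wire (RFC 8415), each option is a simple TLV that directly follows
 * the fixed four-byte message header:
 *
 *    0                   1                   2                   3
 *    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *   |          option-code          |           option-len          |
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *   |                          option-data                          |
 *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */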
static err_t
dhcp6_parse_reply(struct pbuf *p, struct dhcp6 *dhcp6)
{
u16_t offset;
u16_t offset_max;
u16_t options_idx;
struct dhcp6_msg *msg_in;
LWIP_UNUSED_ARG(dhcp6);
/* clear received options */
dhcp6_clear_all_options(dhcp6);
msg_in = (struct dhcp6_msg *)p->payload;
/* parse options */
options_idx = sizeof(struct dhcp6_msg);
/* parse options to the end of the received packet */
offset_max = p->tot_len;
offset = options_idx;
/* at least 4 bytes left to read? */
while ((offset + 4 <= offset_max)) {
u8_t op_len_buf[4];
u8_t *op_len;
u16_t op;
u16_t len;
u16_t val_offset = (u16_t)(offset + 4);
if (val_offset < offset) {
/* overflow */
return ERR_BUF;
}
/* copy option + length, might be split across pbufs */
op_len = (u8_t *)pbuf_get_contiguous(p, op_len_buf, 4, 4, offset);
if (op_len == NULL) {
/* failed to get option and length */
return ERR_VAL;
}
op = (op_len[0] << 8) | op_len[1];
len = (op_len[2] << 8) | op_len[3];
offset = val_offset + len;
if (offset < val_offset) {
/* overflow */
return ERR_BUF;
}
switch (op) {
case (DHCP6_OPTION_CLIENTID):
dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID);
dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID, val_offset, len);
break;
case (DHCP6_OPTION_SERVERID):
dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID);
dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID, val_offset, len);
break;
#if LWIP_DHCP6_PROVIDE_DNS_SERVERS
case (DHCP6_OPTION_DNS_SERVERS):
dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER);
dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER, val_offset, len);
break;
case (DHCP6_OPTION_DOMAIN_LIST):
dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST);
dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST, val_offset, len);
break;
#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */
#if LWIP_DHCP6_GET_NTP_SRV
case (DHCP6_OPTION_SNTP_SERVERS):
dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER);
dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER, val_offset, len);
break;
#endif /* LWIP_DHCP6_GET_NTP_SRV*/
default:
LWIP_DEBUGF(DHCP6_DEBUG, ("skipping option %"U16_F" in options\n", op));
LWIP_HOOK_DHCP6_PARSE_OPTION(ip_current_netif(), dhcp6, dhcp6->state, msg_in,
msg_in->msgtype, op, len, p, val_offset);
break;
}
}
return ERR_OK;
}
static void
dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port)
{
struct netif *netif = ip_current_input_netif();
struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
struct dhcp6_msg *reply_msg = (struct dhcp6_msg *)p->payload;
u8_t msg_type;
u32_t xid;
LWIP_UNUSED_ARG(arg);
/* Caught DHCPv6 message from netif that does not have DHCPv6 enabled? -> not interested */
if ((dhcp6 == NULL) || (dhcp6->pcb_allocated == 0)) {
goto free_pbuf_and_return;
}
LWIP_ERROR("invalid server address type", IP_IS_V6(addr), goto free_pbuf_and_return;);
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_recv(pbuf = %p) from DHCPv6 server %s port %"U16_F"\n", (void *)p,
ipaddr_ntoa(addr), port));
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len));
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len));
/* prevent warnings about unused arguments */
LWIP_UNUSED_ARG(pcb);
LWIP_UNUSED_ARG(addr);
LWIP_UNUSED_ARG(port);
if (p->len < sizeof(struct dhcp6_msg)) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCPv6 reply message or pbuf too short\n"));
goto free_pbuf_and_return;
}
/* match transaction ID against what we expected */
xid = reply_msg->transaction_id[0] << 16;
xid |= reply_msg->transaction_id[1] << 8;
xid |= reply_msg->transaction_id[2];
if (xid != dhcp6->xid) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
("transaction id mismatch reply_msg->xid(%"X32_F")!= dhcp6->xid(%"X32_F")\n", xid, dhcp6->xid));
goto free_pbuf_and_return;
}
/* could the option fields be parsed? */
if (dhcp6_parse_reply(p, dhcp6) != ERR_OK) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
("problem unfolding DHCPv6 message - too short on memory?\n"));
goto free_pbuf_and_return;
}
/* read DHCP message type */
msg_type = reply_msg->msgtype;
/* message type is DHCP6 REPLY? */
if (msg_type == DHCP6_REPLY) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("DHCP6_REPLY received\n"));
#if LWIP_IPV6_DHCP6_STATELESS
/* in info-requesting state? */
if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) {
dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_recv");
dhcp6_handle_config_reply(netif, p);
} else
#endif /* LWIP_IPV6_DHCP6_STATELESS */
{
/* @todo: handle reply in other states? */
}
} else {
/* @todo: handle other message types */
}
free_pbuf_and_return:
pbuf_free(p);
}
/**
* A DHCPv6 request has timed out.
*
* The timer that was started with the DHCPv6 request has
* timed out, indicating no response was received in time.
*/
static void
dhcp6_timeout(struct netif *netif, struct dhcp6 *dhcp6)
{
LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout()\n"));
LWIP_UNUSED_ARG(netif);
LWIP_UNUSED_ARG(dhcp6);
#if LWIP_IPV6_DHCP6_STATELESS
/* back-off period has passed, or server selection timed out */
if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) {
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout(): retrying information request\n"));
dhcp6_information_request(netif, dhcp6);
}
#endif /* LWIP_IPV6_DHCP6_STATELESS */
}
/**
* DHCPv6 timeout handling (this function must be called every 500ms,
* see @ref DHCP6_TIMER_MSECS).
*
* A DHCPv6 server is expected to respond within a short period of time.
* This timer checks whether an outstanding DHCPv6 request is timed out.
*/
void
dhcp6_tmr(void)
{
struct netif *netif;
/* loop through netif's */
NETIF_FOREACH(netif) {
struct dhcp6 *dhcp6 = netif_dhcp6_data(netif);
/* only act on DHCPv6 configured interfaces */
if (dhcp6 != NULL) {
/* timer is active (non zero), and is about to trigger now */
if (dhcp6->request_timeout > 1) {
dhcp6->request_timeout--;
} else if (dhcp6->request_timeout == 1) {
dhcp6->request_timeout--;
/* { dhcp6->request_timeout == 0 } */
LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_tmr(): request timeout\n"));
/* this client's request timeout triggered */
dhcp6_timeout(netif, dhcp6);
}
}
}
}
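/* Integration sketch (illustrative only): in a default lwIP build dhcp6_tmr()
 * is driven by the stack's cyclic timer framework; a NO_SYS-style application
 * could instead re-arm it by hand with sys_timeout(): */
#if 0
static void example_dhcp6_timer(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  dhcp6_tmr(); /* one 500 ms tick */
  sys_timeout(DHCP6_TIMER_MSECS, example_dhcp6_timer, NULL); /* re-arm */
}
#endif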
#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */

View File

@ -1,123 +0,0 @@
/**
* @file
*
* Ethernet output for IPv6. Uses ND tables for link-layer addressing.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_ETHERNET
#include "lwip/ethip6.h"
#include "lwip/nd6.h"
#include "lwip/pbuf.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/icmp6.h"
#include "lwip/prot/ethernet.h"
#include "netif/ethernet.h"
#include <string.h>
/**
* Resolve and fill-in Ethernet address header for outgoing IPv6 packet.
*
* For IPv6 multicast, corresponding Ethernet addresses
* are selected and the packet is transmitted on the link.
*
* For unicast addresses, ask the ND6 module what to do. It will either let us
 * send the packet right away, or queue the packet for later itself, unless
* an error occurs.
*
* @todo anycast addresses
*
* @param netif The lwIP network interface which the IP packet will be sent on.
* @param q The pbuf(s) containing the IP packet to be sent.
* @param ip6addr The IP address of the packet destination.
*
* @return
* - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue.
*/
err_t
ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr)
{
struct eth_addr dest;
const u8_t *hwaddr;
err_t result;
LWIP_ASSERT_CORE_LOCKED();
/* The destination IP address must be properly zoned from here on down. */
IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif);
/* multicast destination IP address? */
if (ip6_addr_ismulticast(ip6addr)) {
/* Hash IP multicast address to MAC address. */
dest.addr[0] = 0x33;
dest.addr[1] = 0x33;
dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0];
dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1];
dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2];
dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3];
/* Send out. */
return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6);
}
/* We have a unicast destination IP address */
/* @todo anycast? */
/* Ask ND6 what to do with the packet. */
result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr);
if (result != ERR_OK) {
return result;
}
/* If no hardware address is returned, nd6 has queued the packet for later. */
if (hwaddr == NULL) {
return ERR_OK;
}
/* Send out the packet using the returned hardware address. */
SMEMCPY(dest.addr, hwaddr, 6);
return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6);
}
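/* Mapping example (RFC 2464, section 7): the Ethernet destination is 33:33
 * followed by the last four bytes of the IPv6 address, so the solicited-node
 * multicast address ff02::1:ff28:9c5a goes out to MAC 33:33:ff:28:9c:5a. */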
#endif /* LWIP_IPV6 && LWIP_ETHERNET */

View File

@ -1,425 +0,0 @@
/**
* @file
*
* IPv6 version of ICMP, as per RFC 4443.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
#include "lwip/icmp6.h"
#include "lwip/prot/icmp6.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/inet_chksum.h"
#include "lwip/pbuf.h"
#include "lwip/netif.h"
#include "lwip/nd6.h"
#include "lwip/mld6.h"
#include "lwip/ip.h"
#include "lwip/stats.h"
#include <string.h>
#if LWIP_ICMP6_DATASIZE == 0
#undef LWIP_ICMP6_DATASIZE
#define LWIP_ICMP6_DATASIZE 8
#endif
/* Forward declarations */
static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type);
static void icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data,
u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr);
static void icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data,
u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr, struct netif *netif);
/**
* Process an input ICMPv6 message. Called by ip6_input.
*
* Will generate a reply for echo requests. Other messages are forwarded
* to nd6_input, or mld6_input.
*
 * @param p the ICMPv6 packet, p->payload pointing to the icmpv6 header
* @param inp the netif on which this packet was received
*/
void
icmp6_input(struct pbuf *p, struct netif *inp)
{
struct icmp6_hdr *icmp6hdr;
struct pbuf *r;
const ip6_addr_t *reply_src;
ICMP6_STATS_INC(icmp6.recv);
/* Check that ICMPv6 header fits in payload */
if (p->len < sizeof(struct icmp6_hdr)) {
/* drop short packets */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.lenerr);
ICMP6_STATS_INC(icmp6.drop);
return;
}
icmp6hdr = (struct icmp6_hdr *)p->payload;
#if CHECKSUM_CHECK_ICMP6
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) {
if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(),
ip6_current_dest_addr()) != 0) {
/* Checksum failed */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.chkerr);
ICMP6_STATS_INC(icmp6.drop);
return;
}
}
#endif /* CHECKSUM_CHECK_ICMP6 */
switch (icmp6hdr->type) {
case ICMP6_TYPE_NA: /* Neighbor advertisement */
case ICMP6_TYPE_NS: /* Neighbor solicitation */
case ICMP6_TYPE_RA: /* Router advertisement */
case ICMP6_TYPE_RD: /* Redirect */
case ICMP6_TYPE_PTB: /* Packet too big */
nd6_input(p, inp);
return;
case ICMP6_TYPE_RS:
#if LWIP_IPV6_FORWARD
/* @todo implement router functionality */
#endif
break;
#if LWIP_IPV6_MLD
case ICMP6_TYPE_MLQ:
case ICMP6_TYPE_MLR:
case ICMP6_TYPE_MLD:
mld6_input(p, inp);
return;
#endif
case ICMP6_TYPE_EREQ:
#if !LWIP_MULTICAST_PING
/* multicast destination address? */
if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
/* drop */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.drop);
return;
}
#endif /* LWIP_MULTICAST_PING */
/* Allocate reply. */
r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM);
if (r == NULL) {
/* drop */
pbuf_free(p);
ICMP6_STATS_INC(icmp6.memerr);
return;
}
/* Copy echo request. */
if (pbuf_copy(r, p) != ERR_OK) {
/* drop */
pbuf_free(p);
pbuf_free(r);
ICMP6_STATS_INC(icmp6.err);
return;
}
/* Determine reply source IPv6 address. */
#if LWIP_MULTICAST_PING
if (ip6_addr_ismulticast(ip6_current_dest_addr())) {
reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr()));
if (reply_src == NULL) {
/* drop */
pbuf_free(p);
pbuf_free(r);
ICMP6_STATS_INC(icmp6.rterr);
return;
}
}
else
#endif /* LWIP_MULTICAST_PING */
{
reply_src = ip6_current_dest_addr();
}
/* Set fields in reply. */
((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP;
((struct icmp6_echo_hdr *)(r->payload))->chksum = 0;
#if CHECKSUM_GEN_ICMP6
IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) {
((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r,
IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr());
}
#endif /* CHECKSUM_GEN_ICMP6 */
/* Send reply. */
ICMP6_STATS_INC(icmp6.xmit);
ip6_output_if(r, reply_src, ip6_current_src_addr(),
LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp);
pbuf_free(r);
break;
default:
ICMP6_STATS_INC(icmp6.proterr);
ICMP6_STATS_INC(icmp6.drop);
break;
}
pbuf_free(p);
}
/**
* Send an icmpv6 'destination unreachable' packet.
*
* This function must be used only in direct response to a packet that is being
* received right now. Otherwise, address zones would be lost.
*
* @param p the input packet for which the 'unreachable' should be sent,
* p->payload pointing to the IPv6 header
* @param c ICMPv6 code for the unreachable type
*/
void
icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c)
{
icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR);
}
/**
* Send an icmpv6 'packet too big' packet.
*
* This function must be used only in direct response to a packet that is being
* received right now. Otherwise, address zones would be lost.
*
* @param p the input packet for which the 'packet too big' should be sent,
* p->payload pointing to the IPv6 header
* @param mtu the maximum mtu that we can accept
*/
void
icmp6_packet_too_big(struct pbuf *p, u32_t mtu)
{
icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB);
}
/**
* Send an icmpv6 'time exceeded' packet.
*
* This function must be used only in direct response to a packet that is being
* received right now. Otherwise, address zones would be lost.
*
* @param p the input packet for which the 'time exceeded' should be sent,
* p->payload pointing to the IPv6 header
* @param c ICMPv6 code for the time exceeded type
*/
void
icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c)
{
icmp6_send_response(p, c, 0, ICMP6_TYPE_TE);
}
/**
* Send an icmpv6 'time exceeded' packet, with explicit source and destination
* addresses.
*
* This function may be used to send a response sometime after receiving the
* packet for which this response is meant. The provided source and destination
* addresses are used primarily to retain their zone information.
*
* @param p the input packet for which the 'time exceeded' should be sent,
* p->payload pointing to the IPv6 header
* @param c ICMPv6 code for the time exceeded type
* @param src_addr source address of the original packet, with zone information
* @param dest_addr destination address of the original packet, with zone
* information
*/
void
icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c,
const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr)
{
icmp6_send_response_with_addrs(p, c, 0, ICMP6_TYPE_TE, src_addr, dest_addr);
}
/**
* Send an icmpv6 'parameter problem' packet.
*
* This function must be used only in direct response to a packet that is being
* received right now. Otherwise, address zones would be lost and the calculated
* offset would be wrong (calculated against ip6_current_header()).
*
* @param p the input packet for which the 'param problem' should be sent,
 * p->payload pointing to the IPv6 header
* @param c ICMPv6 code for the param problem type
* @param pointer the pointer to the byte where the parameter is found
*/
void
icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer)
{
u32_t pointer_u32 = (u32_t)((const u8_t *)pointer - (const u8_t *)ip6_current_header());
icmp6_send_response(p, c, pointer_u32, ICMP6_TYPE_PP);
}
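/* Worked example (hypothetical offsets): if ip6_input() rejects an option
 * whose type byte sits 42 bytes into the offending packet, then
 * icmp6_param_problem(p, ICMP6_PP_OPTION, opt), with `opt` pointing at that
 * byte, yields 42 in the ICMPv6 pointer field, as RFC 4443 requires. */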
/**
* Send an ICMPv6 packet in response to an incoming packet.
* The packet is sent *to* ip_current_src_addr() on ip_current_netif().
*
* @param p the input packet for which the response should be sent,
* p->payload pointing to the IPv6 header
* @param code Code of the ICMPv6 header
* @param data Additional 32-bit parameter in the ICMPv6 header
* @param type Type of the ICMPv6 header
*/
static void
icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type)
{
const struct ip6_addr *reply_src, *reply_dest;
struct netif *netif = ip_current_netif();
LWIP_ASSERT("icmpv6 packet not a direct response", netif != NULL);
reply_dest = ip6_current_src_addr();
/* Select an address to use as source. */
reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest));
if (reply_src == NULL) {
ICMP6_STATS_INC(icmp6.rterr);
return;
}
icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, reply_dest, netif);
}
/**
* Send an ICMPv6 packet in response to an incoming packet.
*
* Call this function if the packet is NOT sent as a direct response to an
* incoming packet, but rather sometime later (e.g. for a fragment reassembly
* timeout). The caller must provide the zoned source and destination addresses
* from the original packet with the src_addr and dest_addr parameters. The
* reason for this approach is that while the addresses themselves are part of
* the original packet, their zone information is not, thus possibly resulting
* in a link-local response being sent over the wrong link.
*
* @param p the input packet for which the response should be sent,
* p->payload pointing to the IPv6 header
* @param code Code of the ICMPv6 header
* @param data Additional 32-bit parameter in the ICMPv6 header
* @param type Type of the ICMPv6 header
* @param src_addr original source address
* @param dest_addr original destination address
*/
static void
icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, u8_t type,
const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr)
{
const struct ip6_addr *reply_src, *reply_dest;
struct netif *netif;
/* Get the destination address and netif for this ICMP message. */
LWIP_ASSERT("must provide both source and destination", src_addr != NULL);
LWIP_ASSERT("must provide both source and destination", dest_addr != NULL);
/* Special case, as ip6_current_xxx is either NULL, or points
to a different packet than the one that expired. */
IP6_ADDR_ZONECHECK(src_addr);
IP6_ADDR_ZONECHECK(dest_addr);
/* Swap source and destination for the reply. */
reply_dest = src_addr;
reply_src = dest_addr;
netif = ip6_route(reply_src, reply_dest);
if (netif == NULL) {
ICMP6_STATS_INC(icmp6.rterr);
return;
}
icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src,
reply_dest, netif);
}
/**
 * Send an ICMPv6 packet (with src/dest addresses and netif given).
*
* @param p the input packet for which the response should be sent,
* p->payload pointing to the IPv6 header
* @param code Code of the ICMPv6 header
* @param data Additional 32-bit parameter in the ICMPv6 header
* @param type Type of the ICMPv6 header
* @param reply_src source address of the packet to send
* @param reply_dest destination address of the packet to send
* @param netif netif to send the packet
*/
static void
icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, u8_t type,
const ip6_addr_t *reply_src, const ip6_addr_t *reply_dest, struct netif *netif)
{
struct pbuf *q;
struct icmp6_hdr *icmp6hdr;
/* ICMPv6 header + IPv6 header + data */
q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE,
PBUF_RAM);
if (q == NULL) {
LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n"));
ICMP6_STATS_INC(icmp6.memerr);
return;
}
LWIP_ASSERT("check that first pbuf can hold icmp 6message",
(q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE)));
icmp6hdr = (struct icmp6_hdr *)q->payload;
icmp6hdr->type = type;
icmp6hdr->code = code;
icmp6hdr->data = lwip_htonl(data);
/* copy fields from original packet */
SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload,
IP6_HLEN + LWIP_ICMP6_DATASIZE);
/* calculate checksum */
icmp6hdr->chksum = 0;
#if CHECKSUM_GEN_ICMP6
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len,
reply_src, reply_dest);
}
#endif /* CHECKSUM_GEN_ICMP6 */
ICMP6_STATS_INC(icmp6.xmit);
ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif);
pbuf_free(q);
}
#endif /* LWIP_ICMP6 && LWIP_IPV6 */

View File

@ -1,53 +0,0 @@
/**
* @file
*
* INET v6 addresses.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
#include "lwip/def.h"
#include "lwip/inet.h"
/** This variable is initialized by the system to contain the wildcard IPv6 address.
*/
const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
#endif /* LWIP_IPV6 && LWIP_SOCKET */

View File

@ -1,343 +0,0 @@
/**
* @file
*
* IPv6 addresses.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
* Functions for handling IPv6 addresses.
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */
#include "lwip/ip_addr.h"
#include "lwip/def.h"
#include <string.h>
#if LWIP_IPV4
#include "lwip/ip4_addr.h" /* for ip6addr_aton to handle IPv4-mapped addresses */
#endif /* LWIP_IPV4 */
/* used by IP6_ADDR_ANY(6) in ip6_addr.h */
const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul);
#define lwip_xchar(i) ((char)((i) < 10 ? '0' + (i) : 'A' + (i) - 10))
/**
* Check whether "cp" is a valid ascii representation
* of an IPv6 address and convert to a binary address.
* Returns 1 if the address is valid, 0 if not.
*
* @param cp IPv6 address in ascii representation (e.g. "FF01::1")
* @param addr pointer to which to save the ip address in network order
* @return 1 if cp could be converted to addr, 0 on failure
*/
int
ip6addr_aton(const char *cp, ip6_addr_t *addr)
{
u32_t addr_index, zero_blocks, current_block_index, current_block_value;
const char *s;
#if LWIP_IPV4
int check_ipv4_mapped = 0;
#endif /* LWIP_IPV4 */
/* Count the number of colons, to count the number of blocks in a "::" sequence;
zero_blocks may be 1 even if there are no "::" sequences. */
zero_blocks = 8;
for (s = cp; *s != 0; s++) {
if (*s == ':') {
zero_blocks--;
#if LWIP_IPV4
} else if (*s == '.') {
if ((zero_blocks == 5) ||(zero_blocks == 2)) {
check_ipv4_mapped = 1;
/* last block could be the start of an IPv4 address */
zero_blocks--;
} else {
/* invalid format */
return 0;
}
break;
#endif /* LWIP_IPV4 */
} else if (!lwip_isxdigit(*s)) {
break;
}
}
/* parse each block */
addr_index = 0;
current_block_index = 0;
current_block_value = 0;
for (s = cp; *s != 0; s++) {
if (*s == ':') {
if (addr) {
if (current_block_index & 0x1) {
addr->addr[addr_index++] |= current_block_value;
}
else {
addr->addr[addr_index] = current_block_value << 16;
}
}
current_block_index++;
#if LWIP_IPV4
if (check_ipv4_mapped) {
if (current_block_index == 6) {
ip4_addr_t ip4;
int ret = ip4addr_aton(s + 1, &ip4);
if (ret) {
if (addr) {
addr->addr[3] = lwip_htonl(ip4.addr);
current_block_index++;
goto fix_byte_order_and_return;
}
return 1;
}
}
}
#endif /* LWIP_IPV4 */
current_block_value = 0;
if (current_block_index > 7) {
/* address too long! */
return 0;
}
if (s[1] == ':') {
if (s[2] == ':') {
/* invalid format: three successive colons */
return 0;
}
s++;
/* "::" found, set zeros */
while (zero_blocks > 0) {
zero_blocks--;
if (current_block_index & 0x1) {
addr_index++;
} else {
if (addr) {
addr->addr[addr_index] = 0;
}
}
current_block_index++;
if (current_block_index > 7) {
/* address too long! */
return 0;
}
}
}
} else if (lwip_isxdigit(*s)) {
/* add current digit */
current_block_value = (current_block_value << 4) +
(lwip_isdigit(*s) ? (u32_t)(*s - '0') :
(u32_t)(10 + (lwip_islower(*s) ? *s - 'a' : *s - 'A')));
} else {
/* unexpected digit, space? CRLF? */
break;
}
}
if (addr) {
if (current_block_index & 0x1) {
addr->addr[addr_index++] |= current_block_value;
}
else {
addr->addr[addr_index] = current_block_value << 16;
}
#if LWIP_IPV4
fix_byte_order_and_return:
#endif
/* convert to network byte order. */
for (addr_index = 0; addr_index < 4; addr_index++) {
addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]);
}
ip6_addr_clear_zone(addr);
}
if (current_block_index != 7) {
return 0;
}
return 1;
}
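/* Usage sketch (illustrative only): */
#if 0
void example_aton(void)
{
  ip6_addr_t a;
  if (ip6addr_aton("2001:db8::1", &a)) {
    /* a.addr[0..3] now hold the 128-bit address in network byte order */
  }
}
#endif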
/**
* Convert numeric IPv6 address into ASCII representation.
* returns ptr to static buffer; not reentrant!
*
* @param addr ip6 address in network order to convert
* @return pointer to a global static (!) buffer that holds the ASCII
* representation of addr
*/
char *
ip6addr_ntoa(const ip6_addr_t *addr)
{
static char str[40];
return ip6addr_ntoa_r(addr, str, 40);
}
/**
* Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used.
*
* @param addr ip6 address in network order to convert
* @param buf target buffer where the string is stored
* @param buflen length of buf
* @return either pointer to buf which now holds the ASCII
* representation of addr or NULL if buf was too small
*/
char *
ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen)
{
u32_t current_block_index, current_block_value, next_block_value;
s32_t i;
u8_t zero_flag, empty_block_flag;
#if LWIP_IPV4
if (ip6_addr_isipv4mappedipv6(addr)) {
/* This is an IPv4 mapped address */
ip4_addr_t addr4;
char *ret;
#define IP4MAPPED_HEADER "::FFFF:"
char *buf_ip4 = buf + sizeof(IP4MAPPED_HEADER) - 1;
int buflen_ip4 = buflen - sizeof(IP4MAPPED_HEADER) + 1;
if (buflen < (int)sizeof(IP4MAPPED_HEADER)) {
return NULL;
}
memcpy(buf, IP4MAPPED_HEADER, sizeof(IP4MAPPED_HEADER));
addr4.addr = addr->addr[3];
ret = ip4addr_ntoa_r(&addr4, buf_ip4, buflen_ip4);
if (ret != buf_ip4) {
return NULL;
}
return buf;
}
#endif /* LWIP_IPV4 */
i = 0;
empty_block_flag = 0; /* used to indicate a zero chain for "::' */
for (current_block_index = 0; current_block_index < 8; current_block_index++) {
/* get the current 16-bit block */
current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]);
if ((current_block_index & 0x1) == 0) {
current_block_value = current_block_value >> 16;
}
current_block_value &= 0xffff;
/* Check for empty block. */
if (current_block_value == 0) {
if (current_block_index == 7 && empty_block_flag == 1) {
/* special case, we must render a ':' for the last block. */
buf[i++] = ':';
if (i >= buflen) {
return NULL;
}
break;
}
if (empty_block_flag == 0) {
/* generate empty block "::", but only if more than one contiguous zero block,
* according to current formatting suggestions RFC 5952. */
next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]);
if ((current_block_index & 0x1) == 0x01) {
next_block_value = next_block_value >> 16;
}
next_block_value &= 0xffff;
if (next_block_value == 0) {
empty_block_flag = 1;
buf[i++] = ':';
if (i >= buflen) {
return NULL;
}
continue; /* move on to next block. */
}
} else if (empty_block_flag == 1) {
/* move on to next block. */
continue;
}
} else if (empty_block_flag == 1) {
/* Set this flag value so we don't produce multiple empty blocks. */
empty_block_flag = 2;
}
if (current_block_index > 0) {
buf[i++] = ':';
if (i >= buflen) {
return NULL;
}
}
if ((current_block_value & 0xf000) == 0) {
zero_flag = 1;
} else {
buf[i++] = lwip_xchar(((current_block_value & 0xf000) >> 12));
zero_flag = 0;
if (i >= buflen) {
return NULL;
}
}
if (((current_block_value & 0xf00) == 0) && (zero_flag)) {
/* do nothing */
} else {
buf[i++] = lwip_xchar(((current_block_value & 0xf00) >> 8));
zero_flag = 0;
if (i >= buflen) {
return NULL;
}
}
if (((current_block_value & 0xf0) == 0) && (zero_flag)) {
/* do nothing */
}
else {
buf[i++] = lwip_xchar(((current_block_value & 0xf0) >> 4));
zero_flag = 0;
if (i >= buflen) {
return NULL;
}
}
buf[i++] = lwip_xchar((current_block_value & 0xf));
if (i >= buflen) {
return NULL;
}
}
buf[i] = 0;
return buf;
}
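/* Usage sketch (illustrative only; `some_addr` stands for any ip6_addr_t).
 * 40 bytes covers the longest textual form (8 * 4 hex digits + 7 colons + NUL);
 * note that lwip_xchar() above emits the hex digits in upper case. */
#if 0
void example_ntoa_r(const ip6_addr_t *some_addr)
{
  char buf[40];
  if (ip6addr_ntoa_r(some_addr, buf, sizeof(buf)) != NULL) {
    /* buf now holds e.g. "2001:DB8::1" */
  }
}
#endif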
#endif /* LWIP_IPV6 */

View File

@ -1,862 +0,0 @@
/**
* @file
*
* IPv6 fragmentation and reassembly.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
#include "lwip/opt.h"
#include "lwip/ip6_frag.h"
#include "lwip/ip6.h"
#include "lwip/icmp6.h"
#include "lwip/nd6.h"
#include "lwip/ip.h"
#include "lwip/pbuf.h"
#include "lwip/memp.h"
#include "lwip/stats.h"
#include <string.h>
#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */
/** Setting this to 0, you can turn off checking the fragments for overlapping
* regions. The code gets a little smaller. Only use this if you know that
* overlapping won't occur on your network! */
#ifndef IP_REASS_CHECK_OVERLAP
#define IP_REASS_CHECK_OVERLAP 1
#endif /* IP_REASS_CHECK_OVERLAP */
/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
* full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
* Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
* is set to 1, so one datagram can be reassembled at a time, only. */
#ifndef IP_REASS_FREE_OLDEST
#define IP_REASS_FREE_OLDEST 1
#endif /* IP_REASS_FREE_OLDEST */
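/* Configuration sketch (illustrative only): both knobs are meant to be
 * overridden from lwipopts.h before this file is compiled, e.g.:
 *
 *   #define IP_REASS_CHECK_OVERLAP 0  (trusted network, save some code)
 *   #define IP_REASS_FREE_OLDEST   0  (expire datagrams by timeout only)
 */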
#if IPV6_FRAG_COPYHEADER
/* The number of bytes we need to "borrow" from (i.e., overwrite in) the header
 * that precedes the fragment header for reassembly purposes. */
#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN))
#endif
#define IP_REASS_FLAG_LASTFRAG 0x01
/** This is a helper struct which holds the starting
* offset and the ending offset of this fragment to
* easily chain the fragments.
* It has the same packing requirements as the IPv6 header, since it replaces
* the Fragment Header in memory in incoming fragments to keep
* track of the various fragments.
*/
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct ip6_reass_helper {
PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
PACK_STRUCT_FIELD(u16_t start);
PACK_STRUCT_FIELD(u16_t end);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
# include "arch/epstruct.h"
#endif
/* static variables */
static struct ip6_reassdata *reassdatagrams;
static u16_t ip6_reass_pbufcount;
/* Forward declarations. */
static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr);
#if IP_REASS_FREE_OLDEST
static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed);
#endif /* IP_REASS_FREE_OLDEST */
void
ip6_reass_tmr(void)
{
struct ip6_reassdata *r, *tmp;
#if !IPV6_FRAG_COPYHEADER
LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1",
sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN);
#endif /* !IPV6_FRAG_COPYHEADER */
r = reassdatagrams;
while (r != NULL) {
/* Decrement the timer. Once it reaches 0,
* clean up the incomplete fragment assembly */
if (r->timer > 0) {
r->timer--;
r = r->next;
} else {
/* reassembly timed out */
tmp = r;
/* get the next pointer before freeing */
r = r->next;
/* free the helper struct and all enqueued pbufs */
ip6_reass_free_complete_datagram(tmp);
}
}
}
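/* Integration sketch (illustrative only): each call decrements the per-datagram
 * timers by one, so this must run once per IP6_REASS_TMR_INTERVAL (1000 ms);
 * lwIP's cyclic timers normally handle this, a bare-metal loop could do: */
#if 0
void example_poll(void)
{
  static u32_t last_tmr_ms;                  /* hypothetical bookkeeping */
  if ((u32_t)(sys_now() - last_tmr_ms) >= IP6_REASS_TMR_INTERVAL) {
    last_tmr_ms += IP6_REASS_TMR_INTERVAL;
    ip6_reass_tmr();
  }
}
#endif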
/**
* Free a datagram (struct ip6_reassdata) and all its pbufs.
* Updates the total count of enqueued pbufs (ip6_reass_pbufcount),
* sends an ICMP time exceeded packet.
*
* @param ipr datagram to free
*/
static void
ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr)
{
struct ip6_reassdata *prev;
u16_t pbufs_freed = 0;
u16_t clen;
struct pbuf *p;
struct ip6_reass_helper *iprh;
#if LWIP_ICMP6
iprh = (struct ip6_reass_helper *)ipr->p->payload;
if (iprh->start == 0) {
/* The first fragment was received, send ICMP time exceeded. */
/* First, de-queue the first pbuf from r->p. */
p = ipr->p;
ipr->p = iprh->next_pbuf;
/* Restore the part that we've overwritten with our helper structure, or we
* might send garbage (and disclose a pointer) in the ICMPv6 reply. */
MEMCPY(p->payload, ipr->orig_hdr, sizeof(iprh));
/* Then, move back to the original ipv6 header (we are now pointing to Fragment header).
This cannot fail since we already checked when receiving this fragment. */
if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr))) {
LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0);
}
else {
/* Reconstruct the zoned source and destination addresses, so that we do
* not end up sending the ICMP response over the wrong link. */
ip6_addr_t src_addr, dest_addr;
ip6_addr_copy_from_packed(src_addr, IPV6_FRAG_SRC(ipr));
ip6_addr_set_zone(&src_addr, ipr->src_zone);
ip6_addr_copy_from_packed(dest_addr, IPV6_FRAG_DEST(ipr));
ip6_addr_set_zone(&dest_addr, ipr->dest_zone);
/* Send the actual ICMP response. */
icmp6_time_exceeded_with_addrs(p, ICMP6_TE_FRAG, &src_addr, &dest_addr);
}
clen = pbuf_clen(p);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed = (u16_t)(pbufs_freed + clen);
pbuf_free(p);
}
#endif /* LWIP_ICMP6 */
/* First, free all received pbufs. The individual pbufs need to be released
separately as they have not yet been chained */
p = ipr->p;
while (p != NULL) {
struct pbuf *pcur;
iprh = (struct ip6_reass_helper *)p->payload;
pcur = p;
/* get the next pointer before freeing */
p = iprh->next_pbuf;
clen = pbuf_clen(pcur);
LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
pbufs_freed = (u16_t)(pbufs_freed + clen);
pbuf_free(pcur);
}
/* Then, unchain the struct ip6_reassdata from the list and free it. */
if (ipr == reassdatagrams) {
reassdatagrams = ipr->next;
} else {
prev = reassdatagrams;
while (prev != NULL) {
if (prev->next == ipr) {
break;
}
prev = prev->next;
}
if (prev != NULL) {
prev->next = ipr->next;
}
}
memp_free(MEMP_IP6_REASSDATA, ipr);
/* Finally, update number of pbufs in reassembly queue */
LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed);
ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - pbufs_freed);
}
#if IP_REASS_FREE_OLDEST
/**
* Free the oldest datagram to make room for enqueueing new fragments.
* The datagram ipr is not freed!
*
* @param ipr ip6_reassdata for the current fragment
* @param pbufs_needed number of pbufs needed to enqueue
* (used for freeing other datagrams if not enough space)
*/
static void
ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed)
{
struct ip6_reassdata *r, *oldest;
/* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
* but don't free the current datagram! */
do {
r = oldest = reassdatagrams;
while (r != NULL) {
if (r != ipr) {
if (r->timer <= oldest->timer) {
/* older than the previous oldest */
oldest = r;
}
}
r = r->next;
}
if (oldest == ipr) {
/* nothing to free, ipr is the only element on the list */
return;
}
if (oldest != NULL) {
ip6_reass_free_complete_datagram(oldest);
}
} while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL));
}
#endif /* IP_REASS_FREE_OLDEST */
/**
* Reassembles incoming IPv6 fragments into an IPv6 datagram.
*
* @param p points to the IPv6 Fragment Header
* @return NULL if reassembly is incomplete, pbuf pointing to
* IPv6 Header if reassembly is complete
*/
struct pbuf *
ip6_reass(struct pbuf *p)
{
struct ip6_reassdata *ipr, *ipr_prev;
struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
struct ip6_frag_hdr *frag_hdr;
u16_t offset, len, start, end;
ptrdiff_t hdrdiff;
u16_t clen;
u8_t valid = 1;
struct pbuf *q, *next_pbuf;
IP6_FRAG_STATS_INC(ip6_frag.recv);
/* ip6_frag_hdr must be in the first pbuf, not chained. Checked by caller. */
LWIP_ASSERT("IPv6 fragment header does not fit in first pbuf",
p->len >= sizeof(struct ip6_frag_hdr));
frag_hdr = (struct ip6_frag_hdr *) p->payload;
clen = pbuf_clen(p);
offset = lwip_ntohs(frag_hdr->_fragment_offset);
/* Calculate fragment length from IPv6 payload length.
* Adjust for headers before Fragment Header.
* And finally adjust by Fragment Header length. */
len = lwip_ntohs(ip6_current_header()->_plen);
hdrdiff = (u8_t*)p->payload - (const u8_t*)ip6_current_header();
LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff <= 0xFFFF);
LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff >= IP6_HLEN);
hdrdiff -= IP6_HLEN;
hdrdiff += IP6_FRAG_HLEN;
if (hdrdiff > len) {
IP6_FRAG_STATS_INC(ip6_frag.proterr);
goto nullreturn;
}
len = (u16_t)(len - hdrdiff);
start = (offset & IP6_FRAG_OFFSET_MASK);
if (start > (0xFFFF - len)) {
/* u16_t overflow, cannot handle this */
IP6_FRAG_STATS_INC(ip6_frag.proterr);
goto nullreturn;
}
/* Look for the datagram the fragment belongs to in the current datagram queue,
* remembering the previous in the queue for later dequeueing. */
for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) {
/* Check if the incoming fragment matches the one currently present
in the reassembly buffer. If so, we proceed with copying the
fragment into the buffer. */
if ((frag_hdr->_identification == ipr->identification) &&
ip6_addr_cmp_packed(ip6_current_src_addr(), &(IPV6_FRAG_SRC(ipr)), ipr->src_zone) &&
ip6_addr_cmp_packed(ip6_current_dest_addr(), &(IPV6_FRAG_DEST(ipr)), ipr->dest_zone)) {
IP6_FRAG_STATS_INC(ip6_frag.cachehit);
break;
}
ipr_prev = ipr;
}
if (ipr == NULL) {
/* Enqueue a new datagram into the datagram queue */
ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
/* Make room and try again. */
ip6_reass_remove_oldest_datagram(ipr, clen);
ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
if (ipr != NULL) {
/* re-search ipr_prev since it might have been removed */
for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
if (ipr_prev->next == ipr) {
break;
}
}
} else
#endif /* IP_REASS_FREE_OLDEST */
{
IP6_FRAG_STATS_INC(ip6_frag.memerr);
goto nullreturn;
}
}
memset(ipr, 0, sizeof(struct ip6_reassdata));
ipr->timer = IPV6_REASS_MAXAGE;
/* enqueue the new structure to the front of the list */
ipr->next = reassdatagrams;
reassdatagrams = ipr;
/* Use the current IPv6 header for src/dest address reference.
* Eventually, we will replace it when we get the first fragment
* (it might be this one, in any case, it is done later). */
/* need to use the non-const pointer here: */
ipr->iphdr = ip_data.current_ip6_header;
#if IPV6_FRAG_COPYHEADER
MEMCPY(&ipr->src, &ip6_current_header()->src, sizeof(ipr->src));
MEMCPY(&ipr->dest, &ip6_current_header()->dest, sizeof(ipr->dest));
#endif /* IPV6_FRAG_COPYHEADER */
#if LWIP_IPV6_SCOPES
/* Also store the address zone information.
* @todo It is possible that due to netif destruction and recreation, the
* stored zones end up resolving to a different interface. In that case, we
* risk sending a "time exceeded" ICMP response over the wrong link.
* Ideally, netif destruction would clean up matching pending reassembly
* structures, but custom zone mappings would make that non-trivial. */
ipr->src_zone = ip6_addr_zone(ip6_current_src_addr());
ipr->dest_zone = ip6_addr_zone(ip6_current_dest_addr());
#endif /* LWIP_IPV6_SCOPES */
/* copy the fragmented packet id. */
ipr->identification = frag_hdr->_identification;
/* copy the nexth field */
ipr->nexth = frag_hdr->_nexth;
}
/* Check if we are allowed to enqueue more datagrams. */
if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
ip6_reass_remove_oldest_datagram(ipr, clen);
if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) {
/* re-search ipr_prev since it might have been removed */
for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
if (ipr_prev->next == ipr) {
break;
}
}
} else
#endif /* IP_REASS_FREE_OLDEST */
{
/* @todo: send ICMPv6 time exceeded here? */
/* drop this pbuf */
IP6_FRAG_STATS_INC(ip6_frag.memerr);
goto nullreturn;
}
}
/* Overwrite Fragment Header with our own helper struct. */
#if IPV6_FRAG_COPYHEADER
if (IPV6_FRAG_REQROOM > 0) {
/* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4).
This cannot fail since we already checked when receiving this fragment. */
u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM);
LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */
LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
}
#else /* IPV6_FRAG_COPYHEADER */
LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1",
sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN);
#endif /* IPV6_FRAG_COPYHEADER */
/* Prepare the pointer to the helper structure, and its initial values.
* Do not yet write to the structure itself, as we still have to make a
* backup of the original data, and we should not do that until we know for
* sure that we are going to add this packet to the list. */
iprh = (struct ip6_reass_helper *)p->payload;
next_pbuf = NULL;
end = (u16_t)(start + len);
/* find the right place to insert this pbuf */
/* Iterate through until we either get to the end of the list (append),
 * or we find one with a larger offset (insert). */
for (q = ipr->p; q != NULL;) {
iprh_tmp = (struct ip6_reass_helper*)q->payload;
if (start < iprh_tmp->start) {
#if IP_REASS_CHECK_OVERLAP
if (end > iprh_tmp->start) {
/* fragment overlaps with following, throw away */
IP6_FRAG_STATS_INC(ip6_frag.proterr);
goto nullreturn;
}
if (iprh_prev != NULL) {
if (start < iprh_prev->end) {
/* fragment overlaps with previous, throw away */
IP6_FRAG_STATS_INC(ip6_frag.proterr);
goto nullreturn;
}
}
#endif /* IP_REASS_CHECK_OVERLAP */
/* the new pbuf should be inserted before this */
next_pbuf = q;
if (iprh_prev != NULL) {
/* not the fragment with the lowest offset */
iprh_prev->next_pbuf = p;
} else {
/* fragment with the lowest offset */
ipr->p = p;
}
break;
} else if (start == iprh_tmp->start) {
/* received the same datagram twice: no need to keep the datagram */
goto nullreturn;
#if IP_REASS_CHECK_OVERLAP
} else if (start < iprh_tmp->end) {
/* overlap: no need to keep the new datagram */
IP6_FRAG_STATS_INC(ip6_frag.proterr);
goto nullreturn;
#endif /* IP_REASS_CHECK_OVERLAP */
} else {
/* Check if the fragments received so far have no gaps. */
if (iprh_prev != NULL) {
if (iprh_prev->end != iprh_tmp->start) {
/* There is a fragment missing between the current
* and the previous fragment */
valid = 0;
}
}
}
q = iprh_tmp->next_pbuf;
iprh_prev = iprh_tmp;
}
/* If q is NULL, then we made it to the end of the list. Determine what to do now */
if (q == NULL) {
if (iprh_prev != NULL) {
/* this is (for now), the fragment with the highest offset:
* chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= start);
#endif /* IP_REASS_CHECK_OVERLAP */
iprh_prev->next_pbuf = p;
if (iprh_prev->end != start) {
valid = 0;
}
} else {
#if IP_REASS_CHECK_OVERLAP
LWIP_ASSERT("no previous fragment, this must be the first fragment!",
ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
/* this is the first fragment we ever received for this ip datagram */
ipr->p = p;
}
}
/* Track the number of pbufs currently 'in-flight', in order to limit
the number of fragments that may be enqueued at any one time */
ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount + clen);
/* Remember IPv6 header if this is the first fragment. */
if (start == 0) {
/* need to use the non-const pointer here: */
ipr->iphdr = ip_data.current_ip6_header;
/* Make a backup of the part of the packet data that we are about to
* overwrite, so that we can restore the original later. */
MEMCPY(ipr->orig_hdr, p->payload, sizeof(*iprh));
/* For IPV6_FRAG_COPYHEADER there is no need to copy src/dst again, as they
* will be the same as they were. With LWIP_IPV6_SCOPES, the same applies
* to the source/destination zones. */
}
/* Only after the backup do we get to fill in the actual helper structure. */
iprh->next_pbuf = next_pbuf;
iprh->start = start;
iprh->end = end;
/* If this is the last fragment, calculate total packet length. */
if ((offset & IP6_FRAG_MORE_FLAG) == 0) {
ipr->datagram_len = iprh->end;
}
/* Additional validity tests: we have received first and last fragment. */
iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload;
if (iprh_tmp->start != 0) {
valid = 0;
}
if (ipr->datagram_len == 0) {
valid = 0;
}
/* Final validity test: no gaps between current and last fragment. */
iprh_prev = iprh;
q = iprh->next_pbuf;
while ((q != NULL) && valid) {
iprh = (struct ip6_reass_helper*)q->payload;
if (iprh_prev->end != iprh->start) {
valid = 0;
break;
}
iprh_prev = iprh;
q = iprh->next_pbuf;
}
if (valid) {
/* All fragments have been received */
struct ip6_hdr* iphdr_ptr;
/* chain together the pbufs contained within the ip6_reassdata list. */
iprh = (struct ip6_reass_helper*) ipr->p->payload;
while (iprh != NULL) {
next_pbuf = iprh->next_pbuf;
if (next_pbuf != NULL) {
/* Save next helper struct (will be hidden in next step). */
iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload;
/* hide the fragment header for every succeeding fragment */
pbuf_remove_header(next_pbuf, IP6_FRAG_HLEN);
#if IPV6_FRAG_COPYHEADER
if (IPV6_FRAG_REQROOM > 0) {
/* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */
u8_t hdrerr = pbuf_remove_header(next_pbuf, IPV6_FRAG_REQROOM);
LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */
LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
}
#endif
pbuf_cat(ipr->p, next_pbuf);
}
else {
iprh_tmp = NULL;
}
iprh = iprh_tmp;
}
/* Get the first pbuf. */
p = ipr->p;
#if IPV6_FRAG_COPYHEADER
if (IPV6_FRAG_REQROOM > 0) {
u8_t hdrerr;
/* Restore (only) the bytes that we overwrote beyond the fragment header.
* Those bytes may belong to either the IPv6 header or an extension
* header placed before the fragment header. */
MEMCPY(p->payload, ipr->orig_hdr, IPV6_FRAG_REQROOM);
/* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */
hdrerr = pbuf_remove_header(p, IPV6_FRAG_REQROOM);
LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */
LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
}
#endif
/* We need to get rid of the fragment header itself, which is somewhere in
* the middle of the packet (but still in the first pbuf of the chain).
* Getting rid of the header is required by RFC 2460 Sec. 4.5 and necessary
* in order to be able to reassemble packets that are close to full size
* (i.e., around 65535 bytes). We simply move up all the headers before the
* fragment header, including the IPv6 header, and adjust the payload start
* accordingly. This works because all these headers are in the first pbuf
* of the chain, and because the caller adjusts all its pointers on
* successful reassembly. */
MEMMOVE((u8_t*)ipr->iphdr + sizeof(struct ip6_frag_hdr), ipr->iphdr,
(size_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr));
/* This is where the IPv6 header is now. */
iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->iphdr +
sizeof(struct ip6_frag_hdr));
/* Adjust datagram length by adding header lengths. */
ipr->datagram_len = (u16_t)(ipr->datagram_len + ((u8_t*)p->payload - (u8_t*)iphdr_ptr)
- IP6_HLEN);
/* Set payload length in ip header. */
iphdr_ptr->_plen = lwip_htons(ipr->datagram_len);
/* With the fragment header gone, we now need to adjust the next-header
* field of whatever header was originally before it. Since the packet made
* it through the original header processing routines at least up to the
* fragment header, we do not need any further sanity checks here. */
if (IP6H_NEXTH(iphdr_ptr) == IP6_NEXTH_FRAGMENT) {
iphdr_ptr->_nexth = ipr->nexth;
} else {
u8_t *ptr = (u8_t *)iphdr_ptr + IP6_HLEN;
while (*ptr != IP6_NEXTH_FRAGMENT) {
ptr += 8 * (1 + ptr[1]);
}
*ptr = ipr->nexth;
}
/* release the resources allocated for the fragment queue entry */
if (reassdatagrams == ipr) {
/* it was the first in the list */
reassdatagrams = ipr->next;
} else {
/* it wasn't the first, so it must have a valid 'prev' */
LWIP_ASSERT("sanity check linked list", ipr_prev != NULL);
ipr_prev->next = ipr->next;
}
memp_free(MEMP_IP6_REASSDATA, ipr);
/* adjust the number of pbufs currently queued for reassembly. */
clen = pbuf_clen(p);
LWIP_ASSERT("ip6_reass_pbufcount >= clen", ip6_reass_pbufcount >= clen);
ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - clen);
/* Move pbuf back to IPv6 header. This should never fail. */
if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) {
LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0);
pbuf_free(p);
return NULL;
}
/* Return the pbuf chain */
return p;
}
/* the datagram is not (yet?) reassembled completely */
return NULL;
nullreturn:
IP6_FRAG_STATS_INC(ip6_frag.drop);
pbuf_free(p);
return NULL;
}
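/* A minimal caller-side sketch (assumed code, mirroring what ip6_input does
 * after encountering a Fragment Header): reassemble, and stop processing if
 * the datagram is not complete yet, since ip6_reass has consumed the pbuf:
 *
 *   p = ip6_reass(p);
 *   if (p == NULL) {
 *     return;
 *   }
 *
 * On success, p points to the reassembled datagram, starting at the IPv6
 * header, and normal header processing continues from there. */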
#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */
#if LWIP_IPV6 && LWIP_IPV6_FRAG
#if !LWIP_NETIF_TX_SINGLE_PBUF
/** Allocate a new struct pbuf_custom_ref */
static struct pbuf_custom_ref*
ip6_frag_alloc_pbuf_custom_ref(void)
{
return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
}
/** Free a struct pbuf_custom_ref */
static void
ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
{
LWIP_ASSERT("p != NULL", p != NULL);
memp_free(MEMP_FRAG_PBUF, p);
}
/** Free-callback function to free a 'struct pbuf_custom_ref', called by
* pbuf_free. */
static void
ip6_frag_free_pbuf_custom(struct pbuf *p)
{
struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
LWIP_ASSERT("pcr != NULL", pcr != NULL);
LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
if (pcr->original != NULL) {
pbuf_free(pcr->original);
}
ip6_frag_free_pbuf_custom_ref(pcr);
}
#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
/**
* Fragment an IPv6 datagram if too large for the netif or path MTU.
*
 * Chop the datagram into MTU-sized chunks and send them in order
* by pointing PBUF_REFs into p
*
* @param p ipv6 packet to send
* @param netif the netif on which to send
* @param dest destination ipv6 address to which to send
*
* @return ERR_OK if sent successfully, err_t otherwise
*/
err_t
ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest)
{
struct ip6_hdr *original_ip6hdr;
struct ip6_hdr *ip6hdr;
struct ip6_frag_hdr *frag_hdr;
struct pbuf *rambuf;
#if !LWIP_NETIF_TX_SINGLE_PBUF
struct pbuf *newpbuf;
u16_t newpbuflen = 0;
u16_t left_to_copy;
#endif
static u32_t identification;
u16_t left, cop;
const u16_t mtu = nd6_get_destination_mtu(dest, netif);
const u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK);
u16_t fragment_offset = 0;
u16_t last;
u16_t poff = IP6_HLEN;
identification++;
original_ip6hdr = (struct ip6_hdr *)p->payload;
/* @todo we assume there are no options in the unfragmentable part (IPv6 header). */
LWIP_ASSERT("p->tot_len >= IP6_HLEN", p->tot_len >= IP6_HLEN);
left = (u16_t)(p->tot_len - IP6_HLEN);
while (left) {
last = (left <= nfb);
/* Fill this fragment */
cop = last ? left : nfb;
#if LWIP_NETIF_TX_SINGLE_PBUF
rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM);
if (rambuf == NULL) {
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
LWIP_ASSERT("this needs a pbuf in one piece!",
(rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff);
/* make room for the IP header */
if (pbuf_add_header(rambuf, IP6_HLEN)) {
pbuf_free(rambuf);
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
/* fill in the IP header */
SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN);
ip6hdr = (struct ip6_hdr *)rambuf->payload;
frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN);
#else
/* When not using a static buffer, create a chain of pbufs.
* The first will be a PBUF_RAM holding the link, IPv6, and Fragment header.
* The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
* but limited to the size of an mtu.
*/
rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM);
if (rambuf == NULL) {
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
LWIP_ASSERT("this needs a pbuf in one piece!",
(p->len >= (IP6_HLEN)));
SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN);
ip6hdr = (struct ip6_hdr *)rambuf->payload;
frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN);
/* Can just adjust p directly for needed offset. */
p->payload = (u8_t *)p->payload + poff;
p->len = (u16_t)(p->len - poff);
p->tot_len = (u16_t)(p->tot_len - poff);
left_to_copy = cop;
while (left_to_copy) {
struct pbuf_custom_ref *pcr;
newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
/* Is this pbuf already empty? */
if (!newpbuflen) {
p = p->next;
continue;
}
pcr = ip6_frag_alloc_pbuf_custom_ref();
if (pcr == NULL) {
pbuf_free(rambuf);
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
/* Mirror this pbuf, although we might not need all of it. */
newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
if (newpbuf == NULL) {
ip6_frag_free_pbuf_custom_ref(pcr);
pbuf_free(rambuf);
IP6_FRAG_STATS_INC(ip6_frag.memerr);
return ERR_MEM;
}
pbuf_ref(p);
pcr->original = p;
pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom;
/* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
* so that it is removed when pbuf_dechain is later called on rambuf.
*/
pbuf_cat(rambuf, newpbuf);
left_to_copy = (u16_t)(left_to_copy - newpbuflen);
if (left_to_copy) {
p = p->next;
}
}
poff = newpbuflen;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
/* Set headers */
frag_hdr->_nexth = original_ip6hdr->_nexth;
frag_hdr->reserved = 0;
frag_hdr->_fragment_offset = lwip_htons((u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG)));
frag_hdr->_identification = lwip_htonl(identification);
IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT);
IP6H_PLEN_SET(ip6hdr, (u16_t)(cop + IP6_FRAG_HLEN));
/* No need for separate header pbuf - we allowed room for it in rambuf
* when allocated.
*/
IP6_FRAG_STATS_INC(ip6_frag.xmit);
netif->output_ip6(netif, rambuf, dest);
/* Unfortunately we can't reuse rambuf - the hardware may still be
* using the buffer. Instead we free it (and the ensuing chain) and
* recreate it next time round the loop. If we're lucky the hardware
* will have already sent the packet, the free will really free, and
* there will be zero memory penalty.
*/
pbuf_free(rambuf);
left = (u16_t)(left - cop);
fragment_offset = (u16_t)(fragment_offset + cop);
}
return ERR_OK;
}
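/* A minimal caller-side sketch (assumed code, close to what the IPv6 output
 * path does before handing a datagram to the driver): fragment only when the
 * datagram exceeds the destination MTU, otherwise send it directly:
 *
 *   if (netif->mtu && (p->tot_len > nd6_get_destination_mtu(dest, netif))) {
 *     return ip6_frag(p, netif, dest);
 *   }
 *   return netif->output_ip6(netif, p, dest);
 */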
#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */

View File

@ -1,626 +0,0 @@
/**
* @file
* Multicast listener discovery
*
* @defgroup mld6 MLD6
* @ingroup ip6
* Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710.
* No support for MLDv2.\n
 * Note: The allnodes (ff01::1, ff02::1) group is assumed to be received by your
* netif since it must always be received for correct IPv6 operation (e.g. SLAAC).
* Ensure the netif filters are configured accordingly!\n
 * The netif flags also need the NETIF_FLAG_MLD6 flag set to enable MLD6 on a
* netif ("netif->flags |= NETIF_FLAG_MLD6;").\n
* To be called from TCPIP thread.
*/
/*
* Copyright (c) 2010 Inico Technologies Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Ivan Delamer <delamer@inicotech.com>
*
*
* Please coordinate changes and requests with Ivan Delamer
* <delamer@inicotech.com>
*/
/* Based on igmp.c implementation of igmp v2 protocol */
#include "lwip/opt.h"
#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */
#include "lwip/mld6.h"
#include "lwip/prot/mld6.h"
#include "lwip/icmp6.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/ip.h"
#include "lwip/inet_chksum.h"
#include "lwip/pbuf.h"
#include "lwip/netif.h"
#include "lwip/memp.h"
#include "lwip/stats.h"
#include <string.h>
/*
* MLD constants
*/
#define MLD6_HL 1
#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500)
#define MLD6_GROUP_NON_MEMBER 0
#define MLD6_GROUP_DELAYING_MEMBER 1
#define MLD6_GROUP_IDLE_MEMBER 2
/* Forward declarations. */
static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr);
static err_t mld6_remove_group(struct netif *netif, struct mld_group *group);
static void mld6_delayed_report(struct mld_group *group, u16_t maxresp);
static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type);
/**
* Stop MLD processing on interface
*
 * @param netif network interface on which to stop MLD processing
*/
err_t
mld6_stop(struct netif *netif)
{
struct mld_group *group = netif_mld6_data(netif);
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL);
while (group != NULL) {
struct mld_group *next = group->next; /* avoid use-after-free below */
/* disable the group at the MAC level */
if (netif->mld_mac_filter != NULL) {
netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER);
}
/* free group */
memp_free(MEMP_MLD6_GROUP, group);
/* move to "next" */
group = next;
}
return ERR_OK;
}
/**
* Report MLD memberships for this interface
*
 * @param netif network interface on which to report MLD memberships
*/
void
mld6_report_groups(struct netif *netif)
{
struct mld_group *group = netif_mld6_data(netif);
while (group != NULL) {
mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS);
group = group->next;
}
}
/**
* Search for a group that is joined on a netif
*
* @param ifp the network interface for which to look
* @param addr the group ipv6 address to search for
* @return a struct mld_group* if the group has been found,
* NULL if the group wasn't found.
*/
struct mld_group *
mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr)
{
struct mld_group *group = netif_mld6_data(ifp);
while (group != NULL) {
if (ip6_addr_cmp(&(group->group_address), addr)) {
return group;
}
group = group->next;
}
return NULL;
}
/**
* create a new group
*
 * @param ifp the network interface for which to create the group
 * @param addr the ipv6 address of the new group
* @return a struct mld_group*,
* NULL on memory error.
*/
static struct mld_group *
mld6_new_group(struct netif *ifp, const ip6_addr_t *addr)
{
struct mld_group *group;
group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP);
if (group != NULL) {
ip6_addr_set(&(group->group_address), addr);
group->timer = 0; /* Not running */
group->group_state = MLD6_GROUP_IDLE_MEMBER;
group->last_reporter_flag = 0;
group->use = 0;
group->next = netif_mld6_data(ifp);
netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group);
}
return group;
}
/**
* Remove a group from the mld_group_list, but do not free it yet
*
 * @param netif the network interface from which to remove the group
 * @param group the group to remove
* @return ERR_OK if group was removed from the list, an err_t otherwise
*/
static err_t
mld6_remove_group(struct netif *netif, struct mld_group *group)
{
err_t err = ERR_OK;
/* Is it the first group? */
if (netif_mld6_data(netif) == group) {
netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next);
} else {
/* look for group further down the list */
struct mld_group *tmpGroup;
for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) {
if (tmpGroup->next == group) {
tmpGroup->next = group->next;
break;
}
}
/* Group not found in the list */
if (tmpGroup == NULL) {
err = ERR_ARG;
}
}
return err;
}
/**
* Process an input MLD message. Called by icmp6_input.
*
* @param p the mld packet, p->payload pointing to the icmpv6 header
* @param inp the netif on which this packet was received
*/
void
mld6_input(struct pbuf *p, struct netif *inp)
{
struct mld_header *mld_hdr;
struct mld_group *group;
MLD6_STATS_INC(mld6.recv);
/* Check that mld header fits in packet. */
if (p->len < sizeof(struct mld_header)) {
/* @todo debug message */
pbuf_free(p);
MLD6_STATS_INC(mld6.lenerr);
MLD6_STATS_INC(mld6.drop);
return;
}
mld_hdr = (struct mld_header *)p->payload;
switch (mld_hdr->type) {
case ICMP6_TYPE_MLQ: /* Multicast listener query. */
/* Is it a general query? */
if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) &&
ip6_addr_isany(&(mld_hdr->multicast_address))) {
MLD6_STATS_INC(mld6.rx_general);
/* Report all groups, except all nodes group, and if-local groups. */
group = netif_mld6_data(inp);
while (group != NULL) {
if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) &&
(!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) {
mld6_delayed_report(group, mld_hdr->max_resp_delay);
}
group = group->next;
}
} else {
/* Have we joined this group?
* We use IP6 destination address to have a memory aligned copy.
* mld_hdr->multicast_address should be the same. */
MLD6_STATS_INC(mld6.rx_group);
group = mld6_lookfor_group(inp, ip6_current_dest_addr());
if (group != NULL) {
/* Schedule a report. */
mld6_delayed_report(group, mld_hdr->max_resp_delay);
}
}
break; /* ICMP6_TYPE_MLQ */
case ICMP6_TYPE_MLR: /* Multicast listener report. */
/* Have we joined this group?
* We use IP6 destination address to have a memory aligned copy.
* mld_hdr->multicast_address should be the same. */
MLD6_STATS_INC(mld6.rx_report);
group = mld6_lookfor_group(inp, ip6_current_dest_addr());
if (group != NULL) {
/* If we are waiting to report, cancel it. */
if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) {
group->timer = 0; /* stopped */
group->group_state = MLD6_GROUP_IDLE_MEMBER;
group->last_reporter_flag = 0;
}
}
break; /* ICMP6_TYPE_MLR */
case ICMP6_TYPE_MLD: /* Multicast listener done. */
/* Do nothing, router will query us. */
break; /* ICMP6_TYPE_MLD */
default:
MLD6_STATS_INC(mld6.proterr);
MLD6_STATS_INC(mld6.drop);
break;
}
pbuf_free(p);
}
/**
* @ingroup mld6
* Join a group on one or all network interfaces.
*
* If the group is to be joined on all interfaces, the given group address must
* not have a zone set (i.e., it must have its zone index set to IP6_NO_ZONE).
* If the group is to be joined on one particular interface, the given group
* address may or may not have a zone set.
*
* @param srcaddr ipv6 address (zoned) of the network interface which should
* join a new group. If IP6_ADDR_ANY6, join on all netifs
* @param groupaddr the ipv6 address of the group to join (possibly but not
* necessarily zoned)
* @return ERR_OK if group was joined on the netif(s), an err_t otherwise
*/
err_t
mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
LWIP_ASSERT_CORE_LOCKED();
/* loop through netif's */
NETIF_FOREACH(netif) {
/* Should we join this interface ? */
if (ip6_addr_isany(srcaddr) ||
netif_get_ip6_addr_match(netif, srcaddr) >= 0) {
err = mld6_joingroup_netif(netif, groupaddr);
if (err != ERR_OK) {
return err;
}
}
}
return err;
}
/**
* @ingroup mld6
* Join a group on a network interface.
*
* @param netif the network interface which should join a new group.
* @param groupaddr the ipv6 address of the group to join (possibly but not
* necessarily zoned)
* @return ERR_OK if group was joined on the netif, an err_t otherwise
*/
err_t
mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr)
{
struct mld_group *group;
#if LWIP_IPV6_SCOPES
ip6_addr_t ip6addr;
/* If the address has a particular scope but no zone set, use the netif to
* set one now. Within the mld6 module, all addresses are properly zoned. */
if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) {
ip6_addr_set(&ip6addr, groupaddr);
ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif);
groupaddr = &ip6addr;
}
IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif);
#endif /* LWIP_IPV6_SCOPES */
LWIP_ASSERT_CORE_LOCKED();
/* find group or create a new one if not found */
group = mld6_lookfor_group(netif, groupaddr);
if (group == NULL) {
/* Joining a new group. Create a new group entry. */
group = mld6_new_group(netif, groupaddr);
if (group == NULL) {
return ERR_MEM;
}
/* Activate this address on the MAC layer. */
if (netif->mld_mac_filter != NULL) {
netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER);
}
/* Report our membership. */
MLD6_STATS_INC(mld6.tx_report);
mld6_send(netif, group, ICMP6_TYPE_MLR);
mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS);
}
/* Increment group use */
group->use++;
return ERR_OK;
}
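/* A minimal usage sketch (hypothetical application code, to be run in the
 * TCPIP thread on a netif with NETIF_FLAG_MLD6 set): join a well-known
 * multicast group and leave it again when done; the group address shown is
 * just an example:
 *
 *   ip6_addr_t group;
 *   ip6addr_aton("FF02::FB", &group);
 *   if (mld6_joingroup_netif(netif, &group) == ERR_OK) {
 *     ...receive traffic sent to the group...
 *     mld6_leavegroup_netif(netif, &group);
 *   }
 */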
/**
* @ingroup mld6
* Leave a group on a network interface.
*
* Zoning of address follows the same rules as @ref mld6_joingroup.
*
* @param srcaddr ipv6 address (zoned) of the network interface which should
* leave the group. If IP6_ADDR_ANY6, leave on all netifs
* @param groupaddr the ipv6 address of the group to leave (possibly, but not
* necessarily zoned)
* @return ERR_OK if group was left on the netif(s), an err_t otherwise
*/
err_t
mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr)
{
err_t err = ERR_VAL; /* no matching interface */
struct netif *netif;
LWIP_ASSERT_CORE_LOCKED();
/* loop through netif's */
NETIF_FOREACH(netif) {
/* Should we leave this interface ? */
if (ip6_addr_isany(srcaddr) ||
netif_get_ip6_addr_match(netif, srcaddr) >= 0) {
err_t res = mld6_leavegroup_netif(netif, groupaddr);
if (err != ERR_OK) {
/* Store this result if we have not yet gotten a success */
err = res;
}
}
}
return err;
}
/**
* @ingroup mld6
* Leave a group on a network interface.
*
* @param netif the network interface which should leave the group.
* @param groupaddr the ipv6 address of the group to leave (possibly, but not
* necessarily zoned)
* @return ERR_OK if group was left on the netif, an err_t otherwise
*/
err_t
mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr)
{
struct mld_group *group;
#if LWIP_IPV6_SCOPES
ip6_addr_t ip6addr;
if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) {
ip6_addr_set(&ip6addr, groupaddr);
ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif);
groupaddr = &ip6addr;
}
IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif);
#endif /* LWIP_IPV6_SCOPES */
LWIP_ASSERT_CORE_LOCKED();
/* find group */
group = mld6_lookfor_group(netif, groupaddr);
if (group != NULL) {
/* Leave if there is no other use of the group */
if (group->use <= 1) {
/* Remove the group from the list */
mld6_remove_group(netif, group);
/* If we are the last reporter for this group */
if (group->last_reporter_flag) {
MLD6_STATS_INC(mld6.tx_leave);
mld6_send(netif, group, ICMP6_TYPE_MLD);
}
/* Disable the group at the MAC level */
if (netif->mld_mac_filter != NULL) {
netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER);
}
/* free group struct */
memp_free(MEMP_MLD6_GROUP, group);
} else {
/* Decrement group use */
group->use--;
}
/* Left group */
return ERR_OK;
}
/* Group not found */
return ERR_VAL;
}
/**
* Periodic timer for mld processing. Must be called every
* MLD6_TMR_INTERVAL milliseconds (100).
*
* When a delaying member expires, a membership report is sent.
*/
void
mld6_tmr(void)
{
struct netif *netif;
NETIF_FOREACH(netif) {
struct mld_group *group = netif_mld6_data(netif);
while (group != NULL) {
if (group->timer > 0) {
group->timer--;
if (group->timer == 0) {
/* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */
if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) {
MLD6_STATS_INC(mld6.tx_report);
mld6_send(netif, group, ICMP6_TYPE_MLR);
group->group_state = MLD6_GROUP_IDLE_MEMBER;
}
}
}
group = group->next;
}
}
}
/**
* Schedule a delayed membership report for a group
*
 * @param group the mld_group for which a "delaying" membership report
* should be sent
* @param maxresp_in the max resp delay provided in the query
*/
static void
mld6_delayed_report(struct mld_group *group, u16_t maxresp_in)
{
/* Convert maxresp from milliseconds to tmr ticks */
u16_t maxresp = maxresp_in / MLD6_TMR_INTERVAL;
if (maxresp == 0) {
maxresp = 1;
}
#ifdef LWIP_RAND
/* Randomize maxresp. (if LWIP_RAND is supported) */
maxresp = (u16_t)(LWIP_RAND() % maxresp);
if (maxresp == 0) {
maxresp = 1;
}
#endif /* LWIP_RAND */
/* Apply timer value if no report has been scheduled already. */
if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) ||
((group->group_state == MLD6_GROUP_DELAYING_MEMBER) &&
((group->timer == 0) || (maxresp < group->timer)))) {
group->timer = maxresp;
group->group_state = MLD6_GROUP_DELAYING_MEMBER;
}
}
/**
* Send a MLD message (report or done).
*
* An IPv6 hop-by-hop options header with a router alert option
* is prepended.
*
* @param group the group to report or quit
* @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done)
*/
static void
mld6_send(struct netif *netif, struct mld_group *group, u8_t type)
{
struct mld_header *mld_hdr;
struct pbuf *p;
const ip6_addr_t *src_addr;
/* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */
p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + MLD6_HBH_HLEN, PBUF_RAM);
if (p == NULL) {
MLD6_STATS_INC(mld6.memerr);
return;
}
/* Move to make room for Hop-by-hop options header. */
if (pbuf_remove_header(p, MLD6_HBH_HLEN)) {
pbuf_free(p);
MLD6_STATS_INC(mld6.lenerr);
return;
}
/* Select our source address. */
if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) {
/* This is a special case, when we are performing duplicate address detection.
* We must join the multicast group, but we don't have a valid address yet. */
src_addr = IP6_ADDR_ANY6;
} else {
/* Use link-local address as source address. */
src_addr = netif_ip6_addr(netif, 0);
}
/* MLD message header pointer. */
mld_hdr = (struct mld_header *)p->payload;
/* Set fields. */
mld_hdr->type = type;
mld_hdr->code = 0;
mld_hdr->chksum = 0;
mld_hdr->max_resp_delay = 0;
mld_hdr->reserved = 0;
ip6_addr_copy_to_packed(mld_hdr->multicast_address, group->group_address);
#if CHECKSUM_GEN_ICMP6
IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len,
src_addr, &(group->group_address));
}
#endif /* CHECKSUM_GEN_ICMP6 */
/* Add hop-by-hop headers options: router alert with MLD value. */
ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD);
if (type == ICMP6_TYPE_MLR) {
/* Remember we were the last to report */
group->last_reporter_flag = 1;
}
/* Send the packet out. */
MLD6_STATS_INC(mld6.xmit);
ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address),
MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif);
pbuf_free(p);
}
#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */

View File

@ -1,447 +0,0 @@
/**
* @file
* Dynamic pool memory manager
*
* lwIP has dedicated pools for many structures (netconn, protocol control blocks,
* packet buffers, ...). All these pools are managed here.
*
* @defgroup mempool Memory pools
* @ingroup infrastructure
* Custom memory pools
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#include "lwip/memp.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include <string.h>
/* Make sure we include everything we need for size calculation required by memp_std.h */
#include "lwip/pbuf.h"
#include "lwip/raw.h"
#include "lwip/udp.h"
#include "lwip/tcp.h"
#include "lwip/priv/tcp_priv.h"
#include "lwip/altcp.h"
#include "lwip/ip4_frag.h"
#include "lwip/netbuf.h"
#include "lwip/api.h"
#include "lwip/priv/tcpip_priv.h"
#include "lwip/priv/api_msg.h"
#include "lwip/priv/sockets_priv.h"
#include "lwip/etharp.h"
#include "lwip/igmp.h"
#include "lwip/timeouts.h"
/* needed by default MEMP_NUM_SYS_TIMEOUT */
#include "netif/ppp/ppp_opts.h"
#include "lwip/netdb.h"
#include "lwip/dns.h"
#include "lwip/priv/nd6_priv.h"
#include "lwip/ip6_frag.h"
#include "lwip/mld6.h"
#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc)
#include "lwip/priv/memp_std.h"
const struct memp_desc *const memp_pools[MEMP_MAX] = {
#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name,
#include "lwip/priv/memp_std.h"
};
#ifdef LWIP_HOOK_FILENAME
#include LWIP_HOOK_FILENAME
#endif
#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2
#undef MEMP_OVERFLOW_CHECK
/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */
#define MEMP_OVERFLOW_CHECK 1
#endif
#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC
/**
* Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm".
*/
static int
memp_sanity(const struct memp_desc *desc)
{
struct memp *t, *h;
t = *desc->tab;
if (t != NULL) {
for (h = t->next; (t != NULL) && (h != NULL); t = t->next,
h = ((h->next != NULL) ? h->next->next : NULL)) {
if (t == h) {
return 0;
}
}
}
return 1;
}
#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */
#if MEMP_OVERFLOW_CHECK
/**
* Check if a memp element was victim of an overflow or underflow
* (e.g. the restricted area after/before it has been altered)
*
* @param p the memp element to check
* @param desc the pool p comes from
*/
static void
memp_overflow_check_element(struct memp *p, const struct memp_desc *desc)
{
mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc);
}
/**
 * Initialize the restricted area of a memp element.
*/
static void
memp_overflow_init_element(struct memp *p, const struct memp_desc *desc)
{
mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size);
}
#if MEMP_OVERFLOW_CHECK >= 2
/**
* Do an overflow check for all elements in every pool.
*
* @see memp_overflow_check_element for a description of the check
*/
static void
memp_overflow_check_all(void)
{
u16_t i, j;
struct memp *p;
SYS_ARCH_DECL_PROTECT(old_level);
SYS_ARCH_PROTECT(old_level);
for (i = 0; i < MEMP_MAX; ++i) {
p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base);
for (j = 0; j < memp_pools[i]->num; ++j) {
memp_overflow_check_element(p, memp_pools[i]);
p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED));
}
}
SYS_ARCH_UNPROTECT(old_level);
}
#endif /* MEMP_OVERFLOW_CHECK >= 2 */
#endif /* MEMP_OVERFLOW_CHECK */
/**
* Initialize custom memory pool.
* Related functions: memp_malloc_pool, memp_free_pool
*
* @param desc pool to initialize
*/
void
memp_init_pool(const struct memp_desc *desc)
{
#if MEMP_MEM_MALLOC
LWIP_UNUSED_ARG(desc);
#else
int i;
struct memp *memp;
*desc->tab = NULL;
memp = (struct memp *)LWIP_MEM_ALIGN(desc->base);
#if MEMP_MEM_INIT
/* force memset on pool memory */
memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size
#if MEMP_OVERFLOW_CHECK
+ MEM_SANITY_REGION_AFTER_ALIGNED
#endif
));
#endif
/* create a linked list of memp elements */
for (i = 0; i < desc->num; ++i) {
memp->next = *desc->tab;
*desc->tab = memp;
#if MEMP_OVERFLOW_CHECK
memp_overflow_init_element(memp, desc);
#endif /* MEMP_OVERFLOW_CHECK */
/* cast through void* to get rid of alignment warnings */
memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size
#if MEMP_OVERFLOW_CHECK
+ MEM_SANITY_REGION_AFTER_ALIGNED
#endif
);
}
#if MEMP_STATS
desc->stats->avail = desc->num;
#endif /* MEMP_STATS */
#endif /* !MEMP_MEM_MALLOC */
#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY)
desc->stats->name = desc->desc;
#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */
}
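/* A minimal custom-pool sketch (hypothetical application code; 'my_pool' and
 * 'struct my_item' are placeholders). LWIP_MEMPOOL_DECLARE creates the
 * memp_<name> descriptor that the memp_*_pool functions here operate on:
 *
 *   LWIP_MEMPOOL_DECLARE(my_pool, 10, sizeof(struct my_item), "MY_POOL")
 *
 *   memp_init_pool(&memp_my_pool);
 *   it = (struct my_item *)memp_malloc_pool(&memp_my_pool);
 *   if (it != NULL) {
 *     ...use the element...
 *     memp_free_pool(&memp_my_pool, it);
 *   }
 */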
/**
* Initializes lwIP built-in pools.
* Related functions: memp_malloc, memp_free
*
* Carves out memp_memory into linked lists for each pool-type.
*/
void
memp_init(void)
{
u16_t i;
/* for every pool: */
for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) {
memp_init_pool(memp_pools[i]);
#if LWIP_STATS && MEMP_STATS
lwip_stats.memp[i] = memp_pools[i]->stats;
#endif
}
#if MEMP_OVERFLOW_CHECK >= 2
/* check everything a first time to see if it worked */
memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK >= 2 */
}
static void *
#if !MEMP_OVERFLOW_CHECK
do_memp_malloc_pool(const struct memp_desc *desc)
#else
do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line)
#endif
{
struct memp *memp;
SYS_ARCH_DECL_PROTECT(old_level);
#if MEMP_MEM_MALLOC
memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size));
SYS_ARCH_PROTECT(old_level);
#else /* MEMP_MEM_MALLOC */
SYS_ARCH_PROTECT(old_level);
memp = *desc->tab;
#endif /* MEMP_MEM_MALLOC */
if (memp != NULL) {
#if !MEMP_MEM_MALLOC
#if MEMP_OVERFLOW_CHECK == 1
memp_overflow_check_element(memp, desc);
#endif /* MEMP_OVERFLOW_CHECK */
*desc->tab = memp->next;
#if MEMP_OVERFLOW_CHECK
memp->next = NULL;
#endif /* MEMP_OVERFLOW_CHECK */
#endif /* !MEMP_MEM_MALLOC */
#if MEMP_OVERFLOW_CHECK
memp->file = file;
memp->line = line;
#if MEMP_MEM_MALLOC
memp_overflow_init_element(memp, desc);
#endif /* MEMP_MEM_MALLOC */
#endif /* MEMP_OVERFLOW_CHECK */
LWIP_ASSERT("memp_malloc: memp properly aligned",
((mem_ptr_t)memp % MEM_ALIGNMENT) == 0);
#if MEMP_STATS
desc->stats->used++;
if (desc->stats->used > desc->stats->max) {
desc->stats->max = desc->stats->used;
}
#endif
SYS_ARCH_UNPROTECT(old_level);
/* cast through u8_t* to get rid of alignment warnings */
return ((u8_t *)memp + MEMP_SIZE);
} else {
#if MEMP_STATS
desc->stats->err++;
#endif
SYS_ARCH_UNPROTECT(old_level);
LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc));
}
return NULL;
}
/**
* Get an element from a custom pool.
*
* @param desc the pool to get an element from
*
* @return a pointer to the allocated memory or a NULL pointer on error
*/
void *
#if !MEMP_OVERFLOW_CHECK
memp_malloc_pool(const struct memp_desc *desc)
#else
memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line)
#endif
{
LWIP_ASSERT("invalid pool desc", desc != NULL);
if (desc == NULL) {
return NULL;
}
#if !MEMP_OVERFLOW_CHECK
return do_memp_malloc_pool(desc);
#else
return do_memp_malloc_pool_fn(desc, file, line);
#endif
}
/**
* Get an element from a specific pool.
*
* @param type the pool to get an element from
*
* @return a pointer to the allocated memory or a NULL pointer on error
*/
void *
#if !MEMP_OVERFLOW_CHECK
memp_malloc(memp_t type)
#else
memp_malloc_fn(memp_t type, const char *file, const int line)
#endif
{
void *memp;
LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;);
#if MEMP_OVERFLOW_CHECK >= 2
memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK >= 2 */
#if !MEMP_OVERFLOW_CHECK
memp = do_memp_malloc_pool(memp_pools[type]);
#else
memp = do_memp_malloc_pool_fn(memp_pools[type], file, line);
#endif
return memp;
}
static void
do_memp_free_pool(const struct memp_desc *desc, void *mem)
{
struct memp *memp;
SYS_ARCH_DECL_PROTECT(old_level);
LWIP_ASSERT("memp_free: mem properly aligned",
((mem_ptr_t)mem % MEM_ALIGNMENT) == 0);
/* cast through void* to get rid of alignment warnings */
memp = (struct memp *)(void *)((u8_t *)mem - MEMP_SIZE);
SYS_ARCH_PROTECT(old_level);
#if MEMP_OVERFLOW_CHECK == 1
memp_overflow_check_element(memp, desc);
#endif /* MEMP_OVERFLOW_CHECK */
#if MEMP_STATS
desc->stats->used--;
#endif
#if MEMP_MEM_MALLOC
LWIP_UNUSED_ARG(desc);
SYS_ARCH_UNPROTECT(old_level);
mem_free(memp);
#else /* MEMP_MEM_MALLOC */
memp->next = *desc->tab;
*desc->tab = memp;
#if MEMP_SANITY_CHECK
LWIP_ASSERT("memp sanity", memp_sanity(desc));
#endif /* MEMP_SANITY_CHECK */
SYS_ARCH_UNPROTECT(old_level);
#endif /* !MEMP_MEM_MALLOC */
}
/**
* Put a custom pool element back into its pool.
*
* @param desc the pool where to put mem
* @param mem the memp element to free
*/
void
memp_free_pool(const struct memp_desc *desc, void *mem)
{
LWIP_ASSERT("invalid pool desc", desc != NULL);
if ((desc == NULL) || (mem == NULL)) {
return;
}
do_memp_free_pool(desc, mem);
}
/**
* Put an element back into its pool.
*
* @param type the pool where to put mem
* @param mem the memp element to free
*/
void
memp_free(memp_t type, void *mem)
{
#ifdef LWIP_HOOK_MEMP_AVAILABLE
struct memp *old_first;
#endif
LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;);
if (mem == NULL) {
return;
}
#if MEMP_OVERFLOW_CHECK >= 2
memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK >= 2 */
#ifdef LWIP_HOOK_MEMP_AVAILABLE
old_first = *memp_pools[type]->tab;
#endif
do_memp_free_pool(memp_pools[type], mem);
#ifdef LWIP_HOOK_MEMP_AVAILABLE
if (old_first == NULL) {
LWIP_HOOK_MEMP_AVAILABLE(type);
}
#endif
}
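/* A minimal built-in pool sketch (assumed code; MEMP_SYS_TIMEOUT is one of
 * the MEMP_* types generated from memp_std.h):
 *
 *   struct sys_timeo *t = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT);
 *   if (t != NULL) {
 *     ...
 *     memp_free(MEMP_SYS_TIMEOUT, t);
 *   }
 */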

View File

@ -1,671 +0,0 @@
/**
* @file
* Implementation of raw protocol PCBs for low-level handling of
* different types of protocols besides (or overriding) those
* already available in lwIP.\n
* See also @ref raw_raw
*
* @defgroup raw_raw RAW
* @ingroup callbackstyle_api
* Implementation of raw protocol PCBs for low-level handling of
* different types of protocols besides (or overriding) those
* already available in lwIP.\n
* @see @ref api
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */
#include "lwip/def.h"
#include "lwip/memp.h"
#include "lwip/ip_addr.h"
#include "lwip/netif.h"
#include "lwip/raw.h"
#include "lwip/priv/raw_priv.h"
#include "lwip/stats.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/inet_chksum.h"
#include <string.h>
/** The list of RAW PCBs */
static struct raw_pcb *raw_pcbs;
static u8_t
raw_input_local_match(struct raw_pcb *pcb, u8_t broadcast)
{
LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */
/* check if PCB is bound to specific netif */
if ((pcb->netif_idx != NETIF_NO_INDEX) &&
(pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
return 0;
}
#if LWIP_IPV4 && LWIP_IPV6
/* Dual-stack: PCBs listening to any IP type also listen to any IP address */
if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
#if IP_SOF_BROADCAST_RECV
if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) {
return 0;
}
#endif /* IP_SOF_BROADCAST_RECV */
return 1;
}
#endif /* LWIP_IPV4 && LWIP_IPV6 */
/* Only need to check PCB if incoming IP version matches PCB IP version */
if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) {
#if LWIP_IPV4
/* Special case: IPv4 broadcast: receive all broadcasts
* Note: broadcast variable can only be 1 if it is an IPv4 broadcast */
if (broadcast != 0) {
#if IP_SOF_BROADCAST_RECV
if (ip_get_option(pcb, SOF_BROADCAST))
#endif /* IP_SOF_BROADCAST_RECV */
{
if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) {
return 1;
}
}
} else
#endif /* LWIP_IPV4 */
/* Handle IPv4 and IPv6: catch all or exact match */
if (ip_addr_isany(&pcb->local_ip) ||
ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
return 1;
}
}
return 0;
}
/**
 * Determine if an incoming IP packet is covered by a RAW PCB
* and if so, pass it to a user-provided receive callback function.
*
* Given an incoming IP datagram (as a chain of pbufs) this function
* finds a corresponding RAW PCB and calls the corresponding receive
* callback function.
*
* @param p pbuf to be demultiplexed to a RAW PCB.
* @param inp network interface on which the datagram was received.
* @return - 1 if the packet has been eaten by a RAW PCB receive
 * callback function. The caller MAY NOT reference the
* packet any longer, and MAY NOT call pbuf_free().
* @return - 0 if packet is not eaten (pbuf is still referenced by the
* caller).
*
*/
raw_input_state_t
raw_input(struct pbuf *p, struct netif *inp)
{
struct raw_pcb *pcb, *prev;
s16_t proto;
raw_input_state_t ret = RAW_INPUT_NONE;
u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif());
LWIP_UNUSED_ARG(inp);
#if LWIP_IPV6
#if LWIP_IPV4
if (IP_HDR_GET_VERSION(p->payload) == 6)
#endif /* LWIP_IPV4 */
{
struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload;
proto = IP6H_NEXTH(ip6hdr);
}
#if LWIP_IPV4
else
#endif /* LWIP_IPV4 */
#endif /* LWIP_IPV6 */
#if LWIP_IPV4
{
proto = IPH_PROTO((struct ip_hdr *)p->payload);
}
#endif /* LWIP_IPV4 */
prev = NULL;
pcb = raw_pcbs;
/* loop through all raw pcbs until the packet is eaten by one */
/* this allows multiple pcbs to match against the packet by design */
while (pcb != NULL) {
if ((pcb->protocol == proto) && raw_input_local_match(pcb, broadcast) &&
(((pcb->flags & RAW_FLAGS_CONNECTED) == 0) ||
ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) {
/* receive callback function available? */
if (pcb->recv != NULL) {
u8_t eaten;
#ifndef LWIP_NOASSERT
void *old_payload = p->payload;
#endif
ret = RAW_INPUT_DELIVERED;
/* the receive callback function did not eat the packet? */
eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr());
if (eaten != 0) {
/* receive function ate the packet */
p = NULL;
if (prev != NULL) {
/* move the pcb to the front of raw_pcbs so that it is
found faster next time */
prev->next = pcb->next;
pcb->next = raw_pcbs;
raw_pcbs = pcb;
}
return RAW_INPUT_EATEN;
} else {
/* sanity-check that the receive callback did not alter the pbuf */
LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet",
p->payload == old_payload);
}
}
/* no receive callback function was set for this raw PCB */
}
/* drop the packet */
prev = pcb;
pcb = pcb->next;
}
return ret;
}
/**
* @ingroup raw_raw
* Bind a RAW PCB.
*
* @param pcb RAW PCB to be bound with a local address ipaddr.
* @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to
* bind to all local interfaces.
*
* @return lwIP error code.
* - ERR_OK. Successful. No error occurred.
* - ERR_USE. The specified IP address is already bound to by
* another RAW PCB.
*
* @see raw_disconnect()
*/
err_t
raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr)
{
LWIP_ASSERT_CORE_LOCKED();
if ((pcb == NULL) || (ipaddr == NULL)) {
return ERR_VAL;
}
ip_addr_set_ipaddr(&pcb->local_ip, ipaddr);
#if LWIP_IPV6 && LWIP_IPV6_SCOPES
/* If the given IP address should have a zone but doesn't, assign one now.
* This is legacy support: scope-aware callers should always provide properly
* zoned source addresses. */
if (IP_IS_V6(&pcb->local_ip) &&
ip6_addr_lacks_zone(ip_2_ip6(&pcb->local_ip), IP6_UNKNOWN)) {
ip6_addr_select_zone(ip_2_ip6(&pcb->local_ip), ip_2_ip6(&pcb->local_ip));
}
#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
return ERR_OK;
}
/**
* @ingroup raw_raw
 * Bind a RAW PCB to a specific netif.
* After calling this function, all packets received via this PCB
* are guaranteed to have come in via the specified netif, and all
* outgoing packets will go out via the specified netif.
*
* @param pcb RAW PCB to be bound with netif.
* @param netif netif to bind to. Can be NULL.
*
* @see raw_disconnect()
*/
void
raw_bind_netif(struct raw_pcb *pcb, const struct netif *netif)
{
LWIP_ASSERT_CORE_LOCKED();
if (netif != NULL) {
pcb->netif_idx = netif_get_index(netif);
} else {
pcb->netif_idx = NETIF_NO_INDEX;
}
}
/**
* @ingroup raw_raw
 * Connect a RAW PCB. This function is required by upper layers
 * of lwIP. Using the raw API you could use raw_sendto() instead.
*
* This will associate the RAW PCB with the remote address.
*
* @param pcb RAW PCB to be connected with remote address ipaddr and port.
* @param ipaddr remote IP address to connect with.
*
* @return lwIP error code
*
* @see raw_disconnect() and raw_sendto()
*/
err_t
raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr)
{
LWIP_ASSERT_CORE_LOCKED();
if ((pcb == NULL) || (ipaddr == NULL)) {
return ERR_VAL;
}
ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr);
#if LWIP_IPV6 && LWIP_IPV6_SCOPES
/* If the given IP address should have a zone but doesn't, assign one now,
* using the bound address to make a more informed decision when possible. */
if (IP_IS_V6(&pcb->remote_ip) &&
ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) {
ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip));
}
#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
raw_set_flags(pcb, RAW_FLAGS_CONNECTED);
return ERR_OK;
}
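/* A minimal usage sketch (hypothetical application code): with the PCB
 * connected, raw_input only delivers packets whose source matches the peer,
 * until raw_disconnect() clears the association:
 *
 *   raw_connect(pcb, &peer_addr);
 *   ...
 *   raw_disconnect(pcb);
 */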
/**
* @ingroup raw_raw
* Disconnect a RAW PCB.
*
* @param pcb the raw pcb to disconnect.
*/
void
raw_disconnect(struct raw_pcb *pcb)
{
LWIP_ASSERT_CORE_LOCKED();
/* reset remote address association */
#if LWIP_IPV4 && LWIP_IPV6
if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE);
} else {
#endif
ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip);
#if LWIP_IPV4 && LWIP_IPV6
}
#endif
pcb->netif_idx = NETIF_NO_INDEX;
/* mark PCB as unconnected */
raw_clear_flags(pcb, RAW_FLAGS_CONNECTED);
}
/**
* @ingroup raw_raw
* Set the callback function for received packets that match the
* raw PCB's protocol and binding.
*
* The callback function MUST either
* - eat the packet by calling pbuf_free() and returning non-zero. The
* packet will not be passed to other raw PCBs or other protocol layers.
* - not free the packet, and return zero. The packet will be matched
 * against further PCBs and/or forwarded to other protocol layers.
*/
void
raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg)
{
LWIP_ASSERT_CORE_LOCKED();
/* remember recv() callback and user data */
pcb->recv = recv;
pcb->recv_arg = recv_arg;
}
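/* A minimal usage sketch (hypothetical application code): an ICMP PCB with a
 * receive callback; returning 1 after pbuf_free() marks the packet as eaten,
 * returning 0 passes it on to other PCBs and protocol layers:
 *
 *   static u8_t my_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
 *                       const ip_addr_t *addr)
 *   {
 *     if (...packet is for us...) {
 *       pbuf_free(p);
 *       return 1;
 *     }
 *     return 0;
 *   }
 *
 *   struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
 *   raw_recv(pcb, my_recv, NULL);
 *   raw_bind(pcb, IP_ADDR_ANY);
 */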
/**
* @ingroup raw_raw
* Send the raw IP packet to the given address. An IP header will be prepended
* to the packet, unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that
* case, the packet must include an IP header, which will then be sent as is.
*
 * @param pcb the raw pcb used to send
* @param p the IP payload to send
* @param ipaddr the destination address of the IP packet
*
*/
err_t
raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr)
{
struct netif *netif;
const ip_addr_t *src_ip;
if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) {
return ERR_VAL;
}
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n"));
if (pcb->netif_idx != NETIF_NO_INDEX) {
netif = netif_get_by_index(pcb->netif_idx);
} else {
#if LWIP_MULTICAST_TX_OPTIONS
netif = NULL;
if (ip_addr_ismulticast(ipaddr)) {
/* For multicast-destined packets, use the user-provided interface index to
* determine the outgoing interface, if an interface index is set and a
* matching netif can be found. Otherwise, fall back to regular routing. */
netif = netif_get_by_index(pcb->mcast_ifindex);
}
if (netif == NULL)
#endif /* LWIP_MULTICAST_TX_OPTIONS */
{
netif = ip_route(&pcb->local_ip, ipaddr);
}
}
if (netif == NULL) {
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to "));
ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr);
return ERR_RTE;
}
if (ip_addr_isany(&pcb->local_ip) || ip_addr_ismulticast(&pcb->local_ip)) {
/* use outgoing network interface IP address as source address */
src_ip = ip_netif_get_local_ip(netif, ipaddr);
#if LWIP_IPV6
if (src_ip == NULL) {
return ERR_RTE;
}
#endif /* LWIP_IPV6 */
} else {
/* use RAW PCB local IP address as source address */
src_ip = &pcb->local_ip;
}
return raw_sendto_if_src(pcb, p, ipaddr, netif, src_ip);
}
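/* Editor's illustrative sketch (not part of the original sources): with
 * RAW_FLAGS_HDRINCL set, the application supplies the complete IP header
 * itself and raw_sendto() transmits the pbuf as-is. Building a pbuf that
 * starts with a full, valid IP header is left to the caller. */
static err_t
example_sendto_hdrincl(struct raw_pcb *pcb, struct pbuf *hdr_and_payload,
                       const ip_addr_t *dest)
{
  raw_set_flags(pcb, RAW_FLAGS_HDRINCL);
  return raw_sendto(pcb, hdr_and_payload, dest);
}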
/**
* @ingroup raw_raw
* Send the raw IP packet to the given address, using a particular outgoing
* netif and source IP address. An IP header will be prepended to the packet,
* unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that case, the
* packet must include an IP header, which will then be sent as is.
*
* @param pcb RAW PCB used to send the data
* @param p chain of pbufs to be sent
* @param dst_ip destination IP address
* @param netif the netif used for sending
* @param src_ip source IP address
*/
err_t
raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip,
struct netif *netif, const ip_addr_t *src_ip)
{
err_t err;
struct pbuf *q; /* q will be sent down the stack */
u16_t header_size;
u8_t ttl;
LWIP_ASSERT_CORE_LOCKED();
if ((pcb == NULL) || (dst_ip == NULL) || (netif == NULL) || (src_ip == NULL) ||
!IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) {
return ERR_VAL;
}
header_size = (
#if LWIP_IPV4 && LWIP_IPV6
IP_IS_V6(dst_ip) ? IP6_HLEN : IP_HLEN);
#elif LWIP_IPV4
IP_HLEN);
#else
IP6_HLEN);
#endif
/* Handle the HDRINCL option as an exception: none of the code below applies
* to this case, and sending the packet needs to be done differently too. */
if (pcb->flags & RAW_FLAGS_HDRINCL) {
/* A full header *must* be present in the first pbuf of the chain, as the
* output routines may access its fields directly. */
if (p->len < header_size) {
return ERR_VAL;
}
  /* @todo multicast loop support, if at all desired for this scenario... */
NETIF_SET_HINTS(netif, &pcb->netif_hints);
err = ip_output_if_hdrincl(p, src_ip, dst_ip, netif);
NETIF_RESET_HINTS(netif);
return err;
}
/* packet too large to add an IP header without causing an overflow? */
if ((u16_t)(p->tot_len + header_size) < p->tot_len) {
return ERR_MEM;
}
/* not enough space to add an IP header to first pbuf in given p chain? */
if (pbuf_add_header(p, header_size)) {
/* allocate header in new pbuf */
q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM);
/* new header pbuf could not be allocated? */
if (q == NULL) {
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n"));
return ERR_MEM;
}
if (p->tot_len != 0) {
/* chain header q in front of given pbuf p */
pbuf_chain(q, p);
}
/* { first pbuf q points to header pbuf } */
LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p));
} else {
/* first pbuf q equals given pbuf */
q = p;
if (pbuf_remove_header(q, header_size)) {
LWIP_ASSERT("Can't restore header we just removed!", 0);
return ERR_MEM;
}
}
#if IP_SOF_BROADCAST
if (IP_IS_V4(dst_ip)) {
/* broadcast filter? */
if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(dst_ip, netif)) {
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb));
      /* free any temporary header pbuf allocated by pbuf_alloc() above */
if (q != p) {
pbuf_free(q);
}
return ERR_VAL;
}
}
#endif /* IP_SOF_BROADCAST */
/* Multicast Loop? */
#if LWIP_MULTICAST_TX_OPTIONS
if (((pcb->flags & RAW_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) {
q->flags |= PBUF_FLAG_MCASTLOOP;
}
#endif /* LWIP_MULTICAST_TX_OPTIONS */
#if LWIP_IPV6
  /* If requested, based on the IPV6_CHECKSUM socket option per RFC 3542,
     compute the checksum and store it in the payload. */
if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) {
u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip));
LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2));
SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t));
}
#endif
/* Determine TTL to use */
#if LWIP_MULTICAST_TX_OPTIONS
ttl = (ip_addr_ismulticast(dst_ip) ? raw_get_multicast_ttl(pcb) : pcb->ttl);
#else /* LWIP_MULTICAST_TX_OPTIONS */
ttl = pcb->ttl;
#endif /* LWIP_MULTICAST_TX_OPTIONS */
NETIF_SET_HINTS(netif, &pcb->netif_hints);
err = ip_output_if(q, src_ip, dst_ip, ttl, pcb->tos, pcb->protocol, netif);
NETIF_RESET_HINTS(netif);
/* did we chain a header earlier? */
if (q != p) {
/* free the header */
pbuf_free(q);
}
return err;
}
/**
* @ingroup raw_raw
* Send the raw IP packet to the address given by raw_connect()
*
 * @param pcb the raw pcb used to send
* @param p the IP payload to send
*
*/
err_t
raw_send(struct raw_pcb *pcb, struct pbuf *p)
{
return raw_sendto(pcb, p, &pcb->remote_ip);
}
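/* Editor's illustrative sketch (not part of the original sources): create a
 * raw ICMP PCB, fix its peer and send a payload. Filling in a valid ICMP
 * echo request is left out; error handling is minimal. */
static err_t
example_raw_ping(const ip_addr_t *dest)
{
  struct raw_pcb *pcb;
  struct pbuf *p;
  err_t err;
  pcb = raw_new(IP_PROTO_ICMP);
  if (pcb == NULL) {
    return ERR_MEM;
  }
  raw_bind(pcb, IP_ADDR_ANY);           /* any local address */
  raw_connect(pcb, dest);               /* remember the remote address */
  p = pbuf_alloc(PBUF_IP, 8, PBUF_RAM); /* room for an 8-byte ICMP header */
  if (p == NULL) {
    raw_remove(pcb);
    return ERR_MEM;
  }
  /* ...fill p->payload with a valid ICMP echo request here... */
  err = raw_send(pcb, p);               /* same as raw_sendto(pcb, p, dest) */
  pbuf_free(p);
  raw_remove(pcb);
  return err;
}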
/**
* @ingroup raw_raw
 * Remove a RAW PCB.
*
* @param pcb RAW PCB to be removed. The PCB is removed from the list of
 * RAW PCBs and the data structure is freed from memory.
*
* @see raw_new()
*/
void
raw_remove(struct raw_pcb *pcb)
{
struct raw_pcb *pcb2;
LWIP_ASSERT_CORE_LOCKED();
/* pcb to be removed is first in list? */
if (raw_pcbs == pcb) {
/* make list start at 2nd pcb */
raw_pcbs = raw_pcbs->next;
/* pcb not 1st in list */
} else {
for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) {
/* find pcb in raw_pcbs list */
if (pcb2->next != NULL && pcb2->next == pcb) {
/* remove pcb from list */
pcb2->next = pcb->next;
break;
}
}
}
memp_free(MEMP_RAW_PCB, pcb);
}
/**
* @ingroup raw_raw
* Create a RAW PCB.
*
* @return The RAW PCB which was created. NULL if the PCB data structure
* could not be allocated.
*
 * @param proto the protocol number of the IP payload (e.g. IP_PROTO_ICMP)
*
* @see raw_remove()
*/
struct raw_pcb *
raw_new(u8_t proto)
{
struct raw_pcb *pcb;
LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n"));
LWIP_ASSERT_CORE_LOCKED();
pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB);
/* could allocate RAW PCB? */
if (pcb != NULL) {
/* initialize PCB to all zeroes */
memset(pcb, 0, sizeof(struct raw_pcb));
pcb->protocol = proto;
pcb->ttl = RAW_TTL;
#if LWIP_MULTICAST_TX_OPTIONS
raw_set_multicast_ttl(pcb, RAW_TTL);
#endif /* LWIP_MULTICAST_TX_OPTIONS */
pcb->next = raw_pcbs;
raw_pcbs = pcb;
}
return pcb;
}
/**
* @ingroup raw_raw
* Create a RAW PCB for specific IP type.
*
* @return The RAW PCB which was created. NULL if the PCB data structure
* could not be allocated.
*
* @param type IP address type, see @ref lwip_ip_addr_type definitions.
* If you want to listen to IPv4 and IPv6 (dual-stack) packets,
* supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE.
* @param proto the protocol number (next header) of the IPv6 packet payload
* (e.g. IP6_NEXTH_ICMP6)
*
* @see raw_remove()
*/
struct raw_pcb *
raw_new_ip_type(u8_t type, u8_t proto)
{
struct raw_pcb *pcb;
LWIP_ASSERT_CORE_LOCKED();
pcb = raw_new(proto);
#if LWIP_IPV4 && LWIP_IPV6
if (pcb != NULL) {
IP_SET_TYPE_VAL(pcb->local_ip, type);
IP_SET_TYPE_VAL(pcb->remote_ip, type);
}
#else /* LWIP_IPV4 && LWIP_IPV6 */
LWIP_UNUSED_ARG(type);
#endif /* LWIP_IPV4 && LWIP_IPV6 */
return pcb;
}
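/* Editor's illustrative sketch (not part of the original sources): a
 * dual-stack raw PCB as described above; only meaningful when both
 * LWIP_IPV4 and LWIP_IPV6 are enabled. */
#if LWIP_IPV4 && LWIP_IPV6
static struct raw_pcb *
example_raw_dual_stack(u8_t proto)
{
  struct raw_pcb *pcb = raw_new_ip_type(IPADDR_TYPE_ANY, proto);
  if (pcb != NULL) {
    raw_bind(pcb, IP_ANY_TYPE); /* match both IPv4 and IPv6 packets */
  }
  return pcb;
}
#endif /* LWIP_IPV4 && LWIP_IPV6 */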
/** This function is called from netif.c when the netif's address is changed
*
* @param old_addr IP address of the netif before change
* @param new_addr IP address of the netif after change
*/
void raw_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
{
struct raw_pcb *rpcb;
if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) {
for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) {
/* PCB bound to current local interface address? */
if (ip_addr_cmp(&rpcb->local_ip, old_addr)) {
        /* The PCB is bound to the old ipaddr and
         * is now rebound to the new one */
ip_addr_copy(rpcb->local_ip, *new_addr);
}
}
}
}
#endif /* LWIP_RAW */


@@ -1,169 +0,0 @@
/**
* @file
* Statistics module
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#include "lwip/opt.h"
#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */
#include "lwip/def.h"
#include "lwip/stats.h"
#include "lwip/mem.h"
#include "lwip/debug.h"
#include <string.h>
struct stats_ lwip_stats;
void
stats_init(void)
{
#ifdef LWIP_DEBUG
#if MEM_STATS
lwip_stats.mem.name = "MEM";
#endif /* MEM_STATS */
#endif /* LWIP_DEBUG */
}
#if LWIP_STATS_DISPLAY
void
stats_display_proto(struct stats_proto *proto, const char *name)
{
LWIP_PLATFORM_DIAG(("\n%s\n\t", name));
LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit));
LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv));
LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw));
LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop));
LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr));
LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr));
LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr));
LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr));
LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr));
LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr));
LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err));
LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit));
}
#if IGMP_STATS || MLD6_STATS
void
stats_display_igmp(struct stats_igmp *igmp, const char *name)
{
LWIP_PLATFORM_DIAG(("\n%s\n\t", name));
LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit));
LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv));
LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop));
LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr));
LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr));
LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr));
LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr));
LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1));
LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group));
LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", igmp->rx_general));
LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report));
LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join));
LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave));
LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report));
}
#endif /* IGMP_STATS || MLD6_STATS */
#if MEM_STATS || MEMP_STATS
void
stats_display_mem(struct stats_mem *mem, const char *name)
{
LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name));
LWIP_PLATFORM_DIAG(("avail: %"MEM_SIZE_F"\n\t", mem->avail));
LWIP_PLATFORM_DIAG(("used: %"MEM_SIZE_F"\n\t", mem->used));
LWIP_PLATFORM_DIAG(("max: %"MEM_SIZE_F"\n\t", mem->max));
LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n", mem->err));
}
#if MEMP_STATS
void
stats_display_memp(struct stats_mem *mem, int idx)
{
if (idx < MEMP_MAX) {
stats_display_mem(mem, mem->name);
}
}
#endif /* MEMP_STATS */
#endif /* MEM_STATS || MEMP_STATS */
#if SYS_STATS
void
stats_display_sys(struct stats_sys *sys)
{
LWIP_PLATFORM_DIAG(("\nSYS\n\t"));
LWIP_PLATFORM_DIAG(("sem.used: %"STAT_COUNTER_F"\n\t", sys->sem.used));
LWIP_PLATFORM_DIAG(("sem.max: %"STAT_COUNTER_F"\n\t", sys->sem.max));
LWIP_PLATFORM_DIAG(("sem.err: %"STAT_COUNTER_F"\n\t", sys->sem.err));
LWIP_PLATFORM_DIAG(("mutex.used: %"STAT_COUNTER_F"\n\t", sys->mutex.used));
LWIP_PLATFORM_DIAG(("mutex.max: %"STAT_COUNTER_F"\n\t", sys->mutex.max));
LWIP_PLATFORM_DIAG(("mutex.err: %"STAT_COUNTER_F"\n\t", sys->mutex.err));
LWIP_PLATFORM_DIAG(("mbox.used: %"STAT_COUNTER_F"\n\t", sys->mbox.used));
LWIP_PLATFORM_DIAG(("mbox.max: %"STAT_COUNTER_F"\n\t", sys->mbox.max));
LWIP_PLATFORM_DIAG(("mbox.err: %"STAT_COUNTER_F"\n", sys->mbox.err));
}
#endif /* SYS_STATS */
void
stats_display(void)
{
s16_t i;
LINK_STATS_DISPLAY();
ETHARP_STATS_DISPLAY();
IPFRAG_STATS_DISPLAY();
IP6_FRAG_STATS_DISPLAY();
IP_STATS_DISPLAY();
ND6_STATS_DISPLAY();
IP6_STATS_DISPLAY();
IGMP_STATS_DISPLAY();
MLD6_STATS_DISPLAY();
ICMP_STATS_DISPLAY();
ICMP6_STATS_DISPLAY();
UDP_STATS_DISPLAY();
TCP_STATS_DISPLAY();
MEM_STATS_DISPLAY();
for (i = 0; i < MEMP_MAX; i++) {
MEMP_STATS_DISPLAY(i);
}
SYS_STATS_DISPLAY();
}
#endif /* LWIP_STATS_DISPLAY */
#endif /* LWIP_STATS */


@@ -1,148 +0,0 @@
/**
* @file
* lwIP Operating System abstraction
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
/**
* @defgroup sys_layer Porting (system abstraction layer)
* @ingroup lwip
*
* @defgroup sys_os OS abstraction layer
* @ingroup sys_layer
* No need to implement functions in this section in NO_SYS mode.
* The OS-specific code should be implemented in arch/sys_arch.h
* and sys_arch.c of your port.
*
* The operating system emulation layer provides a common interface
* between the lwIP code and the underlying operating system kernel. The
* general idea is that porting lwIP to new architectures requires only
* small changes to a few header files and a new sys_arch
* implementation. It is also possible to do a sys_arch implementation
* that does not rely on any underlying operating system.
*
* The sys_arch provides semaphores, mailboxes and mutexes to lwIP. For the full
 * lwIP functionality, support for multiple threads can be implemented in the
 * sys_arch, but this is not required for the basic lwIP
 * functionality. Timer scheduling is implemented in lwIP, but can instead be
 * provided by the sys_arch port (LWIP_TIMERS_CUSTOM==1).
*
* In addition to the source file providing the functionality of sys_arch,
* the OS emulation layer must provide several header files defining
 * macros used throughout lwIP. The files required and the macros they
* must define are listed below the sys_arch description.
*
 * Since lwIP 1.4.0, semaphore, mutex and mailbox functions are prototyped in a way that
 * allows either pointers or actual OS structures to be used. This way, memory
* required for such types can be either allocated in place (globally or on the
* stack) or on the heap (allocated internally in the "*_new()" functions).
*
* Note:
* -----
* Be careful with using mem_malloc() in sys_arch. When malloc() refers to
* mem_malloc() you can run into a circular function call problem. In mem.c
* mem_init() tries to allocate a semaphore using mem_malloc, which of course
* can't be performed when sys_arch uses mem_malloc.
*
* @defgroup sys_sem Semaphores
* @ingroup sys_os
* Semaphores can be either counting or binary - lwIP works with both
* kinds.
* Semaphores are represented by the type "sys_sem_t" which is typedef'd
* in the sys_arch.h file. Mailboxes are equivalently represented by the
* type "sys_mbox_t". Mutexes are represented by the type "sys_mutex_t".
* lwIP does not place any restrictions on how these types are represented
* internally.
*
* @defgroup sys_mutex Mutexes
* @ingroup sys_os
* Mutexes are recommended to correctly handle priority inversion,
 * especially if you use LWIP_CORE_LOCKING.
*
* @defgroup sys_mbox Mailboxes
* @ingroup sys_os
* Mailboxes should be implemented as a queue which allows multiple messages
 * to be posted (implementing as a rendezvous point where only one message can be
* posted at a time can have a highly negative impact on performance). A message
* in a mailbox is just a pointer, nothing more.
*
* @defgroup sys_time Time
* @ingroup sys_layer
*
* @defgroup sys_prot Critical sections
* @ingroup sys_layer
* Used to protect short regions of code against concurrent access.
* - Your system is a bare-metal system (probably with an RTOS)
* and interrupts are under your control:
* Implement this as LockInterrupts() / UnlockInterrupts()
* - Your system uses an RTOS with deferred interrupt handling from a
* worker thread: Implement as a global mutex or lock/unlock scheduler
* - Your system uses a high-level OS with e.g. POSIX signals:
* Implement as a global mutex
*
* @defgroup sys_misc Misc
* @ingroup sys_os
*/
#include "lwip/opt.h"
#include "lwip/sys.h"
/* Most of the functions defined in sys.h must be implemented in the
* architecture-dependent file sys_arch.c */
#if !NO_SYS
#ifndef sys_msleep
/**
* Sleep for some ms. Timeouts are NOT processed while sleeping.
*
* @param ms number of milliseconds to sleep
*/
void
sys_msleep(u32_t ms)
{
if (ms > 0) {
sys_sem_t delaysem;
err_t err = sys_sem_new(&delaysem, 0);
if (err == ERR_OK) {
sys_arch_sem_wait(&delaysem, ms);
sys_sem_free(&delaysem);
}
}
}
#endif /* sys_msleep */
#endif /* !NO_SYS */
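#if !NO_SYS
/* Editor's illustrative sketch (not part of the original sources): the
 * typical lifetime of a sys_arch semaphore, mirroring what sys_msleep()
 * does above; sys_arch_sem_wait() returns SYS_ARCH_TIMEOUT when the wait
 * expired without a matching sys_sem_signal(). */
static void
example_sem_wait(u32_t timeout_ms)
{
  sys_sem_t sem;
  if (sys_sem_new(&sem, 0) == ERR_OK) { /* binary semaphore, initially 0 */
    if (sys_arch_sem_wait(&sem, timeout_ms) == SYS_ARCH_TIMEOUT) {
      /* nobody signalled the semaphore within timeout_ms */
    }
    sys_sem_free(&sem);
  }
}
#endif /* !NO_SYS */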


@@ -1,451 +0,0 @@
/**
* @file
* Stack-internal timers implementation.
* This file includes timer callbacks for stack-internal timers as well as
* functions to set up or stop timers and check for expired timers.
*
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
* Simon Goldschmidt
*
*/
#include "lwip/opt.h"
#include "lwip/timeouts.h"
#include "lwip/priv/tcp_priv.h"
#include "lwip/def.h"
#include "lwip/memp.h"
#include "lwip/priv/tcpip_priv.h"
#include "lwip/ip4_frag.h"
#include "lwip/etharp.h"
#include "lwip/dhcp.h"
#include "lwip/autoip.h"
#include "lwip/igmp.h"
#include "lwip/dns.h"
#include "lwip/nd6.h"
#include "lwip/ip6_frag.h"
#include "lwip/mld6.h"
#include "lwip/dhcp6.h"
#include "lwip/sys.h"
#include "lwip/pbuf.h"
#if LWIP_DEBUG_TIMERNAMES
#define HANDLER(x) x, #x
#else /* LWIP_DEBUG_TIMERNAMES */
#define HANDLER(x) x
#endif /* LWIP_DEBUG_TIMERNAMES */
#define LWIP_MAX_TIMEOUT 0x7fffffff
/* Check if a timer's expiry time is greater than 'compare_to', taking u32_t wraparound into account */
#define TIME_LESS_THAN(t, compare_to) ( (((u32_t)((t)-(compare_to))) > LWIP_MAX_TIMEOUT) ? 1 : 0 )
/** This array contains all stack-internal cyclic timers. To get the number of
* timers, use LWIP_ARRAYSIZE() */
const struct lwip_cyclic_timer lwip_cyclic_timers[] = {
#if LWIP_TCP
  /* The TCP timer is a special case: it does not always have to run and
     is started on demand from TCP via tcp_timer_needed() */
{TCP_TMR_INTERVAL, HANDLER(tcp_tmr)},
#endif /* LWIP_TCP */
#if LWIP_IPV4
#if IP_REASSEMBLY
{IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)},
#endif /* IP_REASSEMBLY */
#if LWIP_ARP
{ARP_TMR_INTERVAL, HANDLER(etharp_tmr)},
#endif /* LWIP_ARP */
#if LWIP_DHCP
{DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)},
{DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)},
#endif /* LWIP_DHCP */
#if LWIP_AUTOIP
{AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)},
#endif /* LWIP_AUTOIP */
#if LWIP_IGMP
{IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)},
#endif /* LWIP_IGMP */
#endif /* LWIP_IPV4 */
#if LWIP_DNS
{DNS_TMR_INTERVAL, HANDLER(dns_tmr)},
#endif /* LWIP_DNS */
#if LWIP_IPV6
{ND6_TMR_INTERVAL, HANDLER(nd6_tmr)},
#if LWIP_IPV6_REASS
{IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)},
#endif /* LWIP_IPV6_REASS */
#if LWIP_IPV6_MLD
{MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)},
#endif /* LWIP_IPV6_MLD */
#if LWIP_IPV6_DHCP6
{DHCP6_TIMER_MSECS, HANDLER(dhcp6_tmr)},
#endif /* LWIP_IPV6_DHCP6 */
#endif /* LWIP_IPV6 */
};
const int lwip_num_cyclic_timers = LWIP_ARRAYSIZE(lwip_cyclic_timers);
#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM
/** The one and only timeout list */
static struct sys_timeo *next_timeout;
static u32_t current_timeout_due_time;
#if LWIP_TESTMODE
struct sys_timeo**
sys_timeouts_get_next_timeout(void)
{
return &next_timeout;
}
#endif
#if LWIP_TCP
/** global variable that shows if the tcp timer is currently scheduled or not */
static int tcpip_tcp_timer_active;
/**
* Timer callback function that calls tcp_tmr() and reschedules itself.
*
* @param arg unused argument
*/
static void
tcpip_tcp_timer(void *arg)
{
LWIP_UNUSED_ARG(arg);
/* call TCP timer handler */
tcp_tmr();
/* timer still needed? */
if (tcp_active_pcbs || tcp_tw_pcbs) {
/* restart timer */
sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL);
} else {
/* disable timer */
tcpip_tcp_timer_active = 0;
}
}
/**
* Called from TCP_REG when registering a new PCB:
* the reason is to have the TCP timer only running when
* there are active (or time-wait) PCBs.
*/
void
tcp_timer_needed(void)
{
LWIP_ASSERT_CORE_LOCKED();
/* timer is off but needed again? */
if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) {
/* enable and start timer */
tcpip_tcp_timer_active = 1;
sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL);
}
}
#endif /* LWIP_TCP */
static void
#if LWIP_DEBUG_TIMERNAMES
sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg, const char *handler_name)
#else /* LWIP_DEBUG_TIMERNAMES */
sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg)
#endif
{
struct sys_timeo *timeout, *t;
timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT);
if (timeout == NULL) {
LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL);
return;
}
timeout->next = NULL;
timeout->h = handler;
timeout->arg = arg;
timeout->time = abs_time;
#if LWIP_DEBUG_TIMERNAMES
timeout->handler_name = handler_name;
LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p abs_time=%"U32_F" handler=%s arg=%p\n",
(void *)timeout, abs_time, handler_name, (void *)arg));
#endif /* LWIP_DEBUG_TIMERNAMES */
if (next_timeout == NULL) {
next_timeout = timeout;
return;
}
if (TIME_LESS_THAN(timeout->time, next_timeout->time)) {
timeout->next = next_timeout;
next_timeout = timeout;
} else {
for (t = next_timeout; t != NULL; t = t->next) {
if ((t->next == NULL) || TIME_LESS_THAN(timeout->time, t->next->time)) {
timeout->next = t->next;
t->next = timeout;
break;
}
}
}
}
/**
* Timer callback function that calls cyclic->handler() and reschedules itself.
*
 * @param arg pointer to the lwip_cyclic_timer entry being dispatched
*/
#if !LWIP_TESTMODE
static
#endif
void
lwip_cyclic_timer(void *arg)
{
u32_t now;
u32_t next_timeout_time;
const struct lwip_cyclic_timer *cyclic = (const struct lwip_cyclic_timer *)arg;
#if LWIP_DEBUG_TIMERNAMES
LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name));
#endif
cyclic->handler();
now = sys_now();
next_timeout_time = (u32_t)(current_timeout_due_time + cyclic->interval_ms); /* overflow handled by TIME_LESS_THAN macro */
if (TIME_LESS_THAN(next_timeout_time, now)) {
/* timer would immediately expire again -> "overload" -> restart without any correction */
#if LWIP_DEBUG_TIMERNAMES
sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg, cyclic->handler_name);
#else
sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg);
#endif
} else {
/* correct cyclic interval with handler execution delay and sys_check_timeouts jitter */
#if LWIP_DEBUG_TIMERNAMES
sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg, cyclic->handler_name);
#else
sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg);
#endif
}
}
/** Initialize this module */
void sys_timeouts_init(void)
{
size_t i;
/* tcp_tmr() at index 0 is started on demand */
for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) {
    /* we have to cast via size_t to get rid of the const warning
       (this is OK as lwip_cyclic_timer() casts back to const *) */
sys_timeout(lwip_cyclic_timers[i].interval_ms, lwip_cyclic_timer, LWIP_CONST_CAST(void *, &lwip_cyclic_timers[i]));
}
}
/**
* Create a one-shot timer (aka timeout). Timeouts are processed in the
* following cases:
* - while waiting for a message using sys_timeouts_mbox_fetch()
* - by calling sys_check_timeouts() (NO_SYS==1 only)
*
* @param msecs time in milliseconds after that the timer should expire
* @param handler callback function to call when msecs have elapsed
* @param arg argument to pass to the callback function
*/
#if LWIP_DEBUG_TIMERNAMES
void
sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char *handler_name)
#else /* LWIP_DEBUG_TIMERNAMES */
void
sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg)
#endif /* LWIP_DEBUG_TIMERNAMES */
{
u32_t next_timeout_time;
LWIP_ASSERT_CORE_LOCKED();
LWIP_ASSERT("Timeout time too long, max is LWIP_UINT32_MAX/4 msecs", msecs <= (LWIP_UINT32_MAX / 4));
next_timeout_time = (u32_t)(sys_now() + msecs); /* overflow handled by TIME_LESS_THAN macro */
#if LWIP_DEBUG_TIMERNAMES
sys_timeout_abs(next_timeout_time, handler, arg, handler_name);
#else
sys_timeout_abs(next_timeout_time, handler, arg);
#endif
}
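/* Editor's illustrative sketch (not part of the original sources): arm a
 * one-shot timeout and cancel it again; the handler and its NULL argument
 * are hypothetical. sys_untimeout() (below) matches on the same
 * (handler, arg) pair. */
static void
example_timeout_handler(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  /* runs once, roughly 1000 ms after the sys_timeout() call below */
}
static void
example_arm_and_cancel(void)
{
  sys_timeout(1000, example_timeout_handler, NULL);
  /* ...later, if it has not fired yet: */
  sys_untimeout(example_timeout_handler, NULL);
}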
/**
* Go through timeout list (for this task only) and remove the first matching
* entry (subsequent entries remain untouched), even though the timeout has not
* triggered yet.
*
* @param handler callback function that would be called by the timeout
* @param arg callback argument that would be passed to handler
*/
void
sys_untimeout(sys_timeout_handler handler, void *arg)
{
struct sys_timeo *prev_t, *t;
LWIP_ASSERT_CORE_LOCKED();
if (next_timeout == NULL) {
return;
}
for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) {
if ((t->h == handler) && (t->arg == arg)) {
/* We have a match */
/* Unlink from previous in list */
if (prev_t == NULL) {
next_timeout = t->next;
} else {
prev_t->next = t->next;
}
memp_free(MEMP_SYS_TIMEOUT, t);
return;
}
}
return;
}
/**
* @ingroup lwip_nosys
* Handle timeouts for NO_SYS==1 (i.e. without using
 * tcpip_thread/sys_timeouts_mbox_fetch()). Uses sys_now() to call timeout
* handler functions when timeouts expire.
*
* Must be called periodically from your main loop.
*/
void
sys_check_timeouts(void)
{
u32_t now;
LWIP_ASSERT_CORE_LOCKED();
/* Process only timers expired at the start of the function. */
now = sys_now();
do {
struct sys_timeo *tmptimeout;
sys_timeout_handler handler;
void *arg;
PBUF_CHECK_FREE_OOSEQ();
tmptimeout = next_timeout;
if (tmptimeout == NULL) {
return;
}
if (TIME_LESS_THAN(now, tmptimeout->time)) {
return;
}
/* Timeout has expired */
next_timeout = tmptimeout->next;
handler = tmptimeout->h;
arg = tmptimeout->arg;
current_timeout_due_time = tmptimeout->time;
#if LWIP_DEBUG_TIMERNAMES
if (handler != NULL) {
LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s t=%"U32_F" arg=%p\n",
tmptimeout->handler_name, sys_now() - tmptimeout->time, arg));
}
#endif /* LWIP_DEBUG_TIMERNAMES */
memp_free(MEMP_SYS_TIMEOUT, tmptimeout);
if (handler != NULL) {
handler(arg);
}
LWIP_TCPIP_THREAD_ALIVE();
/* Repeat until all expired timers have been called */
} while (1);
}
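/* Editor's illustrative sketch (not part of the original sources): the
 * canonical NO_SYS==1 polling loop shape this function is designed for.
 * 'poll_driver()' stands in for the port's network-interface polling and is
 * hypothetical. */
#if NO_SYS
static void
example_nosys_mainloop(void)
{
  for (;;) {
    /* poll_driver(); hypothetical: feed received frames into lwIP here */
    sys_check_timeouts(); /* run handlers for any expired timeouts */
  }
}
#endif /* NO_SYS */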
/** Rebase the timeout times to the current time.
* This is necessary if sys_check_timeouts() hasn't been called for a long
* time (e.g. while saving energy) to prevent all timer functions of that
* period being called.
*/
void
sys_restart_timeouts(void)
{
u32_t now;
u32_t base;
struct sys_timeo *t;
if (next_timeout == NULL) {
return;
}
now = sys_now();
base = next_timeout->time;
for (t = next_timeout; t != NULL; t = t->next) {
t->time = (t->time - base) + now;
}
}
/** Return the time left before the next timeout is due. If no timeouts are
 * enqueued, returns SYS_TIMEOUTS_SLEEPTIME_INFINITE (0xffffffff)
*/
u32_t
sys_timeouts_sleeptime(void)
{
u32_t now;
LWIP_ASSERT_CORE_LOCKED();
if (next_timeout == NULL) {
return SYS_TIMEOUTS_SLEEPTIME_INFINITE;
}
now = sys_now();
if (TIME_LESS_THAN(next_timeout->time, now)) {
return 0;
} else {
u32_t ret = (u32_t)(next_timeout->time - now);
LWIP_ASSERT("invalid sleeptime", ret <= LWIP_MAX_TIMEOUT);
return ret;
}
}
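/* Editor's illustrative sketch (not part of the original sources): a
 * tickless variant of the polling loop body, sleeping exactly until the
 * next timeout is due; 'board_sleep_ms()' is a hypothetical low-power wait
 * provided by the port. */
static void
example_tickless_step(void)
{
  u32_t ms = sys_timeouts_sleeptime();
  if (ms == SYS_TIMEOUTS_SLEEPTIME_INFINITE) {
    /* no timeout pending: wait for an external event instead */
  } else if (ms > 0) {
    /* board_sleep_ms(ms); */
  }
  sys_check_timeouts();
}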
#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */
/* Satisfy the TCP code which calls this function */
void
tcp_timer_needed(void)
{
}
#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */


@@ -1,33 +0,0 @@
/**
* @file
* This file is a posix wrapper for lwip/sockets.h.
*/
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwip/sockets.h"


@@ -1,36 +0,0 @@
/**
* @file
* This file is a posix wrapper for lwip/if_api.h.
*/
/*
* Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. <joel.cunningham@garmin.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwip/if_api.h"


@@ -1,33 +0,0 @@
/**
* @file
* This file is a posix wrapper for lwip/netdb.h.
*/
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwip/netdb.h"


@@ -1,33 +0,0 @@
/**
* @file
* This file is a posix wrapper for lwip/sockets.h.
*/
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwip/sockets.h"


@@ -1,33 +0,0 @@
/**
* @file
* This file is a posix/stdc wrapper for lwip/errno.h.
*/
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#include "lwip/errno.h"


@@ -1,201 +0,0 @@
/**
* @file
* Application layered TCP connection API (to be used from TCPIP thread)\n
*
* This file contains the generic API.
* For more details see @ref altcp_api.
*/
/*
* Copyright (c) 2017 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#ifndef LWIP_HDR_ALTCP_H
#define LWIP_HDR_ALTCP_H
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
#include "lwip/tcpbase.h"
#include "lwip/err.h"
#include "lwip/pbuf.h"
#include "lwip/ip_addr.h"
#ifdef __cplusplus
extern "C" {
#endif
struct altcp_pcb;
struct altcp_functions;
typedef err_t (*altcp_accept_fn)(void *arg, struct altcp_pcb *new_conn, err_t err);
typedef err_t (*altcp_connected_fn)(void *arg, struct altcp_pcb *conn, err_t err);
typedef err_t (*altcp_recv_fn)(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err);
typedef err_t (*altcp_sent_fn)(void *arg, struct altcp_pcb *conn, u16_t len);
typedef err_t (*altcp_poll_fn)(void *arg, struct altcp_pcb *conn);
typedef void (*altcp_err_fn)(void *arg, err_t err);
typedef struct altcp_pcb* (*altcp_new_fn)(void *arg, u8_t ip_type);
struct altcp_pcb {
const struct altcp_functions *fns;
struct altcp_pcb *inner_conn;
void *arg;
void *state;
/* application callbacks */
altcp_accept_fn accept;
altcp_connected_fn connected;
altcp_recv_fn recv;
altcp_sent_fn sent;
altcp_poll_fn poll;
altcp_err_fn err;
u8_t pollinterval;
};
/** @ingroup altcp */
typedef struct altcp_allocator_s {
/** Allocator function */
altcp_new_fn alloc;
/** Argument to allocator function */
void *arg;
} altcp_allocator_t;
struct altcp_pcb *altcp_new(altcp_allocator_t *allocator);
struct altcp_pcb *altcp_new_ip6(altcp_allocator_t *allocator);
struct altcp_pcb *altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type);
void altcp_arg(struct altcp_pcb *conn, void *arg);
void altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept);
void altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv);
void altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent);
void altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval);
void altcp_err(struct altcp_pcb *conn, altcp_err_fn err);
void altcp_recved(struct altcp_pcb *conn, u16_t len);
err_t altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port);
err_t altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected);
/* return conn for source code compatibility to tcp callback API only */
struct altcp_pcb *altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err);
#define altcp_listen_with_backlog(conn, backlog) altcp_listen_with_backlog_and_err(conn, backlog, NULL)
/** @ingroup altcp */
#define altcp_listen(conn) altcp_listen_with_backlog_and_err(conn, TCP_DEFAULT_LISTEN_BACKLOG, NULL)
void altcp_abort(struct altcp_pcb *conn);
err_t altcp_close(struct altcp_pcb *conn);
err_t altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx);
err_t altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags);
err_t altcp_output(struct altcp_pcb *conn);
u16_t altcp_mss(struct altcp_pcb *conn);
u16_t altcp_sndbuf(struct altcp_pcb *conn);
u16_t altcp_sndqueuelen(struct altcp_pcb *conn);
void altcp_nagle_disable(struct altcp_pcb *conn);
void altcp_nagle_enable(struct altcp_pcb *conn);
int altcp_nagle_disabled(struct altcp_pcb *conn);
void altcp_setprio(struct altcp_pcb *conn, u8_t prio);
err_t altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port);
ip_addr_t *altcp_get_ip(struct altcp_pcb *conn, int local);
u16_t altcp_get_port(struct altcp_pcb *conn, int local);
#ifdef LWIP_DEBUG
enum tcp_state altcp_dbg_get_tcp_state(struct altcp_pcb *conn);
#endif
#ifdef __cplusplus
}
#endif
#else /* LWIP_ALTCP */
/* ALTCP disabled, define everything to link against tcp callback API (e.g. to get a small non-ssl httpd) */
#include "lwip/tcp.h"
#define altcp_accept_fn tcp_accept_fn
#define altcp_connected_fn tcp_connected_fn
#define altcp_recv_fn tcp_recv_fn
#define altcp_sent_fn tcp_sent_fn
#define altcp_poll_fn tcp_poll_fn
#define altcp_err_fn tcp_err_fn
#define altcp_pcb tcp_pcb
#define altcp_tcp_new_ip_type tcp_new_ip_type
#define altcp_tcp_new tcp_new
#define altcp_tcp_new_ip6 tcp_new_ip6
#define altcp_new(allocator) tcp_new()
#define altcp_new_ip6(allocator) tcp_new_ip6()
#define altcp_new_ip_type(allocator, ip_type) tcp_new_ip_type(ip_type)
#define altcp_arg tcp_arg
#define altcp_accept tcp_accept
#define altcp_recv tcp_recv
#define altcp_sent tcp_sent
#define altcp_poll tcp_poll
#define altcp_err tcp_err
#define altcp_recved tcp_recved
#define altcp_bind tcp_bind
#define altcp_connect tcp_connect
#define altcp_listen_with_backlog_and_err tcp_listen_with_backlog_and_err
#define altcp_listen_with_backlog tcp_listen_with_backlog
#define altcp_listen tcp_listen
#define altcp_abort tcp_abort
#define altcp_close tcp_close
#define altcp_shutdown tcp_shutdown
#define altcp_write tcp_write
#define altcp_output tcp_output
#define altcp_mss tcp_mss
#define altcp_sndbuf tcp_sndbuf
#define altcp_sndqueuelen tcp_sndqueuelen
#define altcp_nagle_disable tcp_nagle_disable
#define altcp_nagle_enable tcp_nagle_enable
#define altcp_nagle_disabled tcp_nagle_disabled
#define altcp_setprio tcp_setprio
#define altcp_get_tcp_addrinfo tcp_get_tcp_addrinfo
#define altcp_get_ip(pcb, local) ((local) ? (&(pcb)->local_ip) : (&(pcb)->remote_ip))
#ifdef LWIP_DEBUG
#define altcp_dbg_get_tcp_state tcp_dbg_get_tcp_state
#endif
#endif /* LWIP_ALTCP */
#endif /* LWIP_HDR_ALTCP_H */
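/**
 * Editor's illustrative sketch (not part of the original sources): wiring up
 * an altcp connection mirrors the tcp raw API. 'my_recv', 'my_err',
 * 'my_connected', 'state' and 'peer_addr' are hypothetical application
 * callbacks/data.
 * @code{.c}
 * altcp_arg(conn, state);
 * altcp_recv(conn, my_recv);
 * altcp_err(conn, my_err);
 * altcp_connect(conn, &peer_addr, 443, my_connected);
 * @endcode
 */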


@@ -1,72 +0,0 @@
/**
* @file
* Application layered TCP connection API (to be used from TCPIP thread)\n
* This interface mimics the tcp callback API to the application while preventing
* direct linking (much like virtual functions).
* This way, an application can make use of other application layer protocols
* on top of TCP without knowing the details (e.g. TLS, proxy connection).
*
* This file contains the base implementation calling into tcp.
*/
/*
* Copyright (c) 2017 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#ifndef LWIP_HDR_ALTCP_TCP_H
#define LWIP_HDR_ALTCP_TCP_H
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
#include "lwip/altcp.h"
#ifdef __cplusplus
extern "C" {
#endif
struct altcp_pcb *altcp_tcp_new_ip_type(u8_t ip_type);
#define altcp_tcp_new() altcp_tcp_new_ip_type(IPADDR_TYPE_V4)
#define altcp_tcp_new_ip6() altcp_tcp_new_ip_type(IPADDR_TYPE_V6)
struct altcp_pcb *altcp_tcp_alloc(void *arg, u8_t ip_type);
struct tcp_pcb;
struct altcp_pcb *altcp_tcp_wrap(struct tcp_pcb *tpcb);
#ifdef __cplusplus
}
#endif
#endif /* LWIP_ALTCP */
#endif /* LWIP_HDR_ALTCP_TCP_H */
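/**
 * Editor's illustrative sketch (not part of the original sources): create a
 * plain-TCP altcp pcb through the allocator interface so application code
 * stays independent of the concrete layer.
 * @code{.c}
 * altcp_allocator_t allocator = { altcp_tcp_alloc, NULL };
 * struct altcp_pcb *conn = altcp_new_ip_type(&allocator, IPADDR_TYPE_ANY);
 * @endcode
 */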


@@ -1,117 +0,0 @@
/**
* @file
* Application layered TCP/TLS connection API (to be used from TCPIP thread)
*
* @defgroup altcp_tls TLS layer
* @ingroup altcp
* This file contains function prototypes for a TLS layer.
* A port to ARM mbedtls is provided in the apps/ tree
* (LWIP_ALTCP_TLS_MBEDTLS option).
*/
/*
* Copyright (c) 2017 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#ifndef LWIP_HDR_ALTCP_TLS_H
#define LWIP_HDR_ALTCP_TLS_H
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
#if LWIP_ALTCP_TLS
#include "lwip/altcp.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @ingroup altcp_tls
* ALTCP_TLS configuration handle, content depends on port (e.g. mbedtls)
*/
struct altcp_tls_config;
/** @ingroup altcp_tls
* Create an ALTCP_TLS server configuration handle
*/
struct altcp_tls_config *altcp_tls_create_config_server_privkey_cert(const u8_t *privkey, size_t privkey_len,
const u8_t *privkey_pass, size_t privkey_pass_len,
const u8_t *cert, size_t cert_len);
/** @ingroup altcp_tls
* Create an ALTCP_TLS client configuration handle
*/
struct altcp_tls_config *altcp_tls_create_config_client(const u8_t *cert, size_t cert_len);
/** @ingroup altcp_tls
* Create an ALTCP_TLS client configuration handle with two-way server/client authentication
*/
struct altcp_tls_config *altcp_tls_create_config_client_2wayauth(const u8_t *ca, size_t ca_len, const u8_t *privkey, size_t privkey_len,
const u8_t *privkey_pass, size_t privkey_pass_len,
const u8_t *cert, size_t cert_len);
/** @ingroup altcp_tls
* Free an ALTCP_TLS configuration handle
*/
void altcp_tls_free_config(struct altcp_tls_config *conf);
/** @ingroup altcp_tls
* Create new ALTCP_TLS layer wrapping an existing pcb as inner connection (e.g. TLS over TCP)
*/
struct altcp_pcb *altcp_tls_wrap(struct altcp_tls_config *config, struct altcp_pcb *inner_pcb);
/** @ingroup altcp_tls
* Create new ALTCP_TLS pcb and its inner tcp pcb
*/
struct altcp_pcb *altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type);
/** @ingroup altcp_tls
* Create new ALTCP_TLS layer pcb and its inner tcp pcb.
* Same as @ref altcp_tls_new but this allocator function fits to
* @ref altcp_allocator_t / @ref altcp_new.\n
 * 'arg' must contain a struct altcp_tls_config *.
*/
struct altcp_pcb *altcp_tls_alloc(void *arg, u8_t ip_type);
/** @ingroup altcp_tls
* Return pointer to internal TLS context so application can tweak it.
* Real type depends on port (e.g. mbedtls)
*/
void *altcp_tls_context(struct altcp_pcb *conn);
#ifdef __cplusplus
}
#endif
#endif /* LWIP_ALTCP_TLS */
#endif /* LWIP_ALTCP */
#endif /* LWIP_HDR_ALTCP_TLS_H */
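/**
 * Editor's illustrative sketch (not part of the original sources): a TLS
 * client configuration plus a TLS-over-TCP pcb created from it. 'ca_pem'
 * and 'ca_len' (a CA certificate held in memory) are hypothetical.
 * @code{.c}
 * struct altcp_tls_config *cfg = altcp_tls_create_config_client(ca_pem, ca_len);
 * struct altcp_pcb *conn = altcp_tls_new(cfg, IPADDR_TYPE_ANY);
 * // ...connect, transfer, close...
 * altcp_tls_free_config(cfg);
 * @endcode
 */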


@@ -1,431 +0,0 @@
/**
* @file
* netconn API (to be used from non-TCPIP threads)
*/
/*
* Copyright (c) 2001-2004 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#ifndef LWIP_HDR_API_H
#define LWIP_HDR_API_H
#include "lwip/opt.h"
#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
/* Note: Netconn API is always available when sockets are enabled -
 * sockets are implemented on top of the netconn API */
#include "lwip/arch.h"
#include "lwip/netbuf.h"
#include "lwip/sys.h"
#include "lwip/ip_addr.h"
#include "lwip/err.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Throughout this file, IP addresses and port numbers are expected to be in
* the same byte order as in the corresponding pcb.
*/
/* Flags for netconn_write (u8_t) */
#define NETCONN_NOFLAG 0x00
#define NETCONN_NOCOPY 0x00 /* Only for source code compatibility */
#define NETCONN_COPY 0x01
#define NETCONN_MORE 0x02
#define NETCONN_DONTBLOCK 0x04
#define NETCONN_NOAUTORCVD 0x08 /* prevent netconn_recv_data_tcp() from updating the tcp window - must be done manually via netconn_tcp_recvd() */
#define NETCONN_NOFIN 0x10 /* upper layer already received data, leave FIN in queue until called again */
/* Flags for struct netconn.flags (u8_t) */
/** This netconn had an error, don't block on recvmbox/acceptmbox any more */
#define NETCONN_FLAG_MBOXCLOSED 0x01
/** Should this netconn avoid blocking? */
#define NETCONN_FLAG_NON_BLOCKING 0x02
/** Was the last connect action a non-blocking one? */
#define NETCONN_FLAG_IN_NONBLOCKING_CONNECT 0x04
#if LWIP_NETCONN_FULLDUPLEX
/** The mbox of this netconn is being deallocated, don't use it anymore */
#define NETCONN_FLAG_MBOXINVALID 0x08
#endif /* LWIP_NETCONN_FULLDUPLEX */
/** If a nonblocking write has been rejected before, poll_tcp needs to
check if the netconn is writable again */
#define NETCONN_FLAG_CHECK_WRITESPACE 0x10
#if LWIP_IPV6
/** If this flag is set then only IPv6 communication is allowed on the
netconn. As per RFC#3493 this feature defaults to OFF, allowing
dual-stack usage by default. */
#define NETCONN_FLAG_IPV6_V6ONLY 0x20
#endif /* LWIP_IPV6 */
#if LWIP_NETBUF_RECVINFO
/** Received packet info will be recorded for this netconn */
#define NETCONN_FLAG_PKTINFO 0x40
#endif /* LWIP_NETBUF_RECVINFO */
/** A FIN has been received but not passed to the application yet */
#define NETCONN_FIN_RX_PENDING 0x80
/* Helpers to process several netconn_types by the same code */
#define NETCONNTYPE_GROUP(t) ((t)&0xF0)
#define NETCONNTYPE_DATAGRAM(t) ((t)&0xE0)
#if LWIP_IPV6
#define NETCONN_TYPE_IPV6 0x08
#define NETCONNTYPE_ISIPV6(t) (((t)&NETCONN_TYPE_IPV6) != 0)
#define NETCONNTYPE_ISUDPLITE(t) (((t)&0xF3) == NETCONN_UDPLITE)
#define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM)
#else /* LWIP_IPV6 */
#define NETCONNTYPE_ISIPV6(t) (0)
#define NETCONNTYPE_ISUDPLITE(t) ((t) == NETCONN_UDPLITE)
#define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM)
#endif /* LWIP_IPV6 */
/** @ingroup netconn_common
* Protocol family and type of the netconn
*/
enum netconn_type {
NETCONN_INVALID = 0,
/** TCP IPv4 */
NETCONN_TCP = 0x10,
#if LWIP_IPV6
/** TCP IPv6 */
NETCONN_TCP_IPV6 = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */,
#endif /* LWIP_IPV6 */
/** UDP IPv4 */
NETCONN_UDP = 0x20,
/** UDP IPv4 lite */
NETCONN_UDPLITE = 0x21,
/** UDP IPv4 no checksum */
NETCONN_UDPNOCHKSUM = 0x22,
#if LWIP_IPV6
/** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
NETCONN_UDP_IPV6 = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */,
/** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
NETCONN_UDPLITE_IPV6 = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */,
/** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */,
#endif /* LWIP_IPV6 */
/** Raw connection IPv4 */
NETCONN_RAW = 0x40
#if LWIP_IPV6
/** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
, NETCONN_RAW_IPV6 = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */
#endif /* LWIP_IPV6 */
};
/** Current state of the netconn. Non-TCP netconns are always
* in state NETCONN_NONE! */
enum netconn_state {
NETCONN_NONE,
NETCONN_WRITE,
NETCONN_LISTEN,
NETCONN_CONNECT,
NETCONN_CLOSE
};
/** Used to inform the callback function about changes
*
* Event explanation:
*
* In the netconn implementation, there are three ways to block a client:
*
* - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept())
* - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data())
* - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write())
*
* The events have to be seen as events signaling the state of these mboxes/semaphores. For non-blocking
* connections, you need to know in advance whether a call to a netconn function call would block or not,
* and these events tell you about that.
*
* RCVPLUS events say: Safe to perform a potentially blocking call once more.
* They are counted in sockets - three RCVPLUS events for accept mbox means you are safe
* to call netconn_accept 3 times without being blocked.
* Same thing for receive mbox.
*
* RCVMINUS events say: Your call to a possibly blocking function is "acknowledged".
* Socket implementation decrements the counter.
*
* For TX, there is no need to count, it's merely a flag. SENDPLUS means you may send something.
* SENDPLUS occurs when enough data was delivered to peer so netconn_send() can be called again.
* A SENDMINUS event occurs when the next call to a netconn_send() would be blocking.
*/
enum netconn_evt {
NETCONN_EVT_RCVPLUS,
NETCONN_EVT_RCVMINUS,
NETCONN_EVT_SENDPLUS,
NETCONN_EVT_SENDMINUS,
NETCONN_EVT_ERROR
};
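/* Usage sketch (illustrative only): a callback implementing the RCVPLUS /
* RCVMINUS accounting described above. Pass it to netconn_new_with_callback();
* 'recv_ready' then tracks how many recv/accept calls would not block.
*/
#if 0
static volatile int recv_ready;

static void
example_netconn_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
{
  LWIP_UNUSED_ARG(conn);
  LWIP_UNUSED_ARG(len);
  switch (evt) {
    case NETCONN_EVT_RCVPLUS:
      recv_ready++; /* one more potentially blocking call is now safe */
      break;
    case NETCONN_EVT_RCVMINUS:
      recv_ready--; /* a pending event has been consumed */
      break;
    default:
      break; /* SENDPLUS/SENDMINUS/ERROR not tracked in this sketch */
  }
}
#endif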
#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD)
/** Used for netconn_join_leave_group() */
enum netconn_igmp {
NETCONN_JOIN,
NETCONN_LEAVE
};
#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */
#if LWIP_DNS
/* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */
#define NETCONN_DNS_DEFAULT NETCONN_DNS_IPV4_IPV6
#define NETCONN_DNS_IPV4 0
#define NETCONN_DNS_IPV6 1
#define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, only try IPv6 if IPv4 fails */
#define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, only try IPv4 if IPv6 fails */
#endif /* LWIP_DNS */
/* forward-declare some structs to avoid to include their headers */
struct ip_pcb;
struct tcp_pcb;
struct udp_pcb;
struct raw_pcb;
struct netconn;
struct api_msg;
/** A callback prototype to inform about events for a netconn */
typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len);
/** A netconn descriptor */
struct netconn {
/** type of the netconn (TCP, UDP or RAW) */
enum netconn_type type;
/** current state of the netconn */
enum netconn_state state;
/** the lwIP internal protocol control block */
union {
struct ip_pcb *ip;
struct tcp_pcb *tcp;
struct udp_pcb *udp;
struct raw_pcb *raw;
} pcb;
/** the last asynchronous unreported error this netconn had */
err_t pending_err;
#if !LWIP_NETCONN_SEM_PER_THREAD
/** sem that is used to synchronously execute functions in the core context */
sys_sem_t op_completed;
#endif
/** mbox where received packets are stored until they are fetched
by the netconn application thread (can grow quite big) */
sys_mbox_t recvmbox;
#if LWIP_TCP
/** mbox where new connections are stored until processed
by the application thread */
sys_mbox_t acceptmbox;
#endif /* LWIP_TCP */
#if LWIP_NETCONN_FULLDUPLEX
/** number of threads waiting on an mbox. This is required to unblock
all threads when closing while threads are waiting. */
int mbox_threads_waiting;
#endif
/** only used for socket layer */
#if LWIP_SOCKET
int socket;
#endif /* LWIP_SOCKET */
#if LWIP_SO_SNDTIMEO
/** timeout to wait for sending data (which means enqueueing data for sending
in internal buffers) in milliseconds */
s32_t send_timeout;
#endif /* LWIP_SO_SNDTIMEO */
#if LWIP_SO_RCVTIMEO
/** timeout in milliseconds to wait for new data to be received
(or connections to arrive for listening netconns) */
u32_t recv_timeout;
#endif /* LWIP_SO_RCVTIMEO */
#if LWIP_SO_RCVBUF
/** maximum amount of bytes queued in recvmbox
not used for TCP: adjust TCP_WND instead! */
int recv_bufsize;
/** number of bytes currently in recvmbox to be received,
tested against recv_bufsize to limit bytes on recvmbox
for UDP and RAW, used for FIONREAD */
int recv_avail;
#endif /* LWIP_SO_RCVBUF */
#if LWIP_SO_LINGER
/** values <0 mean linger is disabled, values > 0 are seconds to linger */
s16_t linger;
#endif /* LWIP_SO_LINGER */
/** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */
u8_t flags;
#if LWIP_TCP
/** TCP: when data passed to netconn_write doesn't fit into the send buffer,
this temporarily stores the message.
Also used during connect and close. */
struct api_msg *current_msg;
#endif /* LWIP_TCP */
/** A callback function that is informed about events for this netconn */
netconn_callback callback;
};
/** This vector type is passed to @ref netconn_write_vectors_partly to send
* multiple buffers at once.
* ATTENTION: This type has to directly map struct iovec since one is cast
* into the other!
*/
struct netvector {
/** pointer to the application buffer that contains the data to send */
const void *ptr;
/** size of the application data to send */
size_t len;
};
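/* Usage sketch (illustrative only): gather-write a header and a payload with
* a single call to netconn_write_vectors_partly(), declared further below.
*/
#if 0
static err_t
example_gather_write(struct netconn *conn, const void *hdr, size_t hdr_len,
                     const void *body, size_t body_len)
{
  struct netvector vecs[2];
  size_t written = 0;
  vecs[0].ptr = hdr;
  vecs[0].len = hdr_len;
  vecs[1].ptr = body;
  vecs[1].len = body_len;
  return netconn_write_vectors_partly(conn, vecs, 2, NETCONN_COPY, &written);
}
#endif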
/** Register a network connection event */
#define API_EVENT(c,e,l) if (c->callback) { \
(*c->callback)(c, e, l); \
}
/* Network connection functions: */
/** @ingroup netconn_common
* Create new netconn connection
* @param t @ref netconn_type */
#define netconn_new(t) netconn_new_with_proto_and_callback(t, 0, NULL)
#define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c)
struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto,
netconn_callback callback);
err_t netconn_prepare_delete(struct netconn *conn);
err_t netconn_delete(struct netconn *conn);
/** Get the type of a netconn (as enum netconn_type). */
#define netconn_type(conn) (conn->type)
err_t netconn_getaddr(struct netconn *conn, ip_addr_t *addr,
u16_t *port, u8_t local);
/** @ingroup netconn_common */
#define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0)
/** @ingroup netconn_common */
#define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1)
err_t netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port);
err_t netconn_bind_if(struct netconn *conn, u8_t if_idx);
err_t netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port);
err_t netconn_disconnect (struct netconn *conn);
err_t netconn_listen_with_backlog(struct netconn *conn, u8_t backlog);
/** @ingroup netconn_tcp */
#define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG)
err_t netconn_accept(struct netconn *conn, struct netconn **new_conn);
err_t netconn_recv(struct netconn *conn, struct netbuf **new_buf);
err_t netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf);
err_t netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags);
err_t netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf);
err_t netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags);
err_t netconn_tcp_recvd(struct netconn *conn, size_t len);
err_t netconn_sendto(struct netconn *conn, struct netbuf *buf,
const ip_addr_t *addr, u16_t port);
err_t netconn_send(struct netconn *conn, struct netbuf *buf);
err_t netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size,
u8_t apiflags, size_t *bytes_written);
err_t netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt,
u8_t apiflags, size_t *bytes_written);
/** @ingroup netconn_tcp */
#define netconn_write(conn, dataptr, size, apiflags) \
netconn_write_partly(conn, dataptr, size, apiflags, NULL)
err_t netconn_close(struct netconn *conn);
err_t netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx);
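/* Usage sketch (illustrative only): a minimal blocking HTTP/1.0 fetch using
* the functions above; error handling trimmed to the essentials.
*/
#if 0
static void
example_netconn_client(const ip_addr_t *server)
{
  struct netconn *conn = netconn_new(NETCONN_TCP);
  struct netbuf *buf;
  if (conn == NULL) {
    return;
  }
  if (netconn_connect(conn, server, 80) == ERR_OK) {
    const char req[] = "GET / HTTP/1.0\r\n\r\n";
    netconn_write(conn, req, sizeof(req) - 1, NETCONN_COPY);
    while (netconn_recv(conn, &buf) == ERR_OK) {
      /* walk the received data via netbuf_data()/netbuf_next() here */
      netbuf_delete(buf);
    }
  }
  netconn_close(conn);
  netconn_delete(conn);
}
#endif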
#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD)
err_t netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr,
const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave);
err_t netconn_join_leave_group_netif(struct netconn *conn, const ip_addr_t *multiaddr,
u8_t if_idx, enum netconn_igmp join_or_leave);
#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */
#if LWIP_DNS
#if LWIP_IPV4 && LWIP_IPV6
err_t netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype);
#define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT)
#else /* LWIP_IPV4 && LWIP_IPV6 */
err_t netconn_gethostbyname(const char *name, ip_addr_t *addr);
#define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr)
#endif /* LWIP_IPV4 && LWIP_IPV6 */
#endif /* LWIP_DNS */
err_t netconn_err(struct netconn *conn);
#define netconn_recv_bufsize(conn) ((conn)->recv_bufsize)
#define netconn_set_flags(conn, set_flags) do { (conn)->flags = (u8_t)((conn)->flags | (set_flags)); } while(0)
#define netconn_clear_flags(conn, clr_flags) do { (conn)->flags = (u8_t)((conn)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0)
#define netconn_is_flag_set(conn, flag) (((conn)->flags & (flag)) != 0)
/** Set the blocking status of netconn calls (@todo: write/send is missing) */
#define netconn_set_nonblocking(conn, val) do { if(val) { \
netconn_set_flags(conn, NETCONN_FLAG_NON_BLOCKING); \
} else { \
netconn_clear_flags(conn, NETCONN_FLAG_NON_BLOCKING); }} while(0)
/** Get the blocking status of netconn calls (@todo: write/send is missing) */
#define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0)
#if LWIP_IPV6
/** @ingroup netconn_common
* TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY)
*/
#define netconn_set_ipv6only(conn, val) do { if(val) { \
netconn_set_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); \
} else { \
netconn_clear_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); }} while(0)
/** @ingroup netconn_common
* TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY)
*/
#define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0)
#endif /* LWIP_IPV6 */
#if LWIP_SO_SNDTIMEO
/** Set the send timeout in milliseconds */
#define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout))
/** Get the send timeout in milliseconds */
#define netconn_get_sendtimeout(conn) ((conn)->send_timeout)
#endif /* LWIP_SO_SNDTIMEO */
#if LWIP_SO_RCVTIMEO
/** Set the receive timeout in milliseconds */
#define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout))
/** Get the receive timeout in milliseconds */
#define netconn_get_recvtimeout(conn) ((conn)->recv_timeout)
#endif /* LWIP_SO_RCVTIMEO */
#if LWIP_SO_RCVBUF
/** Set the receive buffer in bytes */
#define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize))
/** Get the receive buffer in bytes */
#define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize)
#endif /* LWIP_SO_RCVBUF*/
#if LWIP_NETCONN_SEM_PER_THREAD
void netconn_thread_init(void);
void netconn_thread_cleanup(void);
#else /* LWIP_NETCONN_SEM_PER_THREAD */
#define netconn_thread_init()
#define netconn_thread_cleanup()
#endif /* LWIP_NETCONN_SEM_PER_THREAD */
#ifdef __cplusplus
}
#endif
#endif /* LWIP_NETCONN || LWIP_SOCKET */
#endif /* LWIP_HDR_API_H */

View File

@ -1,79 +0,0 @@
/**
* @file
* Application layered TCP connection API that executes a proxy-connect.
*
* This file provides a starting layer that executes a proxy-connect e.g. to
* set up TLS connections through a http proxy.
*/
/*
* Copyright (c) 2018 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#ifndef LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H
#define LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
#include "lwip/ip_addr.h"
#ifdef __cplusplus
extern "C" {
#endif
struct altcp_proxyconnect_config {
ip_addr_t proxy_addr;
u16_t proxy_port;
};
struct altcp_pcb *altcp_proxyconnect_new(struct altcp_proxyconnect_config *config, struct altcp_pcb *inner_pcb);
struct altcp_pcb *altcp_proxyconnect_new_tcp(struct altcp_proxyconnect_config *config, u8_t ip_type);
struct altcp_pcb *altcp_proxyconnect_alloc(void *arg, u8_t ip_type);
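/* Usage sketch (illustrative only): plain TCP tunnelled through an HTTP proxy
* at 192.0.2.1:8080 (documentation address). The config must outlive the pcb,
* hence 'static'. Assumes IP_ADDR4/IPADDR_TYPE_V4 from lwip/ip_addr.h.
*/
#if 0
static struct altcp_pcb *
example_proxyconnect_pcb(void)
{
  static struct altcp_proxyconnect_config cfg;
  IP_ADDR4(&cfg.proxy_addr, 192, 0, 2, 1);
  cfg.proxy_port = 8080;
  return altcp_proxyconnect_new_tcp(&cfg, IPADDR_TYPE_V4);
}
#endif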
#if LWIP_ALTCP_TLS
struct altcp_proxyconnect_tls_config {
struct altcp_proxyconnect_config proxy;
struct altcp_tls_config *tls_config;
};
struct altcp_pcb *altcp_proxyconnect_tls_alloc(void *arg, u8_t ip_type);
#endif /* LWIP_ALTCP_TLS */
#ifdef __cplusplus
}
#endif
#endif /* LWIP_ALTCP */
#endif /* LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H */

View File

@ -1,67 +0,0 @@
/**
* @file
* Application layered TCP/TLS connection API (to be used from TCPIP thread)
*
* This file contains options for an mbedtls port of the TLS layer.
*/
/*
* Copyright (c) 2017 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#ifndef LWIP_HDR_ALTCP_TLS_OPTS_H
#define LWIP_HDR_ALTCP_TLS_OPTS_H
#include "lwip/opt.h"
#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
/** LWIP_ALTCP_TLS_MBEDTLS==1: use mbedTLS for TLS support for altcp API
* mbedtls include directory must be reachable via include search path
*/
#ifndef LWIP_ALTCP_TLS_MBEDTLS
#define LWIP_ALTCP_TLS_MBEDTLS 0
#endif
/** Configure debug level of this file */
#ifndef ALTCP_MBEDTLS_DEBUG
#define ALTCP_MBEDTLS_DEBUG LWIP_DBG_OFF
#endif
/** Set a session timeout in seconds for the basic session cache
* ATTENTION: Using a session cache can lower security by reusing keys!
*/
#ifndef ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS
#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS 0
#endif
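/* Usage sketch (illustrative only) for lwipopts.h: enable this TLS layer with
* the mbedTLS port and a one-hour session cache. LWIP_ALTCP/LWIP_ALTCP_TLS are
* the lwIP core options this file depends on.
*/
#if 0
#define LWIP_ALTCP                                  1
#define LWIP_ALTCP_TLS                              1
#define LWIP_ALTCP_TLS_MBEDTLS                      1
#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS (60 * 60)
#endif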
#endif /* LWIP_ALTCP */
#endif /* LWIP_HDR_ALTCP_TLS_OPTS_H */

View File

@ -1,126 +0,0 @@
/*
* Copyright (c) 2001-2003 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
#ifndef LWIP_HDR_APPS_FS_H
#define LWIP_HDR_APPS_FS_H
#include "httpd_opts.h"
#include "lwip/err.h"
#ifdef __cplusplus
extern "C" {
#endif
#define FS_READ_EOF -1
#define FS_READ_DELAYED -2
#if HTTPD_PRECALCULATED_CHECKSUM
struct fsdata_chksum {
u32_t offset;
u16_t chksum;
u16_t len;
};
#endif /* HTTPD_PRECALCULATED_CHECKSUM */
#define FS_FILE_FLAGS_HEADER_INCLUDED 0x01
#define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02
#define FS_FILE_FLAGS_HEADER_HTTPVER_1_1 0x04
#define FS_FILE_FLAGS_SSI 0x08
/** Define FS_FILE_EXTENSION_T_DEFINED if you have typedef'ed fs_file_extension
* to your private pointer type (defaults to 'void' so the default usage is 'void*')
*/
#ifndef FS_FILE_EXTENSION_T_DEFINED
typedef void fs_file_extension;
#endif
struct fs_file {
const char *data;
int len;
int index;
/* pextension is free for implementations to hold private (extensional)
arbitrary data, e.g. holding some file state or file system handle */
fs_file_extension *pextension;
#if HTTPD_PRECALCULATED_CHECKSUM
const struct fsdata_chksum *chksum;
u16_t chksum_count;
#endif /* HTTPD_PRECALCULATED_CHECKSUM */
u8_t flags;
#if LWIP_HTTPD_CUSTOM_FILES
u8_t is_custom_file;
#endif /* LWIP_HTTPD_CUSTOM_FILES */
#if LWIP_HTTPD_FILE_STATE
void *state;
#endif /* LWIP_HTTPD_FILE_STATE */
};
#if LWIP_HTTPD_FS_ASYNC_READ
typedef void (*fs_wait_cb)(void *arg);
#endif /* LWIP_HTTPD_FS_ASYNC_READ */
err_t fs_open(struct fs_file *file, const char *name);
void fs_close(struct fs_file *file);
#if LWIP_HTTPD_DYNAMIC_FILE_READ
#if LWIP_HTTPD_FS_ASYNC_READ
int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg);
#else /* LWIP_HTTPD_FS_ASYNC_READ */
int fs_read(struct fs_file *file, char *buffer, int count);
#endif /* LWIP_HTTPD_FS_ASYNC_READ */
#endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */
#if LWIP_HTTPD_FS_ASYNC_READ
int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg);
#endif /* LWIP_HTTPD_FS_ASYNC_READ */
int fs_bytes_left(struct fs_file *file);
#if LWIP_HTTPD_FILE_STATE
/** This user-defined function is called when a file is opened. */
void *fs_state_init(struct fs_file *file, const char *name);
/** This user-defined function is called when a file is closed. */
void fs_state_free(struct fs_file *file, void *state);
#endif /* #if LWIP_HTTPD_FILE_STATE */
struct fsdata_file {
const struct fsdata_file *next;
const unsigned char *name;
const unsigned char *data;
int len;
u8_t flags;
#if HTTPD_PRECALCULATED_CHECKSUM
u16_t chksum_count;
const struct fsdata_chksum *chksum;
#endif /* HTTPD_PRECALCULATED_CHECKSUM */
};
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_FS_H */

View File

@ -1,160 +0,0 @@
/**
* @file
* HTTP client
*/
/*
* Copyright (c) 2018 Simon Goldschmidt <goldsimon@gmx.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt <goldsimon@gmx.de>
*
*/
#ifndef LWIP_HDR_APPS_HTTP_CLIENT_H
#define LWIP_HDR_APPS_HTTP_CLIENT_H
#include "lwip/opt.h"
#include "lwip/ip_addr.h"
#include "lwip/err.h"
#include "lwip/altcp.h"
#include "lwip/prot/iana.h"
#include "lwip/pbuf.h"
#if LWIP_TCP && LWIP_CALLBACK_API
#ifdef __cplusplus
extern "C" {
#endif
/**
* @ingroup httpc
* LWIP_HTTPC_HAVE_FILE_IO: define this to 1 to have functions downloading directly
* to disk via fopen/fwrite.
* These functions are example implementations of the interface only.
*/
#ifndef LWIP_HTTPC_HAVE_FILE_IO
#define LWIP_HTTPC_HAVE_FILE_IO 0
#endif
/**
* @ingroup httpc
* The default TCP port used for HTTP
*/
#define HTTP_DEFAULT_PORT LWIP_IANA_PORT_HTTP
/**
* @ingroup httpc
* HTTP client result codes
*/
typedef enum ehttpc_result {
/** File successfully received */
HTTPC_RESULT_OK = 0,
/** Unknown error */
HTTPC_RESULT_ERR_UNKNOWN = 1,
/** Connection to server failed */
HTTPC_RESULT_ERR_CONNECT = 2,
/** Failed to resolve server hostname */
HTTPC_RESULT_ERR_HOSTNAME = 3,
/** Connection unexpectedly closed by remote server */
HTTPC_RESULT_ERR_CLOSED = 4,
/** Connection timed out (server didn't respond in time) */
HTTPC_RESULT_ERR_TIMEOUT = 5,
/** Server responded with an error code */
HTTPC_RESULT_ERR_SVR_RESP = 6,
/** Local memory error */
HTTPC_RESULT_ERR_MEM = 7,
/** Local abort */
HTTPC_RESULT_LOCAL_ABORT = 8,
/** Content length mismatch */
HTTPC_RESULT_ERR_CONTENT_LEN = 9
} httpc_result_t;
typedef struct _httpc_state httpc_state_t;
/**
* @ingroup httpc
* Prototype of a http client callback function
*
* @param arg argument specified when initiating the request
* @param httpc_result result of the http transfer (see enum httpc_result_t)
* @param rx_content_len number of bytes received (without headers)
* @param srv_res this contains the http status code received (if any)
* @param err an error returned by internal lwip functions, can help to specify
* the source of the error, but it is not necessarily != ERR_OK
*/
typedef void (*httpc_result_fn)(void *arg, httpc_result_t httpc_result, u32_t rx_content_len, u32_t srv_res, err_t err);
/**
* @ingroup httpc
* Prototype of http client callback: called when the headers are received
*
* @param connection http client connection
* @param arg argument specified when initiating the request
* @param hdr header pbuf(s) (may contain data also)
* @param hdr_len length of the headers in 'hdr'
* @param content_len content length as received in the headers (-1 if not received)
* @return if != ERR_OK is returned, the connection is aborted
*/
typedef err_t (*httpc_headers_done_fn)(httpc_state_t *connection, void *arg, struct pbuf *hdr, u16_t hdr_len, u32_t content_len);
typedef struct _httpc_connection {
ip_addr_t proxy_addr;
u16_t proxy_port;
u8_t use_proxy;
/* @todo: add username:pass? */
#if LWIP_ALTCP
altcp_allocator_t *altcp_allocator;
#endif
/* this callback is called when the transfer is finished (or aborted) */
httpc_result_fn result_fn;
/* this callback is called after receiving the http headers
It can abort the connection by returning != ERR_OK */
httpc_headers_done_fn headers_done_fn;
} httpc_connection_t;
err_t httpc_get_file(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings,
altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection);
err_t httpc_get_file_dns(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings,
altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection);
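/* Usage sketch (illustrative only): fetch "/index.html" by hostname. The body
* callback must acknowledge and free every pbuf it is handed; a zero-initialized
* httpc_connection_t means no proxy and no custom altcp allocator. Assumes
* altcp_recved()/pbuf_free() from lwip/altcp.h and lwip/pbuf.h (included above).
*/
#if 0
static void
example_httpc_result(void *arg, httpc_result_t res, u32_t rx_content_len,
                     u32_t srv_res, err_t err)
{
  LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(srv_res); LWIP_UNUSED_ARG(err);
  LWIP_UNUSED_ARG(res); LWIP_UNUSED_ARG(rx_content_len);
  /* res == HTTPC_RESULT_OK means rx_content_len body bytes were received */
}

static err_t
example_httpc_body(void *arg, struct altcp_pcb *pcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  if (p != NULL) {
    /* consume p->payload here, then update the window and release the pbuf */
    altcp_recved(pcb, p->tot_len);
    pbuf_free(p);
  }
  return ERR_OK;
}

static void
example_httpc_get(void)
{
  static httpc_connection_t settings; /* zeroed: no proxy, default transport */
  httpc_state_t *conn;
  settings.result_fn = example_httpc_result;
  httpc_get_file_dns("example.org", HTTP_DEFAULT_PORT, "/index.html",
                     &settings, example_httpc_body, NULL, &conn);
}
#endif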
#if LWIP_HTTPC_HAVE_FILE_IO
err_t httpc_get_file_to_disk(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings,
void* callback_arg, const char* local_file_name, httpc_state_t **connection);
err_t httpc_get_file_dns_to_disk(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings,
void* callback_arg, const char* local_file_name, httpc_state_t **connection);
#endif /* LWIP_HTTPC_HAVE_FILE_IO */
#ifdef __cplusplus
}
#endif
#endif /* LWIP_TCP && LWIP_CALLBACK_API */
#endif /* LWIP_HDR_APPS_HTTP_CLIENT_H */

View File

@ -1,255 +0,0 @@
/**
* @file
* HTTP server
*/
/*
* Copyright (c) 2001-2003 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
* This version of the file has been modified by Texas Instruments to offer
* simple server-side-include (SSI) and Common Gateway Interface (CGI)
* capability.
*/
#ifndef LWIP_HDR_APPS_HTTPD_H
#define LWIP_HDR_APPS_HTTPD_H
#include "httpd_opts.h"
#include "lwip/err.h"
#include "lwip/pbuf.h"
#ifdef __cplusplus
extern "C" {
#endif
#if LWIP_HTTPD_CGI
/**
* @ingroup httpd
* Function pointer for a CGI script handler.
*
* This function is called each time the HTTPD server is asked for a file
* whose name was previously registered as a CGI function using a call to
* http_set_cgi_handlers. The iIndex parameter provides the index of the
* CGI within the cgis array passed to http_set_cgi_handlers. Parameters
* pcParam and pcValue provide access to the parameters provided along with
* the URI. iNumParams provides a count of the entries in the pcParam and
* pcValue arrays. Each entry in the pcParam array contains the name of a
* parameter with the corresponding entry in the pcValue array containing the
* value for that parameter. Note that pcParam may contain multiple elements
* with the same name if, for example, a multi-selection list control is used
* in the form generating the data.
*
* The function should return a pointer to a character string which is the
* path and filename of the response that is to be sent to the connected
* browser, for example "/thanks.htm" or "/response/error.ssi".
*
* The maximum number of parameters that will be passed to this function via
* iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in
* the incoming HTTP request above this number will be discarded.
*
* Requests intended for use by this CGI mechanism must be sent using the GET
* method (which encodes all parameters within the URI rather than in a block
* later in the request). Attempts to use the POST method will result in the
* request being ignored.
*
*/
typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[],
char *pcValue[]);
/**
* @ingroup httpd
* Structure defining the base filename (URL) of a CGI and the associated
* function which is to be called when that URL is requested.
*/
typedef struct
{
const char *pcCGIName;
tCGIHandler pfnCGIHandler;
} tCGI;
void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers);
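/* Usage sketch (illustrative only): one CGI registered for "/leds.cgi" that
* scans its name/value pairs and picks the page sent back to the browser.
*/
#if 0
static const char *
example_cgi_handler(int iIndex, int iNumParams, char *pcParam[], char *pcValue[])
{
  int i;
  LWIP_UNUSED_ARG(iIndex);
  for (i = 0; i < iNumParams; i++) {
    /* for "/leds.cgi?led=1": pcParam[i] == "led", pcValue[i] == "1" */
  }
  return "/thanks.htm"; /* page sent back to the browser */
}

static const tCGI example_cgis[] = {
  { "/leds.cgi", example_cgi_handler }
};

/* once at startup, after httpd_init():
   http_set_cgi_handlers(example_cgis, 1); */
#endif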
#endif /* LWIP_HTTPD_CGI */
#if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI
#if LWIP_HTTPD_CGI_SSI
/* we have to prototype this struct here to make it available for the handler */
struct fs_file;
/** Define this generic CGI handler in your application.
* It is called once for every URI with parameters.
* The parameters can be stored to the object passed as connection_state, which
* is allocated to file->state via fs_state_init() from fs_open() or fs_open_custom().
* Content creation via SSI or complete dynamic files can retrieve the CGI params from there.
*/
extern void httpd_cgi_handler(struct fs_file *file, const char* uri, int iNumParams,
char **pcParam, char **pcValue
#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE
, void *connection_state
#endif /* LWIP_HTTPD_FILE_STATE */
);
#endif /* LWIP_HTTPD_CGI_SSI */
#endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */
#if LWIP_HTTPD_SSI
/**
* @ingroup httpd
* Function pointer for the SSI tag handler callback.
*
* This function will be called each time the HTTPD server detects a tag of the
* form <!--#name--> in files with extensions mentioned in the g_pcSSIExtensions
* array (currently .shtml, .shtm, .ssi, .xml, .json) where "name" appears as
* one of the tags supplied to http_set_ssi_handler in the tags array. The
* returned insert string, which will be appended after the string
* "<!--#name-->" in the file sent back to the client, should be written to pointer
* pcInsert. iInsertLen contains the size of the buffer pointed to by
* pcInsert. The iIndex parameter provides the zero-based index of the tag as
* found in the tags array and identifies the tag that is to be processed.
*
* The handler returns the number of characters written to pcInsert excluding
* any terminating NULL, or HTTPD_SSI_TAG_UNKNOWN when the tag is not recognized.
*
* Note that the behavior of this SSI mechanism is somewhat different from the
* "normal" SSI processing as found in, for example, the Apache web server. In
* this case, the inserted text is appended following the SSI tag rather than
* replacing the tag entirely. This allows for an implementation that does not
* require significant additional buffering of output data yet which will still
* offer usable SSI functionality. One downside to this approach is when
* attempting to use SSI within JavaScript. The SSI tag is structured to
* resemble an HTML comment but this syntax does not constitute a comment
* within JavaScript and, hence, leaving the tag in place will result in
* problems in these cases. In order to avoid these problems, define
* LWIP_HTTPD_SSI_INCLUDE_TAG as zero in your lwip options file, or use JavaScript
* style block comments in the form / * # name * / (without the spaces).
*/
typedef u16_t (*tSSIHandler)(
#if LWIP_HTTPD_SSI_RAW
const char* ssi_tag_name,
#else /* LWIP_HTTPD_SSI_RAW */
int iIndex,
#endif /* LWIP_HTTPD_SSI_RAW */
char *pcInsert, int iInsertLen
#if LWIP_HTTPD_SSI_MULTIPART
, u16_t current_tag_part, u16_t *next_tag_part
#endif /* LWIP_HTTPD_SSI_MULTIPART */
#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE
, void *connection_state
#endif /* LWIP_HTTPD_FILE_STATE */
);
/** Set the SSI handler function
* (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used)
*/
void http_set_ssi_handler(tSSIHandler pfnSSIHandler,
const char **ppcTags, int iNumTags);
/** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown.
* In this case, the webserver writes a warning into the page.
* You can also just return 0 to write nothing for unknown tags.
*/
#define HTTPD_SSI_TAG_UNKNOWN 0xFFFF
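/* Usage sketch (illustrative only, default options: indexed tags, no
* raw/multipart/file-state arguments): replace <!--#uptime--> with the uptime
* in seconds. Assumes <stdio.h> for snprintf() and lwip/sys.h for sys_now().
*/
#if 0
static const char *example_ssi_tags[] = { "uptime" };

static u16_t
example_ssi_handler(int iIndex, char *pcInsert, int iInsertLen)
{
  if (iIndex == 0) { /* "uptime" */
    return (u16_t)snprintf(pcInsert, (size_t)iInsertLen, "%"U32_F,
                           sys_now() / 1000);
  }
  return 0; /* insert nothing for anything else */
}

/* once at startup:
   http_set_ssi_handler(example_ssi_handler, example_ssi_tags, 1); */
#endif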
#endif /* LWIP_HTTPD_SSI */
#if LWIP_HTTPD_SUPPORT_POST
/* These functions must be implemented by the application */
/**
* @ingroup httpd
* Called when a POST request has been received. The application can decide
* whether to accept it or not.
*
* @param connection Unique connection identifier, valid until httpd_post_end
* is called.
* @param uri The HTTP header URI receiving the POST request.
* @param http_request The raw HTTP request (the first packet, normally).
* @param http_request_len Size of 'http_request'.
* @param content_len Content-Length from HTTP header.
* @param response_uri Filename of response file, to be filled when denying the
* request
* @param response_uri_len Size of the 'response_uri' buffer.
* @param post_auto_wnd Set this to 0 to let the callback code handle window
* updates by calling 'httpd_post_data_recved' (to throttle rx speed)
* default is 1 (httpd handles window updates automatically)
* @return ERR_OK: Accept the POST request, data may be passed in
* another err_t: Deny the POST request, send back 'bad request'.
*/
err_t httpd_post_begin(void *connection, const char *uri, const char *http_request,
u16_t http_request_len, int content_len, char *response_uri,
u16_t response_uri_len, u8_t *post_auto_wnd);
/**
* @ingroup httpd
* Called for each pbuf of data that has been received for a POST.
* ATTENTION: The application is responsible for freeing the pbufs passed in!
*
* @param connection Unique connection identifier.
* @param p Received data.
* @return ERR_OK: Data accepted.
* another err_t: Data denied, http_post_get_response_uri will be called.
*/
err_t httpd_post_receive_data(void *connection, struct pbuf *p);
/**
* @ingroup httpd
* Called when all data is received or when the connection is closed.
* The application must return the filename/URI of a file to send in response
* to this POST request. If the response_uri buffer is untouched, a 404
* response is returned.
*
* @param connection Unique connection identifier.
* @param response_uri Filename of response file, to be filled when denying the request
* @param response_uri_len Size of the 'response_uri' buffer.
*/
void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len);
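/* Usage sketch (illustrative only): the three application hooks above for a
* POST to "/form" whose body is simply discarded. Assumes <string.h> for
* strcmp()/strncpy().
*/
#if 0
err_t
httpd_post_begin(void *connection, const char *uri, const char *http_request,
                 u16_t http_request_len, int content_len, char *response_uri,
                 u16_t response_uri_len, u8_t *post_auto_wnd)
{
  LWIP_UNUSED_ARG(connection); LWIP_UNUSED_ARG(http_request);
  LWIP_UNUSED_ARG(http_request_len); LWIP_UNUSED_ARG(content_len);
  LWIP_UNUSED_ARG(response_uri); LWIP_UNUSED_ARG(response_uri_len);
  *post_auto_wnd = 1; /* let httpd manage the receive window */
  return (strcmp(uri, "/form") == 0) ? ERR_OK : ERR_VAL;
}

err_t
httpd_post_receive_data(void *connection, struct pbuf *p)
{
  LWIP_UNUSED_ARG(connection);
  pbuf_free(p); /* the application owns, and must free, every pbuf */
  return ERR_OK;
}

void
httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len)
{
  LWIP_UNUSED_ARG(connection);
  /* leaving response_uri untouched would produce a 404 response */
  strncpy(response_uri, "/thanks.htm", response_uri_len);
}
#endif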
#if LWIP_HTTPD_POST_MANUAL_WND
void httpd_post_data_recved(void *connection, u16_t recved_len);
#endif /* LWIP_HTTPD_POST_MANUAL_WND */
#endif /* LWIP_HTTPD_SUPPORT_POST */
void httpd_init(void);
#if HTTPD_ENABLE_HTTPS
struct altcp_tls_config;
void httpd_inits(struct altcp_tls_config *conf);
#endif
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_HTTPD_H */

View File

@ -1,396 +0,0 @@
/**
* @file
* HTTP server options list
*/
/*
* Copyright (c) 2001-2003 Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Adam Dunkels <adam@sics.se>
*
* This version of the file has been modified by Texas Instruments to offer
* simple server-side-include (SSI) and Common Gateway Interface (CGI)
* capability.
*/
#ifndef LWIP_HDR_APPS_HTTPD_OPTS_H
#define LWIP_HDR_APPS_HTTPD_OPTS_H
#include "lwip/opt.h"
#include "lwip/prot/iana.h"
/**
* @defgroup httpd_opts Options
* @ingroup httpd
* @{
*/
/** Set this to 1 to support CGI (old style).
*
* This old style CGI support works by registering an array of URLs and
* associated CGI handler functions (@ref http_set_cgi_handlers).
* This list is scanned just before fs_open is called from request handling.
* The handler can return a new URL that is used internally by the httpd to
* load the returned page (passed to fs_open).
*
* Use this CGI type e.g. to execute specific actions and return a page that
* does not depend on the CGI parameters.
*/
#if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__
#define LWIP_HTTPD_CGI 0
#endif
/** Set this to 1 to support CGI (new style).
*
* This new style CGI support works by calling a global function
* (@ref tCGIHandler) for all URLs that are found. fs_open is called first
* and the URL cannot be rewritten by the CGI handler. Instead, this handler gets
* passed the http file state, an object where it can store information derived
* from the CGI URL or parameters. This file state is later passed to SSI, so
* the SSI code can return data depending on CGI input.
*
* Use this CGI handler if you want CGI information passed on to SSI.
*/
#if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__
#define LWIP_HTTPD_CGI_SSI 0
#endif
/** Set this to 1 to support SSI (Server-Side-Includes)
*
* In contrast to other http servers, this only calls a preregistered callback
* function (@see http_set_ssi_handler) for each tag (in the format of
* <!--#tag-->) encountered in SSI-enabled pages.
* SSI-enabled pages must have one of the predefined SSI-enabled file extensions.
* All files with one of these extensions are parsed when sent.
*
* A downside of the current SSI implementation is that persistent connections
* don't work, as the file length is not known in advance (and httpd currently
* relies on the Content-Length header for persistent connections).
*
* To save memory, the maximum tag length is limited (@see LWIP_HTTPD_MAX_TAG_NAME_LEN).
* To save memory, the maximum insertion string length is limited (@see
* LWIP_HTTPD_MAX_TAG_INSERT_LEN). If this is not enough, @ref LWIP_HTTPD_SSI_MULTIPART
* can be used.
*/
#if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__
#define LWIP_HTTPD_SSI 0
#endif
/** Set this to 1 to implement an SSI tag handler callback that gets a const char*
* to the tag (instead of an index into a pre-registered array of known tags)
* If this is 0, the SSI handler callback function is only called for pre-registered tags.
*/
#if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__
#define LWIP_HTTPD_SSI_RAW 0
#endif
/** Set this to 0 to prevent parsing the file extension at runtime to decide
* if a file should be scanned for SSI tags or not.
* Default is 1 (file extensions are checked using the g_pcSSIExtensions array)
* Set to 2 to override this runtime test function.
*
* This is enabled by default, but if you only use a newer version of makefsdata
* supporting the "-ssi" option, this info is already present in
*/
#if !defined LWIP_HTTPD_SSI_BY_FILE_EXTENSION || defined __DOXYGEN__
#define LWIP_HTTPD_SSI_BY_FILE_EXTENSION 1
#endif
/** Set this to 1 to support HTTP POST */
#if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__
#define LWIP_HTTPD_SUPPORT_POST 0
#endif
/* The maximum number of parameters that the CGI handler can be sent. */
#if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__
#define LWIP_HTTPD_MAX_CGI_PARAMETERS 16
#endif
/** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more
* arguments indicating a counter for insert strings that are too long to be
* inserted at once: the SSI handler function must then set 'next_tag_part'
* which will be passed back to it in the next call. */
#if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__
#define LWIP_HTTPD_SSI_MULTIPART 0
#endif
/* The maximum length of the string comprising the SSI tag name
* ATTENTION: tags longer than this are ignored, not truncated!
*/
#if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__
#define LWIP_HTTPD_MAX_TAG_NAME_LEN 8
#endif
/* The maximum length of string that can be returned to replace any given tag
* If this buffer is not long enough, use LWIP_HTTPD_SSI_MULTIPART.
*/
#if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__
#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192
#endif
#if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__
#define LWIP_HTTPD_POST_MANUAL_WND 0
#endif
/** This string is passed in the HTTP header as "Server: " */
#if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__
#define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)"
#endif
/** Set this to 1 if you want to include code that creates HTTP headers
* at runtime. Default is off: HTTP headers are then created statically
* by the makefsdata tool. Static headers mean smaller code size, but
* the (readonly) fsdata will grow a bit as every file includes the HTTP
* header. */
#if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__
#define LWIP_HTTPD_DYNAMIC_HEADERS 0
#endif
#if !defined HTTPD_DEBUG || defined __DOXYGEN__
#define HTTPD_DEBUG LWIP_DBG_OFF
#endif
/** Set this to 1 to use a memp pool for allocating
* struct http_state instead of the heap.
* If enabled, you'll need to define MEMP_NUM_PARALLEL_HTTPD_CONNS
* (and MEMP_NUM_PARALLEL_HTTPD_SSI_CONNS for SSI) to set the size of
* the pool(s).
*/
#if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__
#define HTTPD_USE_MEM_POOL 0
#endif
/** The server port for HTTPD to use */
#if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__
#define HTTPD_SERVER_PORT LWIP_IANA_PORT_HTTP
#endif
/** The https server port for HTTPD to use */
#if !defined HTTPD_SERVER_PORT_HTTPS || defined __DOXYGEN__
#define HTTPD_SERVER_PORT_HTTPS LWIP_IANA_PORT_HTTPS
#endif
/** Enable https support? */
#if !defined HTTPD_ENABLE_HTTPS || defined __DOXYGEN__
#define HTTPD_ENABLE_HTTPS 0
#endif
/** Maximum retries before the connection is aborted/closed.
* - number of times pcb->poll is called -> default is 4*500ms = 2s;
* - reset when pcb->sent is called
*/
#if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__
#define HTTPD_MAX_RETRIES 4
#endif
/** The poll delay is X*500ms */
#if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__
#define HTTPD_POLL_INTERVAL 4
#endif
/** Priority for tcp pcbs created by HTTPD (very low by default).
* Lower priorities get killed first when running out of memory.
*/
#if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__
#define HTTPD_TCP_PRIO TCP_PRIO_MIN
#endif
/** Set this to 1 to enable timing each file sent */
#if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__
#define LWIP_HTTPD_TIMING 0
#endif
/** Debug level for the timing measurements (used when LWIP_HTTPD_TIMING is enabled) */
#if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__
#define HTTPD_DEBUG_TIMING LWIP_DBG_OFF
#endif
/** Set this to 1 to show error pages when parsing a request fails instead
of simply closing the connection. */
#if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__
#define LWIP_HTTPD_SUPPORT_EXTSTATUS 0
#endif
/** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */
#if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__
#define LWIP_HTTPD_SUPPORT_V09 1
#endif
/** Set this to 1 to enable HTTP/1.1 persistent connections.
* ATTENTION: If the generated file system includes HTTP headers, these must
* include the "Connection: keep-alive" header (pass argument "-11" to makefsdata).
*/
#if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__
#define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0
#endif
/** Set this to 1 to support HTTP requests coming in multiple packets/pbufs */
#if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__
#define LWIP_HTTPD_SUPPORT_REQUESTLIST 1
#endif
#if LWIP_HTTPD_SUPPORT_REQUESTLIST
/** Number of rx pbufs to enqueue to parse an incoming request (up to the first
newline) */
#if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__
#define LWIP_HTTPD_REQ_QUEUELEN 5
#endif
/** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse an incoming
request (up to the first double-newline) */
#if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__
#define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH
#endif
/** Defines the maximum length of an HTTP request line (up to the first CRLF,
copied from the pbuf into a global buffer when pbuf- or packet-queues
are received - otherwise the input pbuf is used directly) */
#if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__
#define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE))
#endif
#endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */
/** This is the size of a static buffer used when URIs end with '/'.
* In this buffer, the directory requested is concatenated with all the
* configured default file names.
* Set to 0 to disable checking default filenames on non-root directories.
*/
#if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__
#define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63
#endif
/** Maximum length of the filename to send as response to a POST request,
* filled in by the application when a POST is finished.
*/
#if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__
#define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63
#endif
/** Set this to 0 to not send the SSI tag (default is on, so the tag will
* be sent in the HTML page) */
#if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__
#define LWIP_HTTPD_SSI_INCLUDE_TAG 1
#endif
/** Set this to 1 to call tcp_abort when tcp_close fails with memory error.
* This can be used to prevent consuming all memory in situations where the
* HTTP server has low priority compared to other communication. */
#if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__
#define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0
#endif
/** Set this to 1 to kill the oldest connection when running out of
* memory for 'struct http_state' or 'struct http_ssi_state'.
* ATTENTION: This puts all connections on a linked list, so it may be slow.
*/
#if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__
#define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0
#endif
/** Set this to 1 to send URIs without extension without headers
* (who uses this at all??) */
#if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__
#define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0
#endif
/** Default: Tags are sent from struct http_state and are therefore volatile */
#if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__
#define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY
#endif
/* By default, the httpd is limited to sending 2*pcb->mss to keep resource usage low
when http is not an important protocol in the device. */
#if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__
#define HTTPD_LIMIT_SENDING_TO_2MSS 1
#endif
/* Define this to a function that returns the maximum amount of data to enqueue.
The function has this signature: u16_t fn(struct altcp_pcb* pcb);
The best place to define this is the hooks file (@see LWIP_HOOK_FILENAME) */
#if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__
#if HTTPD_LIMIT_SENDING_TO_2MSS
#define HTTPD_MAX_WRITE_LEN(pcb) ((u16_t)(2 * altcp_mss(pcb)))
#endif
#endif
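/* Usage sketch (illustrative only) for the hooks file: allow up to four
* segments per write instead of the 2*MSS default described above.
*/
#if 0
#define HTTPD_MAX_WRITE_LEN(pcb) ((u16_t)(4 * altcp_mss(pcb)))
#endif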
/*------------------- FS OPTIONS -------------------*/
/** Set this to 1 and provide the functions:
* - "int fs_open_custom(struct fs_file *file, const char *name)"
* Called first for every opened file to allow opening files
* that are not included in fsdata(_custom).c
* - "void fs_close_custom(struct fs_file *file)"
* Called to free resources allocated by fs_open_custom().
*/
#if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__
#define LWIP_HTTPD_CUSTOM_FILES 0
#endif
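/* Usage sketch (illustrative only) of the two hooks named above, serving one
* RAM-backed file. Assumes <string.h> and LWIP_HTTPD_DYNAMIC_HEADERS==1 so
* httpd generates the HTTP header for a file without an embedded one.
*/
#if 0
int
fs_open_custom(struct fs_file *file, const char *name)
{
  static const char body[] = "hello from fs_open_custom";
  if (strcmp(name, "/hello.txt") != 0) {
    return 0; /* not ours: fall back to the files in fsdata */
  }
  memset(file, 0, sizeof(struct fs_file));
  file->data = body;
  file->len = sizeof(body) - 1;
  file->index = file->len; /* whole file is available at once */
  return 1;
}

void
fs_close_custom(struct fs_file *file)
{
  LWIP_UNUSED_ARG(file); /* nothing was allocated in this sketch */
}
#endif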
/** Set this to 1 to support fs_read() to dynamically read file data.
* Without this (default=off), only one-block files are supported,
* and the contents must be ready after fs_open().
*/
#if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__
#define LWIP_HTTPD_DYNAMIC_FILE_READ 0
#endif
/** Set this to 1 to include an application state argument per file
* that is opened. This allows keeping state per connection/file.
*/
#if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__
#define LWIP_HTTPD_FILE_STATE 0
#endif
/** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for
* predefined (MSS-sized) chunks of the files to prevent having to calculate
* the checksums at runtime. */
#if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__
#define HTTPD_PRECALCULATED_CHECKSUM 0
#endif
/** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations
* (fs_read_async returns FS_READ_DELAYED and calls a callback when finished).
*/
#if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__
#define LWIP_HTTPD_FS_ASYNC_READ 0
#endif
/** Filename (including path) to use as FS data file */
#if !defined HTTPD_FSDATA_FILE || defined __DOXYGEN__
/* HTTPD_USE_CUSTOM_FSDATA: Compatibility with deprecated lwIP option */
#if defined(HTTPD_USE_CUSTOM_FSDATA) && (HTTPD_USE_CUSTOM_FSDATA != 0)
#define HTTPD_FSDATA_FILE "fsdata_custom.c"
#else
#define HTTPD_FSDATA_FILE "fsdata.c"
#endif
#endif
/**
* @}
*/
#endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */

View File

@ -1,100 +0,0 @@
/**
* @file
* lwIP iPerf server implementation
*/
/*
* Copyright (c) 2014 Simon Goldschmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Simon Goldschmidt
*
*/
#ifndef LWIP_HDR_APPS_LWIPERF_H
#define LWIP_HDR_APPS_LWIPERF_H
#include "lwip/opt.h"
#include "lwip/ip_addr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define LWIPERF_TCP_PORT_DEFAULT 5001
/** lwIPerf test results */
enum lwiperf_report_type
{
/** The server side test is done */
LWIPERF_TCP_DONE_SERVER,
/** The client side test is done */
LWIPERF_TCP_DONE_CLIENT,
/** Local error led to test abort */
LWIPERF_TCP_ABORTED_LOCAL,
/** Data check error led to test abort */
LWIPERF_TCP_ABORTED_LOCAL_DATAERROR,
/** Transmit error led to test abort */
LWIPERF_TCP_ABORTED_LOCAL_TXERROR,
/** Remote side aborted the test */
LWIPERF_TCP_ABORTED_REMOTE
};
/** Control */
enum lwiperf_client_type
{
/** Unidirectional tx only test */
LWIPERF_CLIENT,
/** Do a bidirectional test simultaneously */
LWIPERF_DUAL,
/** Do a bidirectional test individually */
LWIPERF_TRADEOFF
};
/** Prototype of a report function that is called when a session is finished.
This report function can show the test results.
@param report_type contains the test result */
typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type,
const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port,
u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec);
void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port,
lwiperf_report_fn report_fn, void* report_arg);
void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg);
void* lwiperf_start_tcp_client(const ip_addr_t* remote_addr, u16_t remote_port,
enum lwiperf_client_type type,
lwiperf_report_fn report_fn, void* report_arg);
void* lwiperf_start_tcp_client_default(const ip_addr_t* remote_addr,
lwiperf_report_fn report_fn, void* report_arg);
void lwiperf_abort(void* lwiperf_session);
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_LWIPERF_H */
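
To show how these prototypes fit together, here is a minimal sketch of starting the default TCP server with a report callback; the names my_iperf_report and my_iperf_init and the log format are assumptions, not part of the API:

#include "lwip/apps/lwiperf.h"

/* Hypothetical report function: logs the outcome of each session. */
static void
my_iperf_report(void *arg, enum lwiperf_report_type report_type,
                const ip_addr_t *local_addr, u16_t local_port,
                const ip_addr_t *remote_addr, u16_t remote_port,
                u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(local_addr); LWIP_UNUSED_ARG(local_port);
  LWIP_UNUSED_ARG(remote_addr); LWIP_UNUSED_ARG(remote_port);
  LWIP_PLATFORM_DIAG(("iperf report %d: %"U32_F" bytes, %"U32_F" ms, %"U32_F" kbit/s\n",
                      (int)report_type, bytes_transferred, ms_duration, bandwidth_kbitpsec));
}

/* Call from the lwIP thread context once the stack is up. */
void
my_iperf_init(void)
{
  /* Listens on all local addresses, port LWIPERF_TCP_PORT_DEFAULT (5001). */
  void *session = lwiperf_start_tcp_server_default(my_iperf_report, NULL);
  LWIP_ASSERT("lwiperf server failed to start", session != NULL);
}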


@@ -1,105 +0,0 @@
/**
* @file
* MDNS responder
*/
/*
* Copyright (c) 2015 Verisure Innovation AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Erik Ekman <erik@kryo.se>
*
*/
#ifndef LWIP_HDR_APPS_MDNS_H
#define LWIP_HDR_APPS_MDNS_H
#include "lwip/apps/mdns_opts.h"
#include "lwip/netif.h"
#ifdef __cplusplus
extern "C" {
#endif
#if LWIP_MDNS_RESPONDER
enum mdns_sd_proto {
DNSSD_PROTO_UDP = 0,
DNSSD_PROTO_TCP = 1
};
#define MDNS_PROBING_CONFLICT 0
#define MDNS_PROBING_SUCCESSFUL 1
#define MDNS_LABEL_MAXLEN 63
struct mdns_host;
struct mdns_service;
/** Callback function to add text to a reply, called when generating the reply */
typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata);
/** Callback function to let the application know the result of probing the network for name
* uniqueness. Called with MDNS_PROBING_SUCCESSFUL if no other node claimed the name
* for the netif or a service and it is safe to use, or with MDNS_PROBING_CONFLICT
* if another node is already using it; mDNS is then disabled on this interface */
typedef void (*mdns_name_result_cb_t)(struct netif* netif, u8_t result);
void mdns_resp_init(void);
void mdns_resp_register_name_result_cb(mdns_name_result_cb_t cb);
err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl);
err_t mdns_resp_remove_netif(struct netif *netif);
err_t mdns_resp_rename_netif(struct netif *netif, const char *hostname);
s8_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata);
err_t mdns_resp_del_service(struct netif *netif, s8_t slot);
err_t mdns_resp_rename_service(struct netif *netif, s8_t slot, const char *name);
err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len);
void mdns_resp_restart(struct netif *netif);
void mdns_resp_announce(struct netif *netif);
/**
* @ingroup mdns
* Announce IP settings have changed on netif.
* Call this in your callback registered by netif_set_status_callback().
* No need to call this function when LWIP_NETIF_EXT_STATUS_CALLBACK==1,
* as this is handled automatically for you.
* @param netif The network interface where settings have changed.
*/
#define mdns_resp_netif_settings_changed(netif) mdns_resp_announce(netif)
#endif /* LWIP_MDNS_RESPONDER */
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_MDNS_H */
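
A minimal usage sketch based on the prototypes above: register a host name and one HTTP service once the netif is up. The host name, service name, TTL of 3600 s and the TXT record content are illustrative assumptions:

#include "lwip/apps/mdns.h"

/* Hypothetical TXT callback: adds one key=value item to replies. */
static void
srv_txt(struct mdns_service *service, void *txt_userdata)
{
  err_t res;
  LWIP_UNUSED_ARG(txt_userdata);
  res = mdns_resp_add_service_txtitem(service, "path=/", 6);
  LWIP_ERROR("mdns add txt failed\n", (res == ERR_OK), return);
}

/* Call once the netif is up; advertises "myboard.local" and one
 * "_http._tcp" service on port 80. */
void
my_mdns_init(struct netif *netif)
{
  mdns_resp_init();
  mdns_resp_add_netif(netif, "myboard", 3600);
  mdns_resp_add_service(netif, "myweb", "_http", DNSSD_PROTO_TCP, 80, 3600, srv_txt, NULL);
}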


@@ -1,81 +0,0 @@
/**
* @file
* MDNS responder
*/
/*
* Copyright (c) 2015 Verisure Innovation AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Erik Ekman <erik@kryo.se>
*
*/
#ifndef LWIP_HDR_APPS_MDNS_OPTS_H
#define LWIP_HDR_APPS_MDNS_OPTS_H
#include "lwip/opt.h"
/**
* @defgroup mdns_opts Options
* @ingroup mdns
* @{
*/
/**
* LWIP_MDNS_RESPONDER==1: Turn on the multicast DNS module. UDP must be available for MDNS
* transport. IGMP is needed for IPv4 multicast.
*/
#ifndef LWIP_MDNS_RESPONDER
#define LWIP_MDNS_RESPONDER 0
#endif /* LWIP_MDNS_RESPONDER */
/** The maximum number of services per netif */
#ifndef MDNS_MAX_SERVICES
#define MDNS_MAX_SERVICES 1
#endif
/** MDNS_RESP_USENETIF_EXTCALLBACK==1: register an ext_callback on the netif
* to automatically restart probing/announcing on status or address change.
*/
#ifndef MDNS_RESP_USENETIF_EXTCALLBACK
#define MDNS_RESP_USENETIF_EXTCALLBACK LWIP_NETIF_EXT_STATUS_CALLBACK
#endif
/**
* MDNS_DEBUG: Enable debugging for multicast DNS.
*/
#ifndef MDNS_DEBUG
#define MDNS_DEBUG LWIP_DBG_OFF
#endif
/**
* @}
*/
#endif /* LWIP_HDR_APPS_MDNS_OPTS_H */
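
As a hedged illustration, an lwipopts.h fragment enabling the responder could look like this; LWIP_IGMP follows the IPv4-multicast note above, and the netif client-data slot is an assumption that depends on the lwIP version:

/* Hypothetical lwipopts.h fragment for the mDNS responder. */
#define LWIP_MDNS_RESPONDER        1
#define LWIP_IGMP                  1  /* IPv4 multicast group membership */
#define MDNS_MAX_SERVICES          2  /* advertise up to two services per netif */
#define LWIP_NUM_NETIF_CLIENT_DATA 1  /* slot for per-netif mDNS state (version-dependent) */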


@@ -1,74 +0,0 @@
/**
* @file
* MDNS responder private definitions
*/
/*
* Copyright (c) 2015 Verisure Innovation AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Erik Ekman <erik@kryo.se>
*
*/
#ifndef LWIP_HDR_MDNS_PRIV_H
#define LWIP_HDR_MDNS_PRIV_H
#include "lwip/apps/mdns_opts.h"
#include "lwip/pbuf.h"
#ifdef __cplusplus
extern "C" {
#endif
#if LWIP_MDNS_RESPONDER
/* Domain struct and methods - visible for unit tests */
#define MDNS_DOMAIN_MAXLEN 256
#define MDNS_READNAME_ERROR 0xFFFF
struct mdns_domain {
/* Encoded domain name */
u8_t name[MDNS_DOMAIN_MAXLEN];
/* Total length of domain name, including zero */
u16_t length;
/* Set if compression of this domain is not allowed */
u8_t skip_compression;
};
err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len);
u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain);
int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b);
u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain);
#endif /* LWIP_MDNS_RESPONDER */
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_MDNS_PRIV_H */
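
Since these definitions are exposed for unit tests, a unit-test style sketch may help; the include path and the ignoring of err_t return values are assumptions:

#include <string.h>
#include "lwip/apps/mdns_priv.h" /* include path assumed */

/* Build the encoded form of "myhost.local" label by label. */
static void
build_domain_example(void)
{
  struct mdns_domain domain;
  memset(&domain, 0, sizeof(domain));
  mdns_domain_add_label(&domain, "myhost", 6);
  mdns_domain_add_label(&domain, "local", 5);
  mdns_domain_add_label(&domain, NULL, 0); /* terminating zero label */
  /* domain.name now holds \x06"myhost"\x05"local"\x00; domain.length == 14 */
}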


@@ -1,205 +0,0 @@
/**
* @file
* MQTT client
*/
/*
* Copyright (c) 2016 Erik Andersson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Erik Andersson
*
*/
#ifndef LWIP_HDR_APPS_MQTT_CLIENT_H
#define LWIP_HDR_APPS_MQTT_CLIENT_H
#include "lwip/apps/mqtt_opts.h"
#include "lwip/err.h"
#include "lwip/ip_addr.h"
#include "lwip/prot/iana.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct mqtt_client_s mqtt_client_t;
#if LWIP_ALTCP && LWIP_ALTCP_TLS
struct altcp_tls_config;
#endif
/** @ingroup mqtt
* Default MQTT port (non-TLS) */
#define MQTT_PORT LWIP_IANA_PORT_MQTT
/** @ingroup mqtt
* Default MQTT TLS port */
#define MQTT_TLS_PORT LWIP_IANA_PORT_SECURE_MQTT
/*---------------------------------------------------------------------------------------------- */
/* Connection with server */
/**
* @ingroup mqtt
* Client information and connection parameters */
struct mqtt_connect_client_info_t {
/** Client identifier, must be set by caller */
const char *client_id;
/** User name, set to NULL if not used */
const char* client_user;
/** Password, set to NULL if not used */
const char* client_pass;
/** Keep-alive time in seconds, 0 to disable keep-alive functionality */
u16_t keep_alive;
/** will topic, set to NULL if the will is not to be used;
will_msg, will_qos and will_retain are then ignored */
const char* will_topic;
/** will_msg, see will_topic */
const char* will_msg;
/** will_qos, see will_topic */
u8_t will_qos;
/** will_retain, see will_topic */
u8_t will_retain;
#if LWIP_ALTCP && LWIP_ALTCP_TLS
/** TLS configuration for secure connections */
struct altcp_tls_config *tls_config;
#endif
};
/**
* @ingroup mqtt
* Connection status codes */
typedef enum
{
/** Accepted */
MQTT_CONNECT_ACCEPTED = 0,
/** Refused protocol version */
MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1,
/** Refused identifier */
MQTT_CONNECT_REFUSED_IDENTIFIER = 2,
/** Refused server */
MQTT_CONNECT_REFUSED_SERVER = 3,
/** Refused user credentials */
MQTT_CONNECT_REFUSED_USERNAME_PASS = 4,
/** Refused not authorized */
MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5,
/** Disconnected */
MQTT_CONNECT_DISCONNECTED = 256,
/** Timeout */
MQTT_CONNECT_TIMEOUT = 257
} mqtt_connection_status_t;
/**
* @ingroup mqtt
* Function prototype for the MQTT connection status callback. Called when the
* client has connected to the server after initiating an MQTT connection attempt by
* calling mqtt_client_connect(), or when the connection is closed by the server or an error occurs.
*
* @param client MQTT client itself
* @param arg Additional argument to pass to the callback function
* @param status Connect result code or disconnection notification @see mqtt_connection_status_t
*
*/
typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status);
/**
* @ingroup mqtt
* Data callback flags */
enum {
/** Flag set when last fragment of data arrives in data callback */
MQTT_DATA_FLAG_LAST = 1
};
/**
* @ingroup mqtt
* Function prototype for the MQTT incoming publish data callback function. Called when data
* arrives at a subscribed topic, @see mqtt_subscribe
*
* @param arg Additional argument to pass to the callback function
* @param data Publish data; the pointed-to object may not be referenced after the callback returns.
NULL is passed when all publish data has been delivered
* @param len Length of the publish data fragment
* @param flags MQTT_DATA_FLAG_LAST is set when this call contains the last part of the data from the publish message
*
*/
typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags);
/**
* @ingroup mqtt
* Function prototype for the MQTT incoming publish callback function. Called when an incoming publish
* arrives at a subscribed topic, @see mqtt_subscribe
*
* @param arg Additional argument to pass to the callback function
* @param topic Zero-terminated topic text string; the topic may not be referenced after the callback returns
* @param tot_len Total length of the publish data; if 0 (no publish payload), the data callback will not be invoked
*/
typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len);
/**
* @ingroup mqtt
* Function prototype for the MQTT request callback. Called when a subscribe, unsubscribe
* or publish request has completed.
* @param arg Pointer to user data supplied when invoking request
* @param err ERR_OK on success
* ERR_TIMEOUT if no response was received within timeout,
* ERR_ABRT if (un)subscribe was denied
*/
typedef void (*mqtt_request_cb_t)(void *arg, err_t err);
err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg,
const struct mqtt_connect_client_info_t *client_info);
void mqtt_disconnect(mqtt_client_t *client);
mqtt_client_t *mqtt_client_new(void);
void mqtt_client_free(mqtt_client_t* client);
u8_t mqtt_client_is_connected(mqtt_client_t *client);
void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t,
mqtt_incoming_data_cb_t data_cb, void *arg);
err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub);
/** @ingroup mqtt
* Subscribe to a topic */
#define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1)
/** @ingroup mqtt
* Unsubscribe from a topic */
#define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0)
err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain,
mqtt_request_cb_t cb, void *arg);
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */
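
Tying the pieces together, a minimal client sketch might look as follows; the callback names, the "sensors/#" topic and the "myboard" client id are illustrative assumptions, and error handling is reduced to the essentials:

#include "lwip/apps/mqtt.h"

/* Hypothetical incoming-publish handlers; real code would remember the
 * topic from the publish callback to interpret the data fragments. */
static void
my_publish_cb(void *arg, const char *topic, u32_t tot_len)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_PLATFORM_DIAG(("incoming publish on %s, %"U32_F" bytes\n", topic, tot_len));
}

static void
my_data_cb(void *arg, const u8_t *data, u16_t len, u8_t flags)
{
  LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(data); LWIP_UNUSED_ARG(len);
  if (flags & MQTT_DATA_FLAG_LAST) {
    /* last fragment of this publish has arrived */
  }
}

static void
my_request_cb(void *arg, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_PLATFORM_DIAG(("subscribe result: %d\n", (int)err));
}

/* Subscribe once the broker has accepted the connection. */
static void
my_connection_cb(mqtt_client_t *client, void *arg, mqtt_connection_status_t status)
{
  if (status == MQTT_CONNECT_ACCEPTED) {
    mqtt_set_inpub_callback(client, my_publish_cb, my_data_cb, arg);
    mqtt_subscribe(client, "sensors/#", 1, my_request_cb, arg);
  }
}

void
my_mqtt_start(const ip_addr_t *broker)
{
  /* Only the mandatory client_id is set; all other fields default to
   * zero/NULL (no credentials, no will, keep-alive disabled). */
  static const struct mqtt_connect_client_info_t ci = {
    .client_id = "myboard"
  };
  mqtt_client_t *client = mqtt_client_new();
  if (client != NULL) {
    mqtt_client_connect(client, broker, MQTT_PORT, my_connection_cb, NULL, &ci);
  }
}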


@@ -1,103 +0,0 @@
/**
* @file
* MQTT client options
*/
/*
* Copyright (c) 2016 Erik Andersson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Erik Andersson
*
*/
#ifndef LWIP_HDR_APPS_MQTT_OPTS_H
#define LWIP_HDR_APPS_MQTT_OPTS_H
#include "lwip/opt.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup mqtt_opts Options
* @ingroup mqtt
* @{
*/
/**
* Output ring-buffer size; must be able to fit the largest outgoing publish message (topic + payload)
*/
#ifndef MQTT_OUTPUT_RINGBUF_SIZE
#define MQTT_OUTPUT_RINGBUF_SIZE 256
#endif
/**
* Number of bytes in the receive buffer; must be at least the size of the longest incoming topic + 8.
* To avoid fragmented incoming publishes, set the length to the maximum incoming topic length + maximum payload length + 8.
*/
#ifndef MQTT_VAR_HEADER_BUFFER_LEN
#define MQTT_VAR_HEADER_BUFFER_LEN 128
#endif
/**
* Maximum number of pending subscribe, unsubscribe and publish requests to the server.
*/
#ifndef MQTT_REQ_MAX_IN_FLIGHT
#define MQTT_REQ_MAX_IN_FLIGHT 4
#endif
/**
* Seconds between each cyclic timer call.
*/
#ifndef MQTT_CYCLIC_TIMER_INTERVAL
#define MQTT_CYCLIC_TIMER_INTERVAL 5
#endif
/**
* Publish, subscribe and unsubscribe request timeout in seconds.
*/
#ifndef MQTT_REQ_TIMEOUT
#define MQTT_REQ_TIMEOUT 30
#endif
/**
* Timeout in seconds for the MQTT connect response after sending the connect request.
*/
#ifndef MQTT_CONNECT_TIMOUT
#define MQTT_CONNECT_TIMOUT 100
#endif
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_MQTT_OPTS_H */
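
A hedged sizing sketch as an lwipopts.h fragment; the numbers are illustrative assumptions for a client exchanging larger publishes, not recommendations:

/* Hypothetical lwipopts.h fragment sizing the MQTT client. */
#define MQTT_OUTPUT_RINGBUF_SIZE   1024 /* fits the largest outgoing topic + payload */
#define MQTT_VAR_HEADER_BUFFER_LEN 512  /* max topic + max payload + 8 avoids fragmented publishes */
#define MQTT_REQ_MAX_IN_FLIGHT     8    /* more concurrent subscribe/publish requests */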


@@ -1,104 +0,0 @@
/**
* @file
* MQTT client (private interface)
*/
/*
* Copyright (c) 2016 Erik Andersson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
* Author: Erik Andersson
*
*/
#ifndef LWIP_HDR_APPS_MQTT_PRIV_H
#define LWIP_HDR_APPS_MQTT_PRIV_H
#include "lwip/apps/mqtt.h"
#include "lwip/altcp.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Pending request item, binds application callback to pending server requests */
struct mqtt_request_t
{
/** Next item in the list; NULL means this is the last in the chain,
next pointing at itself means the request is unallocated */
struct mqtt_request_t *next;
/** Callback to upper layer */
mqtt_request_cb_t cb;
void *arg;
/** MQTT packet identifier */
u16_t pkt_id;
/** Expire time relative to element before this */
u16_t timeout_diff;
};
/** Ring buffer */
struct mqtt_ringbuf_t {
u16_t put;
u16_t get;
u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE];
};
/** MQTT client */
struct mqtt_client_s
{
/** Timers and timeouts */
u16_t cyclic_tick;
u16_t keep_alive;
u16_t server_watchdog;
/** Packet identifier generator */
u16_t pkt_id_seq;
/** Packet identifier of pending incoming publish */
u16_t inpub_pkt_id;
/** Connection state */
u8_t conn_state;
struct altcp_pcb *conn;
/** Connection callback */
void *connect_arg;
mqtt_connection_cb_t connect_cb;
/** Pending requests to server */
struct mqtt_request_t *pend_req_queue;
struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT];
void *inpub_arg;
/** Incoming data callback */
mqtt_incoming_data_cb_t data_cb;
mqtt_incoming_publish_cb_t pub_cb;
/** Input */
u32_t msg_idx;
u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN];
/** Output ring-buffer */
struct mqtt_ringbuf_t output;
};
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_MQTT_PRIV_H */


@@ -1,51 +0,0 @@
/**
* @file
* NETBIOS name service responder
*/
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* This file is part of the lwIP TCP/IP stack.
*
*/
#ifndef LWIP_HDR_APPS_NETBIOS_H
#define LWIP_HDR_APPS_NETBIOS_H
#include "lwip/apps/netbiosns_opts.h"
#ifdef __cplusplus
extern "C" {
#endif
void netbiosns_init(void);
#ifndef NETBIOS_LWIP_NAME
void netbiosns_set_name(const char* hostname);
#endif
void netbiosns_stop(void);
#ifdef __cplusplus
}
#endif
#endif /* LWIP_HDR_APPS_NETBIOS_H */
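
A minimal usage sketch based on the declarations above; the name "MYBOARD" is an illustrative assumption (NetBIOS names are limited to 15 characters):

#include "lwip/apps/netbiosns.h"

/* Answer NetBIOS name queries so the device is reachable by name. */
void
my_netbiosns_init(void)
{
  netbiosns_init();
#ifndef NETBIOS_LWIP_NAME
  netbiosns_set_name("MYBOARD"); /* used when no compile-time name is set */
#endif
}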
