2005-08-10 03:14:34 +00:00
|
|
|
#ifndef _DCCP_H
|
|
|
|
#define _DCCP_H
|
|
|
|
/*
|
|
|
|
* net/dccp/dccp.h
|
|
|
|
*
|
|
|
|
* An implementation of the DCCP protocol
|
2005-08-20 03:23:43 +00:00
|
|
|
* Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
|
2006-08-27 02:01:30 +00:00
|
|
|
* Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
|
2005-08-10 03:14:34 +00:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/dccp.h>
|
2007-08-20 00:16:35 +00:00
|
|
|
#include <linux/ktime.h>
|
2005-08-10 03:14:34 +00:00
|
|
|
#include <net/snmp.h>
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/tcp.h>
|
2005-09-18 07:17:51 +00:00
|
|
|
#include "ackvec.h"
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2006-11-20 20:39:23 +00:00
|
|
|
/*
|
|
|
|
* DCCP - specific warning and debugging macros.
|
|
|
|
*/
|
|
|
|
#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \
|
2008-03-06 04:47:47 +00:00
|
|
|
__func__, ##a)
|
2006-11-20 20:39:23 +00:00
|
|
|
#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
|
2008-03-06 04:47:47 +00:00
|
|
|
__FILE__, __LINE__, __func__)
|
2006-11-20 20:39:23 +00:00
|
|
|
#define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
|
|
|
|
#define DCCP_BUG_ON(cond) do { if (unlikely((cond) != 0)) \
|
|
|
|
DCCP_BUG("\"%s\" holds (exception!)", \
|
|
|
|
__stringify(cond)); \
|
2006-11-15 23:27:47 +00:00
|
|
|
} while (0)
|
|
|
|
|
2006-11-20 20:26:03 +00:00
|
|
|
#define DCCP_PRINTK(enable, fmt, args...) do { if (enable) \
|
|
|
|
printk(fmt, ##args); \
|
2007-02-09 14:24:38 +00:00
|
|
|
} while(0)
|
2006-11-20 20:26:03 +00:00
|
|
|
#define DCCP_PR_DEBUG(enable, fmt, a...) DCCP_PRINTK(enable, KERN_DEBUG \
|
2008-03-06 04:47:47 +00:00
|
|
|
"%s: " fmt, __func__, ##a)
|
2006-11-20 20:26:03 +00:00
|
|
|
|
2005-08-13 23:35:39 +00:00
|
|
|
#ifdef CONFIG_IP_DCCP_DEBUG
|
2005-08-10 03:14:34 +00:00
|
|
|
extern int dccp_debug;
|
2006-11-20 20:26:03 +00:00
|
|
|
#define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a)
|
|
|
|
#define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
|
2009-01-16 23:36:33 +00:00
|
|
|
#define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
|
2005-08-10 03:14:34 +00:00
|
|
|
#else
|
|
|
|
#define dccp_pr_debug(format, a...)
|
|
|
|
#define dccp_pr_debug_cat(format, a...)
|
2009-01-16 23:36:33 +00:00
|
|
|
#define dccp_debug(format, a...)
|
2005-08-10 03:14:34 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
extern struct inet_hashinfo dccp_hashinfo;
|
|
|
|
|
2008-11-26 05:17:14 +00:00
|
|
|
extern struct percpu_counter dccp_orphan_count;
|
2005-08-10 03:14:34 +00:00
|
|
|
|
|
|
|
extern void dccp_time_wait(struct sock *sk, int state, int timeo);
|
|
|
|
|
2006-11-10 04:13:56 +00:00
|
|
|
/*
|
|
|
|
* Set safe upper bounds for header and option length. Since Data Offset is 8
|
|
|
|
* bits (RFC 4340, sec. 5.1), the total header length can never be more than
|
|
|
|
* 4 * 255 = 1020 bytes. The largest possible header length is 28 bytes (X=1):
|
|
|
|
* - DCCP-Response with ACK Subheader and 4 bytes of Service code OR
|
|
|
|
* - DCCP-Reset with ACK Subheader and 4 bytes of Reset Code fields
|
|
|
|
* Hence a safe upper bound for the maximum option length is 1020-28 = 992
|
|
|
|
*/
|
dccp: Do not let initial option overhead shrink the MPS
This fixes a problem caused by the overlap of the connection-setup and
established-state phases of DCCP connections.
During connection setup, the client retransmits Confirm Feature-Negotiation
options until a response from the server signals that it can move from the
half-established PARTOPEN into the OPEN state, whereupon the connection is
fully established on both ends (RFC 4340, 8.1.5).
However, since the client may already send data while it is in the PARTOPEN
state, consequences arise for the Maximum Packet Size: the problem is that the
initial option overhead is much higher than for the subsequent established
phase, as it involves potentially many variable-length list-type options
(server-priority options, RFC 4340, 6.4).
Applying the standard MPS is insufficient here: especially with larger
payloads this can lead to annoying, counter-intuitive EMSGSIZE errors.
On the other hand, reducing the MPS available for the established phase by
the added initial overhead is highly wasteful and inefficient.
The solution chosen therefore is a two-phase strategy:
If the payload length of the DataAck in PARTOPEN is too large, an Ack is sent
to carry the options, and the feature-negotiation list is then flushed.
This means that the server gets two Acks for one Response. If both Acks get
lost, it is probably better to restart the connection anyway and devising yet
another special-case does not seem worth the extra complexity.
The result is a higher utilisation of the available packet space for the data
transmission phase (established state) of a connection.
The patch (over-)estimates the initial overhead to be 32*4 bytes -- commonly
seen values were around 90 bytes for initial feature-negotiation options.
It uses sizeof(u32) to mean "aligned units of 4 bytes".
For consistency, another use of 4-byte alignment is adapted.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
2009-02-27 22:38:29 +00:00
|
|
|
#define MAX_DCCP_SPECIFIC_HEADER (255 * sizeof(uint32_t))
|
2006-11-10 04:13:56 +00:00
|
|
|
#define DCCP_MAX_PACKET_HDR 28
|
|
|
|
#define DCCP_MAX_OPT_LEN (MAX_DCCP_SPECIFIC_HEADER - DCCP_MAX_PACKET_HDR)
|
|
|
|
#define MAX_DCCP_HEADER (MAX_DCCP_SPECIFIC_HEADER + MAX_HEADER)
|
2005-08-10 03:14:34 +00:00
|
|
|
|
dccp: Do not let initial option overhead shrink the MPS
This fixes a problem caused by the overlap of the connection-setup and
established-state phases of DCCP connections.
During connection setup, the client retransmits Confirm Feature-Negotiation
options until a response from the server signals that it can move from the
half-established PARTOPEN into the OPEN state, whereupon the connection is
fully established on both ends (RFC 4340, 8.1.5).
However, since the client may already send data while it is in the PARTOPEN
state, consequences arise for the Maximum Packet Size: the problem is that the
initial option overhead is much higher than for the subsequent established
phase, as it involves potentially many variable-length list-type options
(server-priority options, RFC 4340, 6.4).
Applying the standard MPS is insufficient here: especially with larger
payloads this can lead to annoying, counter-intuitive EMSGSIZE errors.
On the other hand, reducing the MPS available for the established phase by
the added initial overhead is highly wasteful and inefficient.
The solution chosen therefore is a two-phase strategy:
If the payload length of the DataAck in PARTOPEN is too large, an Ack is sent
to carry the options, and the feature-negotiation list is then flushed.
This means that the server gets two Acks for one Response. If both Acks get
lost, it is probably better to restart the connection anyway and devising yet
another special-case does not seem worth the extra complexity.
The result is a higher utilisation of the available packet space for the data
transmission phase (established state) of a connection.
The patch (over-)estimates the initial overhead to be 32*4 bytes -- commonly
seen values were around 90 bytes for initial feature-negotiation options.
It uses sizeof(u32) to mean "aligned units of 4 bytes".
For consistency, another use of 4-byte alignment is adapted.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
2009-02-27 22:38:29 +00:00
|
|
|
/* Upper bound for initial feature-negotiation overhead (padded to 32 bits) */
|
|
|
|
#define DCCP_FEATNEG_OVERHEAD (32 * sizeof(uint32_t))
|
|
|
|
|
2005-08-10 03:14:34 +00:00
|
|
|
#define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT
|
|
|
|
* state, about 60 seconds */
|
|
|
|
|
2006-10-24 23:17:51 +00:00
|
|
|
/* RFC 1122, 4.2.3.1 initial RTO value */
|
2005-08-10 03:14:34 +00:00
|
|
|
#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ))
|
|
|
|
|
2007-12-13 14:16:23 +00:00
|
|
|
/*
|
|
|
|
* The maximum back-off value for retransmissions. This is needed for
|
|
|
|
* - retransmitting client-Requests (sec. 8.1.1),
|
|
|
|
* - retransmitting Close/CloseReq when closing (sec. 8.3),
|
|
|
|
* - feature-negotiation retransmission (sec. 6.6.3),
|
|
|
|
* - Acks in client-PARTOPEN state (sec. 8.1.5).
|
|
|
|
*/
|
|
|
|
#define DCCP_RTO_MAX ((unsigned)(64 * HZ))
|
2007-03-20 18:23:18 +00:00
|
|
|
|
2007-12-12 16:06:14 +00:00
|
|
|
/*
|
|
|
|
* RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
|
|
|
|
*/
|
2007-03-20 18:23:18 +00:00
|
|
|
#define DCCP_SANE_RTT_MIN 100
|
2007-12-12 16:06:14 +00:00
|
|
|
#define DCCP_FALLBACK_RTT (USEC_PER_SEC / 5)
|
|
|
|
#define DCCP_SANE_RTT_MAX (3 * USEC_PER_SEC)
|
2007-03-20 18:23:18 +00:00
|
|
|
|
2006-11-13 15:23:52 +00:00
|
|
|
/* sysctl variables for DCCP */
|
|
|
|
extern int sysctl_dccp_request_retries;
|
|
|
|
extern int sysctl_dccp_retries1;
|
|
|
|
extern int sysctl_dccp_retries2;
|
2006-11-20 20:30:17 +00:00
|
|
|
extern int sysctl_dccp_tx_qlen;
|
2007-09-26 14:31:49 +00:00
|
|
|
extern int sysctl_dccp_sync_ratelimit;
|
2006-11-13 15:23:52 +00:00
|
|
|
|
2007-03-20 15:26:51 +00:00
|
|
|
/*
|
|
|
|
* 48-bit sequence number arithmetic (signed and unsigned)
|
|
|
|
*/
|
|
|
|
#define INT48_MIN 0x800000000000LL /* 2^47 */
|
|
|
|
#define UINT48_MAX 0xFFFFFFFFFFFFLL /* 2^48 - 1 */
|
|
|
|
#define COMPLEMENT48(x) (0x1000000000000LL - (x)) /* 2^48 - x */
|
|
|
|
#define TO_SIGNED48(x) (((x) < INT48_MIN)? (x) : -COMPLEMENT48( (x)))
|
|
|
|
#define TO_UNSIGNED48(x) (((x) >= 0)? (x) : COMPLEMENT48(-(x)))
|
|
|
|
#define ADD48(a, b) (((a) + (b)) & UINT48_MAX)
|
|
|
|
#define SUB48(a, b) ADD48((a), COMPLEMENT48(b))
|
|
|
|
|
|
|
|
static inline void dccp_set_seqno(u64 *seqno, u64 value)
{
	/* DCCP sequence numbers are 48 bits wide: keep only the low 48 bits */
	value &= UINT48_MAX;
	*seqno = value;
}
|
|
|
|
|
|
|
|
static inline void dccp_inc_seqno(u64 *seqno)
{
	/* Advance by one modulo 2^48 so the counter wraps inside 48 bits */
	u64 next = ADD48(*seqno, 1);

	*seqno = next;
}
|
|
|
|
|
2007-03-20 15:45:59 +00:00
|
|
|
/* signed mod-2^48 distance: pos. if seqno1 < seqno2, neg. if seqno1 > seqno2 */
|
|
|
|
static inline s64 dccp_delta_seqno(const u64 seqno1, const u64 seqno2)
{
	/*
	 * Unsigned mod-2^48 difference seqno2 - seqno1, folded into the
	 * signed range: positive if seqno1 < seqno2, negative otherwise.
	 */
	return TO_SIGNED48(SUB48(seqno2, seqno1));
}
|
|
|
|
|
2005-08-10 03:14:34 +00:00
|
|
|
/* is seq1 < seq2 ? */
|
2005-08-15 00:05:53 +00:00
|
|
|
static inline int before48(const u64 seq1, const u64 seq2)
{
	/*
	 * Circular (mod 2^48) comparison: shifting both values left by 16
	 * places the 48-bit number in the top bits of a u64, so the
	 * subtraction wraps naturally and the sign of the s64 result says
	 * which value comes first on the sequence-number circle.
	 */
	return (s64)((seq2 << 16) - (seq1 << 16)) > 0;
}
|
|
|
|
|
|
|
|
/* is seq1 > seq2 ? */
|
2007-03-20 16:00:26 +00:00
|
|
|
#define after48(seq1, seq2) before48(seq2, seq1)
|
2005-08-10 03:14:34 +00:00
|
|
|
|
|
|
|
/* is seq2 <= seq1 <= seq3 ? */
|
2005-08-15 00:05:53 +00:00
|
|
|
static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3)
{
	/*
	 * Circular-range check: with the 48-bit values shifted into the top
	 * bits, (x << 16) - (seq2 << 16) is the unsigned mod-2^48 distance
	 * of x from seq2.  seq1 lies in [seq2, seq3] iff its distance from
	 * seq2 does not exceed that of seq3 (unsigned compare handles wrap).
	 */
	return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16);
}
|
|
|
|
|
|
|
|
static inline u64 max48(const u64 seq1, const u64 seq2)
{
	/* Larger of two 48-bit sequence numbers in circular (mod 2^48) order */
	if (after48(seq1, seq2))
		return seq1;
	return seq2;
}
|
|
|
|
|
2008-07-13 10:51:40 +00:00
|
|
|
/**
|
2010-10-11 18:40:04 +00:00
|
|
|
* dccp_loss_count - Approximate the number of lost data packets in a burst loss
|
|
|
|
* @s1: last known sequence number before the loss ('hole')
|
|
|
|
* @s2: first sequence number seen after the 'hole'
|
2008-07-13 10:51:40 +00:00
|
|
|
* @ndp: NDP count on packet with sequence number @s2
|
|
|
|
*/
|
2010-10-11 18:40:04 +00:00
|
|
|
static inline u64 dccp_loss_count(const u64 s1, const u64 s2, const u64 ndp)
|
2008-07-13 10:51:40 +00:00
|
|
|
{
|
|
|
|
s64 delta = dccp_delta_seqno(s1, s2);
|
|
|
|
|
2008-07-26 04:43:18 +00:00
|
|
|
WARN_ON(delta < 0);
|
2010-10-11 18:40:04 +00:00
|
|
|
delta -= ndp + 1;
|
|
|
|
|
|
|
|
return delta > 0 ? delta : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* dccp_loss_free - Evaluate condition for data loss from RFC 4340, 7.7.1
|
|
|
|
*/
|
|
|
|
static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
{
	/* No data loss iff the estimated burst-loss count between s1/s2 is 0 */
	return !dccp_loss_count(s1, s2, ndp);
}
|
|
|
|
|
2005-08-10 03:14:34 +00:00
|
|
|
/* DCCP MIB counter indices (see struct dccp_mib and the DCCP_*_STATS macros) */
enum {
	DCCP_MIB_NUM = 0,
	DCCP_MIB_ACTIVEOPENS,			/* ActiveOpens */
	DCCP_MIB_ESTABRESETS,			/* EstabResets */
	DCCP_MIB_CURRESTAB,			/* CurrEstab */
	DCCP_MIB_OUTSEGS,			/* OutSegs */
	DCCP_MIB_OUTRSTS,
	DCCP_MIB_ABORTONTIMEOUT,
	DCCP_MIB_TIMEOUTS,
	DCCP_MIB_ABORTFAILED,
	DCCP_MIB_PASSIVEOPENS,
	DCCP_MIB_ATTEMPTFAILS,
	DCCP_MIB_OUTDATAGRAMS,
	DCCP_MIB_INERRS,
	DCCP_MIB_OPTMANDATORYERROR,
	DCCP_MIB_INVALIDOPT,
	__DCCP_MIB_MAX
};
|
|
|
|
|
|
|
|
#define DCCP_MIB_MAX __DCCP_MIB_MAX
|
|
|
|
/* Counter storage for the DCCP MIB, indexed by the DCCP_MIB_* enum above */
struct dccp_mib {
	unsigned long	mibs[DCCP_MIB_MAX];
};
|
2005-08-10 03:14:34 +00:00
|
|
|
|
|
|
|
DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
|
2005-08-13 23:34:54 +00:00
|
|
|
#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
|
|
|
|
#define DCCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(dccp_statistics, field)
|
|
|
|
#define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field)
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2006-11-10 19:43:06 +00:00
|
|
|
/*
|
|
|
|
* Checksumming routines
|
|
|
|
*/
|
2007-05-28 19:32:26 +00:00
|
|
|
static inline unsigned int dccp_csum_coverage(const struct sk_buff *skb)
{
	const struct dccp_hdr* dh = dccp_hdr(skb);

	/* CsCov == 0 means the checksum covers the entire packet (RFC 4340, 9.2) */
	if (dh->dccph_cscov == 0)
		return skb->len;
	/* Otherwise: the header plus (CsCov - 1) 32-bit words of payload */
	return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32);
}
|
|
|
|
|
|
|
|
static inline void dccp_csum_outgoing(struct sk_buff *skb)
{
	unsigned int cov = dccp_csum_coverage(skb);

	/* Coverage spanning the whole packet is signalled by CsCov = 0 */
	if (cov >= skb->len)
		dccp_hdr(skb)->dccph_cscov = 0;

	/* Checksum at most skb->len bytes from the start of the header */
	skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
}
|
|
|
|
|
2010-04-11 02:15:55 +00:00
|
|
|
extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
|
2006-11-10 19:43:06 +00:00
|
|
|
|
2008-07-26 10:59:09 +00:00
|
|
|
extern int dccp_retransmit_skb(struct sock *sk);
|
2005-08-10 03:14:34 +00:00
|
|
|
|
|
|
|
extern void dccp_send_ack(struct sock *sk);
|
2008-08-07 06:50:04 +00:00
|
|
|
extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
|
|
|
|
struct request_sock *rsk);
|
2006-11-10 14:32:01 +00:00
|
|
|
|
2005-08-17 06:10:59 +00:00
|
|
|
extern void dccp_send_sync(struct sock *sk, const u64 seq,
|
|
|
|
const enum dccp_pkt_type pkt_type);
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2010-12-04 12:38:01 +00:00
|
|
|
/*
|
|
|
|
* TX Packet Dequeueing Interface
|
|
|
|
*/
|
|
|
|
extern void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
|
|
|
|
extern bool dccp_qpolicy_full(struct sock *sk);
|
|
|
|
extern void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
|
|
|
|
extern struct sk_buff *dccp_qpolicy_top(struct sock *sk);
|
|
|
|
extern struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
|
2010-12-04 12:39:13 +00:00
|
|
|
extern bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
|
2010-12-04 12:38:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* TX Packet Output and TX Timers
|
|
|
|
*/
|
dccp: Refine the wait-for-ccid mechanism
This extends the existing wait-for-ccid routine so that it may be used with
different types of CCID, addressing the following problems:
1) The queue-drain mechanism only works with rate-based CCIDs. If CCID-2 for
example has a full TX queue and becomes network-limited just as the
application wants to close, then waiting for CCID-2 to become unblocked
could lead to an indefinite delay (i.e., application "hangs").
2) Since each TX CCID in turn uses a feedback mechanism, there may be changes
in its sending policy while the queue is being drained. This can lead to
further delays during which the application will not be able to terminate.
3) The minimum wait time for CCID-3/4 can be expected to be the queue length
times the current inter-packet delay. For example if tx_qlen=100 and a delay
of 15 ms is used for each packet, then the application would have to wait
for a minimum of 1.5 seconds before being allowed to exit.
4) There is no way for the user/application to control this behaviour. It would
be good to use the timeout argument of dccp_close() as an upper bound. Then
the maximum time that an application is willing to wait for its CCIDs to can
be set via the SO_LINGER option.
These problems are addressed by giving the CCID a grace period of up to the
`timeout' value.
The wait-for-ccid function is, as before, used when the application
(a) has read all the data in its receive buffer and
(b) if SO_LINGER was set with a non-zero linger time, or
(c) the socket is either in the OPEN (active close) or in the PASSIVE_CLOSEREQ
state (client application closes after receiving CloseReq).
In addition, there is a catch-all case of __skb_queue_purge() after waiting for
the CCID. This is necessary since the write queue may still have data when
(a) the host has been passively-closed,
(b) abnormal termination (unread data, zero linger time),
(c) wait-for-ccid could not finish within the given time limit.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
2010-10-27 19:16:27 +00:00
|
|
|
extern void dccp_write_xmit(struct sock *sk);
|
|
|
|
extern void dccp_write_space(struct sock *sk);
|
|
|
|
extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);
|
2005-08-10 03:30:56 +00:00
|
|
|
|
2005-08-10 03:14:34 +00:00
|
|
|
extern void dccp_init_xmit_timers(struct sock *sk);
|
|
|
|
static inline void dccp_clear_xmit_timers(struct sock *sk)
{
	/* DCCP reuses the inet_connection_sock transmit-timer machinery */
	inet_csk_clear_xmit_timers(sk);
}
|
|
|
|
|
|
|
|
extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
|
|
|
|
|
|
|
|
extern const char *dccp_packet_name(const int type);
|
|
|
|
|
2006-03-21 05:58:56 +00:00
|
|
|
extern void dccp_set_state(struct sock *sk, const int state);
|
|
|
|
extern void dccp_done(struct sock *sk);
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2008-11-05 07:55:49 +00:00
|
|
|
extern int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
|
|
|
|
struct sk_buff const *skb);
|
2005-08-10 03:14:34 +00:00
|
|
|
|
|
|
|
extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
|
|
|
|
|
|
|
|
extern struct sock *dccp_create_openreq_child(struct sock *sk,
|
|
|
|
const struct request_sock *req,
|
|
|
|
const struct sk_buff *skb);
|
|
|
|
|
|
|
|
extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
|
|
|
|
|
|
|
|
extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
|
|
|
|
struct sk_buff *skb,
|
|
|
|
struct request_sock *req,
|
|
|
|
struct dst_entry *dst);
|
|
|
|
extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
|
|
|
|
struct request_sock *req,
|
|
|
|
struct request_sock **prev);
|
|
|
|
|
|
|
|
extern int dccp_child_process(struct sock *parent, struct sock *child,
|
|
|
|
struct sk_buff *skb);
|
|
|
|
extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
|
|
|
|
struct dccp_hdr *dh, unsigned len);
|
|
|
|
extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
|
|
|
const struct dccp_hdr *dh, const unsigned len);
|
|
|
|
|
2006-03-21 06:00:37 +00:00
|
|
|
extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
|
2008-06-15 00:04:49 +00:00
|
|
|
extern void dccp_destroy_sock(struct sock *sk);
|
2005-12-14 07:24:16 +00:00
|
|
|
|
2005-08-10 03:14:34 +00:00
|
|
|
extern void dccp_close(struct sock *sk, long timeout);
|
|
|
|
extern struct sk_buff *dccp_make_response(struct sock *sk,
|
|
|
|
struct dst_entry *dst,
|
|
|
|
struct request_sock *req);
|
|
|
|
|
|
|
|
extern int dccp_connect(struct sock *sk);
|
|
|
|
extern int dccp_disconnect(struct sock *sk, int flags);
|
|
|
|
extern int dccp_getsockopt(struct sock *sk, int level, int optname,
|
2005-08-14 01:42:25 +00:00
|
|
|
char __user *optval, int __user *optlen);
|
|
|
|
extern int dccp_setsockopt(struct sock *sk, int level, int optname,
|
2009-09-30 23:12:20 +00:00
|
|
|
char __user *optval, unsigned int optlen);
|
2006-03-21 06:45:21 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
extern int compat_dccp_getsockopt(struct sock *sk,
|
|
|
|
int level, int optname,
|
|
|
|
char __user *optval, int __user *optlen);
|
|
|
|
extern int compat_dccp_setsockopt(struct sock *sk,
|
|
|
|
int level, int optname,
|
2009-09-30 23:12:20 +00:00
|
|
|
char __user *optval, unsigned int optlen);
|
2006-03-21 06:45:21 +00:00
|
|
|
#endif
|
2005-08-10 03:14:34 +00:00
|
|
|
extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
|
2005-08-13 23:34:54 +00:00
|
|
|
extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
|
|
|
|
struct msghdr *msg, size_t size);
|
2005-08-10 03:14:34 +00:00
|
|
|
extern int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
|
|
|
|
struct msghdr *msg, size_t len, int nonblock,
|
|
|
|
int flags, int *addr_len);
|
|
|
|
extern void dccp_shutdown(struct sock *sk, int how);
|
2005-12-14 07:24:16 +00:00
|
|
|
extern int inet_dccp_listen(struct socket *sock, int backlog);
|
|
|
|
extern unsigned int dccp_poll(struct file *file, struct socket *sock,
|
|
|
|
poll_table *wait);
|
|
|
|
extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
|
|
|
|
int addr_len);
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2008-04-03 21:20:52 +00:00
|
|
|
extern struct sk_buff *dccp_ctl_make_reset(struct sock *sk,
|
[DCCP]: Factor out common code for generating Resets
This factors code common to dccp_v{4,6}_ctl_send_reset into a separate function,
and adds support for filling in the Data 1 ... Data 3 fields from RFC 4340, 5.6.
It is useful to have this separate, since the following Reset codes will always
be generated from the control socket rather than via dccp_send_reset:
* Code 3, "No Connection", cf. 8.3.1;
* Code 4, "Packet Error" (identification for Data 1 added);
* Code 5, "Option Error" (identification for Data 1..3 added, will be used later);
* Code 6, "Mandatory Error" (same as Option Error);
* Code 7, "Connection Refused" (what on Earth is the difference to "No Connection"?);
* Code 8, "Bad Service Code";
* Code 9, "Too Busy";
* Code 10, "Bad Init Cookie" (not used).
Code 0 is not recommended by the RFC, the following codes would be used in
dccp_send_reset() instead, since they all relate to an established DCCP connection:
* Code 1, "Closed";
* Code 2, "Aborted";
* Code 11, "Aggression Penalty" (12.3).
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
2007-09-26 17:35:19 +00:00
|
|
|
struct sk_buff *skb);
|
2006-03-21 03:25:24 +00:00
|
|
|
extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
|
2005-08-24 04:50:06 +00:00
|
|
|
extern void dccp_send_close(struct sock *sk, const int active);
|
2005-12-14 07:24:16 +00:00
|
|
|
extern int dccp_invalid_packet(struct sk_buff *skb);
|
2007-09-26 05:40:44 +00:00
|
|
|
extern u32 dccp_sample_rtt(struct sock *sk, long delta);
|
2005-12-14 07:24:16 +00:00
|
|
|
|
|
|
|
static inline int dccp_bad_service_code(const struct sock *sk,
|
2006-03-21 03:23:32 +00:00
|
|
|
const __be32 service)
|
2005-12-14 07:24:16 +00:00
|
|
|
{
|
|
|
|
const struct dccp_sock *dp = dccp_sk(sk);
|
|
|
|
|
|
|
|
if (dp->dccps_service == service)
|
|
|
|
return 0;
|
|
|
|
return !dccp_list_has_service(dp->dccps_service_list, service);
|
|
|
|
}
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2007-09-26 14:27:56 +00:00
|
|
|
/**
|
|
|
|
* dccp_skb_cb - DCCP per-packet control information
|
|
|
|
* @dccpd_type: one of %dccp_pkt_type (or unknown)
|
|
|
|
* @dccpd_ccval: CCVal field (5.1), see e.g. RFC 4342, 8.1
|
|
|
|
* @dccpd_reset_code: one of %dccp_reset_codes
|
|
|
|
* @dccpd_reset_data: Data1..3 fields (depend on @dccpd_reset_code)
|
|
|
|
* @dccpd_opt_len: total length of all options (5.8) in the packet
|
|
|
|
* @dccpd_seq: sequence number
|
|
|
|
* @dccpd_ack_seq: acknowledgment number subheader field value
|
|
|
|
* This is used for transmission as well as for reception.
|
|
|
|
*/
|
2005-08-10 03:14:34 +00:00
|
|
|
struct dccp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;			/* IP-layer (v4/v6) per-packet info */
	__u8  dccpd_type:4;		/* one of %dccp_pkt_type */
	__u8  dccpd_ccval:4;		/* CCVal field (RFC 4340, 5.1) */
	__u8  dccpd_reset_code,		/* one of %dccp_reset_codes */
	      dccpd_reset_data[3];	/* Data1..3 for the Reset code */
	__u16 dccpd_opt_len;		/* total length of all options (5.8) */
	__u64 dccpd_seq;		/* sequence number */
	__u64 dccpd_ack_seq;		/* acknowledgment number */
};
|
|
|
|
|
|
|
|
#define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0]))
|
|
|
|
|
2007-12-06 14:27:15 +00:00
|
|
|
/* RFC 4340, sec. 7.7 */
|
2005-08-10 03:14:34 +00:00
|
|
|
static inline int dccp_non_data_packet(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
|
|
|
|
|
|
|
|
return type == DCCP_PKT_ACK ||
|
|
|
|
type == DCCP_PKT_CLOSE ||
|
|
|
|
type == DCCP_PKT_CLOSEREQ ||
|
|
|
|
type == DCCP_PKT_RESET ||
|
|
|
|
type == DCCP_PKT_SYNC ||
|
|
|
|
type == DCCP_PKT_SYNCACK;
|
|
|
|
}
|
|
|
|
|
2007-12-06 14:27:15 +00:00
|
|
|
/* RFC 4340, sec. 7.7 */
|
|
|
|
static inline int dccp_data_packet(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
|
|
|
|
|
|
|
|
return type == DCCP_PKT_DATA ||
|
|
|
|
type == DCCP_PKT_DATAACK ||
|
|
|
|
type == DCCP_PKT_REQUEST ||
|
|
|
|
type == DCCP_PKT_RESPONSE;
|
|
|
|
}
|
|
|
|
|
2005-08-10 03:14:34 +00:00
|
|
|
static inline int dccp_packet_without_ack(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;
|
|
|
|
|
|
|
|
return type == DCCP_PKT_DATA || type == DCCP_PKT_REQUEST;
|
|
|
|
}
|
|
|
|
|
2007-03-20 15:26:51 +00:00
|
|
|
#define DCCP_PKT_WITHOUT_ACK_SEQ (UINT48_MAX << 2)
|
2005-08-10 03:14:34 +00:00
|
|
|
|
|
|
|
static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss)
|
|
|
|
{
|
2005-08-13 23:34:54 +00:00
|
|
|
struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh +
|
|
|
|
sizeof(*dh));
|
2006-03-21 03:23:32 +00:00
|
|
|
dh->dccph_seq2 = 0;
|
|
|
|
dh->dccph_seq = htons((gss >> 32) & 0xfffff);
|
2005-08-10 03:14:34 +00:00
|
|
|
dhx->dccph_seq_low = htonl(gss & 0xffffffff);
|
|
|
|
}
|
|
|
|
|
2005-08-13 23:34:54 +00:00
|
|
|
static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack,
				    const u64 gsr)
{
	/* Split the 48-bit Acknowledgement Number into high 16 + low 32 bits */
	dhack->dccph_reserved1 = 0;
	dhack->dccph_ack_nr_high = htons(gsr >> 32);
	dhack->dccph_ack_nr_low = htonl(gsr & 0xffffffff);
}
|
|
|
|
|
|
|
|
static inline void dccp_update_gsr(struct sock *sk, u64 seq)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/* Track the greatest sequence number received (GSR) so far */
	if (after48(seq, dp->dccps_gsr))
		dp->dccps_gsr = seq;
	/* Sequence validity window depends on remote Sequence Window (7.5.1) */
	dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
	/*
	 * Adjust SWL so that it is not below ISR. In contrast to RFC 4340,
	 * 7.5.1 we perform this check beyond the initial handshake: W/W' are
	 * always > 32, so for the first W/W' packets in the lifetime of a
	 * connection we always have to adjust SWL.
	 * A second reason why we are doing this is that the window depends on
	 * the feature-remote value of Sequence Window: nothing stops the peer
	 * from updating this value while we are busy adjusting SWL for the
	 * first W packets (we would have to count from scratch again then).
	 * Therefore it is safer to always make sure that the Sequence Window
	 * is not artificially extended by a peer who grows SWL downwards by
	 * continually updating the feature-remote Sequence-Window.
	 * If sequence numbers wrap it is bad luck. But that will take a while
	 * (48 bit), and this measure prevents Sequence-number attacks.
	 */
	if (before48(dp->dccps_swl, dp->dccps_isr))
		dp->dccps_swl = dp->dccps_isr;
	dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4);
}
|
|
|
|
|
|
|
|
static inline void dccp_update_gss(struct sock *sk, u64 seq)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/* Track the greatest sequence number sent (GSS) */
	dp->dccps_gss = seq;
	/* Ack validity window depends on local Sequence Window value (7.5.1) */
	dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win);
	/* Adjust AWL so that it is not below ISS - see comment above for SWL */
	if (before48(dp->dccps_awl, dp->dccps_iss))
		dp->dccps_awl = dp->dccps_iss;
	dp->dccps_awh = dp->dccps_gss;
}
|
2007-02-09 14:24:38 +00:00
|
|
|
|
2010-11-10 20:21:35 +00:00
|
|
|
static inline int dccp_ackvec_pending(const struct sock *sk)
|
|
|
|
{
|
|
|
|
return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
|
|
|
|
!dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
|
|
|
|
}
|
|
|
|
|
2005-09-18 07:17:51 +00:00
|
|
|
static inline int dccp_ack_pending(const struct sock *sk)
{
	/* An Ack is due when Ack Vector data is pending or one is scheduled */
	if (dccp_ackvec_pending(sk))
		return 1;
	return inet_csk_ack_scheduled(sk) != 0;
}
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2011-07-25 02:22:29 +00:00
|
|
|
extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
|
dccp: Resolve dependencies of features on choice of CCID
This provides a missing link in the code chain, as several features implicitly
depend and/or rely on the choice of CCID. Most notably, this is the Send Ack Vector
feature, but also Ack Ratio and Send Loss Event Rate (also taken care of).
For Send Ack Vector, the situation is as follows:
* since CCID2 mandates the use of Ack Vectors, there is no point in allowing
endpoints which use CCID2 to disable Ack Vector features such a connection;
* a peer with a TX CCID of CCID2 will always expect Ack Vectors, and a peer
with a RX CCID of CCID2 must always send Ack Vectors (RFC 4341, sec. 4);
* for all other CCIDs, the use of (Send) Ack Vector is optional and thus
negotiable. However, this implies that the code negotiating the use of Ack
Vectors also supports it (i.e. is able to supply and to either parse or
ignore received Ack Vectors). Since this is not the case (CCID-3 has no Ack
Vector support), the use of Ack Vectors is here disabled, with a comment
in the source code.
An analogous consideration arises for the Send Loss Event Rate feature,
since the CCID-3 implementation does not support the loss interval options
of RFC 4342. To make such use explicit, corresponding feature-negotiation
options are inserted which signal the use of the loss event rate option,
as it is used by the CCID3 code.
Lastly, the values of the Ack Ratio feature are matched to the choice of CCID.
The patch implements this as a function which is called after the user has
made all other registrations for changing default values of features.
The table is variable-length, the reserved (and hence for feature-negotiation
invalid, confirmed by considering section 19.4 of RFC 4340) feature number `0'
is used to mark the end of the table.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Acked-by: Ian McDonald <ian.mcdonald@jandi.co.nz>
Signed-off-by: David S. Miller <davem@davemloft.net>
2008-11-12 08:48:44 +00:00
|
|
|
extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
|
2008-11-17 06:49:52 +00:00
|
|
|
extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
|
2008-12-02 07:27:31 +00:00
|
|
|
extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
|
|
|
|
struct sk_buff *skb);
|
2008-12-02 07:34:01 +00:00
|
|
|
extern int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
|
2008-11-05 07:54:04 +00:00
|
|
|
extern void dccp_feat_list_purge(struct list_head *fn_list);
|
|
|
|
|
2006-03-21 06:32:06 +00:00
|
|
|
extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
|
2007-12-13 14:38:11 +00:00
|
|
|
extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
|
2010-06-22 01:14:34 +00:00
|
|
|
extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
|
2007-09-26 05:40:13 +00:00
|
|
|
extern u32 dccp_timestamp(void);
|
|
|
|
extern void dccp_timestamping_init(void);
|
2010-06-22 01:14:34 +00:00
|
|
|
extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
|
|
|
|
const void *value, unsigned char len);
|
2005-08-10 03:14:34 +00:00
|
|
|
|
2006-03-21 03:25:02 +00:00
|
|
|
#ifdef CONFIG_SYSCTL
|
|
|
|
extern int dccp_sysctl_init(void);
|
|
|
|
extern void dccp_sysctl_exit(void);
|
|
|
|
#else
|
|
|
|
static inline int dccp_sysctl_init(void)
{
	/* No-op stub when sysctl support is compiled out */
	return 0;
}
|
|
|
|
|
|
|
|
static inline void dccp_sysctl_exit(void)
{
	/* No-op stub when sysctl support is compiled out */
}
|
|
|
|
#endif
|
|
|
|
|
2005-08-10 03:14:34 +00:00
|
|
|
#endif /* _DCCP_H */
|