import (
"bufio"
"bytes"
- "compress/gzip"
"errors"
"fmt"
+ "internal/godebug"
"io"
- "io/ioutil"
"net/http/httptrace"
"net/http/internal"
+ "net/http/internal/ascii"
"net/textproto"
"reflect"
"sort"
ByteReadCh chan readResult // non-nil if probeRequestBody called
}
-func newTransferWriter(r interface{}) (t *transferWriter, err error) {
+func newTransferWriter(r any) (t *transferWriter, err error) {
t = &transferWriter{}
// Extract relevant fields
// servers. See Issue 18257, as one example.
//
// The only reason we'd send such a request is if the user set the Body to a
-// non-nil value (say, ioutil.NopCloser(bytes.NewReader(nil))) and didn't
+// non-nil value (say, io.NopCloser(bytes.NewReader(nil))) and didn't
// set ContentLength, or NewRequest set it to -1 (unknown), so then we assume
// there's bytes to send.
//
// headers before the pipe is fed data), we need to be careful and bound how
// long we wait for it. This delay will only affect users if all the following
// are true:
-// * the request body blocks
-// * the content length is not set (or set to -1)
-// * the method doesn't usually have a body (GET, HEAD, DELETE, ...)
-// * there is no transfer-encoding=chunked already set.
+// - the request body blocks
+// - the content length is not set (or set to -1)
+// - the method doesn't usually have a body (GET, HEAD, DELETE, ...)
+// - there is no transfer-encoding=chunked already set.
+//
// In other words, this delay will not normally affect anybody, and there
// are workarounds if it does.
func (t *transferWriter) probeRequestBody() {
rres.b = buf[0]
}
t.ByteReadCh <- rres
+ close(t.ByteReadCh)
}(t.Body)
timer := time.NewTimer(200 * time.Millisecond)
select {
return false
}
// Many servers expect a Content-Length for these methods
- if t.Method == "POST" || t.Method == "PUT" {
+ if t.Method == "POST" || t.Method == "PUT" || t.Method == "PATCH" {
return true
}
if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
k = CanonicalHeaderKey(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
- return &badStringError{"invalid Trailer key", k}
+ return badStringError("invalid Trailer key", k)
}
keys = append(keys, k)
}
return nil
}
-func (t *transferWriter) writeBody(w io.Writer) error {
- var err error
+// always closes t.BodyCloser
+func (t *transferWriter) writeBody(w io.Writer) (err error) {
var ncopy int64
+ closed := false
+ defer func() {
+ if closed || t.BodyCloser == nil {
+ return
+ }
+ if closeErr := t.BodyCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }()
// Write body. We "unwrap" the body first if it was wrapped in a
- // nopCloser. This is to ensure that we can take advantage of
+ // nopCloser or readTrackingBody. This is to ensure that we can take advantage of
// OS-level optimizations in the event that the body is an
// *os.File.
if t.Body != nil {
return err
}
var nextra int64
- nextra, err = t.doBodyCopy(ioutil.Discard, body)
+ nextra, err = t.doBodyCopy(io.Discard, body)
ncopy += nextra
}
if err != nil {
}
}
if t.BodyCloser != nil {
+ closed = true
if err := t.BodyCloser.Close(); err != nil {
return err
}
//
// This function is only intended for use in writeBody.
func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) {
- n, err = io.Copy(dst, src)
+ bufp := copyBufPool.Get().(*[]byte)
+ buf := *bufp
+ defer copyBufPool.Put(bufp)
+
+ n, err = io.CopyBuffer(dst, src, buf)
if err != nil && err != io.EOF {
t.bodyReadError = err
}
return
}
-// unwrapBodyReader unwraps the body's inner reader if it's a
+// unwrapBody unwraps the body's inner reader if it's a
// nopCloser. This is to ensure that body writes sourced from local
// files (*os.File types) are properly optimized.
//
// This function is only intended for use in writeBody.
func (t *transferWriter) unwrapBody() io.Reader {
- if reflect.TypeOf(t.Body) == nopCloserType {
- return reflect.ValueOf(t.Body).Field(0).Interface().(io.Reader)
+ if r, ok := unwrapNopCloser(t.Body); ok {
+ return r
+ }
+ if r, ok := t.Body.(*readTrackingBody); ok {
+ r.didRead = true
+ return r.ReadCloser
}
-
return t.Body
}
ProtoMajor int
ProtoMinor int
// Output
- Body io.ReadCloser
- ContentLength int64
- TransferEncoding []string
- Close bool
- Trailer Header
+ Body io.ReadCloser
+ ContentLength int64
+ Chunked bool
+ Close bool
+ Trailer Header
}
func (t *transferReader) protoAtLeast(m, n int) bool {
var (
suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"}
suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"}
+ excludedHeadersNoBody = map[string]bool{"Content-Length": true, "Transfer-Encoding": true}
)
func suppressedHeaders(status int) []string {
return nil
}
-// proxyingReadCloser is a composite type that accepts and proxies
-// io.Read and io.Close calls to its respective Reader and Closer.
-//
-// It is composed of:
-// a) a top-level reader e.g. the result of decompression
-// b) a symbolic Closer e.g. the result of decompression, the
-// original body and the connection itself.
-type proxyingReadCloser struct {
- io.Reader
- io.Closer
-}
-
-// multiCloser implements io.Closer and allows a bunch of io.Closer values
-// to all be closed once.
-// Example usage is with proxyingReadCloser if we are decompressing a response
-// body on the fly and would like to close both *gzip.Reader and underlying body.
-type multiCloser []io.Closer
-
-func (mc multiCloser) Close() error {
- var err error
- for _, c := range mc {
- if err1 := c.Close(); err1 != nil && err == nil {
- err = err1
- }
- }
- return err
-}
-
// msg is *Request or *Response.
-func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
+func readTransfer(msg any, r *bufio.Reader) (err error) {
t := &transferReader{RequestMethod: "GET"}
// Unify input
t.ProtoMajor, t.ProtoMinor = 1, 1
}
- // Transfer encoding, content length
- err = t.fixTransferEncoding()
- if err != nil {
+ // Transfer-Encoding: chunked, and overriding Content-Length.
+ if err := t.parseTransferEncoding(); err != nil {
return err
}
- realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
+ realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
if err != nil {
return err
}
if isResponse && t.RequestMethod == "HEAD" {
- if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
+ if n, err := parseContentLength(t.Header["Content-Length"]); err != nil {
return err
} else {
t.ContentLength = n
}
// Trailer
- t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
+ t.Trailer, err = fixTrailer(t.Header, t.Chunked)
if err != nil {
return err
}
// See RFC 7230, section 3.3.
switch msg.(type) {
case *Response:
- if realLength == -1 &&
- !chunked(t.TransferEncoding) &&
- bodyAllowedForStatus(t.StatusCode) {
+ if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
// Unbounded body.
t.Close = true
}
// Prepare body reader. ContentLength < 0 means chunked encoding
// or close connection when finished, since multipart is not supported yet
switch {
- case chunked(t.TransferEncoding) || implicitlyChunked(t.TransferEncoding):
- if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
+ case t.Chunked:
+ if isResponse && (noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode)) {
t.Body = NoBody
} else {
t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
}
}
- // Finally if "gzip" was one of the requested transfer-encodings,
- // we'll unzip the concatenated body/payload of the request.
- // TODO: As we support more transfer-encodings, extract
- // this code and apply the un-codings in reverse.
- if t.Body != NoBody && gzipped(t.TransferEncoding) {
- zr, err := gzip.NewReader(t.Body)
- if err != nil {
- return fmt.Errorf("http: failed to gunzip body: %v", err)
- }
- t.Body = &proxyingReadCloser{
- Reader: zr,
- Closer: multiCloser{zr, t.Body},
- }
- }
-
// Unify output
switch rr := msg.(type) {
case *Request:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
rr.Close = t.Close
rr.Trailer = t.Trailer
case *Response:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
rr.Close = t.Close
rr.Trailer = t.Trailer
}
return nil
}
-// Checks whether chunked is the last part of the encodings stack
-func chunked(te []string) bool { return len(te) > 0 && te[len(te)-1] == "chunked" }
-
-// implicitlyChunked is a helper to check for implicity of chunked, because
-// RFC 7230 Section 3.3.1 says that the sender MUST apply chunked as the final
-// payload body to ensure that the message is framed for both the request
-// and the body. Since "identity" is incompatabile with any other transformational
-// encoding cannot co-exist, the presence of "identity" will cause implicitlyChunked
-// to return false.
-func implicitlyChunked(te []string) bool {
- if len(te) == 0 { // No transfer-encodings passed in, so not implicity chunked.
- return false
- }
- for _, tei := range te {
- if tei == "identity" {
- return false
- }
- }
- return true
-}
-
-func isGzipTransferEncoding(tei string) bool {
- // RFC 7230 4.2.3 requests that "x-gzip" SHOULD be considered the same as "gzip".
- return tei == "gzip" || tei == "x-gzip"
-}
-
-// Checks where either of "gzip" or "x-gzip" are contained in transfer encodings.
-func gzipped(te []string) bool {
- for _, tei := range te {
- if isGzipTransferEncoding(tei) {
- return true
- }
- }
- return false
-}
+// Checks whether chunked is part of the encodings stack.
+func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
// Checks whether the encoding is explicitly "identity".
func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
return ok
}
-// fixTransferEncoding sanitizes t.TransferEncoding, if needed.
-func (t *transferReader) fixTransferEncoding() error {
+// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
+func (t *transferReader) parseTransferEncoding() error {
raw, present := t.Header["Transfer-Encoding"]
if !present {
return nil
return nil
}
- encodings := strings.Split(raw[0], ",")
- te := make([]string, 0, len(encodings))
-
- // When adding new encodings, please maintain the invariant:
- // if chunked encoding is present, it must always
- // come last and it must be applied only once.
- // See RFC 7230 Section 3.3.1 Transfer-Encoding.
- for i, encoding := range encodings {
- encoding = strings.ToLower(strings.TrimSpace(encoding))
-
- if encoding == "identity" {
- // "identity" should not be mixed with other transfer-encodings/compressions
- // because it means "no compression, no transformation".
- if len(encodings) != 1 {
- return &badStringError{`"identity" when present must be the only transfer encoding`, strings.Join(encodings, ",")}
- }
- // "identity" is not recorded.
- break
- }
-
- switch {
- case encoding == "chunked":
- // "chunked" MUST ALWAYS be the last
- // encoding as per the loop invariant.
- // That is:
- // Invalid: [chunked, gzip]
- // Valid: [gzip, chunked]
- if i+1 != len(encodings) {
- return &badStringError{"chunked must be applied only once, as the last encoding", strings.Join(encodings, ",")}
- }
- // Supported otherwise.
-
- case isGzipTransferEncoding(encoding):
- // Supported
-
- default:
- return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", encoding)}
- }
-
- te = te[0 : len(te)+1]
- te[len(te)-1] = encoding
- }
-
- if len(te) > 0 {
- // RFC 7230 3.3.2 says "A sender MUST NOT send a
- // Content-Length header field in any message that
- // contains a Transfer-Encoding header field."
- //
- // but also:
- // "If a message is received with both a
- // Transfer-Encoding and a Content-Length header
- // field, the Transfer-Encoding overrides the
- // Content-Length. Such a message might indicate an
- // attempt to perform request smuggling (Section 9.5)
- // or response splitting (Section 9.4) and ought to be
- // handled as an error. A sender MUST remove the
- // received Content-Length field prior to forwarding
- // such a message downstream."
- //
- // Reportedly, these appear in the wild.
- delete(t.Header, "Content-Length")
- t.TransferEncoding = te
- return nil
+ // Like nginx, we only support a single Transfer-Encoding header field, and
+ // only if set to "chunked". This is one of the most security sensitive
+ // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
+ // strict and simple.
+ if len(raw) != 1 {
+ return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
+ }
+ if !ascii.EqualFold(raw[0], "chunked") {
+ return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
}
+ // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
+ // in any message that contains a Transfer-Encoding header field."
+ //
+ // but also: "If a message is received with both a Transfer-Encoding and a
+ // Content-Length header field, the Transfer-Encoding overrides the
+ // Content-Length. Such a message might indicate an attempt to perform
+ // request smuggling (Section 9.5) or response splitting (Section 9.4) and
+ // ought to be handled as an error. A sender MUST remove the received
+ // Content-Length field prior to forwarding such a message downstream."
+ //
+ // Reportedly, these appear in the wild.
+ delete(t.Header, "Content-Length")
+
+ t.Chunked = true
return nil
}
// Determine the expected body length, using RFC 7230 Section 3.3. This
// function is not a method, because ultimately it should be shared by
// ReadResponse and ReadRequest.
-func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
+func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
isRequest := !isResponse
contentLens := header["Content-Length"]
// Content-Length headers if they differ in value.
// If there are dups of the value, remove the dups.
// See Issue 16490.
- first := strings.TrimSpace(contentLens[0])
+ first := textproto.TrimString(contentLens[0])
for _, ct := range contentLens[1:] {
- if first != strings.TrimSpace(ct) {
+ if first != textproto.TrimString(ct) {
return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
}
}
}
// Logic based on response type or status
- if noResponseBodyExpected(requestMethod) {
- // For HTTP requests, as part of hardening against request
- // smuggling (RFC 7230), don't allow a Content-Length header for
- // methods which don't permit bodies. As an exception, allow
- // exactly one Content-Length header if its value is "0".
- if isRequest && len(contentLens) > 0 && !(len(contentLens) == 1 && contentLens[0] == "0") {
- return 0, fmt.Errorf("http: method cannot contain a Content-Length; got %q", contentLens)
- }
+ if isResponse && noResponseBodyExpected(requestMethod) {
return 0, nil
}
if status/100 == 1 {
}
// Logic based on Transfer-Encoding
- if chunked(te) {
+ if chunked {
return -1, nil
}
- // Logic based on Content-Length
- var cl string
- if len(contentLens) == 1 {
- cl = strings.TrimSpace(contentLens[0])
- }
- if cl != "" {
- n, err := parseContentLength(cl)
+ if len(contentLens) > 0 {
+ // Logic based on Content-Length
+ n, err := parseContentLength(contentLens)
if err != nil {
return -1, err
}
return n, nil
}
+
header.Del("Content-Length")
if isRequest {
// Determine whether to hang up after sending a request and body, or
// receiving a response and body
-// 'header' is the request headers
+// 'header' is the request headers.
func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
if major < 1 {
return true
return hasClose
}
-// Parse the trailer header
-func fixTrailer(header Header, te []string) (Header, error) {
+// Parse the trailer header.
+func fixTrailer(header Header, chunked bool) (Header, error) {
vv, ok := header["Trailer"]
if !ok {
return nil, nil
}
- if !chunked(te) {
+ if !chunked {
// Trailer and no chunking:
// this is an invalid use case for trailer header.
// Nevertheless, no error will be returned and we
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
if err == nil {
- err = &badStringError{"bad trailer key", key}
+ err = badStringError("bad trailer key", key)
return
}
}
// and then reads the trailer if necessary.
type body struct {
src io.Reader
- hdr interface{} // non-nil (Response or Request) value means read trailer
+ hdr any // non-nil (Response or Request) value means read trailer
r *bufio.Reader // underlying wire-format reader for the trailer
closing bool // is the connection to be closed after reading body?
doEarlyClose bool // whether Close should stop early
var n int64
// Consume the body, or, which will also lead to us reading
// the trailer headers after the body, if present.
- n, err = io.CopyN(ioutil.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
+ n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
if err == io.EOF {
err = nil
}
default:
// Fully consume the body, which will also lead to us reading
// the trailer headers after the body, if present.
- _, err = io.Copy(ioutil.Discard, bodyLocked{b})
+ _, err = io.Copy(io.Discard, bodyLocked{b})
}
b.closed = true
return err
b.onHitEOF = fn
}
-// bodyLocked is a io.Reader reading from a *body when its mutex is
+// bodyLocked is an io.Reader reading from a *body when its mutex is
// already held.
type bodyLocked struct {
b *body
return bl.b.readLocked(p)
}
-// parseContentLength trims whitespace from s and returns -1 if no value
-// is set, or the value if it's >= 0.
-func parseContentLength(cl string) (int64, error) {
- cl = strings.TrimSpace(cl)
- if cl == "" {
+var laxContentLength = godebug.New("httplaxcontentlength")
+
+// parseContentLength checks that the header is valid and then trims
+// whitespace. It returns -1 if no value is set; otherwise it returns
+// the value, which must be >= 0.
+func parseContentLength(clHeaders []string) (int64, error) {
+ if len(clHeaders) == 0 {
return -1, nil
}
- n, err := strconv.ParseInt(cl, 10, 64)
- if err != nil || n < 0 {
- return 0, &badStringError{"bad Content-Length", cl}
- }
- return n, nil
+ cl := textproto.TrimString(clHeaders[0])
+ // The Content-Length must be a valid numeric value.
+ // See: https://datatracker.ietf.org/doc/html/rfc2616/#section-14.13
+ if cl == "" {
+ if laxContentLength.Value() == "1" {
+ laxContentLength.IncNonDefault()
+ return -1, nil
+ }
+ return 0, badStringError("invalid empty Content-Length", cl)
+ }
+ n, err := strconv.ParseUint(cl, 10, 63)
+ if err != nil {
+ return 0, badStringError("bad Content-Length", cl)
+ }
+ return int64(n), nil
}
// finishAsyncByteRead finishes reading the 1-byte sniff
if n == 1 {
p[0] = rres.b
}
+ if err == nil {
+ err = io.EOF
+ }
return
}
-var nopCloserType = reflect.TypeOf(ioutil.NopCloser(nil))
+var nopCloserType = reflect.TypeOf(io.NopCloser(nil))
+var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(struct {
+ io.Reader
+ io.WriterTo
+}{}))
+
+// unwrapNopCloser returns the underlying reader and true if r is a NopCloser;
+// otherwise it returns nil and false.
+func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) {
+ switch reflect.TypeOf(r) {
+ case nopCloserType, nopCloserWriterToType:
+ return reflect.ValueOf(r).Field(0).Interface().(io.Reader), true
+ default:
+ return nil, false
+ }
+}
// isKnownInMemoryReader reports whether r is a type known to not
// block on Read. Its caller uses this as an optional optimization to
case *bytes.Reader, *bytes.Buffer, *strings.Reader:
return true
}
- if reflect.TypeOf(r) == nopCloserType {
- return isKnownInMemoryReader(reflect.ValueOf(r).Field(0).Interface().(io.Reader))
+ if r, ok := unwrapNopCloser(r); ok {
+ return isKnownInMemoryReader(r)
+ }
+ if r, ok := r.(*readTrackingBody); ok {
+ return isKnownInMemoryReader(r.ReadCloser)
}
return false
}