"bytes"
"errors"
"fmt"
+ "internal/godebug"
"io"
- "io/ioutil"
+ "net/http/httptrace"
"net/http/internal"
+ "net/http/internal/ascii"
"net/textproto"
+ "reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
- "golang_org/x/net/http/httpguts"
+ "golang.org/x/net/http/httpguts"
)
// ErrLineTooLong is returned when reading request or response bodies
return 1, io.EOF
}
-// transferBodyReader is an io.Reader that reads from tw.Body
-// and records any non-EOF error in tw.bodyReadError.
-// It is exactly 1 pointer wide to avoid allocations into interfaces.
-type transferBodyReader struct{ tw *transferWriter }
-
-func (br transferBodyReader) Read(p []byte) (n int, err error) {
- n, err = br.tw.Body.Read(p)
- if err != nil && err != io.EOF {
- br.tw.bodyReadError = err
- }
- return
-}
-
// transferWriter inspects the fields of a user-supplied Request or Response,
// sanitizes them without changing the user object and provides methods for
// writing the respective header, body and trailer in wire format.
ByteReadCh chan readResult // non-nil if probeRequestBody called
}
-func newTransferWriter(r interface{}) (t *transferWriter, err error) {
+func newTransferWriter(r any) (t *transferWriter, err error) {
t = &transferWriter{}
// Extract relevant fields
if t.ContentLength < 0 && len(t.TransferEncoding) == 0 && t.shouldSendChunkedRequestBody() {
t.TransferEncoding = []string{"chunked"}
}
+ // If there's a body, conservatively flush the headers
+ // to any bufio.Writer we're writing to, just in case
+ // the server needs the headers early, before we copy
+ // the body and possibly block. We make an exception
+ // for the common standard library in-memory types,
+ // though, to avoid unnecessary TCP packets on the
+ // wire. (Issue 22088.)
+ if t.ContentLength != 0 && !isKnownInMemoryReader(t.Body) {
+ t.FlushHeaders = true
+ }
+
atLeastHTTP11 = true // Transport requests are always 1.1 or 2.0
case *Response:
t.IsResponse = true
// servers. See Issue 18257, as one example.
//
// The only reason we'd send such a request is if the user set the Body to a
-// non-nil value (say, ioutil.NopCloser(bytes.NewReader(nil))) and didn't
+// non-nil value (say, io.NopCloser(bytes.NewReader(nil))) and didn't
// set ContentLength, or NewRequest set it to -1 (unknown), so then we assume
// there's bytes to send.
//
if t.ContentLength >= 0 || t.Body == nil { // redundant checks; caller did them
return false
}
+ if t.Method == "CONNECT" {
+ return false
+ }
if requestMethodUsuallyLacksBody(t.Method) {
// Only probe the Request.Body for GET/HEAD/DELETE/etc
// requests, because it's only those types of requests
// headers before the pipe is fed data), we need to be careful and bound how
// long we wait for it. This delay will only affect users if all the following
// are true:
-// * the request body blocks
-// * the content length is not set (or set to -1)
-// * the method doesn't usually have a body (GET, HEAD, DELETE, ...)
-// * there is no transfer-encoding=chunked already set.
+// - the request body blocks
+// - the content length is not set (or set to -1)
+// - the method doesn't usually have a body (GET, HEAD, DELETE, ...)
+// - there is no transfer-encoding=chunked already set.
+//
// In other words, this delay will not normally affect anybody, and there
// are workarounds if it does.
func (t *transferWriter) probeRequestBody() {
rres.b = buf[0]
}
t.ByteReadCh <- rres
+ close(t.ByteReadCh)
}(t.Body)
timer := time.NewTimer(200 * time.Millisecond)
select {
return false
}
// Many servers expect a Content-Length for these methods
- if t.Method == "POST" || t.Method == "PUT" {
+ if t.Method == "POST" || t.Method == "PUT" || t.Method == "PATCH" {
return true
}
if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
return false
}
-func (t *transferWriter) WriteHeader(w io.Writer) error {
+func (t *transferWriter) writeHeader(w io.Writer, trace *httptrace.ClientTrace) error {
if t.Close && !hasToken(t.Header.get("Connection"), "close") {
if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil {
return err
}
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Connection", []string{"close"})
+ }
}
// Write Content-Length and/or Transfer-Encoding whose values are a
if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil {
return err
}
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Content-Length", []string{strconv.FormatInt(t.ContentLength, 10)})
+ }
} else if chunked(t.TransferEncoding) {
if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil {
return err
}
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Transfer-Encoding", []string{"chunked"})
+ }
}
// Write Trailer header
k = CanonicalHeaderKey(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
- return &badStringError{"invalid Trailer key", k}
+ return badStringError("invalid Trailer key", k)
}
keys = append(keys, k)
}
if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil {
return err
}
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Trailer", keys)
+ }
}
}
return nil
}
-func (t *transferWriter) WriteBody(w io.Writer) error {
- var err error
+// always closes t.BodyCloser
+func (t *transferWriter) writeBody(w io.Writer) (err error) {
var ncopy int64
+ closed := false
+ defer func() {
+ if closed || t.BodyCloser == nil {
+ return
+ }
+ if closeErr := t.BodyCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }()
- // Write body
+ // Write body. We "unwrap" the body first if it was wrapped in a
+ // nopCloser or readTrackingBody. This is to ensure that we can take advantage of
+ // OS-level optimizations in the event that the body is an
+ // *os.File.
if t.Body != nil {
- var body = transferBodyReader{t}
+ var body = t.unwrapBody()
if chunked(t.TransferEncoding) {
if bw, ok := w.(*bufio.Writer); ok && !t.IsResponse {
w = &internal.FlushAfterChunkWriter{Writer: bw}
}
cw := internal.NewChunkedWriter(w)
- _, err = io.Copy(cw, body)
+ _, err = t.doBodyCopy(cw, body)
if err == nil {
err = cw.Close()
}
} else if t.ContentLength == -1 {
- ncopy, err = io.Copy(w, body)
+ dst := w
+ if t.Method == "CONNECT" {
+ dst = bufioFlushWriter{dst}
+ }
+ ncopy, err = t.doBodyCopy(dst, body)
} else {
- ncopy, err = io.Copy(w, io.LimitReader(body, t.ContentLength))
+ ncopy, err = t.doBodyCopy(w, io.LimitReader(body, t.ContentLength))
if err != nil {
return err
}
var nextra int64
- nextra, err = io.Copy(ioutil.Discard, body)
+ nextra, err = t.doBodyCopy(io.Discard, body)
ncopy += nextra
}
if err != nil {
}
}
if t.BodyCloser != nil {
+ closed = true
if err := t.BodyCloser.Close(); err != nil {
return err
}
return err
}
+// doBodyCopy wraps a copy operation, with any resulting error also
+// being saved in bodyReadError.
+//
+// This function is only intended for use in writeBody.
+func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) {
+ // Borrow a scratch buffer from the pool so each body copy doesn't
+ // allocate the fresh intermediate buffer io.Copy would otherwise
+ // create internally.
+ bufp := copyBufPool.Get().(*[]byte)
+ buf := *bufp
+ defer copyBufPool.Put(bufp)
+
+ n, err = io.CopyBuffer(dst, src, buf)
+ if err != nil && err != io.EOF {
+ // io.EOF is the normal end-of-body signal, not a read failure,
+ // so only non-EOF errors are recorded in bodyReadError.
+ t.bodyReadError = err
+ }
+ return
+}
+
+// unwrapBody unwraps the body's inner reader if it's a
+// nopCloser. This is to ensure that body writes sourced from local
+// files (*os.File types) are properly optimized.
+//
+// This function is only intended for use in writeBody.
+func (t *transferWriter) unwrapBody() io.Reader {
+ // A NopCloser wrapper would hide an *os.File's WriterTo/ReaderFrom
+ // fast paths from io.Copy; strip it and copy from the inner reader.
+ if r, ok := unwrapNopCloser(t.Body); ok {
+ return r
+ }
+ if r, ok := t.Body.(*readTrackingBody); ok {
+ // NOTE(review): didRead marks that the body has been consumed —
+ // presumably so retry logic elsewhere can tell the request is no
+ // longer safely replayable; confirm against readTrackingBody's users.
+ r.didRead = true
+ return r.ReadCloser
+ }
+ return t.Body
+}
+
type transferReader struct {
// Input
Header Header
ProtoMajor int
ProtoMinor int
// Output
- Body io.ReadCloser
- ContentLength int64
- TransferEncoding []string
- Close bool
- Trailer Header
+ Body io.ReadCloser
+ ContentLength int64
+ Chunked bool
+ Close bool
+ Trailer Header
}
func (t *transferReader) protoAtLeast(m, n int) bool {
var (
suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"}
suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"}
+ excludedHeadersNoBody = map[string]bool{"Content-Length": true, "Transfer-Encoding": true}
)
func suppressedHeaders(status int) []string {
}
// msg is *Request or *Response.
-func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
+func readTransfer(msg any, r *bufio.Reader) (err error) {
t := &transferReader{RequestMethod: "GET"}
// Unify input
t.ProtoMajor, t.ProtoMinor = 1, 1
}
- // Transfer encoding, content length
- err = t.fixTransferEncoding()
- if err != nil {
+ // Transfer-Encoding: chunked, and overriding Content-Length.
+ if err := t.parseTransferEncoding(); err != nil {
return err
}
- realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
+ realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
if err != nil {
return err
}
if isResponse && t.RequestMethod == "HEAD" {
- if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil {
+ if n, err := parseContentLength(t.Header["Content-Length"]); err != nil {
return err
} else {
t.ContentLength = n
}
// Trailer
- t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
+ t.Trailer, err = fixTrailer(t.Header, t.Chunked)
if err != nil {
return err
}
// See RFC 7230, section 3.3.
switch msg.(type) {
case *Response:
- if realLength == -1 &&
- !chunked(t.TransferEncoding) &&
- bodyAllowedForStatus(t.StatusCode) {
+ if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
// Unbounded body.
t.Close = true
}
// Prepare body reader. ContentLength < 0 means chunked encoding
// or close connection when finished, since multipart is not supported yet
switch {
- case chunked(t.TransferEncoding):
- if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
+ case t.Chunked:
+ if isResponse && (noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode)) {
t.Body = NoBody
} else {
t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
case *Request:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
rr.Close = t.Close
rr.Trailer = t.Trailer
case *Response:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
- rr.TransferEncoding = t.TransferEncoding
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
rr.Close = t.Close
rr.Trailer = t.Trailer
}
return nil
}
-// Checks whether chunked is part of the encodings stack
+// Checks whether chunked is part of the encodings stack.
func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
// Checks whether the encoding is explicitly "identity".
func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
-// fixTransferEncoding sanitizes t.TransferEncoding, if needed.
-func (t *transferReader) fixTransferEncoding() error {
+// unsupportedTEError reports unsupported transfer-encodings.
+type unsupportedTEError struct {
+ err string
+}
+
+// Error implements the error interface.
+func (uste *unsupportedTEError) Error() string {
+ return uste.err
+}
+
+// isUnsupportedTEError checks if the error is of type
+// unsupportedTEError. It is usually invoked with a non-nil err.
+func isUnsupportedTEError(err error) bool {
+ // A direct type assertion suffices here; these errors are created
+ // in this file and are never wrapped before being checked.
+ _, ok := err.(*unsupportedTEError)
+ return ok
+}
+
+// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
+func (t *transferReader) parseTransferEncoding() error {
raw, present := t.Header["Transfer-Encoding"]
if !present {
return nil
return nil
}
- encodings := strings.Split(raw[0], ",")
- te := make([]string, 0, len(encodings))
- // TODO: Even though we only support "identity" and "chunked"
- // encodings, the loop below is designed with foresight. One
- // invariant that must be maintained is that, if present,
- // chunked encoding must always come first.
- for _, encoding := range encodings {
- encoding = strings.ToLower(strings.TrimSpace(encoding))
- // "identity" encoding is not recorded
- if encoding == "identity" {
- break
- }
- if encoding != "chunked" {
- return &badStringError{"unsupported transfer encoding", encoding}
- }
- te = te[0 : len(te)+1]
- te[len(te)-1] = encoding
- }
- if len(te) > 1 {
- return &badStringError{"too many transfer encodings", strings.Join(te, ",")}
- }
- if len(te) > 0 {
- // RFC 7230 3.3.2 says "A sender MUST NOT send a
- // Content-Length header field in any message that
- // contains a Transfer-Encoding header field."
- //
- // but also:
- // "If a message is received with both a
- // Transfer-Encoding and a Content-Length header
- // field, the Transfer-Encoding overrides the
- // Content-Length. Such a message might indicate an
- // attempt to perform request smuggling (Section 9.5)
- // or response splitting (Section 9.4) and ought to be
- // handled as an error. A sender MUST remove the
- // received Content-Length field prior to forwarding
- // such a message downstream."
- //
- // Reportedly, these appear in the wild.
- delete(t.Header, "Content-Length")
- t.TransferEncoding = te
- return nil
+ // Like nginx, we only support a single Transfer-Encoding header field, and
+ // only if set to "chunked". This is one of the most security sensitive
+ // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
+ // strict and simple.
+ if len(raw) != 1 {
+ return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
+ }
+ if !ascii.EqualFold(raw[0], "chunked") {
+ return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
}
+ // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
+ // in any message that contains a Transfer-Encoding header field."
+ //
+ // but also: "If a message is received with both a Transfer-Encoding and a
+ // Content-Length header field, the Transfer-Encoding overrides the
+ // Content-Length. Such a message might indicate an attempt to perform
+ // request smuggling (Section 9.5) or response splitting (Section 9.4) and
+ // ought to be handled as an error. A sender MUST remove the received
+ // Content-Length field prior to forwarding such a message downstream."
+ //
+ // Reportedly, these appear in the wild.
+ delete(t.Header, "Content-Length")
+
+ t.Chunked = true
return nil
}
// Determine the expected body length, using RFC 7230 Section 3.3. This
// function is not a method, because ultimately it should be shared by
// ReadResponse and ReadRequest.
-func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
+func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
isRequest := !isResponse
contentLens := header["Content-Length"]
// Content-Length headers if they differ in value.
// If there are dups of the value, remove the dups.
// See Issue 16490.
- first := strings.TrimSpace(contentLens[0])
+ first := textproto.TrimString(contentLens[0])
for _, ct := range contentLens[1:] {
- if first != strings.TrimSpace(ct) {
+ if first != textproto.TrimString(ct) {
return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
}
}
}
// Logic based on response type or status
- if noResponseBodyExpected(requestMethod) {
- // For HTTP requests, as part of hardening against request
- // smuggling (RFC 7230), don't allow a Content-Length header for
- // methods which don't permit bodies. As an exception, allow
- // exactly one Content-Length header if its value is "0".
- if isRequest && len(contentLens) > 0 && !(len(contentLens) == 1 && contentLens[0] == "0") {
- return 0, fmt.Errorf("http: method cannot contain a Content-Length; got %q", contentLens)
- }
+ if isResponse && noResponseBodyExpected(requestMethod) {
return 0, nil
}
if status/100 == 1 {
}
// Logic based on Transfer-Encoding
- if chunked(te) {
+ if chunked {
return -1, nil
}
- // Logic based on Content-Length
- var cl string
- if len(contentLens) == 1 {
- cl = strings.TrimSpace(contentLens[0])
- }
- if cl != "" {
- n, err := parseContentLength(cl)
+ if len(contentLens) > 0 {
+ // Logic based on Content-Length
+ n, err := parseContentLength(contentLens)
if err != nil {
return -1, err
}
return n, nil
}
+
header.Del("Content-Length")
if isRequest {
// Determine whether to hang up after sending a request and body, or
// receiving a response and body
-// 'header' is the request headers
+// 'header' is the request headers.
func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
if major < 1 {
return true
return hasClose
}
-// Parse the trailer header
-func fixTrailer(header Header, te []string) (Header, error) {
+// Parse the trailer header.
+func fixTrailer(header Header, chunked bool) (Header, error) {
vv, ok := header["Trailer"]
if !ok {
return nil, nil
}
+ if !chunked {
+ // Trailer and no chunking:
+ // this is an invalid use case for trailer header.
+ // Nevertheless, no error will be returned and we
+ // let users decide if this is a valid HTTP message.
+ // The Trailer header will be kept in Response.Header
+ // but not populate Response.Trailer.
+ // See issue #27197.
+ return nil, nil
+ }
header.Del("Trailer")
trailer := make(Header)
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
if err == nil {
- err = &badStringError{"bad trailer key", key}
+ err = badStringError("bad trailer key", key)
return
}
}
if len(trailer) == 0 {
return nil, nil
}
- if !chunked(te) {
- // Trailer and no chunking
- return nil, ErrUnexpectedTrailer
- }
return trailer, nil
}
// and then reads the trailer if necessary.
type body struct {
src io.Reader
- hdr interface{} // non-nil (Response or Request) value means read trailer
+ hdr any // non-nil (Response or Request) value means read trailer
r *bufio.Reader // underlying wire-format reader for the trailer
closing bool // is the connection to be closed after reading body?
doEarlyClose bool // whether Close should stop early
// no trailer and closing the connection next.
// no point in reading to EOF.
case b.doEarlyClose:
- // Read up to maxPostHandlerReadBytes bytes of the body, looking for
+ // Read up to maxPostHandlerReadBytes bytes of the body, looking
// for EOF (and trailers), so we can re-use this connection.
if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
// There was a declared Content-Length, and we have more bytes remaining
var n int64
// Consume the body, or, which will also lead to us reading
// the trailer headers after the body, if present.
- n, err = io.CopyN(ioutil.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
+ n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
if err == io.EOF {
err = nil
}
default:
// Fully consume the body, which will also lead to us reading
// the trailer headers after the body, if present.
- _, err = io.Copy(ioutil.Discard, bodyLocked{b})
+ _, err = io.Copy(io.Discard, bodyLocked{b})
}
b.closed = true
return err
b.onHitEOF = fn
}
-// bodyLocked is a io.Reader reading from a *body when its mutex is
+// bodyLocked is an io.Reader reading from a *body when its mutex is
// already held.
type bodyLocked struct {
b *body
return bl.b.readLocked(p)
}
-// parseContentLength trims whitespace from s and returns -1 if no value
-// is set, or the value if it's >= 0.
-func parseContentLength(cl string) (int64, error) {
- cl = strings.TrimSpace(cl)
- if cl == "" {
+// laxContentLength, when set to "1" via GODEBUG=httplaxcontentlength=1,
+// restores the older, lenient behavior of treating an empty
+// Content-Length header value as if the header were absent.
+var laxContentLength = godebug.New("httplaxcontentlength")
+
+// parseContentLength checks that the header is valid and then trims
+// whitespace. It returns -1 if no Content-Length header is present,
+// otherwise the parsed value (always >= 0), or an error for invalid
+// or negative values.
+func parseContentLength(clHeaders []string) (int64, error) {
+ if len(clHeaders) == 0 {
 return -1, nil
 }
- n, err := strconv.ParseInt(cl, 10, 64)
- if err != nil || n < 0 {
- return 0, &badStringError{"bad Content-Length", cl}
- }
- return n, nil
+ cl := textproto.TrimString(clHeaders[0])
+ // The Content-Length must be a valid numeric value.
+ // See: https://datatracker.ietf.org/doc/html/rfc2616/#section-14.13
+ if cl == "" {
+ if laxContentLength.Value() == "1" {
+ laxContentLength.IncNonDefault()
+ return -1, nil
+ }
+ return 0, badStringError("invalid empty Content-Length", cl)
+ }
+ // ParseUint with bitSize 63 rejects a leading minus sign and any
+ // value that would overflow int64, in a single check.
+ n, err := strconv.ParseUint(cl, 10, 63)
+ if err != nil {
+ return 0, badStringError("bad Content-Length", cl)
+ }
+ return int64(n), nil
+}
// finishAsyncByteRead finishes reading the 1-byte sniff
if n == 1 {
p[0] = rres.b
}
+ if err == nil {
+ err = io.EOF
+ }
+ return
+}
+
+// io.NopCloser returns one of two unexported wrapper types, depending
+// on whether the wrapped reader also implements io.WriterTo. Both
+// concrete types are captured here (via reflection, since they are
+// unexported) so unwrapNopCloser can recognize either.
+var nopCloserType = reflect.TypeOf(io.NopCloser(nil))
+var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(struct {
+ io.Reader
+ io.WriterTo
+}{}))
+
+// unwrapNopCloser returns the underlying reader and true if r is a
+// NopCloser; otherwise it returns nil and false.
+func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) {
+ switch reflect.TypeOf(r) {
+ case nopCloserType, nopCloserWriterToType:
+ // Both wrapper structs embed the wrapped reader as field 0.
+ return reflect.ValueOf(r).Field(0).Interface().(io.Reader), true
+ default:
+ return nil, false
+ }
+}
+
+// isKnownInMemoryReader reports whether r is a type known to not
+// block on Read. Its caller uses this as an optional optimization to
+// send fewer TCP packets.
+func isKnownInMemoryReader(r io.Reader) bool {
+ switch r.(type) {
+ case *bytes.Reader, *bytes.Buffer, *strings.Reader:
+ return true
+ }
+ // Peel off known wrappers (NopCloser, readTrackingBody) and check
+ // the reader they contain.
+ if r, ok := unwrapNopCloser(r); ok {
+ return isKnownInMemoryReader(r)
+ }
+ if r, ok := r.(*readTrackingBody); ok {
+ return isKnownInMemoryReader(r.ReadCloser)
+ }
+ return false
+}
+
+// bufioFlushWriter is an io.Writer wrapper that flushes all writes
+// on its wrapped writer if it's a *bufio.Writer. It is used when
+// copying an unbounded body (e.g. for CONNECT requests) so that each
+// chunk of body data reaches the peer promptly instead of sitting in
+// the buffer.
+type bufioFlushWriter struct{ w io.Writer }
+
+func (fw bufioFlushWriter) Write(p []byte) (n int, err error) {
+ n, err = fw.w.Write(p)
+ // Only flush if some bytes were actually buffered; a Write error
+ // takes precedence over any error from the Flush.
+ if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok {
+ ferr := bw.Flush()
+ if ferr != nil && err == nil {
+ err = ferr
+ }
+ }
+ return
+}