diff --git a/httpbin/handlers.go b/httpbin/handlers.go
index 0feec5c819d3c74d0f17354aaa177f2efdc60bd3..261c460c1f0036bf9e44bb6285d2d8767676f859 100644
--- a/httpbin/handlers.go
+++ b/httpbin/handlers.go
@@ -6,6 +6,7 @@ import (
 	"compress/gzip"
 	"encoding/json"
 	"fmt"
+	"math/rand"
 	"net/http"
 	"strconv"
 	"strings"
@@ -638,3 +639,106 @@ func (h *HTTPBin) ETag(w http.ResponseWriter, r *http.Request) {
 	// https://golang.org/pkg/net/http/#ServeContent
 	http.ServeContent(w, r, "response.json", time.Now(), bytes.NewReader(body))
 }
+
+// Bytes returns N random bytes generated with an optional seed.
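+// For example, GET /bytes/1024?seed=42 returns a deterministic 1KB body.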
+func (h *HTTPBin) Bytes(w http.ResponseWriter, r *http.Request) {
+	handleBytes(w, r, false)
+}
+
+// StreamBytes streams N random bytes generated with an optional seed in chunks
+// of a given size.
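+// For example, GET /stream-bytes/1024?chunk_size=256 writes four 256-byte
+// chunks, flushing after each one.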
+func (h *HTTPBin) StreamBytes(w http.ResponseWriter, r *http.Request) {
+	handleBytes(w, r, true)
+}
+
+// handleBytes consolidates the logic for validating input params of the Bytes
+// and StreamBytes endpoints and knows how to write the response in chunks if
+// streaming is true.
+func handleBytes(w http.ResponseWriter, r *http.Request, streaming bool) {
+	parts := strings.Split(r.URL.Path, "/")
+	if len(parts) != 3 {
+		http.Error(w, "Not found", http.StatusNotFound)
+		return
+	}
+
+	numBytes, err := strconv.Atoi(parts[2])
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
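+	// Restrict the response size to the range [1 byte, 100KB].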
+	if numBytes < 1 {
+		numBytes = 1
+	} else if numBytes > 100*1024 {
+		numBytes = 100 * 1024
+	}
+
+	var chunkSize int
+	var write func([]byte)
+
+	if streaming {
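+		// The body will be written and flushed in chunks of chunk_size bytes;
+		// a non-positive or too-large chunk_size yields a single chunk.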
+		if r.URL.Query().Get("chunk_size") != "" {
+			chunkSize, err = strconv.Atoi(r.URL.Query().Get("chunk_size"))
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+		} else {
+			chunkSize = 10 * 1024
+		}
+
+		write = func(chunk []byte) {
+			w.Write(chunk)
+			// Flush so each chunk is sent immediately rather than buffered.
+			if f, ok := w.(http.Flusher); ok {
+				f.Flush()
+			}
+		}
+	} else {
+		// Content-Length must be set before WriteHeader is called below,
+		// or it would be silently ignored.
+		chunkSize = numBytes
+		w.Header().Set("Content-Length", strconv.Itoa(numBytes))
+		write = func(chunk []byte) { w.Write(chunk) }
+	}
+
+	var seed int64
+	rawSeed := r.URL.Query().Get("seed")
+	if rawSeed != "" {
+		seed, err = strconv.ParseInt(rawSeed, 10, 64)
+		if err != nil {
+			http.Error(w, "invalid seed", http.StatusBadRequest)
+			return
+		}
+	} else {
+		// No explicit seed given; fall back to the current time in nanoseconds
+		// so that closely spaced requests still produce different output.
+		seed = time.Now().UnixNano()
+	}
+
+	src := rand.NewSource(seed)
+	rng := rand.New(src)
+
+	w.Header().Set("Content-Type", "application/octet-stream")
+	w.WriteHeader(http.StatusOK)
+
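+	// Generate the bytes one at a time so that, for a given seed, the output
+	// is identical regardless of chunk size.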
+	var chunk []byte
+	for i := 0; i < numBytes; i++ {
+		chunk = append(chunk, byte(rng.Intn(256)))
+		if len(chunk) == chunkSize {
+			write(chunk)
+			chunk = nil
+		}
+	}
+	if len(chunk) > 0 {
+		write(chunk)
+	}
+}
diff --git a/httpbin/handlers_test.go b/httpbin/handlers_test.go
index 0580736d37d07fe599284647458537f25e452918..824cb6beb8a3aa91f2e0057f400a0057f26ffc3b 100644
--- a/httpbin/handlers_test.go
+++ b/httpbin/handlers_test.go
@@ -1192,9 +1192,14 @@ func TestStream(t *testing.T) {
 			w := httptest.NewRecorder()
 			handler.ServeHTTP(w, r)
 
-			// The stdlib seems to automagically unchunk these responses and
-			// I'm not quite sure how to test this
-			// assertHeader(t, w, "Transfer-Encoding", "chunked")
+			// TODO: The stdlib seems to automagically unchunk these responses
+			// and I'm not quite sure how to test this:
+			//
+			//     assertHeader(t, w, "Transfer-Encoding", "chunked")
+			//
+			// Instead, we assert that we got no Content-Length header, which
+			// is an indication that the Go stdlib streamed the response.
+			assertHeader(t, w, "Content-Length", "")
 
 			var resp *streamResponse
 			var err error
@@ -1691,3 +1696,142 @@ func TestETag(t *testing.T) {
 		})
 	}
 }
+
+func TestBytes(t *testing.T) {
+	t.Run("ok_no_seed", func(t *testing.T) {
+		url := "/bytes/1024"
+		r, _ := http.NewRequest("GET", url, nil)
+		w := httptest.NewRecorder()
+		handler.ServeHTTP(w, r)
+
+		assertStatusCode(t, w, http.StatusOK)
+		assertContentType(t, w, "application/octet-stream")
+		if w.Body.Len() != 1024 {
+			t.Errorf("expected body of length 1024, got %d", w.Body.Len())
+		}
+	})
+
+	t.Run("ok_seed", func(t *testing.T) {
+		url := "/bytes/16?seed=1234567890"
+		r, _ := http.NewRequest("GET", url, nil)
+		w := httptest.NewRecorder()
+		handler.ServeHTTP(w, r)
+
+		assertStatusCode(t, w, http.StatusOK)
+		assertContentType(t, w, "application/octet-stream")
+
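+		// The body must match the deterministic byte sequence that math/rand
+		// produces for seed 1234567890.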
+		bodyHex := fmt.Sprintf("%x", w.Body.Bytes())
+		wantHex := "bfcd2afa15a2b372c707985a22024a8e"
+		if bodyHex != wantHex {
+			t.Errorf("expected body in hexadecimal = %v, got %v", wantHex, bodyHex)
+		}
+	})
+
+	var edgeCaseTests = []struct {
+		url                   string
+		expectedContentLength int
+	}{
+		{"/bytes/-1", 1},
+		{"/bytes/99999999", 100 * 1024},
+
+		// negative seed allowed
+		{"/bytes/16?seed=-12345", 16},
+	}
+	for _, test := range edgeCaseTests {
+		t.Run("edge"+test.url, func(t *testing.T) {
+			r, _ := http.NewRequest("GET", test.url, nil)
+			w := httptest.NewRecorder()
+			handler.ServeHTTP(w, r)
+			assertStatusCode(t, w, http.StatusOK)
+			assertHeader(t, w, "Content-Length", fmt.Sprintf("%d", test.expectedContentLength))
+			if len(w.Body.Bytes()) != test.expectedContentLength {
+				t.Errorf("expected body of length %d, got %d", test.expectedContentLength, len(w.Body.Bytes()))
+			}
+		})
+	}
+
+	var badTests = []struct {
+		url            string
+		expectedStatus int
+	}{
+		{"/bytes", http.StatusNotFound},
+		{"/bytes/16/foo", http.StatusNotFound},
+
+		{"/bytes/foo", http.StatusBadRequest},
+		{"/bytes/3.14", http.StatusBadRequest},
+
+		{"/bytes/16?seed=12345678901234567890", http.StatusBadRequest}, // seed too big
+		{"/bytes/16?seed=foo", http.StatusBadRequest},
+		{"/bytes/16?seed=3.14", http.StatusBadRequest},
+	}
+	for _, test := range badTests {
+		t.Run("bad"+test.url, func(t *testing.T) {
+			r, _ := http.NewRequest("GET", test.url, nil)
+			w := httptest.NewRecorder()
+			handler.ServeHTTP(w, r)
+			assertStatusCode(t, w, test.expectedStatus)
+		})
+	}
+}
+
+func TestStreamBytes(t *testing.T) {
+	var okTests = []struct {
+		url                   string
+		expectedContentLength int
+	}{
+		{"/stream-bytes/256", 256},
+		{"/stream-bytes/256?chunk_size=1", 256},
+		{"/stream-bytes/256?chunk_size=256", 256},
+		{"/stream-bytes/256?chunk_size=7", 256},
+
+		// too-large chunk size is okay
+		{"/stream-bytes/256?chunk_size=512", 256},
+
+		// as is negative chunk size
+		{"/stream-bytes/256?chunk_size=-10", 256},
+	}
+	for _, test := range okTests {
+		t.Run("ok"+test.url, func(t *testing.T) {
+			r, _ := http.NewRequest("GET", test.url, nil)
+			w := httptest.NewRecorder()
+			handler.ServeHTTP(w, r)
+
+			// TODO: The stdlib seems to automagically unchunk these responses
+			// and I'm not quite sure how to test this:
+			//
+			//     assertHeader(t, w, "Transfer-Encoding", "chunked")
+			//
+			// Instead, we assert that we got no Content-Length header, which
+			// is an indication that the Go stdlib streamed the response.
+			assertHeader(t, w, "Content-Length", "")
+
+			if len(w.Body.Bytes()) != test.expectedContentLength {
+				t.Fatalf("expected body of length %d, got %d", test.expectedContentLength, len(w.Body.Bytes()))
+			}
+		})
+	}
+
+	var badTests = []struct {
+		url  string
+		code int
+	}{
+		{"/stream-bytes", http.StatusNotFound},
+		{"/stream-bytes/10/foo", http.StatusNotFound},
+
+		{"/stream-bytes/foo", http.StatusBadRequest},
+		{"/stream-bytes/3.1415", http.StatusBadRequest},
+
+		{"/stream-bytes/16?chunk_size=foo", http.StatusBadRequest},
+		{"/stream-bytes/16?chunk_size=3.14", http.StatusBadRequest},
+	}
+	for _, test := range badTests {
+		t.Run("bad"+test.url, func(t *testing.T) {
+			r, _ := http.NewRequest("GET", test.url, nil)
+			w := httptest.NewRecorder()
+			handler.ServeHTTP(w, r)
+			assertStatusCode(t, w, test.code)
+		})
+	}
+}
diff --git a/httpbin/httpbin.go b/httpbin/httpbin.go
index 70da32ee1d1a7fc099d35e675c469f55d8eb96bd..9cabf8d659b7824ea913886d15e0461f32ab03e5 100644
--- a/httpbin/httpbin.go
+++ b/httpbin/httpbin.go
@@ -120,7 +120,10 @@ func (h *HTTPBin) Handler() http.Handler {
 	mux.HandleFunc("/stream/", h.Stream)
 	mux.HandleFunc("/delay/", h.Delay)
 	mux.HandleFunc("/drip", h.Drip)
+
 	mux.HandleFunc("/range/", h.Range)
+	mux.HandleFunc("/bytes/", h.Bytes)
+	mux.HandleFunc("/stream-bytes/", h.StreamBytes)
 
 	mux.HandleFunc("/html", h.HTML)
 	mux.HandleFunc("/robots.txt", h.Robots)
@@ -142,6 +145,8 @@ func (h *HTTPBin) Handler() http.Handler {
 	mux.HandleFunc("/relative-redirect", http.NotFound)
 	mux.HandleFunc("/status", http.NotFound)
 	mux.HandleFunc("/stream", http.NotFound)
+	mux.HandleFunc("/bytes", http.NotFound)
+	mux.HandleFunc("/stream-bytes", http.NotFound)
 
 	return logger(cors(mux))
 }