does your proxy make TOO MUCH NOISE?!
Clean up logging a bit by introducing 3 loggers (info, debug, warn)
that let the user control the logging detail. Most of the noise is now
hidden behind a -debug flag or a BOLT_PROXY_DEBUG environment
variable.
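
The shape of the change, condensed into a standalone sketch (the logger names, prefixes, and outputs mirror the proxy.go diff further down; the small main is only for illustration):

```go
package main

import (
	"io/ioutil"
	"log"
	"os"
)

var (
	debug *log.Logger
	info  *log.Logger
	warn  *log.Logger
)

func main() {
	// Debug mode is driven by the environment here; proxy.go also
	// wires it to the -debug flag.
	_, debugMode := os.LookupEnv("BOLT_PROXY_DEBUG")

	// Informational messages go to stdout, warnings to stderr.
	info = log.New(os.Stdout, "INFO ", log.Ldate|log.Ltime|log.Lmsgprefix)
	warn = log.New(os.Stderr, "WARN ", log.Ldate|log.Ltime|log.Lmsgprefix)

	// Debug chatter is discarded entirely unless debug mode is on.
	if debugMode {
		debug = log.New(os.Stdout, "DEBUG ", log.Ldate|log.Ltime|log.Lmsgprefix)
	} else {
		debug = log.New(ioutil.Discard, "DEBUG ", 0)
	}

	info.Println("always visible")
	debug.Println("only visible in debug mode")
	warn.Println("goes to stderr")
}
```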

Uncovered some areas that need some cleanup. Joy.
voutilad committed Dec 16, 2020

1 parent d6cd147 commit b953a88
Showing 6 changed files with 171 additions and 91 deletions.
30 changes: 27 additions & 3 deletions README.md
@@ -105,6 +105,8 @@ Usage of ./bolt-proxy:
host:port to bind to (default "localhost:8888")
-cert string
x509 certificate
-debug
enable debug logging
-key string
x509 private key
-pass string
@@ -124,12 +126,28 @@ configuration easier in the "cloud":
- `BOLT_PROXY_PASSWORD` -- password for the backend neo4j user for use
by the monitor
- `BOLT_PROXY_CERT` -- path to the x509 certificate (.pem) file
- `BOLE_PROXY_KEY` -- path to the x509 private key file
- `BOLT_PROXY_KEY` -- path to the x509 private key file
- `BOLT_PROXY_DEBUG` -- set to any value to enable debug mode/logging

### Lifecycle
When you start the proxy, it'll immediately try to connect to the
target backend using the provided bolt uri, username, and password. It
will then begin monitoring the routing table.
target backend using the provided bolt uri, username, and
password. The server version is extracted and it will then begin
monitoring the routing table.

When clients connect, the following occurs:

1. The proxy determines the connection type (direct vs. websocket)
2. The bolt handshake occurs, negotiating a version the client and
server can both speak.
3. The proxy brokers authentication with one of the backend servers.
4. If auth succeeds, the proxy then authenticates the client with all
other servers in the cluster.
5. The main client event loop kicks in, dealing with mapping bolt
messages from the client to the appropriate backend server based on
the target database and transaction type.
6. If all parties enjoy themselves, they say goodbye and everyone
thinks fondly of their experience.
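
As a rough, standalone sketch of steps 1 and 2 (not the actual implementation — the proxy uses `bolt.ValidateMagic` and `bolt.ValidateHandshake`, and the real handshake has more to it), the proxy peeks at the first bytes to tell a direct Bolt client from an HTTP/WebSocket one, then settles on a Bolt version both sides speak:

```go
package main

import (
	"bytes"
	"fmt"
)

// The Bolt magic preamble that every direct Bolt client sends first.
var boltMagic = []byte{0x60, 0x60, 0xb0, 0x17}

// classify peeks at the first bytes from a client and guesses the
// connection type (step 1).
func classify(preamble []byte) string {
	switch {
	case len(preamble) >= 4 && bytes.Equal(preamble[:4], boltMagic):
		return "direct bolt"
	case len(preamble) >= 4 && bytes.Equal(preamble[:4], []byte("GET ")):
		return "http (health check or websocket upgrade)"
	default:
		return "gibberish"
	}
}

// negotiate is a much-simplified take on step 2: scan the client's
// 4-byte version proposals for one the server also speaks.
func negotiate(handshake, serverVersion []byte) ([]byte, error) {
	for i := 0; i+4 <= len(handshake); i += 4 {
		if bytes.Equal(handshake[i:i+4], serverVersion) {
			return serverVersion, nil
		}
	}
	return nil, fmt.Errorf("no mutually supported bolt version")
}

func main() {
	fmt.Println(classify(boltMagic))
	fmt.Println(classify([]byte("GET /health HTTP/1.1\r\n")))
}
```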

### Connecting
You then tell your client application (e.g. cypher-shell, Browser) to
@@ -170,5 +188,11 @@ A bad request takes 2 forms and each has a different result:
2. A request to `/health` that's not a valid HTTP request will result
in an `HTTP/1.1 400 Bad Request` response.

### Logging
Some very verbose logging is available behind the `-debug` flag or the
`BOLT_PROXY_DEBUG` environment variable. It logs most Bolt chatter
(with messages truncated to keep lines manageable) and details the
state changes of the event loops. Enjoy paying your log vendor!
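
For example (connection settings omitted for brevity):

```
# enable debug logging with the flag...
./bolt-proxy -debug

# ...or with the environment variable (any value enables it)
BOLT_PROXY_DEBUG=1 ./bolt-proxy
```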

# License
Provided under MIT. See [LICENSE](./LICENSE).
21 changes: 10 additions & 11 deletions backend/auth.go
@@ -5,7 +5,7 @@ package backend
import (
"crypto/tls"
"errors"
"log"
"fmt"
"net"

"github.com/voutilad/bolt-proxy/bolt"
@@ -45,34 +45,34 @@ func authClient(hello, version []byte, network, address string, useTls bool) (ne
0x00, 0x00, 0x00, 0x00}...)
_, err = conn.Write(handshake)
if err != nil {
log.Println("couldn't send handshake to auth server", address)
msg := fmt.Sprintf("couldn't send handshake to auth server %s: %s", address, err)
conn.Close()
return nil, err
return nil, errors.New(msg)
}

// Server should pick a version and provide as 4-byte array
// TODO: we eventually need version handling...for now ignore :-/
buf := make([]byte, 256)
n, err := conn.Read(buf)
if err != nil || n != 4 {
log.Println("didn't get valid handshake response from auth server", address)
msg := fmt.Sprintf("didn't get valid handshake response from auth server %s: %s", address, err)
conn.Close()
return nil, err
return nil, errors.New(msg)
}

// Try performing the bolt auth using the given hello message
_, err = conn.Write(hello)
if err != nil {
log.Println("failed to send hello buffer to server", address)
msg := fmt.Sprintf("failed to send hello buffer to server %s: %s", address, err)
conn.Close()
return nil, err
return nil, errors.New(msg)
}

n, err = conn.Read(buf)
if err != nil {
log.Println("failed to get auth response from auth server", address)
msg := fmt.Sprintf("failed to get auth response from auth server %s: %s", address, err)
conn.Close()
return nil, err
return nil, errors.New(msg)
}

msg := bolt.IdentifyType(buf)
@@ -92,9 +92,8 @@ func authClient(hello, version []byte, network, address string, useTls bool) (ne
return nil, errors.New(failmsg)
}
}
log.Printf("!!! auth failure, but could not parse response: %v\n", r)
conn.Close()
return nil, errors.New("unknown auth failure")
return nil, errors.New("could not parse auth server response")
} else if msg == bolt.SuccessMsg {
// The only happy outcome! Keep conn open.
return conn, nil
22 changes: 12 additions & 10 deletions backend/backend.go
@@ -13,11 +13,12 @@ type Backend struct {
monitor *Monitor
routingTable *RoutingTable
tls bool
log *log.Logger
// map of principals -> hosts -> connections
connectionPool map[string]map[string]bolt.BoltConn
}

func NewBackend(username, password string, uri string, hosts ...string) (*Backend, error) {
func NewBackend(logger *log.Logger, username, password string, uri string, hosts ...string) (*Backend, error) {
monitor, err := NewMonitor(username, password, uri, hosts...)
if err != nil {
return nil, err
@@ -35,6 +36,7 @@ func NewBackend(username, password string, uri string, hosts ...string) (*Backen
monitor: monitor,
routingTable: routingTable,
tls: tls,
log: logger,
}, nil
}

@@ -47,17 +49,17 @@ func (b *Backend) RoutingTable() *RoutingTable {
panic("attempting to use uninitialized BackendClient")
}

log.Println("checking routing table...")
b.log.Println("checking routing table...")
if b.routingTable.Expired() {
select {
case rt := <-b.monitor.C:
b.routingTable = rt
case <-time.After(60 * time.Second):
log.Fatal("timeout waiting for new routing table!")
b.log.Fatal("timeout waiting for new routing table!")
}
}

log.Println("using routing table")
b.log.Println("using routing table")
return b.routingTable
}

@@ -75,14 +77,14 @@ func (b *Backend) Authenticate(hello *bolt.Message) (map[string]bolt.BoltConn, e
// TODO: clean up this api...push the dirt into Bolt package
msg, pos, err := bolt.ParseTinyMap(hello.Data[4:])
if err != nil {
log.Printf("XXX pos: %d, hello map: %#v\n", pos, msg)
b.log.Printf("XXX pos: %d, hello map: %#v\n", pos, msg)
panic(err)
}
principal, ok := msg["principal"].(string)
if !ok {
panic("principal in Hello message was not a string")
}
log.Println("found principal:", principal)
b.log.Println("found principal:", principal)

// refresh routing table
// TODO: this api seems backwards...push down into table?
@@ -93,7 +95,7 @@ func (b *Backend) Authenticate(hello *bolt.Message) (map[string]bolt.BoltConn, e
writers, _ := rt.WritersFor(rt.DefaultDb)
defaultWriter := writers[0]

log.Printf("trying to auth %s to host %s\n", principal, defaultWriter)
b.log.Printf("trying to auth %s to host %s\n", principal, defaultWriter)
conn, err := authClient(hello.Data, b.Version().Bytes(),
"tcp", defaultWriter, b.tls)
if err != nil {
@@ -120,10 +122,10 @@ func (b *Backend) Authenticate(hello *bolt.Message) (map[string]bolt.BoltConn, e
defer wg.Done()
conn, err := authClient(hello.Data, b.Version().Bytes(), "tcp", h, b.tls)
if err != nil {
log.Printf("failed to auth %s to %s!?\n", principal, h)
b.log.Printf("failed to auth %s to %s!?\n", principal, h)
return
}
log.Printf("auth'd %s to host %s\n", principal, h)
b.log.Printf("auth'd %s to host %s\n", principal, h)
c <- pair{bolt.NewDirectConn(conn), h}
}(host)
}
@@ -136,6 +138,6 @@ func (b *Backend) Authenticate(hello *bolt.Message) (map[string]bolt.BoltConn, e
conns[p.host] = p.conn
}

log.Printf("auth'd principal to %d hosts\n", len(conns))
b.log.Printf("auth'd principal to %d hosts\n", len(conns))
return conns, err
}
42 changes: 27 additions & 15 deletions backend/monitor.go
@@ -3,7 +3,6 @@ package backend
import (
"errors"
"fmt"
"log"
"strings"
"time"

@@ -31,8 +30,20 @@ type Version struct {
Major, Minor, Patch uint8
}

func ParseVersion(buf []byte) (Version, error) {
if len(buf) < 4 {
return Version{}, errors.New("buffer too short (< 4)")
}

version := Version{}
version.Major = uint8(buf[3])
version.Minor = uint8(buf[2])
version.Patch = uint8(buf[1])
return version, nil
}

func (v Version) String() string {
return fmt.Sprintf("{ major: %d, minor: %d, patch: %d }",
return fmt.Sprintf("Bolt{major: %d, minor: %d, patch: %d}",
v.Major,
v.Minor,
v.Patch)
@@ -133,9 +144,9 @@ func NewMonitor(user, password, uri string, hosts ...string) (*Monitor, error) {

version, err := getVersion(&driver)
if err != nil {
log.Fatal(err)
panic(err)
}
log.Printf("found neo4j version %v\n", version)
// log.Printf("found neo4j version %v\n", version)

// TODO: check if in SINGLE, CORE, or READ_REPLICA mode
// We can run `CALL dbms.listConfig('dbms.mode') YIELD value` and
@@ -146,7 +157,7 @@ func NewMonitor(user, password, uri string, hosts ...string) (*Monitor, error) {
// Get the first routing table and ttl details
rt, err := getNewRoutingTable(&driver)
if err != nil {
log.Fatal(err)
panic(err)
}
c <- rt

@@ -159,7 +170,7 @@ func NewMonitor(user, password, uri string, hosts ...string) (*Monitor, error) {
case <-ticker.C:
rt, err := getNewRoutingTable(monitor.driver)
if err != nil {
log.Fatal(err)
panic(err)
}
ticker.Reset(rt.Ttl)

@@ -179,9 +190,10 @@ func NewMonitor(user, password, uri string, hosts ...string) (*Monitor, error) {
}
case <-h:
ticker.Stop()
log.Println("monitor stopped")
case <-time.After(5 * rt.Ttl):
log.Fatalf("monitor timeout reached of 5 x %v\n", rt.Ttl)
// log.Println("monitor stopped")
case <-time.After(10 * rt.Ttl):
msg := fmt.Sprintf("monitor timeout of 10*%v reached\n", rt.Ttl)
panic(msg)
}
}
}()
@@ -381,14 +393,14 @@ func routingTableTx(tx neo4j.Transaction, names []string) (interface{}, error) {
func getNewRoutingTable(driver *neo4j.Driver) (*RoutingTable, error) {
names, err := queryDbNames(driver)
if err != nil {
log.Printf("error getting database names: %v\n", err)
return nil, err
msg := fmt.Sprintf("error getting database names: %v\n", err)
return nil, errors.New(msg)
}

tableMap, err := queryRoutingTable(driver, names)
if err != nil {
log.Printf("error getting routing table: %v\n", err)
return nil, err
msg := fmt.Sprintf("error getting routing table: %v\n", err)
return nil, errors.New(msg)
}

// build the new routing table instance
@@ -422,8 +434,8 @@ func getNewRoutingTable(driver *neo4j.Driver) (*RoutingTable, error) {
}
}

log.Printf("updated routing table: %s\n", &rt)
log.Printf("known hosts look like: %v\n", rt.Hosts)
// log.Printf("updated routing table: %s\n", &rt)
// log.Printf("known hosts look like: %v\n", rt.Hosts)

return &rt, nil
}
20 changes: 19 additions & 1 deletion bolt/conn.go
@@ -6,7 +6,7 @@ import (
"errors"
"fmt"
"io"
// "log"
"net"

"github.com/gobwas/ws"
)
@@ -82,6 +82,15 @@ func NewDirectConn(c io.ReadWriteCloser) DirectConn {
return dc
}

func (c DirectConn) String() string {
switch c.conn.(type) {
case net.Conn:
return fmt.Sprintf("Direct[%s]", c.conn.(net.Conn).RemoteAddr())
default:
return fmt.Sprintf("Direct[%s]", &c.conn)
}
}

func (c DirectConn) R() <-chan *Message {
return c.r
}
@@ -210,6 +219,15 @@ func (c WsConn) R() <-chan *Message {
return c.r
}

func (c WsConn) String() string {
switch c.conn.(type) {
case net.Conn:
return fmt.Sprintf("WebSocket[%s]", c.conn.(net.Conn).RemoteAddr())
default:
return fmt.Sprintf("WebSocket[%s]", &c.conn)
}
}

// Read 0 or many Bolt Messages from a WebSocket frame since, apparently,
// small Bolt Messages sometimes get packed into a single Frame(?!).
//
127 changes: 76 additions & 51 deletions proxy.go
@@ -6,6 +6,7 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
@@ -29,6 +30,12 @@ const (
MAX_BYTES int = 32
)

var (
debug *log.Logger
info *log.Logger
warn *log.Logger
)

// Crude logging routine for helping debug bolt Messages. Tries not to clutter
output too much due to large messages while trying to delineate who logged
// the message.
@@ -43,11 +50,11 @@ func logMessage(who string, msg *bolt.Message) {
switch msg.T {
case bolt.HelloMsg:
// make sure we don't print the secrets in a Hello!
log.Printf("[%s] <%s>: %#v\n\n", who, msg.T, msg.Data[:4])
debug.Printf("[%s] <%s>: %#v\n\n", who, msg.T, msg.Data[:4])
case bolt.BeginMsg, bolt.FailureMsg:
log.Printf("[%s] <%s>: %#v\n%s\n", who, msg.T, msg.Data[:end], msg.Data)
debug.Printf("[%s] <%s>: %#v\n%s\n", who, msg.T, msg.Data[:end], msg.Data)
default:
log.Printf("[%s] <%s>: %#v%s\n", who, msg.T, msg.Data[:end], suffix)
debug.Printf("[%s] <%s>: %#v%s\n", who, msg.T, msg.Data[:end], suffix)
}
}

@@ -89,24 +96,24 @@ func handleTx(client, server bolt.BoltConn, ack chan<- bool, halt <-chan bool) {
finished = true
}
} else {
log.Println("potential server hangup")
debug.Println("potential server hangup")
finished = true
}

case <-halt:
finished = true

case <-time.After(time.Duration(MAX_IDLE_MINS) * time.Minute):
log.Println("timeout reading server!")
warn.Println("timeout reading server!")
finished = true
}
}

select {
case ack <- true:
log.Println("tx handler stop ACK sent")
debug.Println("tx handler stop ACK sent")
default:
log.Println("couldn't put value in ack channel?!")
warn.Println("couldn't put value in ack channel?!")
}
}

@@ -117,7 +124,7 @@ func handleTx(client, server bolt.BoltConn, ack chan<- bool, halt <-chan bool) {
// a client handler
func handleClient(conn net.Conn, b *backend.Backend) {
defer func() {
log.Printf("closing client connection from %s\n",
debug.Printf("closing client connection from %s\n",
conn.RemoteAddr())
conn.Close()
}()
@@ -128,15 +135,15 @@ func handleClient(conn net.Conn, b *backend.Backend) {

n, err := conn.Read(buf[:4])
if err != nil || n != 4 {
log.Println("bad connection from", conn.RemoteAddr())
warn.Println("bad connection from", conn.RemoteAddr())
return
}

if bytes.Equal(buf[:4], []byte{0x60, 0x60, 0xb0, 0x17}) {
// First case: we have a direct bolt client connection
n, err := conn.Read(buf[:20])
if err != nil {
log.Println("error peeking at connection from", conn.RemoteAddr())
warn.Println("error peeking at connection from", conn.RemoteAddr())
return
}

@@ -145,11 +152,11 @@ func handleClient(conn net.Conn, b *backend.Backend) {
serverVersion := b.Version().Bytes()
clientVersion, err := bolt.ValidateHandshake(buf[:n], serverVersion)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}
_, err = conn.Write(clientVersion)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}

// regular bolt
@@ -162,15 +169,15 @@ func handleClient(conn net.Conn, b *backend.Backend) {
// Read the rest of the request
n, err = conn.Read(buf[4:])
if err != nil {
log.Printf("failed reading rest of GET request: %s\n", err)
warn.Printf("failed reading rest of GET request: %s\n", err)
return
}

// Health check, maybe? If so, handle and bail.
if health.IsHealthCheck(buf[:n+4]) {
err = health.HandleHealthCheck(conn, buf[:n+4])
if err != nil {
log.Println(err)
warn.Println(err)
}
return
}
@@ -180,28 +187,28 @@ func handleClient(conn net.Conn, b *backend.Backend) {
iobuf := bytes.NewBuffer(buf[:n+4])
_, err := ws.Upgrade(iobuf)
if err != nil {
log.Printf("failed to upgrade websocket client %s: %s\n",
warn.Printf("failed to upgrade websocket client %s: %s\n",
conn.RemoteAddr(), err)
return
}
// Relay the upgrade response
_, err = io.Copy(conn, iobuf)
if err != nil {
log.Printf("failed to copy upgrade to client %s\n",
warn.Printf("failed to copy upgrade to client %s\n",
conn.RemoteAddr())
return
}

// After upgrade, we should get a WebSocket message with header
header, err := ws.ReadHeader(conn)
if err != nil {
log.Printf("failed to read ws header from client %s: %s\n",
warn.Printf("failed to read ws header from client %s: %s\n",
conn.RemoteAddr(), err)
return
}
n, err := conn.Read(buf[:header.Length])
if err != nil {
log.Printf("failed to read payload from client %s\n",
warn.Printf("failed to read payload from client %s\n",
conn.RemoteAddr())
return
}
@@ -213,27 +220,27 @@ func handleClient(conn net.Conn, b *backend.Backend) {
magic, handshake := buf[:4], buf[4:20] // blaze it
valid, err := bolt.ValidateMagic(magic)
if !valid {
log.Fatal(err)
warn.Fatal(err)
}

// negotiate client & server side bolt versions
serverVersion := b.Version().Bytes()
clientVersion, err := bolt.ValidateHandshake(handshake, serverVersion)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}

// Complete Bolt handshake via WebSocket frame
frame := ws.NewBinaryFrame(clientVersion)
if err = ws.WriteFrame(conn, frame); err != nil {
log.Fatal(err)
warn.Fatal(err)
}

// Let there be Bolt-via-WebSockets!
handleBoltConn(bolt.NewWsConn(conn), clientVersion, b)
} else {
// not bolt, not http...something else?
log.Printf("client %s is speaking gibberish: %#v\n",
info.Printf("client %s is speaking gibberish: %#v\n",
conn.RemoteAddr(), buf[:4])
}
}
@@ -252,28 +259,35 @@ func handleBoltConn(client bolt.BoltConn, clientVersion []byte, b *backend.Backe
select {
case msg, ok := <-client.R():
if !ok {
log.Println("failed to read expected Hello from client")
warn.Println("failed to read expected Hello from client")
return
}
hello = msg
case <-time.After(30 * time.Second):
log.Println("timed out waiting for client to auth")
warn.Println("timed out waiting for client to auth")
return
}
logMessage("C->P", hello)

if hello.T != bolt.HelloMsg {
log.Println("expected HelloMsg, got:", hello.T)
warn.Println("expected HelloMsg, got:", hello.T)
return
}

// get backend connection
log.Println("trying to authenticate with backend...")
pool, err := b.Authenticate(hello)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}

// TODO: this seems odd...move parser and version stuff to bolt pkg
v, _ := backend.ParseVersion(clientVersion)
info.Printf("authenticated client %s speaking %s to %d host(s)\n",
client, v, len(pool))
defer func() {
info.Printf("goodbye to client %s\n", client)
}()

// TODO: Replace hardcoded Success message with dynamic one
success := bolt.Message{
T: bolt.SuccessMsg,
@@ -289,7 +303,7 @@ func handleBoltConn(client bolt.BoltConn, clientVersion []byte, b *backend.Backe
logMessage("P->C", &success)
err = client.WriteMessage(&success)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}

// Time to begin the client-side event loop!
@@ -307,17 +321,17 @@ func handleBoltConn(client bolt.BoltConn, clientVersion []byte, b *backend.Backe
msg = m
logMessage("C->P", msg)
} else {
log.Println("potential client hangup")
debug.Println("potential client hangup")
select {
case halt <- true:
log.Println("client hungup, asking tx to halt")
debug.Println("client hungup, asking tx to halt")
default:
log.Println("failed to send halt message to tx handler")
warn.Println("failed to send halt message to tx handler")
}
return
}
case <-time.After(time.Duration(MAX_IDLE_MINS) * time.Minute):
log.Println("client idle timeout")
warn.Println("client idle timeout")
return
}

@@ -370,11 +384,11 @@ func handleBoltConn(client bolt.BoltConn, clientVersion []byte, b *backend.Backe
hosts, err = rt.WritersFor(db)
}
if err != nil {
log.Printf("couldn't find host for '%s' in routing table", db)
warn.Printf("couldn't find host for '%s' in routing table", db)
}

if len(hosts) < 1 {
log.Println("empty hosts lists for database", db)
warn.Println("empty hosts lists for database", db)
// TODO: return FailureMsg???
return
}
@@ -385,12 +399,12 @@ func handleBoltConn(client bolt.BoltConn, clientVersion []byte, b *backend.Backe
if server != nil {
select {
case halt <- true:
log.Println("...asking current tx handler to halt")
debug.Println("...asking current tx handler to halt")
select {
case <-ack:
log.Println("tx handler ack'd stop")
debug.Println("tx handler ack'd stop")
case <-time.After(5 * time.Second):
log.Println("!!! timeout waiting for ack from tx handler")
warn.Println("!!! timeout waiting for ack from tx handler")
}
default:
// this shouldn't happen!
@@ -402,10 +416,10 @@ func handleBoltConn(client bolt.BoltConn, clientVersion []byte, b *backend.Backe
ok := false
server, ok = pool[host]
if !ok {
log.Println("no established connection for host", host)
warn.Println("no established connection for host", host)
return
}
log.Printf("grabbed conn for %s-access to db %s on host %s\n", mode, db, host)
debug.Printf("grabbed conn for %s-access to db %s on host %s\n", mode, db, host)

// TODO: refactor channel handling...probably have handleTx() return new ones
// instead of reusing the same ones. If we don't create new ones, there could
@@ -459,6 +473,7 @@ const (

func main() {
var (
debugMode bool
bindOn string
proxyTo string
username, password string
@@ -477,6 +492,7 @@ func main() {
if !found {
username = DEFAULT_USER
}
_, debugMode = os.LookupEnv("BOLT_PROXY_DEBUG")
password = os.Getenv("BOLT_PROXY_PASSWORD")
certFile = os.Getenv("BOLT_PROXY_CERT")
keyFile = os.Getenv("BOLT_PROXY_KEY")
@@ -488,52 +504,61 @@ func main() {
flag.StringVar(&password, "pass", password, "Neo4j password")
flag.StringVar(&certFile, "cert", certFile, "x509 certificate")
flag.StringVar(&keyFile, "key", keyFile, "x509 private key")
flag.BoolVar(&debugMode, "debug", debugMode, "enable debug logging")
flag.Parse()

// We log to stdout because our parents raised us right
log.SetOutput(os.Stdout)
info = log.New(os.Stdout, "INFO ", log.Ldate|log.Ltime|log.Lmsgprefix)
if debugMode {
debug = log.New(os.Stdout, "DEBUG ", log.Ldate|log.Ltime|log.Lmsgprefix)
} else {
debug = log.New(ioutil.Discard, "DEBUG ", 0)
}
warn = log.New(os.Stderr, "WARN ", log.Ldate|log.Ltime|log.Lmsgprefix)

// ---------- pprof debugger
go func() {
log.Println(http.ListenAndServe("localhost:6060", nil))
info.Println(http.ListenAndServe("localhost:6060", nil))
}()

// ---------- BACK END
log.Println("Starting bolt-proxy backend...")
backend, err := backend.NewBackend(username, password, proxyTo)
info.Println("starting bolt-proxy backend")
backend, err := backend.NewBackend(debug, username, password, proxyTo)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}
info.Println("connected to backend", proxyTo)
info.Printf("found backend version %s\n", backend.Version())

// ---------- FRONT END
log.Println("Starting bolt-proxy frontend...")
info.Println("starting bolt-proxy frontend")
var listener net.Listener
if certFile == "" || keyFile == "" {
// non-tls
listener, err = net.Listen("tcp", bindOn)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}
log.Printf("Listening on %s\n", bindOn)
info.Printf("listening on %s\n", bindOn)
} else {
// tls
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}
config := &tls.Config{Certificates: []tls.Certificate{cert}}
listener, err = tls.Listen("tcp", bindOn, config)
if err != nil {
log.Fatal(err)
warn.Fatal(err)
}
log.Printf("Listening for TLS connections on %s\n", bindOn)
info.Printf("listening for TLS connections on %s\n", bindOn)
}

// ---------- Event Loop
for {
conn, err := listener.Accept()
if err != nil {
log.Printf("error: %v\n", err)
warn.Printf("error: %v\n", err)
} else {
go handleClient(conn, backend)
}
