Merge pull request #585 from sputn1ck/sqlite

Migrate from boltdb to sql
pull/599/head
Konstantin Nick 11 months ago committed by GitHub
commit 0781cafbfb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -20,7 +20,7 @@ env:
# If you change this value, please change it in the following files as well:
# /Dockerfile
GO_VERSION: 1.19.2
GO_VERSION: 1.20.4
jobs:
########################
@ -124,3 +124,6 @@ jobs:
- name: run unit tests
run: make unit
- name: run unit test with postgres
run: make unit-postgres

@ -1,4 +1,4 @@
FROM --platform=${BUILDPLATFORM} golang:1.19.7-alpine as builder
FROM --platform=${BUILDPLATFORM} golang:1.20.4-alpine as builder
# Copy in the local repository to build from.
COPY . /go/src/github.com/lightningnetwork/loop

@ -95,6 +95,10 @@ unit:
@$(call print, "Running unit tests.")
$(UNIT)
unit-postgres:
@$(call print, "Running unit tests with postgres.")
$(UNIT) -tags=test_db_postgres
# =========
# UTILITIES
# =========
@ -122,4 +126,12 @@ mod-check:
$(GOMOD) tidy
if test -n "$$(git status | grep -e "go.mod\|go.sum")"; then echo "Running go mod tidy changes go.mod/go.sum"; git status; git diff; exit 1; fi
sqlc:
@$(call print, "Generating sql models and queries in Go")
./scripts/gen_sqlc_docker.sh
sqlc-check: sqlc
@$(call print, "Verifying sql code generation.")
if test -n "$$(git status --porcelain '*.go')"; then echo "SQL models not properly generated!"; git status --porcelain '*.go'; exit 1; fi

@ -121,11 +121,9 @@ type ClientConfig struct {
}
// NewClient returns a new instance to initiate swaps with.
func NewClient(dbDir string, cfg *ClientConfig) (*Client, func(), error) {
store, err := loopdb.NewBoltSwapStore(dbDir, cfg.Lnd.ChainParams)
if err != nil {
return nil, nil, err
}
func NewClient(dbDir string, loopDB loopdb.SwapStore,
cfg *ClientConfig) (*Client, func(), error) {
lsatStore, err := lsat.NewFileStore(dbDir)
if err != nil {
return nil, nil, err
@ -139,7 +137,7 @@ func NewClient(dbDir string, cfg *ClientConfig) (*Client, func(), error) {
config := &clientConfig{
LndServices: cfg.Lnd,
Server: swapServerClient,
Store: store,
Store: loopDB,
LsatStore: lsatStore,
CreateExpiryTimer: func(d time.Duration) <-chan time.Time {
return time.NewTimer(d).C
@ -153,7 +151,7 @@ func NewClient(dbDir string, cfg *ClientConfig) (*Client, func(), error) {
executor := newExecutor(&executorConfig{
lnd: cfg.Lnd,
store: store,
store: loopDB,
sweeper: sweeper,
createExpiryTimer: config.CreateExpiryTimer,
loopOutMaxParts: cfg.LoopOutMaxParts,
@ -185,20 +183,20 @@ func NewClient(dbDir string, cfg *ClientConfig) (*Client, func(), error) {
cleanup := func() {
swapServerClient.stop()
store.Close()
loopDB.Close()
}
return client, cleanup, nil
}
// FetchSwaps returns all loop in and out swaps currently in the database.
func (s *Client) FetchSwaps() ([]*SwapInfo, error) {
loopOutSwaps, err := s.Store.FetchLoopOutSwaps()
func (s *Client) FetchSwaps(ctx context.Context) ([]*SwapInfo, error) {
loopOutSwaps, err := s.Store.FetchLoopOutSwaps(ctx)
if err != nil {
return nil, err
}
loopInSwaps, err := s.Store.FetchLoopInSwaps()
loopInSwaps, err := s.Store.FetchLoopInSwaps(ctx)
if err != nil {
return nil, err
}
@ -292,12 +290,12 @@ func (s *Client) Run(ctx context.Context,
// Query store before starting event loop to prevent new swaps from
// being treated as swaps that need to be resumed.
pendingLoopOutSwaps, err := s.Store.FetchLoopOutSwaps()
pendingLoopOutSwaps, err := s.Store.FetchLoopOutSwaps(mainCtx)
if err != nil {
return err
}
pendingLoopInSwaps, err := s.Store.FetchLoopInSwaps()
pendingLoopInSwaps, err := s.Store.FetchLoopInSwaps(mainCtx)
if err != nil {
return err
}

@ -13,8 +13,12 @@ require (
github.com/davecgh/go-spew v1.1.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1
github.com/fortytw2/leaktest v1.3.0
github.com/golang-migrate/migrate/v4 v4.15.2
github.com/grpc-ecosystem/grpc-gateway/v2 v2.5.0
github.com/jackc/pgconn v1.10.0
github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa
github.com/jessevdk/go-flags v1.4.0
github.com/lib/pq v1.10.3
github.com/lightninglabs/aperture v0.1.20-beta
github.com/lightninglabs/lndclient v0.16.0-10
github.com/lightninglabs/loop/swapserverrpc v1.0.4
@ -25,18 +29,23 @@ require (
github.com/lightningnetwork/lnd/queue v1.1.0
github.com/lightningnetwork/lnd/ticker v1.1.0
github.com/lightningnetwork/lnd/tor v1.1.0
github.com/ory/dockertest/v3 v3.10.0
github.com/stretchr/testify v1.8.1
github.com/urfave/cli v1.22.9
golang.org/x/net v0.7.0
google.golang.org/grpc v1.41.0
golang.org/x/net v0.8.0
google.golang.org/grpc v1.45.0
google.golang.org/protobuf v1.27.1
gopkg.in/macaroon-bakery.v2 v2.0.1
gopkg.in/macaroon.v2 v2.1.0
modernc.org/sqlite v1.20.3
)
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect
github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/Yawning/aez v0.0.0-20211027044916-e49e68abd344 // indirect
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect
github.com/aead/siphash v1.0.1 // indirect
@ -49,30 +58,40 @@ require (
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/btcsuite/winsvc v1.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/lru v1.0.0 // indirect
github.com/docker/cli v20.10.17+incompatible // indirect
github.com/docker/docker v20.10.13+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fergusstrange/embedded-postgres v1.10.0 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.10.0 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.1.1 // indirect
@ -83,13 +102,12 @@ require (
github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/jrick/logrotate v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/juju/loggo v0.0.0-20210728185423-eebad3a902c4 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kkdai/bstream v1.0.0 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/lib/pq v1.10.3 // indirect
github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf // indirect
github.com/lightninglabs/neutrino v0.15.0 // indirect
github.com/lightninglabs/neutrino/cache v1.1.1 // indirect
@ -99,23 +117,29 @@ require (
github.com/lightningnetwork/lnd/tlv v1.1.0 // indirect
github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mholt/archiver/v3 v3.5.0 // indirect
github.com/miekg/dns v1.1.43 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nwaples/rardecode v1.1.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.1.5 // indirect
github.com/pierrec/lz4/v4 v4.1.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.11.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rogpeppe/fastuuid v1.2.0 // indirect
github.com/russross/blackfriday/v2 v2.0.1 // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/soheilhy/cmux v0.1.5 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.0 // indirect
@ -123,6 +147,9 @@ require (
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
gitlab.com/yawning/bsaes.git v0.0.0-20190805113838-0a714cd429ec // indirect
@ -134,26 +161,27 @@ require (
go.etcd.io/etcd/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/raft/v3 v3.5.7 // indirect
go.etcd.io/etcd/server/v3 v3.5.7 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0 // indirect
go.opentelemetry.io/otel v1.0.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 // indirect
go.opentelemetry.io/otel/sdk v1.0.1 // indirect
go.opentelemetry.io/otel/trace v1.0.1 // indirect
go.opentelemetry.io/proto/otlp v0.9.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 // indirect
go.opentelemetry.io/otel v1.3.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 // indirect
go.opentelemetry.io/otel/sdk v1.3.0 // indirect
go.opentelemetry.io/otel/trace v1.3.0 // indirect
go.opentelemetry.io/proto/otlp v0.11.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/exp v0.0.0-20221111094246-ab4555d3164f // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.2.0 // indirect
google.golang.org/genproto v0.0.0-20210617175327-b9e0b3197ced // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 // indirect
gopkg.in/errgo.v1 v1.0.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@ -165,7 +193,6 @@ require (
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/sqlite v1.20.3 // indirect
modernc.org/strutil v1.1.3 // indirect
modernc.org/token v1.0.1 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect

1274
go.sum

File diff suppressed because it is too large Load Diff

@ -144,13 +144,15 @@ func newAutoloopTestCtx(t *testing.T, parameters Parameters,
return <-testCtx.loopInRestrictions, nil
},
ListLoopOut: func() ([]*loopdb.LoopOut, error) {
ListLoopOut: func(context.Context) ([]*loopdb.LoopOut, error) {
return <-testCtx.loopOuts, nil
},
GetLoopOut: func(hash lntypes.Hash) (*loopdb.LoopOut, error) {
GetLoopOut: func(ctx context.Context,
hash lntypes.Hash) (*loopdb.LoopOut, error) {
return testCtx.loopOutSingle, nil
},
ListLoopIn: func() ([]*loopdb.LoopIn, error) {
ListLoopIn: func(context.Context) ([]*loopdb.LoopIn, error) {
return <-testCtx.loopIns, nil
},
LoopOutQuote: func(_ context.Context,
@ -186,10 +188,10 @@ func newAutoloopTestCtx(t *testing.T, parameters Parameters,
MinimumConfirmations: loop.DefaultSweepConfTarget,
Lnd: &testCtx.lnd.LndServices,
Clock: testCtx.testClock,
PutLiquidityParams: func(_ []byte) error {
PutLiquidityParams: func(_ context.Context, _ []byte) error {
return nil
},
FetchLiquidityParams: func() ([]byte, error) {
FetchLiquidityParams: func(context.Context) ([]byte, error) {
return nil, nil
},
}

@ -10,22 +10,22 @@
//
// Fee restrictions are placed on swap suggestions to ensure that we only
// suggest swaps that fit the configured fee preferences.
// - Sweep Fee Rate Limit: the maximum sat/vByte fee estimate for our sweep
// transaction to confirm within our configured number of confirmations
// that we will suggest swaps for.
// - Maximum Swap Fee PPM: the maximum server fee, expressed as parts per
// million of the full swap amount
// - Maximum Routing Fee PPM: the maximum off-chain routing fees for the swap
// invoice, expressed as parts per million of the swap amount.
// - Maximum Prepay Routing Fee PPM: the maximum off-chain routing fees for the
// swap prepayment, expressed as parts per million of the prepay amount.
// - Maximum Prepay: the maximum no-show fee, expressed in satoshis. This
// amount is only payable in the case where the swap server broadcasts a htlc
// and the client fails to sweep the preimage.
// - Maximum miner fee: the maximum miner fee we are willing to pay to sweep the
// on chain htlc. Note that the client will use current fee estimates to
// sweep, so this value acts more as a sanity check in the case of a large fee
// spike.
// - Sweep Fee Rate Limit: the maximum sat/vByte fee estimate for our sweep
// transaction to confirm within our configured number of confirmations
// that we will suggest swaps for.
// - Maximum Swap Fee PPM: the maximum server fee, expressed as parts per
// million of the full swap amount
// - Maximum Routing Fee PPM: the maximum off-chain routing fees for the swap
// invoice, expressed as parts per million of the swap amount.
// - Maximum Prepay Routing Fee PPM: the maximum off-chain routing fees for the
// swap prepayment, expressed as parts per million of the prepay amount.
//   - Maximum Prepay: the maximum no-show fee, expressed in satoshis. This
// amount is only payable in the case where the swap server broadcasts a htlc
// and the client fails to sweep the preimage.
// - Maximum miner fee: the maximum miner fee we are willing to pay to sweep the
// on chain htlc. Note that the client will use current fee estimates to
// sweep, so this value acts more as a sanity check in the case of a large fee
// spike.
//
// The maximum fee per-swap is calculated as follows:
// (swap amount * serverPPM/1e6) + miner fee + (swap amount * routingPPM/1e6)
@ -176,14 +176,14 @@ type Config struct {
Lnd *lndclient.LndServices
// ListLoopOut returns all of the loop our swaps stored on disk.
ListLoopOut func() ([]*loopdb.LoopOut, error)
ListLoopOut func(context.Context) ([]*loopdb.LoopOut, error)
// GetLoopOut returns a single loop out swap based on the provided swap
// hash.
GetLoopOut func(hash lntypes.Hash) (*loopdb.LoopOut, error)
GetLoopOut func(ctx context.Context, hash lntypes.Hash) (*loopdb.LoopOut, error)
// ListLoopIn returns all of the loop in swaps stored on disk.
ListLoopIn func() ([]*loopdb.LoopIn, error)
ListLoopIn func(ctx context.Context) ([]*loopdb.LoopIn, error)
// LoopOutQuote gets swap fee, estimated miner fee and prepay amount for
// a loop out swap.
@ -219,13 +219,13 @@ type Config struct {
//
// NOTE: the params are encoded using `proto.Marshal` over an RPC
// request.
PutLiquidityParams func(params []byte) error
PutLiquidityParams func(ctx context.Context, params []byte) error
// FetchLiquidityParams reads the serialized `Parameters` from db.
//
// NOTE: the params are decoded using `proto.Unmarshal` over a
// serialized RPC request.
FetchLiquidityParams func() ([]byte, error)
FetchLiquidityParams func(ctx context.Context) ([]byte, error)
}
// Manager contains a set of desired liquidity rules for our channel
@ -260,7 +260,7 @@ func (m *Manager) Run(ctx context.Context) error {
defer m.cfg.AutoloopTicker.Stop()
// Before we start the main loop, load the params from db.
req, err := m.loadParams()
req, err := m.loadParams(ctx)
if err != nil {
return err
}
@ -338,7 +338,7 @@ func (m *Manager) SetParameters(ctx context.Context,
// Since setting params is NOT a frequent action, it's should put
// little pressure on our db. Only when performance becomes an issue,
// we can then apply the alternative.
return m.saveParams(req)
return m.saveParams(ctx, req)
}
// SetParameters updates our current set of parameters if the new parameters
@ -372,7 +372,7 @@ func (m *Manager) setParameters(ctx context.Context,
}
// saveParams marshals an RPC request and saves it to db.
func (m *Manager) saveParams(req proto.Message) error {
func (m *Manager) saveParams(ctx context.Context, req proto.Message) error {
// Marshal the params.
paramsBytes, err := proto.Marshal(req)
if err != nil {
@ -380,7 +380,7 @@ func (m *Manager) saveParams(req proto.Message) error {
}
// Save the params on disk.
if err := m.cfg.PutLiquidityParams(paramsBytes); err != nil {
if err := m.cfg.PutLiquidityParams(ctx, paramsBytes); err != nil {
return fmt.Errorf("failed to save params: %v", err)
}
@ -389,8 +389,10 @@ func (m *Manager) saveParams(req proto.Message) error {
// loadParams unmarshals a serialized RPC request from db and returns the RPC
// request.
func (m *Manager) loadParams() (*clientrpc.LiquidityParameters, error) {
paramsBytes, err := m.cfg.FetchLiquidityParams()
func (m *Manager) loadParams(ctx context.Context) (
*clientrpc.LiquidityParameters, error) {
paramsBytes, err := m.cfg.FetchLiquidityParams(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read params: %v", err)
}
@ -509,12 +511,12 @@ func (m *Manager) ForceAutoLoop(ctx context.Context) error {
// local balance back to the target.
func (m *Manager) dispatchBestEasyAutoloopSwap(ctx context.Context) error {
// Retrieve existing swaps.
loopOut, err := m.cfg.ListLoopOut()
loopOut, err := m.cfg.ListLoopOut(ctx)
if err != nil {
return err
}
loopIn, err := m.cfg.ListLoopIn()
loopIn, err := m.cfg.ListLoopIn(ctx)
if err != nil {
return err
}
@ -733,12 +735,12 @@ func (m *Manager) SuggestSwaps(ctx context.Context) (
// List our current set of swaps so that we can determine which channels
// are already being utilized by swaps. Note that these calls may race
// with manual initiation of swaps.
loopOut, err := m.cfg.ListLoopOut()
loopOut, err := m.cfg.ListLoopOut(ctx)
if err != nil {
return nil, err
}
loopIn, err := m.cfg.ListLoopIn()
loopIn, err := m.cfg.ListLoopIn(ctx)
if err != nil {
return nil, err
}
@ -1222,7 +1224,7 @@ func (m *Manager) refreshAutoloopBudget(ctx context.Context) {
return
}
err = m.saveParams(paramsRpc)
err = m.saveParams(ctx, paramsRpc)
if err != nil {
log.Errorf("Error saving parameters: %v", err)
}
@ -1344,7 +1346,7 @@ func (m *Manager) waitForSwapPayment(ctx context.Context, swapHash lntypes.Hash,
case <-time.After(interval):
}
swap, err = m.cfg.GetLoopOut(swapHash)
swap, err = m.cfg.GetLoopOut(ctx, swapHash)
if err != nil {
log.Errorf(
"Error getting swap with hash %x: %v", swapHash,

@ -154,10 +154,10 @@ func newTestConfig() (*Config, *test.LndMockServices) {
},
Lnd: &lnd.LndServices,
Clock: clock.NewTestClock(testTime),
ListLoopOut: func() ([]*loopdb.LoopOut, error) {
ListLoopOut: func(context.Context) ([]*loopdb.LoopOut, error) {
return nil, nil
},
ListLoopIn: func() ([]*loopdb.LoopIn, error) {
ListLoopIn: func(context.Context) ([]*loopdb.LoopIn, error) {
return nil, nil
},
LoopOutQuote: func(_ context.Context,
@ -266,30 +266,34 @@ func TestPersistParams(t *testing.T) {
cfg, _ := newTestConfig()
manager := NewManager(cfg)
ctxb := context.Background()
var paramsBytes []byte
// Mock the read method to return empty data.
manager.cfg.FetchLiquidityParams = func() ([]byte, error) {
manager.cfg.FetchLiquidityParams = func(context.Context) ([]byte, error) {
return paramsBytes, nil
}
// Test the nil params is returned.
req, err := manager.loadParams()
req, err := manager.loadParams(ctxb)
require.Nil(t, req)
require.NoError(t, err)
// Mock the write method to return no error.
manager.cfg.PutLiquidityParams = func(data []byte) error {
manager.cfg.PutLiquidityParams = func(ctx context.Context,
data []byte) error {
paramsBytes = data
return nil
}
// Test save the message.
err = manager.saveParams(rpcParams)
err = manager.saveParams(ctxb, rpcParams)
require.NoError(t, err)
// Test the nil params is returned.
req, err = manager.loadParams()
req, err = manager.loadParams(ctxb)
require.NoError(t, err)
// Check the specified fields are set as expected.
@ -565,10 +569,10 @@ func TestRestrictedSuggestions(t *testing.T) {
// Create a manager config which will return the test
// case's set of existing swaps.
cfg, lnd := newTestConfig()
cfg.ListLoopOut = func() ([]*loopdb.LoopOut, error) {
cfg.ListLoopOut = func(context.Context) ([]*loopdb.LoopOut, error) {
return testCase.loopOut, nil
}
cfg.ListLoopIn = func() ([]*loopdb.LoopIn, error) {
cfg.ListLoopIn = func(context.Context) ([]*loopdb.LoopIn, error) {
return testCase.loopIn, nil
}
@ -1093,7 +1097,7 @@ func TestFeeBudget(t *testing.T) {
})
}
cfg.ListLoopOut = func() ([]*loopdb.LoopOut, error) {
cfg.ListLoopOut = func(context.Context) ([]*loopdb.LoopOut, error) {
return swaps, nil
}
@ -1270,10 +1274,10 @@ func TestInFlightLimit(t *testing.T) {
t.Run(testCase.name, func(t *testing.T) {
cfg, lnd := newTestConfig()
cfg.ListLoopOut = func() ([]*loopdb.LoopOut, error) {
cfg.ListLoopOut = func(context.Context) ([]*loopdb.LoopOut, error) {
return testCase.existingSwaps, nil
}
cfg.ListLoopIn = func() ([]*loopdb.LoopIn, error) {
cfg.ListLoopIn = func(context.Context) ([]*loopdb.LoopIn, error) {
return testCase.existingInSwaps, nil
}
@ -1755,7 +1759,7 @@ func TestBudgetWithLoopin(t *testing.T) {
channel1,
}
cfg.ListLoopIn = func() ([]*loopdb.LoopIn, error) {
cfg.ListLoopIn = func(context.Context) ([]*loopdb.LoopIn, error) {
return testCase.loopIns, nil
}

@ -11,6 +11,7 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/lightninglabs/aperture/lsat"
"github.com/lightninglabs/loop/loopdb"
"github.com/lightningnetwork/lnd/cert"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@ -28,11 +29,19 @@ var (
defaultLogDirname = "logs"
defaultLogFilename = "loopd.log"
defaultSqliteDatabaseFileName = "loop_sqlite.db"
defaultLogDir = filepath.Join(LoopDirBase, defaultLogDirname)
defaultConfigFile = filepath.Join(
LoopDirBase, DefaultNetwork, defaultConfigFilename,
)
// defaultSqliteDatabasePath is the default path under which we store
// the SQLite database file.
defaultSqliteDatabasePath = filepath.Join(
LoopDirBase, DefaultNetwork, defaultSqliteDatabaseFileName,
)
defaultMaxLogFiles = 3
defaultMaxLogFileSize = 10
defaultLoopOutMaxParts = uint32(5)
@ -47,6 +56,12 @@ var (
// TLS key.
DefaultTLSKeyFilename = "tls.key"
// DatabaseBackendSqlite is the name of the SQLite database backend.
DatabaseBackendSqlite = "sqlite"
// DatabaseBackendPostgres is the name of the Postgres database backend.
DatabaseBackendPostgres = "postgres"
defaultSelfSignedOrganization = "loop autogenerated cert"
// defaultLndMacaroon is the default macaroon file we use if the old,
@ -127,6 +142,10 @@ type Config struct {
ConfigFile string `long:"configfile" description:"Path to configuration file."`
DataDir string `long:"datadir" description:"Directory for loopdb."`
DatabaseBackend string `long:"databasebackend" description:"The database backend to use for storing all asset related data." choice:"sqlite" choice:"postgres"`
Sqlite *loopdb.SqliteConfig `group:"sqlite" namespace:"sqlite"`
Postgres *loopdb.PostgresConfig `group:"postgres" namespace:"postgres"`
TLSCertPath string `long:"tlscertpath" description:"Path to write the TLS certificate for loop's RPC and REST services."`
TLSKeyPath string `long:"tlskeypath" description:"Path to write the TLS private key for loop's RPC and REST services."`
TLSExtraIPs []string `long:"tlsextraip" description:"Adds an extra IP to the generated certificate."`
@ -172,9 +191,13 @@ func DefaultConfig() Config {
Server: &loopServerConfig{
NoTLS: false,
},
LoopDir: LoopDirBase,
ConfigFile: defaultConfigFile,
DataDir: LoopDirBase,
LoopDir: LoopDirBase,
ConfigFile: defaultConfigFile,
DataDir: LoopDirBase,
DatabaseBackend: DatabaseBackendSqlite,
Sqlite: &loopdb.SqliteConfig{
DatabaseFileName: defaultSqliteDatabasePath,
},
LogDir: defaultLogDir,
MaxLogFiles: defaultMaxLogFiles,
MaxLogFileSize: defaultMaxLogFileSize,
@ -276,6 +299,14 @@ func Validate(cfg *Config) error {
)
}
// We'll also update the database file location as well, if it wasn't
// set.
if cfg.Sqlite.DatabaseFileName == defaultSqliteDatabasePath {
cfg.Sqlite.DatabaseFileName = filepath.Join(
cfg.DataDir, defaultSqliteDatabaseFileName,
)
}
// If either of these directories do not exist, create them.
if err := os.MkdirAll(cfg.DataDir, os.ModePerm); err != nil {
return err

@ -370,8 +370,24 @@ func (d *Daemon) initialize(withMacaroonService bool) error {
}
}
// Both the client RPC server and the swap server client should
// stop on main context cancel. So we create it early and pass it down.
d.mainCtx, d.mainCtxCancel = context.WithCancel(context.Background())
log.Infof("Swap server address: %v", d.cfg.Server.Host)
// Check if we need to migrate the database.
if needSqlMigration(d.cfg) {
log.Infof("Boltdb found, running migration")
err := migrateBoltdb(d.mainCtx, d.cfg)
if err != nil {
return fmt.Errorf("unable to migrate boltdb: %v", err)
}
log.Infof("Successfully migrated boltdb")
}
// Create an instance of the loop client library.
swapclient, clientCleanup, err := getClient(d.cfg, &d.lnd.LndServices)
if err != nil {
@ -379,10 +395,6 @@ func (d *Daemon) initialize(withMacaroonService bool) error {
}
d.clientCleanup = clientCleanup
// Both the client RPC server and the swap server client should
// stop on main context cancel. So we create it early and pass it down.
d.mainCtx, d.mainCtxCancel = context.WithCancel(context.Background())
// Add our debug permissions to our main set of required permissions
// if compiled in.
for endpoint, perm := range debugRequiredPermissions {
@ -450,7 +462,7 @@ func (d *Daemon) initialize(withMacaroonService bool) error {
}
// Retrieve all currently existing swaps from the database.
swapsList, err := d.impl.FetchSwaps()
swapsList, err := d.impl.FetchSwaps(d.mainCtx)
if err != nil {
if d.macaroonService == nil {
cleanupMacaroonStore()

@ -0,0 +1,92 @@
package loopd
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/lightninglabs/lndclient"
"github.com/lightninglabs/loop/loopdb"
"github.com/lightningnetwork/lnd/lnrpc"
)
// migrateBoltdb migrates the contents of the legacy boltdb swap store
// into the SQL backend (sqlite or postgres) selected by the config. On
// success the old bolt db file is renamed to loop.db.bk so the
// migration is not attempted again on the next startup.
func migrateBoltdb(ctx context.Context, cfg *Config) error {
	// First get the chain params.
	chainParams, err := lndclient.Network(cfg.Network).ChainParams()
	if err != nil {
		return err
	}

	// Next open the bolt db we are migrating from.
	boltdb, err := loopdb.NewBoltSwapStore(cfg.DataDir, chainParams)
	if err != nil {
		return err
	}
	defer boltdb.Close()

	// Open the target SQL store based on the configured backend.
	var db loopdb.SwapStore
	switch cfg.DatabaseBackend {
	case DatabaseBackendSqlite:
		log.Infof("Opening sqlite3 database at: %v",
			cfg.Sqlite.DatabaseFileName)
		db, err = loopdb.NewSqliteStore(
			cfg.Sqlite, chainParams,
		)

	case DatabaseBackendPostgres:
		log.Infof("Opening postgres database at: %v",
			cfg.Postgres.DSN(true))
		db, err = loopdb.NewPostgresStore(
			cfg.Postgres, chainParams,
		)

	default:
		return fmt.Errorf("unknown database backend: %s",
			cfg.DatabaseBackend)
	}
	if err != nil {
		return fmt.Errorf("unable to open database: %w", err)
	}
	defer db.Close()

	// Create a new migrator manager.
	migrator := loopdb.NewMigratorManager(boltdb, db)

	// Run the migration.
	err = migrator.RunMigrations(ctx)
	if err != nil {
		return err
	}

	// If the migration was successful we'll rename the bolt db to
	// loop.db.bk.
	//
	// NOTE(review): the bolt db is still open here (its deferred Close
	// only runs on return); renaming an open file is fine on unix but
	// may fail on Windows — confirm supported platforms.
	err = os.Rename(
		filepath.Join(cfg.DataDir, "loop.db"),
		filepath.Join(cfg.DataDir, "loop.db.bk"),
	)
	if err != nil {
		return fmt.Errorf("unable to rename old database: %w", err)
	}

	return nil
}
// needSqlMigration reports whether a legacy boltdb exists at its
// default location inside the data directory, meaning its contents
// still need to be migrated to the configured SQL backend.
func needSqlMigration(cfg *Config) bool {
	// Without a data directory there can be no bolt db to migrate.
	if !lnrpc.FileExists(cfg.DataDir) {
		return false
	}

	// Migration is only needed when the legacy bolt db file itself is
	// present.
	return lnrpc.FileExists(filepath.Join(cfg.DataDir, "loop.db"))
}

@ -741,11 +741,11 @@ func (s *swapClientServer) GetLsatTokens(ctx context.Context,
// GetInfo returns basic information about the loop daemon and details to swaps
// from the swap store.
func (s *swapClientServer) GetInfo(_ context.Context,
func (s *swapClientServer) GetInfo(ctx context.Context,
_ *clientrpc.GetInfoRequest) (*clientrpc.GetInfoResponse, error) {
// Fetch loop-outs from the loop db.
outSwaps, err := s.impl.Store.FetchLoopOutSwaps()
outSwaps, err := s.impl.Store.FetchLoopOutSwaps(ctx)
if err != nil {
return nil, err
}
@ -772,7 +772,7 @@ func (s *swapClientServer) GetInfo(_ context.Context,
}
// Fetch loop-ins from the loop db.
inSwaps, err := s.impl.Store.FetchLoopInSwaps()
inSwaps, err := s.impl.Store.FetchLoopInSwaps(ctx)
if err != nil {
return nil, err
}

@ -2,34 +2,67 @@ package loopd
import (
"context"
"fmt"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightninglabs/lndclient"
"github.com/lightninglabs/loop"
"github.com/lightninglabs/loop/liquidity"
"github.com/lightninglabs/loop/loopdb"
"github.com/lightninglabs/loop/swap"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/ticker"
)
// getClient returns an instance of the swap client.
func getClient(config *Config, lnd *lndclient.LndServices) (*loop.Client,
func getClient(cfg *Config, lnd *lndclient.LndServices) (*loop.Client,
func(), error) {
clientConfig := &loop.ClientConfig{
ServerAddress: config.Server.Host,
ProxyAddress: config.Server.Proxy,
SwapServerNoTLS: config.Server.NoTLS,
TLSPathServer: config.Server.TLSPath,
ServerAddress: cfg.Server.Host,
ProxyAddress: cfg.Server.Proxy,
SwapServerNoTLS: cfg.Server.NoTLS,
TLSPathServer: cfg.Server.TLSPath,
Lnd: lnd,
MaxLsatCost: btcutil.Amount(config.MaxLSATCost),
MaxLsatFee: btcutil.Amount(config.MaxLSATFee),
LoopOutMaxParts: config.LoopOutMaxParts,
TotalPaymentTimeout: config.TotalPaymentTimeout,
MaxPaymentRetries: config.MaxPaymentRetries,
MaxLsatCost: btcutil.Amount(cfg.MaxLSATCost),
MaxLsatFee: btcutil.Amount(cfg.MaxLSATFee),
LoopOutMaxParts: cfg.LoopOutMaxParts,
TotalPaymentTimeout: cfg.TotalPaymentTimeout,
MaxPaymentRetries: cfg.MaxPaymentRetries,
}
swapClient, cleanUp, err := loop.NewClient(config.DataDir, clientConfig)
// Now that we know where the database will live, we'll go ahead and
// open up the default implementation of it.
var (
db loopdb.SwapStore
err error
)
switch cfg.DatabaseBackend {
case DatabaseBackendSqlite:
log.Infof("Opening sqlite3 database at: %v",
cfg.Sqlite.DatabaseFileName)
db, err = loopdb.NewSqliteStore(
cfg.Sqlite, clientConfig.Lnd.ChainParams,
)
case DatabaseBackendPostgres:
log.Infof("Opening postgres database at: %v",
cfg.Postgres.DSN(true))
db, err = loopdb.NewPostgresStore(
cfg.Postgres, clientConfig.Lnd.ChainParams,
)
default:
return nil, nil, fmt.Errorf("unknown database backend: %s",
cfg.DatabaseBackend)
}
if err != nil {
return nil, nil, fmt.Errorf("unable to open database: %v", err)
}
swapClient, cleanUp, err := loop.NewClient(
cfg.DataDir, db, clientConfig,
)
if err != nil {
return nil, nil, err
}

@ -1,6 +1,7 @@
package loopd
import (
"context"
"fmt"
"github.com/btcsuite/btcd/chaincfg"
@ -42,7 +43,7 @@ func view(config *Config, lisCfg *ListenerCfg) error {
}
func viewOut(swapClient *loop.Client, chainParams *chaincfg.Params) error {
swaps, err := swapClient.Store.FetchLoopOutSwaps()
swaps, err := swapClient.Store.FetchLoopOutSwaps(context.Background())
if err != nil {
return err
}
@ -91,7 +92,7 @@ func viewOut(swapClient *loop.Client, chainParams *chaincfg.Params) error {
}
func viewIn(swapClient *loop.Client, chainParams *chaincfg.Params) error {
swaps, err := swapClient.Store.FetchLoopInSwaps()
swaps, err := swapClient.Store.FetchLoopInSwaps(context.Background())
if err != nil {
return err
}

@ -1,6 +1,7 @@
package loopdb
import (
"context"
"time"
"github.com/lightningnetwork/lnd/lntypes"
@ -10,48 +11,69 @@ import (
// houses information for all pending completed/failed swaps.
type SwapStore interface {
// FetchLoopOutSwaps returns all swaps currently in the store.
FetchLoopOutSwaps() ([]*LoopOut, error)
FetchLoopOutSwaps(ctx context.Context) ([]*LoopOut, error)
// FetchLoopOutSwap returns the loop out swap with the given hash.
FetchLoopOutSwap(hash lntypes.Hash) (*LoopOut, error)
FetchLoopOutSwap(ctx context.Context, hash lntypes.Hash) (*LoopOut, error)
// CreateLoopOut adds an initiated swap to the store.
CreateLoopOut(hash lntypes.Hash, swap *LoopOutContract) error
CreateLoopOut(ctx context.Context, hash lntypes.Hash,
swap *LoopOutContract) error
// BatchCreateLoopOut creates a batch of loop out swaps to the store.
BatchCreateLoopOut(ctx context.Context,
swaps map[lntypes.Hash]*LoopOutContract) error
// UpdateLoopOut stores a new event for a target loop out swap. This
// appends to the event log for a particular swap as it goes through
// the various stages in its lifetime.
UpdateLoopOut(hash lntypes.Hash, time time.Time,
UpdateLoopOut(ctx context.Context, hash lntypes.Hash, time time.Time,
state SwapStateData) error
// FetchLoopInSwaps returns all swaps currently in the store.
FetchLoopInSwaps() ([]*LoopIn, error)
FetchLoopInSwaps(ctx context.Context) ([]*LoopIn, error)
// CreateLoopIn adds an initiated swap to the store.
CreateLoopIn(hash lntypes.Hash, swap *LoopInContract) error
CreateLoopIn(ctx context.Context, hash lntypes.Hash,
swap *LoopInContract) error
// BatchCreateLoopIn creates a batch of loop in swaps to the store.
BatchCreateLoopIn(ctx context.Context,
swaps map[lntypes.Hash]*LoopInContract) error
// UpdateLoopIn stores a new event for a target loop in swap. This
// appends to the event log for a particular swap as it goes through
// the various stages in its lifetime.
UpdateLoopIn(hash lntypes.Hash, time time.Time,
UpdateLoopIn(ctx context.Context, hash lntypes.Hash, time time.Time,
state SwapStateData) error
// BatchInsertUpdate inserts batch of swap updates to the store.
BatchInsertUpdate(ctx context.Context,
updateData map[lntypes.Hash][]BatchInsertUpdateData) error
// PutLiquidityParams writes the serialized `manager.Parameters` bytes
// into the bucket.
//
// NOTE: it's the caller's responsibility to encode the param. Atm,
// it's encoding using the proto package's `Marshal` method.
PutLiquidityParams(params []byte) error
PutLiquidityParams(ctx context.Context, params []byte) error
// FetchLiquidityParams reads the serialized `manager.Parameters` bytes
// from the bucket.
//
// NOTE: it's the caller's responsibility to decode the param. Atm,
// it's decoding using the proto package's `Unmarshal` method.
FetchLiquidityParams() ([]byte, error)
FetchLiquidityParams(ctx context.Context) ([]byte, error)
// Close closes the underlying database.
Close() error
}
// BatchInsertUpdateData is a struct that holds the data for the
// BatchInsertUpdate function.
type BatchInsertUpdateData struct {
Time time.Time
State SwapStateData
}
// TODO(roasbeef): back up method in interface?

@ -0,0 +1,426 @@
package loopdb
import (
"bytes"
"context"
"errors"
"fmt"
"sort"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
var (
	// ErrLoopOutsNotEqual signals that the loop outs in the source and
	// destination store differ after a migration.
	//
	// NOTE(review): not referenced in this file; the check helpers
	// return NewMigrationError instead — confirm external callers.
	ErrLoopOutsNotEqual = errors.New("loop outs not equal")

	// ErrLoopInsNotEqual signals that the loop ins in the source and
	// destination store differ after a migration.
	ErrLoopInsNotEqual = errors.New("loop ins not equal")

	// ErrLiquidityParamsNotEqual signals that the serialized liquidity
	// parameters differ between the source and destination store.
	ErrLiquidityParamsNotEqual = errors.New("liquidity params not equal")
)
// MigratorManager is a struct that handles migrating data from one SwapStore
// to another.
type MigratorManager struct {
	// fromStore is the source store all swap data is read from.
	fromStore SwapStore

	// toStore is the destination store the swap data is written to.
	toStore SwapStore
}

// NewMigratorManager creates a new MigratorManager that migrates all swap
// data from fromStore to toStore.
func NewMigratorManager(fromStore SwapStore,
	toStore SwapStore) *MigratorManager {

	return &MigratorManager{
		fromStore: fromStore,
		toStore:   toStore,
	}
}
// RunMigrations runs the migrations from the fromStore to the toStore. Each
// data set (loop outs, loop ins, liquidity parameters) is first copied over
// and then verified against the source store before moving on.
func (m *MigratorManager) RunMigrations(ctx context.Context) error {
	// The migration is a fixed sequence of copy-then-verify steps; a
	// failure in any step aborts the whole run.
	steps := []struct {
		logMsg string
		run    func(context.Context) error
	}{
		{"Migrating loop outs...", m.migrateLoopOuts},
		{"Checking loop outs...", m.checkLoopOuts},
		{"Migrating loop ins...", m.migrateLoopIns},
		{"Checking loop ins...", m.checkLoopIns},
		{"Migrating liquidity parameters...", m.migrateLiquidityParams},
		{"Checking liquidity parameters...", m.checkLiquidityParams},
	}

	for _, step := range steps {
		log.Infof(step.logMsg)

		if err := step.run(ctx); err != nil {
			return err
		}
	}

	log.Infof("Migrations complete!")

	return nil
}
// migrateLoopOuts migrates all loop outs from the fromStore to the toStore.
func (m *MigratorManager) migrateLoopOuts(ctx context.Context) error {
	// Fetch all loop outs from the fromStore.
	loopOuts, err := m.fromStore.FetchLoopOutSwaps(ctx)
	if err != nil {
		return err
	}

	swapMap := make(map[lntypes.Hash]*LoopOutContract)
	updateMap := make(map[lntypes.Hash][]BatchInsertUpdateData)

	// For each loop out, create a new loop out in the toStore.
	for _, loopOut := range loopOuts {
		swapMap[loopOut.Hash] = loopOut.Contract

		// Collect the swap's event history so it can be replayed
		// into the destination store in one batch below.
		for _, event := range loopOut.Events {
			updateMap[loopOut.Hash] = append(
				updateMap[loopOut.Hash],
				BatchInsertUpdateData{
					Time:  event.Time,
					State: event.SwapStateData,
				},
			)
		}
	}

	// Create the loop outs in the toStore.
	err = m.toStore.BatchCreateLoopOut(ctx, swapMap)
	if err != nil {
		return err
	}

	// Update the loop outs in the toStore.
	err = m.toStore.BatchInsertUpdate(
		ctx, updateMap,
	)
	if err != nil {
		return err
	}

	return nil
}
// migrateLoopIns migrates all loop ins from the fromStore to the toStore.
func (m *MigratorManager) migrateLoopIns(ctx context.Context) error {
	// Fetch all loop ins from the fromStore.
	loopIns, err := m.fromStore.FetchLoopInSwaps(ctx)
	if err != nil {
		return err
	}

	swapMap := make(map[lntypes.Hash]*LoopInContract)
	updateMap := make(map[lntypes.Hash][]BatchInsertUpdateData)

	// For each loop in, create a new loop in in the toStore.
	for _, loopIn := range loopIns {
		swapMap[loopIn.Hash] = loopIn.Contract

		// Collect the swap's event history so it can be replayed
		// into the destination store in one batch below.
		for _, event := range loopIn.Events {
			updateMap[loopIn.Hash] = append(
				updateMap[loopIn.Hash],
				BatchInsertUpdateData{
					Time:  event.Time,
					State: event.SwapStateData,
				},
			)
		}
	}

	// Create the loop ins in the toStore.
	err = m.toStore.BatchCreateLoopIn(ctx, swapMap)
	if err != nil {
		return err
	}

	// Update the loop ins in the toStore.
	err = m.toStore.BatchInsertUpdate(
		ctx, updateMap,
	)
	if err != nil {
		return err
	}

	return nil
}
// migrateLiquidityParams migrates the liquidity parameters from the fromStore
// to the toStore.
func (m *MigratorManager) migrateLiquidityParams(ctx context.Context) error {
	// Read the serialized parameters from the source store.
	params, err := m.fromStore.FetchLiquidityParams(ctx)
	if err != nil {
		return err
	}

	// Write the raw bytes unchanged into the destination store.
	return m.toStore.PutLiquidityParams(ctx, params)
}
// checkLoopOuts checks that all loop outs in the toStore are the exact same as
// the loop outs in the fromStore.
func (m *MigratorManager) checkLoopOuts(ctx context.Context) error {
	// Fetch all loop outs from the fromStore.
	fromLoopOuts, err := m.fromStore.FetchLoopOutSwaps(ctx)
	if err != nil {
		return err
	}

	// Fetch all loop outs from the toStore.
	toLoopOuts, err := m.toStore.FetchLoopOutSwaps(ctx)
	if err != nil {
		return err
	}

	// Check that the number of loop outs is the same.
	if len(fromLoopOuts) != len(toLoopOuts) {
		return NewMigrationError(
			fmt.Errorf("from: %d, to: %d", len(fromLoopOuts),
				len(toLoopOuts)),
		)
	}

	// Sort both list of loop outs by hash so they can be compared
	// pairwise by index.
	sortLoopOuts(fromLoopOuts)
	sortLoopOuts(toLoopOuts)

	// Check that each loop out is the same.
	for i, fromLoopOut := range fromLoopOuts {
		toLoopOut := toLoopOuts[i]

		// First normalize the stored timestamps (backends may differ
		// in timezone/precision representation), then deep-compare.
		err := equalizeLoopOut(fromLoopOut, toLoopOut)
		if err != nil {
			return NewMigrationError(err)
		}

		err = equalValues(fromLoopOut, toLoopOut)
		if err != nil {
			return NewMigrationError(err)
		}
	}

	return nil
}
// checkLoopIns checks that all loop ins in the toStore are the exact same as
// the loop ins in the fromStore.
func (m *MigratorManager) checkLoopIns(ctx context.Context) error {
	// Fetch all loop ins from the fromStore.
	fromLoopIns, err := m.fromStore.FetchLoopInSwaps(ctx)
	if err != nil {
		return err
	}

	// Fetch all loop ins from the toStore.
	toLoopIns, err := m.toStore.FetchLoopInSwaps(ctx)
	if err != nil {
		return err
	}

	// Check that the number of loop ins is the same.
	if len(fromLoopIns) != len(toLoopIns) {
		return NewMigrationError(
			fmt.Errorf("from: %d, to: %d", len(fromLoopIns),
				len(toLoopIns)),
		)
	}

	// Sort both list of loop ins by hash so they can be compared
	// pairwise by index.
	sortLoopIns(fromLoopIns)
	sortLoopIns(toLoopIns)

	// Check that each loop in is the same.
	for i, fromLoopIn := range fromLoopIns {
		toLoopIn := toLoopIns[i]

		// First normalize the stored timestamps (backends may differ
		// in timezone/precision representation), then deep-compare.
		err := equalizeLoopIns(fromLoopIn, toLoopIn)
		if err != nil {
			return NewMigrationError(err)
		}

		err = equalValues(fromLoopIn, toLoopIn)
		if err != nil {
			return NewMigrationError(err)
		}
	}

	return nil
}
// checkLiquidityParams checks that the liquidity parameters in the toStore are
// the exact same as the liquidity parameters in the fromStore.
func (m *MigratorManager) checkLiquidityParams(ctx context.Context) error {
	// Read the serialized parameters from both stores.
	fromParams, err := m.fromStore.FetchLiquidityParams(ctx)
	if err != nil {
		return err
	}

	toParams, err := m.toStore.FetchLiquidityParams(ctx)
	if err != nil {
		return err
	}

	// The raw serialized bytes must match exactly.
	if bytes.Equal(fromParams, toParams) {
		return nil
	}

	return NewMigrationError(
		fmt.Errorf("from: %v, to: %v", fromParams, toParams),
	)
}
// equalizeLoopOut checks that the loop outs have the same time stored.
// Due to some weirdness with timezones between boltdb and sqlite we then
// set the times to the same value.
func equalizeLoopOut(fromLoopOut, toLoopOut *LoopOut) error {
	// Compare at second granularity, since the two backends may store
	// different sub-second precision / timezone representations.
	if fromLoopOut.Contract.InitiationTime.Unix() !=
		toLoopOut.Contract.InitiationTime.Unix() {

		return fmt.Errorf("initiation time mismatch")
	}

	// Overwrite with the source value so a later deep-equal comparison
	// doesn't trip over representation differences.
	toLoopOut.Contract.InitiationTime = fromLoopOut.Contract.InitiationTime

	if fromLoopOut.Contract.SwapPublicationDeadline.Unix() !=
		toLoopOut.Contract.SwapPublicationDeadline.Unix() {

		return fmt.Errorf("swap publication deadline mismatch")
	}

	toLoopOut.Contract.
		SwapPublicationDeadline = fromLoopOut.Contract.SwapPublicationDeadline

	// Do the same normalization for every recorded swap event.
	for i, event := range fromLoopOut.Events {
		if event.Time.Unix() != toLoopOut.Events[i].Time.Unix() {
			return fmt.Errorf("event time mismatch")
		}

		toLoopOut.Events[i].Time = event.Time
	}

	return nil
}
// equalizeLoopIns checks that the loop ins have the same times stored at
// second granularity and then normalizes the destination's timestamps to the
// source's values, mirroring equalizeLoopOut.
func equalizeLoopIns(fromLoopIn, toLoopIn *LoopIn) error {
	if fromLoopIn.Contract.InitiationTime.Unix() !=
		toLoopIn.Contract.InitiationTime.Unix() {

		return fmt.Errorf("initiation time mismatch")
	}

	// Overwrite with the source value so a later deep-equal comparison
	// doesn't trip over representation differences.
	toLoopIn.Contract.InitiationTime = fromLoopIn.Contract.InitiationTime

	// Do the same normalization for every recorded swap event.
	for i, event := range fromLoopIn.Events {
		if event.Time.Unix() != toLoopIn.Events[i].Time.Unix() {
			return fmt.Errorf("event time mismatch")
		}

		toLoopIn.Events[i].Time = event.Time
	}

	return nil
}
// sortLoopOuts sorts a list of loop outs by hash, in ascending byte order.
func sortLoopOuts(loopOuts []*LoopOut) {
	byHash := func(i, j int) bool {
		return bytes.Compare(
			loopOuts[i].Hash[:], loopOuts[j].Hash[:],
		) < 0
	}

	sort.Slice(loopOuts, byHash)
}
// sortLoopIns sorts a list of loop ins by hash, in ascending byte order.
func sortLoopIns(loopIns []*LoopIn) {
	byHash := func(i, j int) bool {
		return bytes.Compare(
			loopIns[i].Hash[:], loopIns[j].Hash[:],
		) < 0
	}

	sort.Slice(loopIns, byHash)
}
type migrationError struct {
Err error
}
func (e *migrationError) Error() string {
return fmt.Sprintf("migrator error: %v", e.Err)
}
func (e *migrationError) Unwrap() error {
return e.Err
}
func (e *migrationError) Is(target error) bool {
_, ok := target.(*migrationError)
return ok
}
func NewMigrationError(err error) *migrationError {
return &migrationError{Err: err}
}
// equalValues deep-compares src and dst using testify's require package
// against a mock testing.T, returning the recorded failure as an error
// instead of failing a real test.
func equalValues(src interface{}, dst interface{}) error {
	mt := &mockTesting{}

	require.EqualValues(mt, src, dst)

	if mt.fail || mt.failNow {
		// Expand the recorded args variadically so the message is
		// formatted as testify intended; passing the slice as a
		// single value would render it as one %!(EXTRA ...) blob.
		return fmt.Errorf(mt.format, mt.args...)
	}

	return nil
}
// elementsMatch checks that src and dst contain the same elements (ignoring
// order) using testify's require package against a mock testing.T, returning
// the recorded failure as an error instead of failing a real test.
func elementsMatch(src interface{}, dst interface{}) error {
	mt := &mockTesting{}

	require.ElementsMatch(mt, src, dst)

	if mt.fail || mt.failNow {
		// Expand the recorded args variadically so the message is
		// formatted as testify intended; passing the slice as a
		// single value would render it as one %!(EXTRA ...) blob.
		return fmt.Errorf(mt.format, mt.args...)
	}

	return nil
}
// mockTesting is a minimal stand-in for testify's TestingT interface that
// records failures instead of aborting a real test run.
type mockTesting struct {
	// failNow is set when FailNow is called (require-style fatal).
	failNow bool

	// fail is set when Errorf is called (assert-style failure).
	fail bool

	// format and args record the most recent failure message.
	format string
	args   []interface{}
}

// FailNow marks the mock as fatally failed. Part of require.TestingT.
func (m *mockTesting) FailNow() {
	m.failNow = true
}

// Errorf records a failure and its message. Part of require.TestingT.
func (m *mockTesting) Errorf(format string, args ...interface{}) {
	// Mark the mock as failed: previously the fail flag was never set
	// by any method, making the callers' `mt.fail` checks dead code.
	m.fail = true
	m.format = format
	m.args = args
}

@ -0,0 +1,38 @@
//go:build test_migration
// +build test_migration
package loopdb
import (
"context"
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/stretchr/testify/require"
)
var (
	// boltDbFile is the path to the on-disk bolt database the manual
	// migration test reads from.
	boltDbFile = "../loopdb-kon"

	// addr is a mainnet taproot address used as test data.
	addr = "bc1p4g493qcmzt79r87363fvyvq5sfz58q5gsz74g2c4ejqy5xnpcpesh3yq2y"

	// addrBtc is the decoded form of addr. The decode error is
	// deliberately discarded since addr is a known-good constant.
	addrBtc, _ = btcutil.DecodeAddress(addr, &chaincfg.MainNetParams)
)
// TestMigrationFromOnDiskBoltdb tests migrating from an on-disk boltdb to an
// sqlite database. It is guarded by the test_migration build tag since it
// requires a pre-existing bolt database file at boltDbFile.
func TestMigrationFromOnDiskBoltdb(t *testing.T) {
	ctxb := context.Background()

	// Open a boltdbStore from the on-disk file.
	boltDb, err := NewBoltSwapStore(boltDbFile, &chaincfg.TestNet3Params)
	require.NoError(t, err)

	// Create a new sqlite store for testing.
	sqlDB := NewTestDB(t)

	migrator := NewMigratorManager(boltDb, sqlDB)

	// Run the migration.
	err = migrator.RunMigrations(ctxb)
	require.NoError(t, err)
}

@ -1,6 +1,7 @@
package loopdb
import (
"context"
"io/ioutil"
"os"
"path/filepath"
@ -44,6 +45,8 @@ func TestMigrationUpdates(t *testing.T) {
},
}
ctxb := context.Background()
// Restore a legacy database.
tempDirName, err := ioutil.TempDir("", "clientstore")
require.NoError(t, err)
@ -69,7 +72,7 @@ func TestMigrationUpdates(t *testing.T) {
// Fetch the legacy loop out swap and assert that the updates are still
// there.
outSwaps, err := store.FetchLoopOutSwaps()
outSwaps, err := store.FetchLoopOutSwaps(ctxb)
require.NoError(t, err)
outSwap := outSwaps[0]
@ -78,7 +81,7 @@ func TestMigrationUpdates(t *testing.T) {
// Fetch the legacy loop in swap and assert that the updates are still
// there.
inSwaps, err := store.FetchLoopInSwaps()
inSwaps, err := store.FetchLoopInSwaps(ctxb)
require.NoError(t, err)
inSwap := inSwaps[0]

@ -0,0 +1,147 @@
package loopdb
import (
"bytes"
"io"
"io/fs"
"net/http"
"strings"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database"
"github.com/golang-migrate/migrate/v4/source/httpfs"
)
// applyMigrations executes all database migration files found in the given
// file system under the given path, using the passed database driver and
// database name.
func applyMigrations(fs fs.FS, driver database.Driver, path,
	dbName string) error {

	// The migrate library can't consume a raw fs.FS directly, so wrap
	// the embedded file system in an http.FileSystem adapter first.
	src, err := httpfs.New(http.FS(fs), path)
	if err != nil {
		return err
	}

	// Build the migration runner from the wrapped source and the open
	// database driver.
	migration, err := migrate.NewWithInstance(
		"migrations", src, dbName, driver,
	)
	if err != nil {
		return err
	}

	// Apply all pending migrations; an already up-to-date database is
	// not an error.
	if err := migration.Up(); err != nil && err != migrate.ErrNoChange {
		return err
	}

	return nil
}
// replacerFS is an implementation of a fs.FS virtual file system that wraps an
// existing file system but does a search-and-replace operation on each file
// when it is opened.
type replacerFS struct {
	parentFS fs.FS
	replaces map[string]string
}

// A compile-time assertion to make sure replacerFS implements the fs.FS
// interface.
var _ fs.FS = (*replacerFS)(nil)

// newReplacerFS creates a new replacer file system, wrapping the given parent
// virtual file system. Each file within the file system is undergoing a
// search-and-replace operation when it is opened, using the given map where
// the key denotes the search term and the value the term to replace each
// occurrence with.
func newReplacerFS(parent fs.FS, replaces map[string]string) *replacerFS {
	return &replacerFS{
		parentFS: parent,
		replaces: replaces,
	}
}

// Open opens a file in the virtual file system.
//
// NOTE: This is part of the fs.FS interface.
func (t *replacerFS) Open(name string) (fs.File, error) {
	f, err := t.parentFS.Open(name)
	if err != nil {
		return nil, err
	}

	stat, err := f.Stat()
	if err != nil {
		return nil, err
	}

	// Directories are passed through untouched; only regular files get
	// the search-and-replace treatment.
	if stat.IsDir() {
		return f, nil
	}

	return newReplacerFile(f, t.replaces)
}
type replacerFile struct {
parentFile fs.File
buf bytes.Buffer
}
// A compile-time assertion to make sure replacerFile implements the fs.File
// interface.
var _ fs.File = (*replacerFile)(nil)
func newReplacerFile(parent fs.File, replaces map[string]string) (*replacerFile,
error) {
content, err := io.ReadAll(parent)
if err != nil {
return nil, err
}
contentStr := string(content)
for from, to := range replaces {
contentStr = strings.Replace(contentStr, from, to, -1)
}
var buf bytes.Buffer
_, err = buf.WriteString(contentStr)
if err != nil {
return nil, err
}
return &replacerFile{
parentFile: parent,
buf: buf,
}, nil
}
// Stat returns statistics/info about the file.
//
// NOTE: This is part of the fs.File interface.
func (t *replacerFile) Stat() (fs.FileInfo, error) {
return t.parentFile.Stat()
}
// Read reads as many bytes as possible from the file into the given slice.
//
// NOTE: This is part of the fs.File interface.
func (t *replacerFile) Read(bytes []byte) (int, error) {
return t.buf.Read(bytes)
}
// Close closes the underlying file.
//
// NOTE: This is part of the fs.File interface.
func (t *replacerFile) Close() error {
// We already fully read and then closed the file when creating this
// instance, so there's nothing to do for us here.
return nil
}

@ -0,0 +1,135 @@
package loopdb
import (
"database/sql"
"fmt"
"testing"
"time"
"github.com/btcsuite/btcd/chaincfg"
postgres_migrate "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/lightninglabs/loop/loopdb/sqlc"
"github.com/stretchr/testify/require"
)
const (
	// dsnTemplate is the format string a Postgres connection DSN is
	// built from: user, password, host, port, db name, ssl mode.
	dsnTemplate = "postgres://%v:%v@%v:%d/%v?sslmode=%v"
)

var (
	// DefaultPostgresFixtureLifetime is the default maximum time a Postgres
	// test fixture is being kept alive. After that time the docker
	// container will be terminated forcefully, even if the tests aren't
	// fully executed yet. So this time needs to be chosen correctly to be
	// longer than the longest expected individual test run time.
	DefaultPostgresFixtureLifetime = 10 * time.Minute
)

// PostgresConfig holds the postgres database configuration.
type PostgresConfig struct {
	SkipMigrations bool `long:"skipmigrations" description:"Skip applying migrations on startup."`
	Host string `long:"host" description:"Database server hostname."`
	Port int `long:"port" description:"Database server port."`
	User string `long:"user" description:"Database user."`
	Password string `long:"password" description:"Database user's password."`
	DBName string `long:"dbname" description:"Database name to use."`
	MaxOpenConnections int32 `long:"maxconnections" description:"Max open connections to keep alive to the database server."`
	RequireSSL bool `long:"requiressl" description:"Whether to require using SSL (mode: require) when connecting to the server."`
}

// DSN returns the dns to connect to the database.
func (s *PostgresConfig) DSN(hidePassword bool) string {
	sslMode := "disable"
	if s.RequireSSL {
		sslMode = "require"
	}

	// Substitute a placeholder when the DSN is only used for logging.
	password := s.Password
	if hidePassword {
		password = "****"
	}

	return fmt.Sprintf(
		dsnTemplate, s.User, password, s.Host, s.Port, s.DBName,
		sslMode,
	)
}
// PostgresStore is a database store implementation that uses a Postgres
// backend.
type PostgresStore struct {
	// cfg is the postgres connection config the store was opened with.
	cfg *PostgresConfig

	*BaseDB
}

// NewPostgresStore creates a new store that is backed by a Postgres database
// backend.
func NewPostgresStore(cfg *PostgresConfig,
	network *chaincfg.Params) (*PostgresStore, error) {

	// Only log the DSN with the password masked out.
	log.Infof("Using SQL database '%s'", cfg.DSN(true))

	rawDb, err := sql.Open("pgx", cfg.DSN(false))
	if err != nil {
		return nil, err
	}

	if !cfg.SkipMigrations {
		// Now that the database is open, populate the database with
		// our set of schemas based on our embedded in-memory file
		// system.
		//
		// First, we'll need to open up a new migration instance for
		// our current target database: postgres.
		driver, err := postgres_migrate.WithInstance(
			rawDb, &postgres_migrate.Config{},
		)
		if err != nil {
			return nil, err
		}

		// The migration files are written with sqlite types; rewrite
		// the sqlite-specific type names to their postgres
		// equivalents before applying them.
		postgresFS := newReplacerFS(sqlSchemas, map[string]string{
			"BLOB":                "BYTEA",
			"INTEGER PRIMARY KEY": "SERIAL PRIMARY KEY",
		})

		err = applyMigrations(
			postgresFS, driver, "sqlc/migrations", cfg.DBName,
		)
		if err != nil {
			return nil, err
		}
	}

	queries := sqlc.New(rawDb)

	return &PostgresStore{
		cfg: cfg,
		BaseDB: &BaseDB{
			DB:      rawDb,
			Queries: queries,
			network: network,
		},
	}, nil
}
// NewTestPostgresDB is a helper function that creates a Postgres database for
// testing. It spins up a docker-backed Postgres fixture and tears it down
// when the test finishes.
func NewTestPostgresDB(t *testing.T) *PostgresStore {
	t.Helper()

	t.Logf("Creating new Postgres DB for testing")

	sqlFixture := NewTestPgFixture(t, DefaultPostgresFixtureLifetime)
	store, err := NewPostgresStore(
		sqlFixture.GetConfig(), &chaincfg.MainNetParams,
	)
	require.NoError(t, err)

	// Stop the docker container once the test (and everything using the
	// store) is done.
	t.Cleanup(func() {
		sqlFixture.TearDown(t)
	})

	return store
}

@ -0,0 +1,139 @@
package loopdb
import (
"context"
"database/sql"
"fmt"
"strconv"
"strings"
"testing"
"time"
_ "github.com/lib/pq"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/require"
)
const (
	// testPgUser/testPgPass/testPgDBName are the throwaway credentials
	// used for the dockerized test database.
	testPgUser   = "test"
	testPgPass   = "test"
	testPgDBName = "test"

	// PostgresTag is the docker image tag of the Postgres version the
	// fixture runs.
	PostgresTag = "11"
)

// TestPgFixture is a test fixture that starts a Postgres 11 instance in a
// docker container.
type TestPgFixture struct {
	// db is an open connection to the containerized database.
	db *sql.DB

	// pool and resource are the dockertest handles used to manage the
	// container's lifecycle.
	pool     *dockertest.Pool
	resource *dockertest.Resource

	// host and port are where the container's Postgres port is exposed.
	host string
	port int
}
// NewTestPgFixture constructs a new TestPgFixture starting up a docker
// container running Postgres 11. The started container will expire in after
// the passed duration.
func NewTestPgFixture(t *testing.T, expiry time.Duration) *TestPgFixture {
	// Use a sensible default on Windows (tcp/http) and linux/osx (socket)
	// by specifying an empty endpoint.
	pool, err := dockertest.NewPool("")
	require.NoError(t, err, "Could not connect to docker")

	// Pulls an image, creates a container based on it and runs it.
	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "postgres",
		Tag:        PostgresTag,
		Env: []string{
			fmt.Sprintf("POSTGRES_USER=%v", testPgUser),
			fmt.Sprintf("POSTGRES_PASSWORD=%v", testPgPass),
			fmt.Sprintf("POSTGRES_DB=%v", testPgDBName),
			"listen_addresses='*'",
		},
		Cmd: []string{
			"postgres",
			"-c", "log_statement=all",
			"-c", "log_destination=stderr",
		},
	}, func(config *docker.HostConfig) {
		// Set AutoRemove to true so that stopped container goes away
		// by itself.
		config.AutoRemove = true
		config.RestartPolicy = docker.RestartPolicy{Name: "no"}
	})
	require.NoError(t, err, "Could not start resource")

	// Docker maps the container's 5432 port to a random free host port;
	// split it back into host and port for the config.
	hostAndPort := resource.GetHostPort("5432/tcp")
	parts := strings.Split(hostAndPort, ":")
	host := parts[0]
	port, err := strconv.ParseInt(parts[1], 10, 64)
	require.NoError(t, err)

	fixture := &TestPgFixture{
		host: host,
		port: int(port),
	}
	databaseURL := fixture.GetDSN()
	log.Infof("Connecting to Postgres fixture: %v\n", databaseURL)

	// Tell docker to hard kill the container in "expiry" seconds.
	require.NoError(t, resource.Expire(uint(expiry.Seconds())))

	// Exponential backoff-retry, because the application in the container
	// might not be ready to accept connections yet.
	pool.MaxWait = 120 * time.Second

	var testDB *sql.DB
	err = pool.Retry(func() error {
		testDB, err = sql.Open("postgres", databaseURL)
		if err != nil {
			return err
		}
		return testDB.Ping()
	})
	require.NoError(t, err, "Could not connect to docker")

	// Now fill in the rest of the fixture.
	fixture.db = testDB
	fixture.pool = pool
	fixture.resource = resource

	return fixture
}
// GetDSN returns the DSN (Data Source Name) for the started Postgres node,
// including the plaintext password.
func (f *TestPgFixture) GetDSN() string {
	return f.GetConfig().DSN(false)
}

// GetConfig returns the full config of the Postgres node. Fields not set
// here (SkipMigrations, MaxOpenConnections) are left at their zero values.
func (f *TestPgFixture) GetConfig() *PostgresConfig {
	return &PostgresConfig{
		Host:       f.host,
		Port:       f.port,
		User:       testPgUser,
		Password:   testPgPass,
		DBName:     testPgDBName,
		RequireSSL: false,
	}
}
// TearDown stops the underlying docker container.
func (f *TestPgFixture) TearDown(t *testing.T) {
	err := f.pool.Purge(f.resource)
	require.NoError(t, err, "Could not purge resource")
}

// ClearDB clears the database by dropping and recreating the public schema,
// removing all tables and data while keeping the container running.
func (f *TestPgFixture) ClearDB(t *testing.T) {
	dbConn, err := sql.Open("postgres", f.GetDSN())
	require.NoError(t, err)

	_, err = dbConn.ExecContext(
		context.Background(),
		`DROP SCHEMA IF EXISTS public CASCADE;
		CREATE SCHEMA public;`,
	)
	require.NoError(t, err)
}

@ -0,0 +1,8 @@
package loopdb
import (
"embed"
)
//go:embed sqlc/migrations/*.up.sql
var sqlSchemas embed.FS

@ -0,0 +1,734 @@
package loopdb
import (
"context"
"database/sql"
"errors"
"strconv"
"strings"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightninglabs/loop/loopdb/sqlc"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/routing/route"
)
// FetchLoopOutSwaps returns all loop out swaps currently in the store,
// including their full update/event history.
func (s *BaseDB) FetchLoopOutSwaps(ctx context.Context) ([]*LoopOut,
	error) {

	var loopOuts []*LoopOut

	err := s.ExecTx(ctx, NewSqlReadOpts(), func(tx *sqlc.Queries) error {
		// Run all queries on the transaction's query object, so the
		// swaps and their updates are read from one consistent
		// snapshot (the previous code bypassed the tx via s.Queries).
		swaps, err := tx.GetLoopOutSwaps(ctx)
		if err != nil {
			return err
		}

		loopOuts = make([]*LoopOut, len(swaps))

		for i, swap := range swaps {
			// Fetch the event history for this swap so the
			// returned struct is fully populated.
			updates, err := tx.GetSwapUpdates(ctx, swap.SwapHash)
			if err != nil {
				return err
			}

			loopOut, err := s.convertLoopOutRow(
				sqlc.GetLoopOutSwapRow(swap), updates,
			)
			if err != nil {
				return err
			}

			loopOuts[i] = loopOut
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return loopOuts, nil
}
// FetchLoopOutSwap returns the loop out swap with the given hash, including
// its full update/event history.
func (s *BaseDB) FetchLoopOutSwap(ctx context.Context,
	hash lntypes.Hash) (*LoopOut, error) {

	var loopOut *LoopOut

	err := s.ExecTx(ctx, NewSqlReadOpts(), func(tx *sqlc.Queries) error {
		// Run both queries on the transaction's query object, so the
		// swap and its updates are read from one consistent snapshot
		// (the previous code bypassed the tx via s.Queries).
		swap, err := tx.GetLoopOutSwap(ctx, hash[:])
		if err != nil {
			return err
		}

		updates, err := tx.GetSwapUpdates(ctx, swap.SwapHash)
		if err != nil {
			return err
		}

		loopOut, err = s.convertLoopOutRow(
			swap, updates,
		)
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return loopOut, nil
}
// CreateLoopOut adds an initiated swap to the store. The swap, its htlc keys
// and its loop-out specific data are inserted in a single transaction.
func (s *BaseDB) CreateLoopOut(ctx context.Context, hash lntypes.Hash,
	swap *LoopOutContract) error {

	// NOTE(review): the zero-value options appear to request a writable
	// transaction — confirm against SqliteTxOptions' definition.
	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		insertArgs := loopToInsertArgs(
			hash, &swap.SwapContract,
		)

		// First we'll insert the swap itself.
		err := tx.InsertSwap(ctx, insertArgs)
		if err != nil {
			return err
		}

		htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
			hash, &swap.SwapContract,
		)

		// Next insert the htlc keys.
		err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
		if err != nil {
			return err
		}

		loopOutInsertArgs := loopOutToInsertArgs(hash, swap)

		// Next insert the loop out relevant data.
		err = tx.InsertLoopOut(ctx, loopOutInsertArgs)
		if err != nil {
			return err
		}

		return nil
	})
}
// BatchCreateLoopOut adds multiple initiated swaps to the store. All swaps,
// their htlc keys and loop-out specific data are inserted in one transaction.
func (s *BaseDB) BatchCreateLoopOut(ctx context.Context,
	swaps map[lntypes.Hash]*LoopOutContract) error {

	// NOTE(review): the zero-value options appear to request a writable
	// transaction — confirm against SqliteTxOptions' definition.
	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		for swapHash, swap := range swaps {
			insertArgs := loopToInsertArgs(
				swapHash, &swap.SwapContract,
			)

			// First we'll insert the swap itself.
			err := tx.InsertSwap(ctx, insertArgs)
			if err != nil {
				return err
			}

			htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
				swapHash, &swap.SwapContract,
			)

			// Next insert the htlc keys.
			err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
			if err != nil {
				return err
			}

			loopOutInsertArgs := loopOutToInsertArgs(swapHash, swap)

			// Next insert the loop out relevant data.
			err = tx.InsertLoopOut(ctx, loopOutInsertArgs)
			if err != nil {
				return err
			}
		}

		return nil
	})
}
// UpdateLoopOut stores a new event for a target loop out swap. This
// appends to the event log for a particular swap as it goes through
// the various stages in its lifetime.
func (s *BaseDB) UpdateLoopOut(ctx context.Context, hash lntypes.Hash,
	time time.Time, state SwapStateData) error {

	// Delegate to the shared update helper used for both swap types.
	return s.updateLoop(ctx, hash, time, state)
}
// FetchLoopInSwaps returns all loop in swaps currently in the store,
// including their full update/event history.
func (s *BaseDB) FetchLoopInSwaps(ctx context.Context) (
	[]*LoopIn, error) {

	var loopIns []*LoopIn

	err := s.ExecTx(ctx, NewSqlReadOpts(), func(tx *sqlc.Queries) error {
		// Run all queries on the transaction's query object, so the
		// swaps and their updates are read from one consistent
		// snapshot (the previous code bypassed the tx via s.Queries).
		swaps, err := tx.GetLoopInSwaps(ctx)
		if err != nil {
			return err
		}

		loopIns = make([]*LoopIn, len(swaps))

		for i, swap := range swaps {
			// Fetch the event history for this swap so the
			// returned struct is fully populated.
			updates, err := tx.GetSwapUpdates(ctx, swap.SwapHash)
			if err != nil {
				return err
			}

			loopIn, err := s.convertLoopInRow(
				swap, updates,
			)
			if err != nil {
				return err
			}

			loopIns[i] = loopIn
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return loopIns, nil
}
// CreateLoopIn adds an initiated swap to the store. The swap, its htlc keys
// and its loop-in specific data are inserted in a single transaction.
func (s *BaseDB) CreateLoopIn(ctx context.Context, hash lntypes.Hash,
	swap *LoopInContract) error {

	// NOTE(review): the zero-value options appear to request a writable
	// transaction — confirm against SqliteTxOptions' definition.
	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		insertArgs := loopToInsertArgs(
			hash, &swap.SwapContract,
		)

		// First we'll insert the swap itself.
		err := tx.InsertSwap(ctx, insertArgs)
		if err != nil {
			return err
		}

		htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
			hash, &swap.SwapContract,
		)

		// Next insert the htlc keys.
		err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
		if err != nil {
			return err
		}

		loopInInsertArgs := loopInToInsertArgs(hash, swap)

		// Next insert the loop in relevant data.
		err = tx.InsertLoopIn(ctx, loopInInsertArgs)
		if err != nil {
			return err
		}

		return nil
	})
}
// BatchCreateLoopIn adds multiple initiated loop in swaps to the store.
func (s *BaseDB) BatchCreateLoopIn(ctx context.Context,
	swaps map[lntypes.Hash]*LoopInContract) error {

	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		for swapHash, swap := range swaps {
			insertArgs := loopToInsertArgs(
				swapHash, &swap.SwapContract,
			)

			// First we'll insert the swap itself.
			err := tx.InsertSwap(ctx, insertArgs)
			if err != nil {
				return err
			}

			htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
				swapHash, &swap.SwapContract,
			)

			// Next insert the htlc keys.
			err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
			if err != nil {
				return err
			}

			loopInInsertArgs := loopInToInsertArgs(swapHash, swap)

			// Next insert the loop in relevant data.
			err = tx.InsertLoopIn(ctx, loopInInsertArgs)
			if err != nil {
				return err
			}
		}

		return nil
	})
}
// UpdateLoopIn stores a new event for a target loop in swap. This
// appends to the event log for a particular swap as it goes through
// the various stages in its lifetime.
func (s *BaseDB) UpdateLoopIn(ctx context.Context, hash lntypes.Hash,
	time time.Time, state SwapStateData) error {

	// Loop ins and loop outs share the swap_updates table, so both
	// delegate to the common updateLoop helper.
	return s.updateLoop(ctx, hash, time, state)
}
// PutLiquidityParams writes the serialized `manager.Parameters` bytes
// into the liquidity_params table.
//
// NOTE: it's the caller's responsibility to encode the param. Atm,
// it's encoding using the proto package's `Marshal` method.
func (s *BaseDB) PutLiquidityParams(ctx context.Context,
	params []byte) error {

	// The table holds a single row which is created on the first write
	// and overwritten on every subsequent one.
	return s.Queries.UpsertLiquidityParams(ctx, params)
}
// FetchLiquidityParams reads the serialized `manager.Parameters` bytes
// from the liquidity_params table.
//
// NOTE: it's the caller's responsibility to decode the param. Atm,
// it's decoding using the proto package's `Unmarshal` method.
func (s *BaseDB) FetchLiquidityParams(ctx context.Context) ([]byte,
	error) {

	params, err := s.Queries.FetchLiquidityParams(ctx)
	switch {
	// A missing row simply means no parameters have been stored yet,
	// which is reported as a nil slice rather than an error.
	case errors.Is(err, sql.ErrNoRows):
		return nil, nil

	case err != nil:
		return nil, err
	}

	return params, nil
}
// A compile time assertion to ensure that BaseDB satisfies the
// SwapStore interface.
var _ SwapStore = (*BaseDB)(nil)
// updateLoop updates the swap with the given hash by inserting a new update
// in the swap_updates table.
func (s *BaseDB) updateLoop(ctx context.Context, hash lntypes.Hash,
	time time.Time, state SwapStateData) error {

	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		insertParams := sqlc.InsertSwapUpdateParams{
			SwapHash:        hash[:],
			UpdateTimestamp: time.UTC(),
			UpdateState:     int32(state.State),
			ServerCost:      int64(state.Cost.Server),
			OnchainCost:     int64(state.Cost.Onchain),
			OffchainCost:    int64(state.Cost.Offchain),
		}

		// The htlc tx hash is optional; the column keeps an empty
		// string while the hash isn't known yet.
		if txHash := state.HtlcTxHash; txHash != nil {
			insertParams.HtlcTxhash = txHash.String()
		}

		// Append the new update to the swap's event log.
		return tx.InsertSwapUpdate(ctx, insertParams)
	})
}
// BatchInsertUpdate inserts multiple swap updates to the store.
func (s *BaseDB) BatchInsertUpdate(ctx context.Context,
	updateData map[lntypes.Hash][]BatchInsertUpdateData) error {

	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		for hash, updates := range updateData {
			for _, update := range updates {
				insertParams := sqlc.InsertSwapUpdateParams{
					SwapHash:        hash[:],
					UpdateTimestamp: update.Time.UTC(),
					UpdateState:     int32(update.State.State),
					ServerCost:      int64(update.State.Cost.Server),
					OnchainCost:     int64(update.State.Cost.Onchain),
					OffchainCost:    int64(update.State.Cost.Offchain),
				}

				// The htlc tx hash is optional.
				if txHash := update.State.HtlcTxHash; txHash != nil {
					insertParams.HtlcTxhash = txHash.String()
				}

				// Append this update to the swap's event log.
				err := tx.InsertSwapUpdate(ctx, insertParams)
				if err != nil {
					return err
				}
			}
		}

		return nil
	})
}
// loopToInsertArgs converts a SwapContract struct to the arguments needed to
// insert it into the database.
func loopToInsertArgs(hash lntypes.Hash,
	swap *SwapContract) sqlc.InsertSwapParams {

	// Only the fields shared by loop ins and loop outs live in the swaps
	// table; type specific data is converted by separate helpers.
	return sqlc.InsertSwapParams{
		SwapHash:         hash[:],
		Preimage:         swap.Preimage[:],
		InitiationTime:   swap.InitiationTime.UTC(),
		AmountRequested:  int64(swap.AmountRequested),
		CltvExpiry:       swap.CltvExpiry,
		MaxSwapFee:       int64(swap.MaxSwapFee),
		MaxMinerFee:      int64(swap.MaxMinerFee),
		InitiationHeight: swap.InitiationHeight,
		ProtocolVersion:  int32(swap.ProtocolVersion),
		Label:            swap.Label,
	}
}
// loopOutToInsertArgs converts a LoopOutContract struct to the arguments
// needed to insert it into the database.
func loopOutToInsertArgs(hash lntypes.Hash,
	loopOut *LoopOutContract) sqlc.InsertLoopOutParams {

	return sqlc.InsertLoopOutParams{
		SwapHash:          hash[:],
		DestAddress:       loopOut.DestAddr.String(),
		SwapInvoice:       loopOut.SwapInvoice,
		MaxSwapRoutingFee: int64(loopOut.MaxSwapRoutingFee),
		SweepConfTarget:   loopOut.SweepConfTarget,
		HtlcConfirmations: int32(loopOut.HtlcConfirmations),
		// The channel set is persisted as a comma separated string of
		// channel IDs (see convertOutgoingChanSet for the reverse).
		OutgoingChanSet:     loopOut.OutgoingChanSet.String(),
		PrepayInvoice:       loopOut.PrepayInvoice,
		MaxPrepayRoutingFee: int64(loopOut.MaxPrepayRoutingFee),
		PublicationDeadline: loopOut.SwapPublicationDeadline.UTC(),
	}
}
// loopInToInsertArgs converts a LoopInContract struct to the arguments needed
// to insert it into the database.
func loopInToInsertArgs(hash lntypes.Hash,
	loopIn *LoopInContract) sqlc.InsertLoopInParams {

	args := sqlc.InsertLoopInParams{
		SwapHash:       hash[:],
		HtlcConfTarget: loopIn.HtlcConfTarget,
		ExternalHtlc:   loopIn.ExternalHtlc,
	}

	// The last hop is optional; a nil slice maps to a NULL column.
	if lastHop := loopIn.LastHop; lastHop != nil {
		args.LastHop = lastHop[:]
	}

	return args
}
// swapToHtlcKeysInsertArgs extracts the htlc keys from a SwapContract struct
// and converts them to the arguments needed to insert them into the database.
func swapToHtlcKeysInsertArgs(hash lntypes.Hash,
	swap *SwapContract) sqlc.InsertHtlcKeysParams {

	return sqlc.InsertHtlcKeysParams{
		SwapHash:               hash[:],
		SenderScriptPubkey:     swap.HtlcKeys.SenderScriptKey[:],
		ReceiverScriptPubkey:   swap.HtlcKeys.ReceiverScriptKey[:],
		SenderInternalPubkey:   swap.HtlcKeys.SenderInternalPubKey[:],
		ReceiverInternalPubkey: swap.HtlcKeys.ReceiverInternalPubKey[:],
		// The key locator allows the client to re-derive its script
		// key when reading the swap back.
		ClientKeyFamily: int32(
			swap.HtlcKeys.ClientScriptKeyLocator.Family,
		),
		ClientKeyIndex: int32(
			swap.HtlcKeys.ClientScriptKeyLocator.Index,
		),
	}
}
// convertLoopOutRow converts a database row containing a loop out swap to a
// LoopOut struct.
func (s *BaseDB) convertLoopOutRow(row sqlc.GetLoopOutSwapRow,
	updates []sqlc.SwapUpdate) (*LoopOut, error) {

	// Re-assemble the htlc key set from its individual blob columns.
	htlcKeys, err := fetchHtlcKeys(
		row.SenderScriptPubkey, row.ReceiverScriptPubkey,
		row.SenderInternalPubkey, row.ReceiverInternalPubkey,
		row.ClientKeyFamily, row.ClientKeyIndex,
	)
	if err != nil {
		return nil, err
	}

	preimage, err := lntypes.MakePreimage(row.Preimage)
	if err != nil {
		return nil, err
	}

	// The destination address was persisted as a string, so it is decoded
	// against the store's configured chain parameters.
	destAddress, err := btcutil.DecodeAddress(row.DestAddress, s.network)
	if err != nil {
		return nil, err
	}

	swapHash, err := lntypes.MakeHash(row.SwapHash)
	if err != nil {
		return nil, err
	}

	loopOut := &LoopOut{
		Contract: &LoopOutContract{
			SwapContract: SwapContract{
				Preimage:         preimage,
				AmountRequested:  btcutil.Amount(row.AmountRequested),
				HtlcKeys:         htlcKeys,
				CltvExpiry:       row.CltvExpiry,
				MaxSwapFee:       btcutil.Amount(row.MaxSwapFee),
				MaxMinerFee:      btcutil.Amount(row.MaxMinerFee),
				InitiationHeight: row.InitiationHeight,
				InitiationTime:   row.InitiationTime,
				Label:            row.Label,
				ProtocolVersion:  ProtocolVersion(row.ProtocolVersion),
			},
			DestAddr:                destAddress,
			SwapInvoice:             row.SwapInvoice,
			MaxSwapRoutingFee:       btcutil.Amount(row.MaxSwapRoutingFee),
			SweepConfTarget:         row.SweepConfTarget,
			HtlcConfirmations:       uint32(row.HtlcConfirmations),
			PrepayInvoice:           row.PrepayInvoice,
			MaxPrepayRoutingFee:     btcutil.Amount(row.MaxPrepayRoutingFee),
			SwapPublicationDeadline: row.PublicationDeadline,
		},
		Loop: Loop{
			Hash: swapHash,
		},
	}

	// An empty string means no outgoing channel restriction was stored.
	if row.OutgoingChanSet != "" {
		chanSet, err := convertOutgoingChanSet(row.OutgoingChanSet)
		if err != nil {
			return nil, err
		}

		loopOut.Contract.OutgoingChanSet = chanSet
	}

	// If we don't have any updates yet we can return early
	if len(updates) == 0 {
		return loopOut, nil
	}

	events, err := getSwapEvents(updates)
	if err != nil {
		return nil, err
	}

	loopOut.Events = events

	return loopOut, nil
}
// convertLoopInRow converts a database row containing a loop in swap to a
// LoopIn struct.
func (s *BaseDB) convertLoopInRow(row sqlc.GetLoopInSwapsRow,
	updates []sqlc.SwapUpdate) (*LoopIn, error) {

	// Re-assemble the htlc key set from its individual blob columns.
	htlcKeys, err := fetchHtlcKeys(
		row.SenderScriptPubkey, row.ReceiverScriptPubkey,
		row.SenderInternalPubkey, row.ReceiverInternalPubkey,
		row.ClientKeyFamily, row.ClientKeyIndex,
	)
	if err != nil {
		return nil, err
	}

	preimage, err := lntypes.MakePreimage(row.Preimage)
	if err != nil {
		return nil, err
	}

	swapHash, err := lntypes.MakeHash(row.SwapHash)
	if err != nil {
		return nil, err
	}

	loopIn := &LoopIn{
		Contract: &LoopInContract{
			SwapContract: SwapContract{
				Preimage:         preimage,
				AmountRequested:  btcutil.Amount(row.AmountRequested),
				HtlcKeys:         htlcKeys,
				CltvExpiry:       row.CltvExpiry,
				MaxSwapFee:       btcutil.Amount(row.MaxSwapFee),
				MaxMinerFee:      btcutil.Amount(row.MaxMinerFee),
				InitiationHeight: row.InitiationHeight,
				InitiationTime:   row.InitiationTime,
				Label:            row.Label,
				ProtocolVersion:  ProtocolVersion(row.ProtocolVersion),
			},
			HtlcConfTarget: row.HtlcConfTarget,
			ExternalHtlc:   row.ExternalHtlc,
		},
		Loop: Loop{
			Hash: swapHash,
		},
	}

	// The last hop is an optional (NULL-able) column.
	if row.LastHop != nil {
		lastHop, err := route.NewVertexFromBytes(row.LastHop)
		if err != nil {
			return nil, err
		}

		loopIn.Contract.LastHop = &lastHop
	}

	// If we don't have any updates yet we can return early
	if len(updates) == 0 {
		return loopIn, nil
	}

	events, err := getSwapEvents(updates)
	if err != nil {
		return nil, err
	}

	loopIn.Events = events

	return loopIn, nil
}
// getSwapEvents returns a slice of LoopEvents for the swap.
func getSwapEvents(updates []sqlc.SwapUpdate) ([]*LoopEvent, error) {
	events := make([]*LoopEvent, len(updates))

	for i, update := range updates {
		event := &LoopEvent{
			SwapStateData: SwapStateData{
				State: SwapState(update.UpdateState),
				Cost: SwapCost{
					Server:   btcutil.Amount(update.ServerCost),
					Onchain:  btcutil.Amount(update.OnchainCost),
					Offchain: btcutil.Amount(update.OffchainCost),
				},
			},
			Time: update.UpdateTimestamp.UTC(),
		}

		// An empty string means that no htlc tx hash was recorded
		// for this update.
		if update.HtlcTxhash != "" {
			txHash, err := chainhash.NewHashFromStr(
				update.HtlcTxhash,
			)
			if err != nil {
				return nil, err
			}

			event.HtlcTxHash = txHash
		}

		events[i] = event
	}

	return events, nil
}
// convertOutgoingChanSet converts a comma separated string of channel IDs
// into a ChannelSet.
func convertOutgoingChanSet(outgoingChanSet string) (ChannelSet, error) {
	// Split the string into a slice of strings.
	chanStrings := strings.Split(outgoingChanSet, ",")
	channels := make([]uint64, len(chanStrings))

	// Convert each string to a channel ID. Short channel IDs are
	// unsigned 64 bit integers, so parse with ParseUint: ParseInt would
	// both reject valid IDs above math.MaxInt64 and silently wrap
	// negative inputs when cast to uint64.
	for i, chanString := range chanStrings {
		chanID, err := strconv.ParseUint(chanString, 10, 64)
		if err != nil {
			return nil, err
		}

		channels[i] = chanID
	}

	return NewChannelSet(channels)
}
// fetchHtlcKeys converts the blob encoded htlc keys into a HtlcKeys struct.
func fetchHtlcKeys(senderScriptPubkey, receiverScriptPubkey,
	senderInternalPubkey, receiverInternalPubkey []byte,
	clientKeyFamily, clientKeyIndex int32) (HtlcKeys, error) {

	// Both script keys are mandatory 33 byte blobs.
	senderScriptKey, err := blobTo33ByteSlice(senderScriptPubkey)
	if err != nil {
		return HtlcKeys{}, err
	}

	receiverScriptKey, err := blobTo33ByteSlice(receiverScriptPubkey)
	if err != nil {
		return HtlcKeys{}, err
	}

	htlcKeys := HtlcKeys{
		SenderScriptKey:   senderScriptKey,
		ReceiverScriptKey: receiverScriptKey,
		ClientScriptKeyLocator: keychain.KeyLocator{
			Family: keychain.KeyFamily(clientKeyFamily),
			Index:  uint32(clientKeyIndex),
		},
	}

	// The internal pubkeys come from nullable columns, so they are only
	// decoded when present; absent keys leave the zero-value array.
	if senderInternalPubkey != nil {
		senderInternalPubkey, err := blobTo33ByteSlice(
			senderInternalPubkey,
		)
		if err != nil {
			return HtlcKeys{}, err
		}

		htlcKeys.SenderInternalPubKey = senderInternalPubkey
	}

	if receiverInternalPubkey != nil {
		receiverInternalPubkey, err := blobTo33ByteSlice(
			receiverInternalPubkey,
		)
		if err != nil {
			return HtlcKeys{}, err
		}

		htlcKeys.ReceiverInternalPubKey = receiverInternalPubkey
	}

	return htlcKeys, nil
}
// blobTo33ByteSlice converts a blob encoded 33 byte public key into a
// [33]byte.
func blobTo33ByteSlice(blob []byte) ([33]byte, error) {
	var key [33]byte

	// Only accept blobs that exactly fill the target array.
	if len(blob) != len(key) {
		return [33]byte{}, errors.New("blob is not 33 bytes")
	}

	copy(key[:], blob)

	return key, nil
}

@ -0,0 +1,389 @@
package loopdb
import (
"context"
"crypto/sha256"
"errors"
"math/rand"
"reflect"
"testing"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightninglabs/loop/loopdb/sqlc"
"github.com/lightninglabs/loop/test"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/stretchr/testify/require"
)
var (
	// Fixed timestamps used to keep swap update tests deterministic.
	testTime1 = time.Date(2018, time.January, 9, 14, 54, 32, 3, time.UTC)
	testTime2 = time.Date(2018, time.January, 9, 15, 02, 03, 5, time.UTC)
)
// TestSqliteLoopOutStore tests all the basic functionality of the current
// sqlite swap store.
func TestSqliteLoopOutStore(t *testing.T) {
	destAddr := test.GetDestAddr(t, 0)
	initiationTime := time.Date(2018, 11, 1, 0, 0, 0, 0, time.UTC)

	// Next, we'll make a new pending swap that we'll insert into the
	// database shortly.
	unrestrictedSwap := LoopOutContract{
		SwapContract: SwapContract{
			AmountRequested: 100,
			Preimage:        testPreimage,
			CltvExpiry:      144,
			HtlcKeys: HtlcKeys{
				SenderScriptKey:        senderKey,
				ReceiverScriptKey:      receiverKey,
				SenderInternalPubKey:   senderInternalKey,
				ReceiverInternalPubKey: receiverInternalKey,
				ClientScriptKeyLocator: keychain.KeyLocator{
					Family: 1,
					Index:  2,
				},
			},
			MaxMinerFee:      10,
			MaxSwapFee:       20,
			InitiationHeight: 99,
			InitiationTime:   initiationTime,
			ProtocolVersion:  ProtocolVersionMuSig2,
		},
		MaxPrepayRoutingFee:     40,
		PrepayInvoice:           "prepayinvoice",
		DestAddr:                destAddr,
		SwapInvoice:             "swapinvoice",
		MaxSwapRoutingFee:       30,
		SweepConfTarget:         2,
		HtlcConfirmations:       2,
		SwapPublicationDeadline: initiationTime,
	}

	// Base case: no restriction on the outgoing channel set.
	t.Run("no outgoing set", func(t *testing.T) {
		testSqliteLoopOutStore(t, &unrestrictedSwap)
	})

	// A swap restricted to a set of outgoing channels must round-trip
	// through the store as well.
	restrictedSwap := unrestrictedSwap
	restrictedSwap.OutgoingChanSet = ChannelSet{1, 2}

	t.Run("two channel outgoing set", func(t *testing.T) {
		testSqliteLoopOutStore(t, &restrictedSwap)
	})

	// An optional label must survive the round-trip too.
	labelledSwap := unrestrictedSwap
	labelledSwap.Label = "test label"

	t.Run("labelled swap", func(t *testing.T) {
		testSqliteLoopOutStore(t, &labelledSwap)
	})
}
// testSqliteLoopOutStore tests the basic functionality of the current sqlite
// swap store for specific swap parameters: create, duplicate-create
// rejection, and the update lifecycle.
func testSqliteLoopOutStore(t *testing.T, pendingSwap *LoopOutContract) {
	store := NewTestDB(t)

	ctxb := context.Background()

	// First, verify that an empty database has no active swaps.
	swaps, err := store.FetchLoopOutSwaps(ctxb)
	require.NoError(t, err)
	require.Empty(t, swaps)

	hash := pendingSwap.Preimage.Hash()

	// checkSwap is a test helper function that'll assert the state of a
	// swap.
	checkSwap := func(expectedState SwapState) {
		t.Helper()

		swaps, err := store.FetchLoopOutSwaps(ctxb)
		require.NoError(t, err)
		require.Len(t, swaps, 1)

		swap, err := store.FetchLoopOutSwap(ctxb, hash)
		require.NoError(t, err)

		require.Equal(t, hash, swap.Hash)
		require.Equal(t, hash, swaps[0].Hash)

		swapContract := swap.Contract

		// require.Equal takes the expected value first; the original
		// reversed the arguments, which yields misleading failure
		// output.
		require.Equal(t, pendingSwap, swapContract)

		require.Equal(t, expectedState, swap.State().State)

		if expectedState == StatePreimageRevealed {
			require.NotNil(t, swap.State().HtlcTxHash)
		}
	}

	// If we create a new swap, then it should show up as being initialized
	// right after.
	err = store.CreateLoopOut(ctxb, hash, pendingSwap)
	require.NoError(t, err)
	checkSwap(StateInitiated)

	// Trying to make the same swap again should result in an error.
	err = store.CreateLoopOut(ctxb, hash, pendingSwap)
	require.Error(t, err)
	checkSwap(StateInitiated)

	// Next, we'll update to the next state of the pre-image being
	// revealed. The state should be reflected here again.
	err = store.UpdateLoopOut(
		ctxb, hash, testTime,
		SwapStateData{
			State:      StatePreimageRevealed,
			HtlcTxHash: &chainhash.Hash{1, 6, 2},
		},
	)
	require.NoError(t, err)
	checkSwap(StatePreimageRevealed)

	// Next, we'll update to the final state to ensure that the state is
	// properly updated.
	err = store.UpdateLoopOut(
		ctxb, hash, testTime,
		SwapStateData{
			State: StateFailInsufficientValue,
		},
	)
	require.NoError(t, err)
	checkSwap(StateFailInsufficientValue)

	err = store.Close()
	require.NoError(t, err)
}
// TestSQLliteLoopInStore tests all the basic functionality of the current
// sqlite swap store.
func TestSQLliteLoopInStore(t *testing.T) {
	initiationTime := time.Date(2018, 11, 1, 0, 0, 0, 0, time.UTC)

	// Next, we'll make a new pending swap that we'll insert into the
	// database shortly.
	lastHop := route.Vertex{1, 2, 3}

	pendingSwap := LoopInContract{
		SwapContract: SwapContract{
			AmountRequested: 100,
			Preimage:        testPreimage,
			CltvExpiry:      144,
			HtlcKeys: HtlcKeys{
				SenderScriptKey:        senderKey,
				ReceiverScriptKey:      receiverKey,
				SenderInternalPubKey:   senderInternalKey,
				ReceiverInternalPubKey: receiverInternalKey,
				ClientScriptKeyLocator: keychain.KeyLocator{
					Family: 1,
					Index:  2,
				},
			},
			MaxMinerFee:      10,
			MaxSwapFee:       20,
			InitiationHeight: 99,

			// A fixed UTC timestamp is used so equality checks
			// are not affected by the local timezone.
			InitiationTime:  initiationTime,
			ProtocolVersion: ProtocolVersionMuSig2,
		},
		HtlcConfTarget: 2,
		LastHop:        &lastHop,
		ExternalHtlc:   true,
	}

	t.Run("loop in", func(t *testing.T) {
		testSqliteLoopInStore(t, pendingSwap)
	})

	// An optional label must survive the round-trip as well.
	labelledSwap := pendingSwap
	labelledSwap.Label = "test label"

	t.Run("loop in with label", func(t *testing.T) {
		testSqliteLoopInStore(t, labelledSwap)
	})
}
// testSqliteLoopInStore tests the basic functionality of the current sqlite
// swap store for a specific loop in contract: create, duplicate-create
// rejection, and the update lifecycle.
func testSqliteLoopInStore(t *testing.T, pendingSwap LoopInContract) {
	store := NewTestDB(t)

	ctxb := context.Background()

	// First, verify that an empty database has no active swaps.
	swaps, err := store.FetchLoopInSwaps(ctxb)
	require.NoError(t, err)
	require.Empty(t, swaps)

	hash := sha256.Sum256(testPreimage[:])

	// checkSwap is a test helper function that'll assert the state of a
	// swap.
	checkSwap := func(expectedState SwapState) {
		t.Helper()

		swaps, err := store.FetchLoopInSwaps(ctxb)
		require.NoError(t, err)
		require.Len(t, swaps, 1)

		swap := swaps[0].Contract

		// require.Equal takes the expected value first; the original
		// reversed the arguments in both assertions below, which
		// yields misleading failure output.
		require.Equal(t, &pendingSwap, swap)
		require.Equal(t, expectedState, swaps[0].State().State)
	}

	// If we create a new swap, then it should show up as being initialized
	// right after.
	err = store.CreateLoopIn(ctxb, hash, &pendingSwap)
	require.NoError(t, err)
	checkSwap(StateInitiated)

	// Trying to make the same swap again should result in an error.
	err = store.CreateLoopIn(ctxb, hash, &pendingSwap)
	require.Error(t, err)
	checkSwap(StateInitiated)

	// Next, we'll update to the next state of the pre-image being
	// revealed. The state should be reflected here again.
	err = store.UpdateLoopIn(
		ctxb, hash, testTime,
		SwapStateData{
			State: StatePreimageRevealed,
		},
	)
	require.NoError(t, err)
	checkSwap(StatePreimageRevealed)

	// Next, we'll update to the final state to ensure that the state is
	// properly updated.
	err = store.UpdateLoopIn(
		ctxb, hash, testTime,
		SwapStateData{
			State: StateFailInsufficientValue,
		},
	)
	require.NoError(t, err)
	checkSwap(StateFailInsufficientValue)

	err = store.Close()
	require.NoError(t, err)
}
// TestSqliteLiquidityParams checks that reading and writing the serialized
// liquidity parameters against the liquidity_params table work as expected.
func TestSqliteLiquidityParams(t *testing.T) {
	ctxb := context.Background()

	store := NewTestDB(t)

	// Test when there's no params saved before, an empty bytes is
	// returned.
	params, err := store.FetchLiquidityParams(ctxb)
	require.NoError(t, err, "failed to fetch params")
	require.Empty(t, params, "expect empty bytes")
	require.Nil(t, params, "expected nil byte array")

	params = []byte("test")

	// Test we can save the params.
	err = store.PutLiquidityParams(ctxb, params)
	require.NoError(t, err, "failed to put params")

	// Now fetch the db again should return the above saved bytes.
	paramsRead, err := store.FetchLiquidityParams(ctxb)
	require.NoError(t, err, "failed to fetch params")
	require.Equal(t, params, paramsRead, "unexpected return value")
}
// TestSqliteTypeConversion is a small test that checks that we can safely
// convert between the :one and :many types from sqlc.
func TestSqliteTypeConversion(t *testing.T) {
	loopOutSwapRow := sqlc.GetLoopOutSwapRow{}

	// Populate the row with random data. The original ignored this
	// error, which could mask a struct that was never filled in and make
	// the equality check below vacuous.
	err := randomStruct(&loopOutSwapRow)
	require.NoError(t, err)
	require.NotNil(t, loopOutSwapRow.DestAddress)

	// Both row types share an identical field layout, so direct
	// conversion must preserve every field value.
	loopOutSwapsRow := sqlc.GetLoopOutSwapsRow(loopOutSwapRow)
	require.EqualValues(t, loopOutSwapRow, loopOutSwapsRow)
}
// charset is the alphabet used to build random alphanumeric strings.
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// randomString returns a pseudo-random alphanumeric string of the given
// length.
func randomString(length int) string {
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = charset[rand.Intn(len(charset))]
	}

	return string(out)
}
// randomBytes returns a pseudo-random byte slice of the given length.
func randomBytes(length int) []byte {
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = byte(rand.Intn(256))
	}

	return out
}
// randomStruct fills the struct pointed to by v with random data. Only
// settable fields of kind int64, string, byte slice, and of type time.Time
// or route.Vertex are populated; all other fields keep their zero value.
func randomStruct(v interface{}) error {
	val := reflect.ValueOf(v)
	if val.Kind() != reflect.Ptr || val.Elem().Kind() != reflect.Struct {
		return errors.New("Input should be a pointer to a struct type")
	}

	val = val.Elem()
	for i := 0; i < val.NumField(); i++ {
		field := val.Field(i)
		switch field.Kind() {
		case reflect.Int64:
			if field.CanSet() {
				field.SetInt(rand.Int63())
			}

		case reflect.String:
			if field.CanSet() {
				field.SetString(randomString(10))
			}

		case reflect.Slice:
			// Only byte slices ([]uint8) are filled in.
			if field.Type().Elem().Kind() == reflect.Uint8 {
				if field.CanSet() {
					field.SetBytes(randomBytes(32))
				}
			}

		case reflect.Struct:
			// time.Time gets the current time rather than a
			// random value; callers only need it non-zero.
			if field.Type() == reflect.TypeOf(time.Time{}) {
				if field.CanSet() {
					field.Set(reflect.ValueOf(time.Now()))
				}
			}

			if field.Type() == reflect.TypeOf(route.Vertex{}) {
				if field.CanSet() {
					vertex, err := route.NewVertexFromBytes(
						randomBytes(route.VertexSize),
					)
					if err != nil {
						return err
					}

					field.Set(reflect.ValueOf(vertex))
				}
			}
		}
	}

	return nil
}

@ -0,0 +1,31 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.17.2
package sqlc
import (
"context"
"database/sql"
)
type DBTX interface {
ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
PrepareContext(context.Context, string) (*sql.Stmt, error)
QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
QueryRowContext(context.Context, string, ...interface{}) *sql.Row
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx *sql.Tx) *Queries {
return &Queries{
db: tx,
}
}

@ -0,0 +1,35 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.17.2
// source: liquidity_params.sql
package sqlc
import (
"context"
)
const fetchLiquidityParams = `-- name: FetchLiquidityParams :one
SELECT params FROM liquidity_params WHERE id = 1
`
func (q *Queries) FetchLiquidityParams(ctx context.Context) ([]byte, error) {
row := q.db.QueryRowContext(ctx, fetchLiquidityParams)
var params []byte
err := row.Scan(&params)
return params, err
}
const upsertLiquidityParams = `-- name: UpsertLiquidityParams :exec
INSERT INTO liquidity_params (
id, params
) VALUES (
1, $1
) ON CONFLICT (id) DO UPDATE SET
params = excluded.params
`
func (q *Queries) UpsertLiquidityParams(ctx context.Context, params []byte) error {
_, err := q.db.ExecContext(ctx, upsertLiquidityParams, params)
return err
}

@ -0,0 +1,7 @@
-- Drop objects in reverse dependency order: loopin_swaps, loopout_swaps,
-- htlc_keys and swap_updates all reference swaps(swap_hash), so they must be
-- removed before the swaps table itself. Postgres enforces this ordering;
-- the original dropped swaps first, which fails there.
DROP INDEX IF EXISTS updates_swap_hash_idx;
DROP TABLE IF EXISTS swap_updates;
DROP TABLE IF EXISTS loopin_swaps;
DROP TABLE IF EXISTS loopout_swaps;
DROP TABLE IF EXISTS htlc_keys;
DROP TABLE IF EXISTS swaps;

@ -0,0 +1,166 @@
-- swaps stores all base data that is shared between loop-outs and loop-ins,
-- as well as the updates.
CREATE TABLE swaps (
    -- id is the autoincrementing primary key.
    id INTEGER PRIMARY KEY,

    -- swap_hash is the randomly generated hash of the swap, which is used
    -- as the swap identifier for the clients.
    swap_hash BLOB NOT NULL UNIQUE,

    -- preimage is the preimage for swap htlc.
    preimage BLOB NOT NULL UNIQUE,

    -- initiation_time is the creation time (when stored) of the contract.
    initiation_time TIMESTAMP NOT NULL,

    -- amount_requested is the requested swap amount in sats.
    amount_requested BIGINT NOT NULL,

    -- cltv_expiry defines the on-chain HTLC's CLTV. In specific,
    -- * For loop in swap, this value must be greater than the off-chain
    --   payment's CLTV.
    -- * For loop out swap, this value must be smaller than the off-chain
    --   payment's CLTV.
    cltv_expiry INTEGER NOT NULL,

    -- max_miner_fee is the maximum in on-chain fees that we are willing to
    -- spend.
    max_miner_fee BIGINT NOT NULL,

    -- max_swap_fee is the maximum we are willing to pay the server for the
    -- swap.
    max_swap_fee BIGINT NOT NULL,

    -- initiation_height is the block height at which the swap was initiated.
    initiation_height INTEGER NOT NULL,

    -- protocol_version is the protocol version that the swap was created with.
    -- Note that this version is not upgraded if the client upgrades or
    -- downgrades their protocol version mid-swap.
    protocol_version INTEGER NOT NULL,

    -- label contains an optional label for the swap.
    label TEXT NOT NULL
);

-- swap_updates stores timestamps and states of swap updates.
CREATE TABLE swap_updates (
    -- id is the autoincrementing primary key.
    id INTEGER PRIMARY KEY,

    -- swap_hash is the foreign key referencing the swap in the swaps table.
    swap_hash BLOB NOT NULL,

    -- update_timestamp is the timestamp the swap was updated at.
    update_timestamp TIMESTAMP NOT NULL,

    -- update_state is the state the swap was in at a given timestamp.
    update_state INTEGER NOT NULL,

    -- htlc_txhash is the hash of the transaction that creates the htlc.
    htlc_txhash TEXT NOT NULL,

    -- server_cost is the amount paid to the server.
    server_cost BIGINT NOT NULL DEFAULT 0,

    -- onchain_cost is the amount paid to miners for the onchain tx.
    onchain_cost BIGINT NOT NULL DEFAULT 0,

    -- offchain_cost is the amount paid in routing fees.
    offchain_cost BIGINT NOT NULL DEFAULT 0,

    -- Foreign key constraint to ensure that swap_hash references an existing
    -- swap.
    FOREIGN KEY (swap_hash) REFERENCES swaps (swap_hash)
);

-- loopin_swaps stores the loop-in specific data.
CREATE TABLE loopin_swaps (
    -- swap_hash points to the parent swap hash.
    swap_hash BLOB PRIMARY KEY REFERENCES swaps(swap_hash),

    -- htlc_conf_target specifies the targeted confirmation target for the
    -- sweep transaction.
    htlc_conf_target INTEGER NOT NULL,

    -- last_hop is an optional parameter that specifies the last hop to be
    -- used for a loop in swap.
    last_hop BLOB,

    -- external_htlc specifies whether the htlc is published by an external
    -- source.
    external_htlc BOOLEAN NOT NULL
);

-- loopout_swaps stores the loop-out specific data.
CREATE TABLE loopout_swaps (
    -- swap_hash points to the parent swap hash.
    swap_hash BLOB PRIMARY KEY REFERENCES swaps(swap_hash),

    -- dest_address is the destination address of the loop out swap.
    dest_address TEXT NOT NULL,

    -- SwapInvoice is the invoice that is to be paid by the client to
    -- initiate the loop out swap.
    swap_invoice TEXT NOT NULL,

    -- MaxSwapRoutingFee is the maximum off-chain fee in msat that may be
    -- paid for the swap payment to the server.
    max_swap_routing_fee BIGINT NOT NULL,

    -- SweepConfTarget specifies the targeted confirmation target for the
    -- client sweep tx.
    sweep_conf_target INTEGER NOT NULL,

    -- HtlcConfirmations is the number of confirmations we require the on
    -- chain htlc to have before proceeding with the swap.
    htlc_confirmations INTEGER NOT NULL,

    -- OutgoingChanSet is the set of short ids of channels that may be used.
    -- If empty, any channel may be used.
    outgoing_chan_set TEXT NOT NULL,

    -- PrepayInvoice is the invoice that the client should pay to the
    -- server that will be returned if the swap is complete.
    prepay_invoice TEXT NOT NULL,

    -- MaxPrepayRoutingFee is the maximum off-chain fee in msat that may be
    -- paid for the prepayment to the server.
    max_prepay_routing_fee BIGINT NOT NULL,

    -- SwapPublicationDeadline is a timestamp that the server commits to
    -- have the on-chain swap published by. It is set by the client to
    -- allow the server to delay the publication in exchange for possibly
    -- lower fees.
    publication_deadline TIMESTAMP NOT NULL
);

-- htlc_keys stores public and private keys used when constructing swap HTLCs.
CREATE TABLE htlc_keys (
    -- swap_hash points to the parent swap hash.
    swap_hash BLOB PRIMARY KEY REFERENCES swaps(swap_hash),

    -- sender_script_pubkey is the sender's script pubkey used in the HTLC.
    sender_script_pubkey BLOB NOT NULL,

    -- receiver_script_pubkey is the receiver's script pubkey used in the HTLC.
    receiver_script_pubkey BLOB NOT NULL,

    -- sender_internal_pubkey is the public key for the sender_internal_key.
    sender_internal_pubkey BLOB,

    -- receiver_internal_pubkey is the public key for the receiver_internal_key.
    receiver_internal_pubkey BLOB,

    -- client_key_family is the family of key being identified.
    client_key_family INTEGER NOT NULL,

    -- client_key_index is the precise index of the key being identified.
    client_key_index INTEGER NOT NULL
);

-- Speed up looking up the event log of a swap by its hash.
CREATE INDEX IF NOT EXISTS updates_swap_hash_idx ON swap_updates(swap_hash);

@ -0,0 +1 @@
-- Revert the liquidity params migration.
DROP TABLE IF EXISTS liquidity_params;

@ -0,0 +1,6 @@
-- liquidity_params stores the liquidity parameters for autoloop as a single row
-- with a blob column, which is the serialized proto request.
CREATE TABLE liquidity_params (
    -- id is pinned to 1 by the queries, so the table holds at most one row.
    id INTEGER PRIMARY KEY,
    -- params is the serialized proto request; NULL until first written.
    params BLOB
);

@ -0,0 +1,69 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.17.2
package sqlc
import (
"time"
)
type HtlcKey struct {
SwapHash []byte
SenderScriptPubkey []byte
ReceiverScriptPubkey []byte
SenderInternalPubkey []byte
ReceiverInternalPubkey []byte
ClientKeyFamily int32
ClientKeyIndex int32
}
type LiquidityParam struct {
ID int32
Params []byte
}
type LoopinSwap struct {
SwapHash []byte
HtlcConfTarget int32
LastHop []byte
ExternalHtlc bool
}
type LoopoutSwap struct {
SwapHash []byte
DestAddress string
SwapInvoice string
MaxSwapRoutingFee int64
SweepConfTarget int32
HtlcConfirmations int32
OutgoingChanSet string
PrepayInvoice string
MaxPrepayRoutingFee int64
PublicationDeadline time.Time
}
type Swap struct {
ID int32
SwapHash []byte
Preimage []byte
InitiationTime time.Time
AmountRequested int64
CltvExpiry int32
MaxMinerFee int64
MaxSwapFee int64
InitiationHeight int32
ProtocolVersion int32
Label string
}
type SwapUpdate struct {
ID int32
SwapHash []byte
UpdateTimestamp time.Time
UpdateState int32
HtlcTxhash string
ServerCost int64
OnchainCost int64
OffchainCost int64
}

@ -0,0 +1,26 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.17.2
package sqlc
import (
"context"
)
type Querier interface {
FetchLiquidityParams(ctx context.Context) ([]byte, error)
GetLoopInSwap(ctx context.Context, swapHash []byte) (GetLoopInSwapRow, error)
GetLoopInSwaps(ctx context.Context) ([]GetLoopInSwapsRow, error)
GetLoopOutSwap(ctx context.Context, swapHash []byte) (GetLoopOutSwapRow, error)
GetLoopOutSwaps(ctx context.Context) ([]GetLoopOutSwapsRow, error)
GetSwapUpdates(ctx context.Context, swapHash []byte) ([]SwapUpdate, error)
InsertHtlcKeys(ctx context.Context, arg InsertHtlcKeysParams) error
InsertLoopIn(ctx context.Context, arg InsertLoopInParams) error
InsertLoopOut(ctx context.Context, arg InsertLoopOutParams) error
InsertSwap(ctx context.Context, arg InsertSwapParams) error
InsertSwapUpdate(ctx context.Context, arg InsertSwapUpdateParams) error
UpsertLiquidityParams(ctx context.Context, params []byte) error
}
var _ Querier = (*Queries)(nil)

@ -0,0 +1,10 @@
-- UpsertLiquidityParams writes the single autoloop parameter blob,
-- overwriting any previously stored value.
-- name: UpsertLiquidityParams :exec
INSERT INTO liquidity_params (
    id, params
) VALUES (
    1, $1
) ON CONFLICT (id) DO UPDATE SET
    params = excluded.params;

-- FetchLiquidityParams reads the single autoloop parameter blob, if any.
-- name: FetchLiquidityParams :one
SELECT params FROM liquidity_params WHERE id = 1;

@ -0,0 +1,133 @@
-- Queries for the swaps, loopin_swaps, loopout_swaps, htlc_keys and
-- swap_updates tables. The read queries join the shared swaps row with the
-- type-specific table and the per-swap HTLC keys.

-- name: GetLoopOutSwaps :many
SELECT
swaps.*,
loopout_swaps.*,
htlc_keys.*
FROM
swaps
JOIN
loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
ORDER BY
swaps.id;

-- name: GetLoopOutSwap :one
SELECT
swaps.*,
loopout_swaps.*,
htlc_keys.*
FROM
swaps
JOIN
loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
WHERE
swaps.swap_hash = $1;

-- name: GetLoopInSwaps :many
SELECT
swaps.*,
loopin_swaps.*,
htlc_keys.*
FROM
swaps
JOIN
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
ORDER BY
swaps.id;

-- name: GetLoopInSwap :one
SELECT
swaps.*,
loopin_swaps.*,
htlc_keys.*
FROM
swaps
JOIN
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
WHERE
swaps.swap_hash = $1;

-- name: GetSwapUpdates :many
SELECT
*
FROM
swap_updates
WHERE
swap_hash = $1
ORDER BY
id ASC;

-- name: InsertSwap :exec
INSERT INTO swaps (
swap_hash,
preimage,
initiation_time,
amount_requested,
cltv_expiry,
max_miner_fee,
max_swap_fee,
initiation_height,
protocol_version,
label
) VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
);

-- name: InsertSwapUpdate :exec
INSERT INTO swap_updates (
swap_hash,
update_timestamp,
update_state,
htlc_txhash,
server_cost,
onchain_cost,
offchain_cost
) VALUES (
$1, $2, $3, $4, $5, $6, $7
);

-- name: InsertLoopOut :exec
INSERT INTO loopout_swaps (
swap_hash,
dest_address,
swap_invoice,
max_swap_routing_fee,
sweep_conf_target,
htlc_confirmations,
outgoing_chan_set,
prepay_invoice,
max_prepay_routing_fee,
publication_deadline
) VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
);

-- name: InsertLoopIn :exec
INSERT INTO loopin_swaps (
swap_hash,
htlc_conf_target,
last_hop,
external_htlc
) VALUES (
$1, $2, $3, $4
);

-- name: InsertHtlcKeys :exec
INSERT INTO htlc_keys(
swap_hash,
sender_script_pubkey,
receiver_script_pubkey,
sender_internal_pubkey,
receiver_internal_pubkey,
client_key_family,
client_key_index
) VALUES (
$1, $2, $3, $4, $5, $6, $7
);

@ -0,0 +1,584 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.17.2
// source: swaps.sql
package sqlc
import (
"context"
"time"
)
const getLoopInSwap = `-- name: GetLoopInSwap :one
SELECT
swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
loopin_swaps.swap_hash, loopin_swaps.htlc_conf_target, loopin_swaps.last_hop, loopin_swaps.external_htlc,
htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
FROM
swaps
JOIN
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
WHERE
swaps.swap_hash = $1
`

// GetLoopInSwapRow is one result row of GetLoopInSwap: the swaps row joined
// with its loopin_swaps and htlc_keys rows. SwapHash_2 and SwapHash_3 are
// the duplicated swap_hash join columns.
type GetLoopInSwapRow struct {
	ID                     int32
	SwapHash               []byte
	Preimage               []byte
	InitiationTime         time.Time
	AmountRequested        int64
	CltvExpiry             int32
	MaxMinerFee            int64
	MaxSwapFee             int64
	InitiationHeight       int32
	ProtocolVersion        int32
	Label                  string
	SwapHash_2             []byte
	HtlcConfTarget         int32
	LastHop                []byte
	ExternalHtlc           bool
	SwapHash_3             []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// GetLoopInSwap fetches the single loop-in swap with the given swap hash,
// joined with its shared swap data and HTLC keys.
func (q *Queries) GetLoopInSwap(ctx context.Context, swapHash []byte) (GetLoopInSwapRow, error) {
	row := q.db.QueryRowContext(ctx, getLoopInSwap, swapHash)
	var i GetLoopInSwapRow
	err := row.Scan(
		&i.ID,
		&i.SwapHash,
		&i.Preimage,
		&i.InitiationTime,
		&i.AmountRequested,
		&i.CltvExpiry,
		&i.MaxMinerFee,
		&i.MaxSwapFee,
		&i.InitiationHeight,
		&i.ProtocolVersion,
		&i.Label,
		&i.SwapHash_2,
		&i.HtlcConfTarget,
		&i.LastHop,
		&i.ExternalHtlc,
		&i.SwapHash_3,
		&i.SenderScriptPubkey,
		&i.ReceiverScriptPubkey,
		&i.SenderInternalPubkey,
		&i.ReceiverInternalPubkey,
		&i.ClientKeyFamily,
		&i.ClientKeyIndex,
	)
	return i, err
}
const getLoopInSwaps = `-- name: GetLoopInSwaps :many
SELECT
swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
loopin_swaps.swap_hash, loopin_swaps.htlc_conf_target, loopin_swaps.last_hop, loopin_swaps.external_htlc,
htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
FROM
swaps
JOIN
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
ORDER BY
swaps.id
`

// GetLoopInSwapsRow is one result row of GetLoopInSwaps; see
// GetLoopInSwapRow for the column layout.
type GetLoopInSwapsRow struct {
	ID                     int32
	SwapHash               []byte
	Preimage               []byte
	InitiationTime         time.Time
	AmountRequested        int64
	CltvExpiry             int32
	MaxMinerFee            int64
	MaxSwapFee             int64
	InitiationHeight       int32
	ProtocolVersion        int32
	Label                  string
	SwapHash_2             []byte
	HtlcConfTarget         int32
	LastHop                []byte
	ExternalHtlc           bool
	SwapHash_3             []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// GetLoopInSwaps fetches all loop-in swaps, joined with their HTLC keys and
// ordered by the swaps table id.
func (q *Queries) GetLoopInSwaps(ctx context.Context) ([]GetLoopInSwapsRow, error) {
	rows, err := q.db.QueryContext(ctx, getLoopInSwaps)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetLoopInSwapsRow
	for rows.Next() {
		var i GetLoopInSwapsRow
		if err := rows.Scan(
			&i.ID,
			&i.SwapHash,
			&i.Preimage,
			&i.InitiationTime,
			&i.AmountRequested,
			&i.CltvExpiry,
			&i.MaxMinerFee,
			&i.MaxSwapFee,
			&i.InitiationHeight,
			&i.ProtocolVersion,
			&i.Label,
			&i.SwapHash_2,
			&i.HtlcConfTarget,
			&i.LastHop,
			&i.ExternalHtlc,
			&i.SwapHash_3,
			&i.SenderScriptPubkey,
			&i.ReceiverScriptPubkey,
			&i.SenderInternalPubkey,
			&i.ReceiverInternalPubkey,
			&i.ClientKeyFamily,
			&i.ClientKeyIndex,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getLoopOutSwap = `-- name: GetLoopOutSwap :one
SELECT
swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
loopout_swaps.swap_hash, loopout_swaps.dest_address, loopout_swaps.swap_invoice, loopout_swaps.max_swap_routing_fee, loopout_swaps.sweep_conf_target, loopout_swaps.htlc_confirmations, loopout_swaps.outgoing_chan_set, loopout_swaps.prepay_invoice, loopout_swaps.max_prepay_routing_fee, loopout_swaps.publication_deadline,
htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
FROM
swaps
JOIN
loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
WHERE
swaps.swap_hash = $1
`

// GetLoopOutSwapRow is one result row of GetLoopOutSwap: the swaps row
// joined with its loopout_swaps and htlc_keys rows. SwapHash_2 and
// SwapHash_3 are the duplicated swap_hash join columns.
type GetLoopOutSwapRow struct {
	ID                     int32
	SwapHash               []byte
	Preimage               []byte
	InitiationTime         time.Time
	AmountRequested        int64
	CltvExpiry             int32
	MaxMinerFee            int64
	MaxSwapFee             int64
	InitiationHeight       int32
	ProtocolVersion        int32
	Label                  string
	SwapHash_2             []byte
	DestAddress            string
	SwapInvoice            string
	MaxSwapRoutingFee      int64
	SweepConfTarget        int32
	HtlcConfirmations      int32
	OutgoingChanSet        string
	PrepayInvoice          string
	MaxPrepayRoutingFee    int64
	PublicationDeadline    time.Time
	SwapHash_3             []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// GetLoopOutSwap fetches the single loop-out swap with the given swap hash,
// joined with its shared swap data and HTLC keys.
func (q *Queries) GetLoopOutSwap(ctx context.Context, swapHash []byte) (GetLoopOutSwapRow, error) {
	row := q.db.QueryRowContext(ctx, getLoopOutSwap, swapHash)
	var i GetLoopOutSwapRow
	err := row.Scan(
		&i.ID,
		&i.SwapHash,
		&i.Preimage,
		&i.InitiationTime,
		&i.AmountRequested,
		&i.CltvExpiry,
		&i.MaxMinerFee,
		&i.MaxSwapFee,
		&i.InitiationHeight,
		&i.ProtocolVersion,
		&i.Label,
		&i.SwapHash_2,
		&i.DestAddress,
		&i.SwapInvoice,
		&i.MaxSwapRoutingFee,
		&i.SweepConfTarget,
		&i.HtlcConfirmations,
		&i.OutgoingChanSet,
		&i.PrepayInvoice,
		&i.MaxPrepayRoutingFee,
		&i.PublicationDeadline,
		&i.SwapHash_3,
		&i.SenderScriptPubkey,
		&i.ReceiverScriptPubkey,
		&i.SenderInternalPubkey,
		&i.ReceiverInternalPubkey,
		&i.ClientKeyFamily,
		&i.ClientKeyIndex,
	)
	return i, err
}
const getLoopOutSwaps = `-- name: GetLoopOutSwaps :many
SELECT
swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
loopout_swaps.swap_hash, loopout_swaps.dest_address, loopout_swaps.swap_invoice, loopout_swaps.max_swap_routing_fee, loopout_swaps.sweep_conf_target, loopout_swaps.htlc_confirmations, loopout_swaps.outgoing_chan_set, loopout_swaps.prepay_invoice, loopout_swaps.max_prepay_routing_fee, loopout_swaps.publication_deadline,
htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
FROM
swaps
JOIN
loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
JOIN
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
ORDER BY
swaps.id
`

// GetLoopOutSwapsRow is one result row of GetLoopOutSwaps; see
// GetLoopOutSwapRow for the column layout.
type GetLoopOutSwapsRow struct {
	ID                     int32
	SwapHash               []byte
	Preimage               []byte
	InitiationTime         time.Time
	AmountRequested        int64
	CltvExpiry             int32
	MaxMinerFee            int64
	MaxSwapFee             int64
	InitiationHeight       int32
	ProtocolVersion        int32
	Label                  string
	SwapHash_2             []byte
	DestAddress            string
	SwapInvoice            string
	MaxSwapRoutingFee      int64
	SweepConfTarget        int32
	HtlcConfirmations      int32
	OutgoingChanSet        string
	PrepayInvoice          string
	MaxPrepayRoutingFee    int64
	PublicationDeadline    time.Time
	SwapHash_3             []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// GetLoopOutSwaps fetches all loop-out swaps, joined with their HTLC keys
// and ordered by the swaps table id.
func (q *Queries) GetLoopOutSwaps(ctx context.Context) ([]GetLoopOutSwapsRow, error) {
	rows, err := q.db.QueryContext(ctx, getLoopOutSwaps)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetLoopOutSwapsRow
	for rows.Next() {
		var i GetLoopOutSwapsRow
		if err := rows.Scan(
			&i.ID,
			&i.SwapHash,
			&i.Preimage,
			&i.InitiationTime,
			&i.AmountRequested,
			&i.CltvExpiry,
			&i.MaxMinerFee,
			&i.MaxSwapFee,
			&i.InitiationHeight,
			&i.ProtocolVersion,
			&i.Label,
			&i.SwapHash_2,
			&i.DestAddress,
			&i.SwapInvoice,
			&i.MaxSwapRoutingFee,
			&i.SweepConfTarget,
			&i.HtlcConfirmations,
			&i.OutgoingChanSet,
			&i.PrepayInvoice,
			&i.MaxPrepayRoutingFee,
			&i.PublicationDeadline,
			&i.SwapHash_3,
			&i.SenderScriptPubkey,
			&i.ReceiverScriptPubkey,
			&i.SenderInternalPubkey,
			&i.ReceiverInternalPubkey,
			&i.ClientKeyFamily,
			&i.ClientKeyIndex,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getSwapUpdates = `-- name: GetSwapUpdates :many
SELECT
id, swap_hash, update_timestamp, update_state, htlc_txhash, server_cost, onchain_cost, offchain_cost
FROM
swap_updates
WHERE
swap_hash = $1
ORDER BY
id ASC
`

// GetSwapUpdates fetches all state updates recorded for the swap with the
// given hash, in insertion (id) order.
func (q *Queries) GetSwapUpdates(ctx context.Context, swapHash []byte) ([]SwapUpdate, error) {
	rows, err := q.db.QueryContext(ctx, getSwapUpdates, swapHash)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []SwapUpdate
	for rows.Next() {
		var i SwapUpdate
		if err := rows.Scan(
			&i.ID,
			&i.SwapHash,
			&i.UpdateTimestamp,
			&i.UpdateState,
			&i.HtlcTxhash,
			&i.ServerCost,
			&i.OnchainCost,
			&i.OffchainCost,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const insertHtlcKeys = `-- name: InsertHtlcKeys :exec
INSERT INTO htlc_keys(
swap_hash,
sender_script_pubkey,
receiver_script_pubkey,
sender_internal_pubkey,
receiver_internal_pubkey,
client_key_family,
client_key_index
) VALUES (
$1, $2, $3, $4, $5, $6, $7
)
`

// InsertHtlcKeysParams holds the column values for a new htlc_keys row.
type InsertHtlcKeysParams struct {
	SwapHash               []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// InsertHtlcKeys inserts the HTLC key set for a swap into the htlc_keys
// table.
func (q *Queries) InsertHtlcKeys(ctx context.Context, arg InsertHtlcKeysParams) error {
	_, err := q.db.ExecContext(ctx, insertHtlcKeys,
		arg.SwapHash,
		arg.SenderScriptPubkey,
		arg.ReceiverScriptPubkey,
		arg.SenderInternalPubkey,
		arg.ReceiverInternalPubkey,
		arg.ClientKeyFamily,
		arg.ClientKeyIndex,
	)
	return err
}
const insertLoopIn = `-- name: InsertLoopIn :exec
INSERT INTO loopin_swaps (
swap_hash,
htlc_conf_target,
last_hop,
external_htlc
) VALUES (
$1, $2, $3, $4
)
`

// InsertLoopInParams holds the column values for a new loopin_swaps row.
type InsertLoopInParams struct {
	SwapHash       []byte
	HtlcConfTarget int32
	LastHop        []byte
	ExternalHtlc   bool
}

// InsertLoopIn inserts the loop-in specific data for a swap into the
// loopin_swaps table.
func (q *Queries) InsertLoopIn(ctx context.Context, arg InsertLoopInParams) error {
	_, err := q.db.ExecContext(ctx, insertLoopIn,
		arg.SwapHash,
		arg.HtlcConfTarget,
		arg.LastHop,
		arg.ExternalHtlc,
	)
	return err
}
const insertLoopOut = `-- name: InsertLoopOut :exec
INSERT INTO loopout_swaps (
swap_hash,
dest_address,
swap_invoice,
max_swap_routing_fee,
sweep_conf_target,
htlc_confirmations,
outgoing_chan_set,
prepay_invoice,
max_prepay_routing_fee,
publication_deadline
) VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
)
`

// InsertLoopOutParams holds the column values for a new loopout_swaps row.
type InsertLoopOutParams struct {
	SwapHash            []byte
	DestAddress         string
	SwapInvoice         string
	MaxSwapRoutingFee   int64
	SweepConfTarget     int32
	HtlcConfirmations   int32
	OutgoingChanSet     string
	PrepayInvoice       string
	MaxPrepayRoutingFee int64
	PublicationDeadline time.Time
}

// InsertLoopOut inserts the loop-out specific data for a swap into the
// loopout_swaps table.
func (q *Queries) InsertLoopOut(ctx context.Context, arg InsertLoopOutParams) error {
	_, err := q.db.ExecContext(ctx, insertLoopOut,
		arg.SwapHash,
		arg.DestAddress,
		arg.SwapInvoice,
		arg.MaxSwapRoutingFee,
		arg.SweepConfTarget,
		arg.HtlcConfirmations,
		arg.OutgoingChanSet,
		arg.PrepayInvoice,
		arg.MaxPrepayRoutingFee,
		arg.PublicationDeadline,
	)
	return err
}
const insertSwap = `-- name: InsertSwap :exec
INSERT INTO swaps (
swap_hash,
preimage,
initiation_time,
amount_requested,
cltv_expiry,
max_miner_fee,
max_swap_fee,
initiation_height,
protocol_version,
label
) VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
)
`

// InsertSwapParams holds the column values for a new swaps row.
type InsertSwapParams struct {
	SwapHash         []byte
	Preimage         []byte
	InitiationTime   time.Time
	AmountRequested  int64
	CltvExpiry       int32
	MaxMinerFee      int64
	MaxSwapFee       int64
	InitiationHeight int32
	ProtocolVersion  int32
	Label            string
}

// InsertSwap inserts the data shared by loop-in and loop-out swaps into the
// swaps table.
func (q *Queries) InsertSwap(ctx context.Context, arg InsertSwapParams) error {
	_, err := q.db.ExecContext(ctx, insertSwap,
		arg.SwapHash,
		arg.Preimage,
		arg.InitiationTime,
		arg.AmountRequested,
		arg.CltvExpiry,
		arg.MaxMinerFee,
		arg.MaxSwapFee,
		arg.InitiationHeight,
		arg.ProtocolVersion,
		arg.Label,
	)
	return err
}
const insertSwapUpdate = `-- name: InsertSwapUpdate :exec
INSERT INTO swap_updates (
swap_hash,
update_timestamp,
update_state,
htlc_txhash,
server_cost,
onchain_cost,
offchain_cost
) VALUES (
$1, $2, $3, $4, $5, $6, $7
)
`

// InsertSwapUpdateParams holds the column values for a new swap_updates
// row.
type InsertSwapUpdateParams struct {
	SwapHash        []byte
	UpdateTimestamp time.Time
	UpdateState     int32
	HtlcTxhash      string
	ServerCost      int64
	OnchainCost     int64
	OffchainCost    int64
}

// InsertSwapUpdate appends a state update row for a swap to the
// swap_updates table.
func (q *Queries) InsertSwapUpdate(ctx context.Context, arg InsertSwapUpdateParams) error {
	_, err := q.db.ExecContext(ctx, insertSwapUpdate,
		arg.SwapHash,
		arg.UpdateTimestamp,
		arg.UpdateState,
		arg.HtlcTxhash,
		arg.ServerCost,
		arg.OnchainCost,
		arg.OffchainCost,
	)
	return err
}

@ -0,0 +1,71 @@
package loopdb
import (
"errors"
"fmt"
"github.com/jackc/pgconn"
"github.com/jackc/pgerrcode"
"modernc.org/sqlite"
sqlite3 "modernc.org/sqlite/lib"
)
// MapSQLError normalizes a backend-specific database error into a database
// agnostic SQL error. Errors that do not originate from a known backend are
// returned unchanged.
func MapSQLError(err error) error {
	var (
		sqliteErr *sqlite.Error
		pqErr     *pgconn.PgError
	)

	switch {
	// The error originated from the sqlite backend.
	case errors.As(err, &sqliteErr):
		return parseSqliteError(sqliteErr)

	// The error originated from the postgres backend.
	case errors.As(err, &pqErr):
		return parsePostgresError(pqErr)

	// Not a recognized database error, hand it back untouched.
	default:
		return err
	}
}
// parseSqliteError attempts to parse a sqlite error as a database agnostic
// SQL error.
func parseSqliteError(sqliteErr *sqlite.Error) error {
	switch sqliteErr.Code() {
	// Handle unique constraint violation error.
	case sqlite3.SQLITE_CONSTRAINT_UNIQUE:
		return &ErrSqlUniqueConstraintViolation{
			DbError: sqliteErr,
		}

	default:
		return fmt.Errorf("unknown sqlite error: %w", sqliteErr)
	}
}
// parsePostgresError attempts to parse a postgres error as a database
// agnostic SQL error.
func parsePostgresError(pqErr *pgconn.PgError) error {
	// A unique constraint violation maps onto the database agnostic
	// error type; anything else is wrapped and passed through.
	if pqErr.Code == pgerrcode.UniqueViolation {
		return &ErrSqlUniqueConstraintViolation{
			DbError: pqErr,
		}
	}

	return fmt.Errorf("unknown postgres error: %w", pqErr)
}
// ErrSqlUniqueConstraintViolation is an error type which represents a
// database agnostic SQL unique constraint violation.
type ErrSqlUniqueConstraintViolation struct {
	// DbError is the underlying backend-specific error.
	DbError error
}

// Error implements the error interface.
func (e ErrSqlUniqueConstraintViolation) Error() string {
	return fmt.Sprintf("sql unique constraint violation: %v", e.DbError)
}

// Unwrap returns the wrapped backend-specific error so that callers can
// inspect it with errors.Is/errors.As.
func (e ErrSqlUniqueConstraintViolation) Unwrap() error {
	return e.DbError
}

@ -0,0 +1,221 @@
package loopdb
import (
"context"
"database/sql"
"fmt"
"net/url"
"path/filepath"
"testing"
"github.com/btcsuite/btcd/chaincfg"
sqlite_migrate "github.com/golang-migrate/migrate/v4/database/sqlite"
"github.com/lightninglabs/loop/loopdb/sqlc"
"github.com/stretchr/testify/require"
_ "modernc.org/sqlite" // Register relevant drivers.
)
const (
	// sqliteOptionPrefix is the string prefix sqlite uses to set various
	// options in the DSN query string. This is used in the following
	// format:
	// * sqliteOptionPrefix || option_name = option_value.
	sqliteOptionPrefix = "_pragma"
)
// SqliteConfig holds all the config arguments needed to interact with our
// sqlite DB.
type SqliteConfig struct {
	// SkipMigrations if true, then migrations are not applied on startup
	// and the database schema is assumed to already exist.
	SkipMigrations bool `long:"skipmigrations" description:"Skip applying migrations on startup."`

	// DatabaseFileName is the full file path where the database file can
	// be found.
	DatabaseFileName string `long:"dbfile" description:"The full path to the database."`
}
// SqliteSwapStore is a sqlite3 based database for the loop daemon. It
// embeds BaseDB for the shared connection/query/transaction functionality.
type SqliteSwapStore struct {
	cfg *SqliteConfig

	*BaseDB
}
// NewSqliteStore attempts to open a new sqlite database based on the passed
// config.
func NewSqliteStore(cfg *SqliteConfig, network *chaincfg.Params) (*SqliteSwapStore, error) {
	// Pragma options are passed to the driver through the DSN query
	// string. For now we only need foreign key enforcement, WAL
	// journaling and a busy timeout.
	pragmas := [][2]string{
		{"foreign_keys", "on"},
		{"journal_mode", "WAL"},
		{"busy_timeout", "5000"},
	}

	values := make(url.Values)
	for _, pragma := range pragmas {
		values.Add(
			sqliteOptionPrefix,
			fmt.Sprintf("%v=%v", pragma[0], pragma[1]),
		)
	}

	// The DSN is the database file name followed by the encoded pragma
	// options as a query string. For more details on the formatting here,
	// see the modernc.org/sqlite docs:
	// https://pkg.go.dev/modernc.org/sqlite#Driver.Open.
	dsn := fmt.Sprintf("%v?%v", cfg.DatabaseFileName, values.Encode())
	db, err := sql.Open("sqlite", dsn)
	if err != nil {
		return nil, err
	}

	// Unless explicitly disabled, bring the schema up to date by applying
	// the embedded migration files against the freshly opened database.
	if !cfg.SkipMigrations {
		driver, err := sqlite_migrate.WithInstance(
			db, &sqlite_migrate.Config{},
		)
		if err != nil {
			return nil, err
		}

		err = applyMigrations(
			sqlSchemas, driver, "sqlc/migrations", "sqlc",
		)
		if err != nil {
			return nil, err
		}
	}

	return &SqliteSwapStore{
		cfg: cfg,
		BaseDB: &BaseDB{
			DB:      db,
			Queries: sqlc.New(db),
			network: network,
		},
	}, nil
}
// NewTestSqliteDB is a helper function that creates an SQLite database for
// testing.
func NewTestSqliteDB(t *testing.T) *SqliteSwapStore {
	t.Helper()

	t.Logf("Creating new SQLite DB for testing")

	// Place the database file in a per-test temp dir so it is removed
	// automatically when the test finishes.
	store, err := NewSqliteStore(
		&SqliteConfig{
			DatabaseFileName: filepath.Join(t.TempDir(), "tmp.db"),
			SkipMigrations:   false,
		}, &chaincfg.MainNetParams,
	)
	require.NoError(t, err)

	t.Cleanup(func() {
		require.NoError(t, store.DB.Close())
	})

	return store
}
// BaseDB is the base database struct that each implementation can embed to
// gain some common functionality.
type BaseDB struct {
	// network holds the chain parameters of the network the store
	// operates on.
	network *chaincfg.Params

	// DB is the underlying database connection handle.
	*sql.DB

	// Queries is the sqlc-generated query wrapper over DB.
	*sqlc.Queries
}
// BeginTx wraps the normal sql specific BeginTx method with the TxOptions
// interface. This interface is then mapped to the concrete sql tx options
// struct.
func (db *BaseDB) BeginTx(ctx context.Context,
	opts TxOptions) (*sql.Tx, error) {

	return db.DB.BeginTx(ctx, &sql.TxOptions{
		ReadOnly: opts.ReadOnly(),
	})
}
// ExecTx is a wrapper for txBody to abstract the creation and commit of a db
// transaction. The db transaction is embedded in a `*sqlc.Queries` that
// txBody needs to use when executing each one of the queries that need to be
// applied atomically.
func (db *BaseDB) ExecTx(ctx context.Context, txOptions TxOptions,
	txBody func(*sqlc.Queries) error) error {

	// Create the db transaction.
	tx, err := db.BeginTx(ctx, txOptions)
	if err != nil {
		return err
	}

	// Rollback is safe to call even if the tx is already closed, so if
	// the tx commits successfully, this is a no-op.
	defer tx.Rollback() //nolint: errcheck

	if err := txBody(db.Queries.WithTx(tx)); err != nil {
		return err
	}

	// Commit transaction.
	if err = tx.Commit(); err != nil {
		return err
	}

	return nil
}
// TxOptions represents a set of options one can use to control what type of
// database transaction is created. Transactions can either be read or
// write.
type TxOptions interface {
	// ReadOnly returns true if the transaction should be read only.
	ReadOnly() bool
}

// SqliteTxOptions defines the set of db txn options the SqliteSwapStore
// understands.
type SqliteTxOptions struct {
	// readOnly governs if a read only transaction is needed or not.
	readOnly bool
}

// NewSqlReadOpts returns a new SqliteTxOptions instance that triggers a
// read-only transaction.
func NewSqlReadOpts() *SqliteTxOptions {
	return &SqliteTxOptions{
		readOnly: true,
	}
}

// ReadOnly returns true if the transaction should be read only.
//
// NOTE: This implements the TxOptions interface.
func (r *SqliteTxOptions) ReadOnly() bool {
	return r.readOnly
}

@ -2,6 +2,7 @@ package loopdb
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
@ -346,7 +347,9 @@ func unmarshalHtlcKeys(swapBucket *bbolt.Bucket, contract *SwapContract) error {
// FetchLoopOutSwaps returns all loop out swaps currently in the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *boltSwapStore) FetchLoopOutSwaps() ([]*LoopOut, error) {
func (s *boltSwapStore) FetchLoopOutSwaps(ctx context.Context) ([]*LoopOut,
error) {
var swaps []*LoopOut
err := s.db.View(func(tx *bbolt.Tx) error {
@ -385,7 +388,9 @@ func (s *boltSwapStore) FetchLoopOutSwaps() ([]*LoopOut, error) {
// FetchLoopOutSwap returns the loop out swap with the given hash.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *boltSwapStore) FetchLoopOutSwap(hash lntypes.Hash) (*LoopOut, error) {
func (s *boltSwapStore) FetchLoopOutSwap(ctx context.Context,
hash lntypes.Hash) (*LoopOut, error) {
var swap *LoopOut
err := s.db.View(func(tx *bbolt.Tx) error {
@ -414,7 +419,9 @@ func (s *boltSwapStore) FetchLoopOutSwap(hash lntypes.Hash) (*LoopOut, error) {
// FetchLoopInSwaps returns all loop in swaps currently in the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *boltSwapStore) FetchLoopInSwaps() ([]*LoopIn, error) {
func (s *boltSwapStore) FetchLoopInSwaps(ctx context.Context) ([]*LoopIn,
error) {
var swaps []*LoopIn
err := s.db.View(func(tx *bbolt.Tx) error {
@ -475,7 +482,7 @@ func createLoopBucket(tx *bbolt.Tx, swapTypeKey []byte, hash lntypes.Hash) (
// CreateLoopOut adds an initiated swap to the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *boltSwapStore) CreateLoopOut(hash lntypes.Hash,
func (s *boltSwapStore) CreateLoopOut(ctx context.Context, hash lntypes.Hash,
swap *LoopOutContract) error {
// If the hash doesn't match the pre-image, then this is an invalid
@ -561,7 +568,7 @@ func (s *boltSwapStore) CreateLoopOut(hash lntypes.Hash,
// CreateLoopIn adds an initiated swap to the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *boltSwapStore) CreateLoopIn(hash lntypes.Hash,
func (s *boltSwapStore) CreateLoopIn(ctx context.Context, hash lntypes.Hash,
swap *LoopInContract) error {
// If the hash doesn't match the pre-image, then this is an invalid
@ -678,8 +685,8 @@ func (s *boltSwapStore) updateLoop(bucketKey []byte, hash lntypes.Hash,
// a particular swap as it goes through the various stages in its lifetime.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *boltSwapStore) UpdateLoopOut(hash lntypes.Hash, time time.Time,
state SwapStateData) error {
func (s *boltSwapStore) UpdateLoopOut(ctx context.Context,
hash lntypes.Hash, time time.Time, state SwapStateData) error {
return s.updateLoop(loopOutBucketKey, hash, time, state)
}
@ -688,8 +695,8 @@ func (s *boltSwapStore) UpdateLoopOut(hash lntypes.Hash, time time.Time,
// a particular swap as it goes through the various stages in its lifetime.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *boltSwapStore) UpdateLoopIn(hash lntypes.Hash, time time.Time,
state SwapStateData) error {
func (s *boltSwapStore) UpdateLoopIn(ctx context.Context, hash lntypes.Hash,
time time.Time, state SwapStateData) error {
return s.updateLoop(loopInBucketKey, hash, time, state)
}
@ -706,7 +713,9 @@ func (s *boltSwapStore) Close() error {
//
// NOTE: it's the caller's responsibility to encode the param. Atm, it's
// encoding using the proto package's `Marshal` method.
func (s *boltSwapStore) PutLiquidityParams(params []byte) error {
func (s *boltSwapStore) PutLiquidityParams(ctx context.Context,
params []byte) error {
return s.db.Update(func(tx *bbolt.Tx) error {
// Read the root bucket.
rootBucket := tx.Bucket(liquidityBucket)
@ -722,7 +731,9 @@ func (s *boltSwapStore) PutLiquidityParams(params []byte) error {
//
// NOTE: it's the caller's responsibility to decode the param. Atm, it's
// decoding using the proto package's `Unmarshal` method.
func (s *boltSwapStore) FetchLiquidityParams() ([]byte, error) {
func (s *boltSwapStore) FetchLiquidityParams(ctx context.Context) ([]byte,
error) {
var params []byte
err := s.db.View(func(tx *bbolt.Tx) error {
@ -974,3 +985,24 @@ func (s *boltSwapStore) fetchLoopInSwap(rootBucket *bbolt.Bucket,
return &loop, nil
}
// BatchCreateLoopOut creates a batch of swaps to the store.
//
// NOTE: Not supported by the bbolt-backed store; batch writes are only
// available in the SQL store.
func (s *boltSwapStore) BatchCreateLoopOut(ctx context.Context,
	swaps map[lntypes.Hash]*LoopOutContract) error {

	return errors.New("not implemented")
}
// BatchCreateLoopIn creates a batch of loop in swaps to the store.
//
// NOTE: Not supported by the bbolt-backed store; batch writes are only
// available in the SQL store.
func (s *boltSwapStore) BatchCreateLoopIn(ctx context.Context,
	swaps map[lntypes.Hash]*LoopInContract) error {

	return errors.New("not implemented")
}
// BatchInsertUpdate inserts a batch of swap updates to the store.
//
// NOTE: Not supported by the bbolt-backed store; batch writes are only
// available in the SQL store.
func (s *boltSwapStore) BatchInsertUpdate(ctx context.Context,
	updateData map[lntypes.Hash][]BatchInsertUpdateData) error {

	return errors.New("not implemented")
}

@ -1,6 +1,7 @@
package loopdb
import (
"context"
"crypto/sha256"
"io/ioutil"
"os"
@ -121,8 +122,10 @@ func testLoopOutStore(t *testing.T, pendingSwap *LoopOutContract) {
store, err := NewBoltSwapStore(tempDirName, &chaincfg.MainNetParams)
require.NoError(t, err)
ctxb := context.Background()
// First, verify that an empty database has no active swaps.
swaps, err := store.FetchLoopOutSwaps()
swaps, err := store.FetchLoopOutSwaps(ctxb)
require.NoError(t, err)
require.Empty(t, swaps)
@ -134,12 +137,12 @@ func testLoopOutStore(t *testing.T, pendingSwap *LoopOutContract) {
checkSwap := func(expectedState SwapState) {
t.Helper()
swaps, err := store.FetchLoopOutSwaps()
swaps, err := store.FetchLoopOutSwaps(ctxb)
require.NoError(t, err)
require.Len(t, swaps, 1)
swap, err := store.FetchLoopOutSwap(hash)
swap, err := store.FetchLoopOutSwap(ctxb, hash)
require.NoError(t, err)
require.Equal(t, hash, swap.Hash)
@ -158,20 +161,20 @@ func testLoopOutStore(t *testing.T, pendingSwap *LoopOutContract) {
// If we create a new swap, then it should show up as being initialized
// right after.
err = store.CreateLoopOut(hash, pendingSwap)
err = store.CreateLoopOut(ctxb, hash, pendingSwap)
require.NoError(t, err)
checkSwap(StateInitiated)
// Trying to make the same swap again should result in an error.
err = store.CreateLoopOut(hash, pendingSwap)
err = store.CreateLoopOut(ctxb, hash, pendingSwap)
require.Error(t, err)
checkSwap(StateInitiated)
// Next, we'll update to the next state of the pre-image being
// revealed. The state should be reflected here again.
err = store.UpdateLoopOut(
hash, testTime,
ctxb, hash, testTime,
SwapStateData{
State: StatePreimageRevealed,
HtlcTxHash: &chainhash.Hash{1, 6, 2},
@ -184,7 +187,7 @@ func testLoopOutStore(t *testing.T, pendingSwap *LoopOutContract) {
// Next, we'll update to the final state to ensure that the state is
// properly updated.
err = store.UpdateLoopOut(
hash, testTime,
ctxb, hash, testTime,
SwapStateData{
State: StateFailInsufficientValue,
},
@ -260,8 +263,10 @@ func testLoopInStore(t *testing.T, pendingSwap LoopInContract) {
store, err := NewBoltSwapStore(tempDirName, &chaincfg.MainNetParams)
require.NoError(t, err)
ctxb := context.Background()
// First, verify that an empty database has no active swaps.
swaps, err := store.FetchLoopInSwaps()
swaps, err := store.FetchLoopInSwaps(ctxb)
require.NoError(t, err)
require.Empty(t, swaps)
@ -272,7 +277,7 @@ func testLoopInStore(t *testing.T, pendingSwap LoopInContract) {
checkSwap := func(expectedState SwapState) {
t.Helper()
swaps, err := store.FetchLoopInSwaps()
swaps, err := store.FetchLoopInSwaps(ctxb)
require.NoError(t, err)
require.Len(t, swaps, 1)
@ -285,13 +290,13 @@ func testLoopInStore(t *testing.T, pendingSwap LoopInContract) {
// If we create a new swap, then it should show up as being initialized
// right after.
err = store.CreateLoopIn(hash, &pendingSwap)
err = store.CreateLoopIn(ctxb, hash, &pendingSwap)
require.NoError(t, err)
checkSwap(StateInitiated)
// Trying to make the same swap again should result in an error.
err = store.CreateLoopIn(hash, &pendingSwap)
err = store.CreateLoopIn(ctxb, hash, &pendingSwap)
require.Error(t, err)
checkSwap(StateInitiated)
@ -299,7 +304,7 @@ func testLoopInStore(t *testing.T, pendingSwap LoopInContract) {
// Next, we'll update to the next state of the pre-image being
// revealed. The state should be reflected here again.
err = store.UpdateLoopIn(
hash, testTime,
ctxb, hash, testTime,
SwapStateData{
State: StatePreimageRevealed,
},
@ -311,7 +316,7 @@ func testLoopInStore(t *testing.T, pendingSwap LoopInContract) {
// Next, we'll update to the final state to ensure that the state is
// properly updated.
err = store.UpdateLoopIn(
hash, testTime,
ctxb, hash, testTime,
SwapStateData{
State: StateFailInsufficientValue,
},
@ -407,6 +412,8 @@ func TestLegacyOutgoingChannel(t *testing.T) {
legacyOutgoingChannel = Hex("0000000000000005")
)
ctxb := context.Background()
legacyDb := map[string]interface{}{
"loop-in": map[string]interface{}{},
"metadata": map[string]interface{}{
@ -449,7 +456,7 @@ func TestLegacyOutgoingChannel(t *testing.T) {
t.Fatal(err)
}
swaps, err := store.FetchLoopOutSwaps()
swaps, err := store.FetchLoopOutSwaps(ctxb)
if err != nil {
t.Fatal(err)
}
@ -467,23 +474,26 @@ func TestLiquidityParams(t *testing.T) {
require.NoError(t, err, "failed to db")
defer os.RemoveAll(tempDirName)
ctxb := context.Background()
store, err := NewBoltSwapStore(tempDirName, &chaincfg.MainNetParams)
require.NoError(t, err, "failed to create store")
// Test when there's no params saved before, an empty bytes is
// returned.
params, err := store.FetchLiquidityParams()
params, err := store.FetchLiquidityParams(ctxb)
require.NoError(t, err, "failed to fetch params")
require.Empty(t, params, "expect empty bytes")
require.Nil(t, params)
params = []byte("test")
// Test we can save the params.
err = store.PutLiquidityParams(params)
err = store.PutLiquidityParams(ctxb, params)
require.NoError(t, err, "failed to put params")
// Now fetch the db again should return the above saved bytes.
paramsRead, err := store.FetchLiquidityParams()
paramsRead, err := store.FetchLiquidityParams(ctxb)
require.NoError(t, err, "failed to fetch params")
require.Equal(t, params, paramsRead, "unexpected return value")
}

@ -0,0 +1,13 @@
//go:build test_db_postgres
// +build test_db_postgres
package loopdb
import (
"testing"
)
// NewTestDB is a helper function that creates a Postgres database for
// testing.
func NewTestDB(t *testing.T) *PostgresStore {
	// Mark this function as a test helper so that failures inside
	// NewTestPostgresDB are reported at the caller's line.
	t.Helper()

	return NewTestPostgresDB(t)
}

@ -0,0 +1,13 @@
//go:build !test_db_postgres
// +build !test_db_postgres
package loopdb
import (
"testing"
)
// NewTestDB is a helper function that creates an SQLite database for
// testing.
func NewTestDB(t *testing.T) *SqliteSwapStore {
	// Mark this function as a test helper so that failures inside
	// NewTestSqliteDB are reported at the caller's line.
	t.Helper()

	return NewTestSqliteDB(t)
}

@ -298,7 +298,7 @@ func newLoopInSwap(globalCtx context.Context, cfg *swapConfig,
// Persist the data before exiting this function, so that the caller can
// trust that this swap will be resumed on restart.
err = cfg.store.CreateLoopIn(swapHash, &swap.LoopInContract)
err = cfg.store.CreateLoopIn(globalCtx, swapHash, &swap.LoopInContract)
if err != nil {
return nil, fmt.Errorf("cannot store swap: %v", err)
}
@ -776,7 +776,7 @@ func (s *loopInSwap) publishOnChainHtlc(ctx context.Context) (bool, error) {
s.cost.Onchain = fee
s.lastUpdateTime = time.Now()
if err := s.persistState(); err != nil {
if err := s.persistState(ctx); err != nil {
return false, fmt.Errorf("persist htlc tx: %v", err)
}
@ -1068,7 +1068,7 @@ func (s *loopInSwap) publishTimeoutTx(ctx context.Context,
// update notification.
func (s *loopInSwap) persistAndAnnounceState(ctx context.Context) error {
// Update state in store.
if err := s.persistState(); err != nil {
if err := s.persistState(ctx); err != nil {
return err
}
@ -1077,9 +1077,9 @@ func (s *loopInSwap) persistAndAnnounceState(ctx context.Context) error {
}
// persistState updates the swap state on disk.
func (s *loopInSwap) persistState() error {
func (s *loopInSwap) persistState(ctx context.Context) error {
return s.store.UpdateLoopIn(
s.hash, s.lastUpdateTime,
ctx, s.hash, s.lastUpdateTime,
loopdb.SwapStateData{
State: s.state,
Cost: s.cost,

@ -395,6 +395,7 @@ func testLoopInResume(t *testing.T, state loopdb.SwapState, expired bool,
storedVersion loopdb.ProtocolVersion) {
defer test.Guard(t)()
ctxb := context.Background()
ctx := newLoopInTestContext(t)
cfg := newSwapConfig(&ctx.lnd.LndServices, ctx.store, ctx.server)
@ -454,7 +455,7 @@ func testLoopInResume(t *testing.T, state loopdb.SwapState, expired bool,
)
require.NoError(t, err)
err = ctx.store.CreateLoopIn(testPreimage.Hash(), contract)
err = ctx.store.CreateLoopIn(ctxb, testPreimage.Hash(), contract)
require.NoError(t, err)
inSwap, err := resumeLoopInSwap(context.Background(), cfg, pendSwap)

@ -233,7 +233,7 @@ func newLoopOutSwap(globalCtx context.Context, cfg *swapConfig,
// Persist the data before exiting this function, so that the caller
// can trust that this swap will be resumed on restart.
err = cfg.store.CreateLoopOut(swapHash, &swap.LoopOutContract)
err = cfg.store.CreateLoopOut(globalCtx, swapHash, &swap.LoopOutContract)
if err != nil {
return nil, fmt.Errorf("cannot store swap: %v", err)
}
@ -578,7 +578,7 @@ func (s *loopOutSwap) persistState(ctx context.Context) error {
// Update state in store.
err := s.store.UpdateLoopOut(
s.hash, updateTime,
ctx, s.hash, updateTime,
loopdb.SwapStateData{
State: s.state,
Cost: s.cost,

@ -15,7 +15,7 @@ This file tracks release notes for the loop client.
## Next release
#### New Features
* The boltdb database backend has been deprecated; loop now supports both sqlite and postgres. On first startup, loop will automatically migrate your database to sqlite. If you want to use postgres instead, set the `--databasebackend` flag to `postgres` and set the `--postgres.host`, `--postgres.port`, `--postgres.user`, `--postgres.password` and `--postgres.dbname` flags to connect to your postgres instance. Your boltdb file will be saved as a backup in the loop directory. NOTE: only a single migration from boltdb to a sql backend is currently supported. Manual migration between postgres and sqlite will be supported in the future.
#### Breaking Changes
#### Bug Fixes

@ -0,0 +1,20 @@
#!/bin/bash

# gen_sqlc_docker.sh generates the Go models and queries from the SQL
# schema/queries in loopdb/sqlc using the sqlc docker image, so that no
# local sqlc installation is required.
set -e

# Directory of the script file, independent of where it's called from.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

echo "Generating sql models and queries in go..."

# Run the container as the invoking user so that the generated files are
# owned by the host user rather than root.
docker run \
  --rm \
  --user "$(id -u):$(id -g)" \
  -e UID="$(id -u)" \
  -v "$DIR/../:/build" \
  -w /build \
  kjconroy/sqlc:1.17.2 generate

@ -0,0 +1,10 @@
# sqlc code generation configuration (config file format v2).
version: "2"
sql:
    # Queries are written against the postgres dialect; the generated Go
    # code is shared by both the postgres and sqlite backends.
  - engine: "postgresql"
    # Directory containing the database schema migration files.
    schema: "loopdb/sqlc/migrations"
    # Directory containing the SQL query definitions.
    queries: "loopdb/sqlc/queries"
    gen:
      go:
        # Generated Go files are placed alongside the SQL definitions.
        out: loopdb/sqlc
        package: sqlc
        # Emit a Querier interface so stores can be mocked/wrapped.
        emit_interface: true

@ -1,6 +1,7 @@
package loop
import (
"context"
"errors"
"testing"
"time"
@ -45,7 +46,7 @@ func newStoreMock(t *testing.T) *storeMock {
// FetchLoopOutSwaps returns all swaps currently in the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) FetchLoopOutSwaps() ([]*loopdb.LoopOut, error) {
func (s *storeMock) FetchLoopOutSwaps(ctx context.Context) ([]*loopdb.LoopOut, error) {
result := []*loopdb.LoopOut{}
for hash, contract := range s.loopOutSwaps {
@ -73,7 +74,7 @@ func (s *storeMock) FetchLoopOutSwaps() ([]*loopdb.LoopOut, error) {
// FetchLoopOutSwaps returns all swaps currently in the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) FetchLoopOutSwap(
func (s *storeMock) FetchLoopOutSwap(ctx context.Context,
hash lntypes.Hash) (*loopdb.LoopOut, error) {
contract, ok := s.loopOutSwaps[hash]
@ -103,7 +104,7 @@ func (s *storeMock) FetchLoopOutSwap(
// CreateLoopOut adds an initiated swap to the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) CreateLoopOut(hash lntypes.Hash,
func (s *storeMock) CreateLoopOut(ctx context.Context, hash lntypes.Hash,
swap *loopdb.LoopOutContract) error {
_, ok := s.loopOutSwaps[hash]
@ -119,7 +120,9 @@ func (s *storeMock) CreateLoopOut(hash lntypes.Hash,
}
// FetchLoopInSwaps returns all in swaps currently in the store.
func (s *storeMock) FetchLoopInSwaps() ([]*loopdb.LoopIn, error) {
func (s *storeMock) FetchLoopInSwaps(ctx context.Context) ([]*loopdb.LoopIn,
error) {
result := []*loopdb.LoopIn{}
for hash, contract := range s.loopInSwaps {
@ -147,7 +150,7 @@ func (s *storeMock) FetchLoopInSwaps() ([]*loopdb.LoopIn, error) {
// CreateLoopIn adds an initiated loop in swap to the store.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) CreateLoopIn(hash lntypes.Hash,
func (s *storeMock) CreateLoopIn(ctx context.Context, hash lntypes.Hash,
swap *loopdb.LoopInContract) error {
_, ok := s.loopInSwaps[hash]
@ -167,8 +170,8 @@ func (s *storeMock) CreateLoopIn(hash lntypes.Hash,
// its lifetime.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) UpdateLoopOut(hash lntypes.Hash, time time.Time,
state loopdb.SwapStateData) error {
func (s *storeMock) UpdateLoopOut(ctx context.Context, hash lntypes.Hash,
time time.Time, state loopdb.SwapStateData) error {
updates, ok := s.loopOutUpdates[hash]
if !ok {
@ -187,8 +190,8 @@ func (s *storeMock) UpdateLoopOut(hash lntypes.Hash, time time.Time,
// its lifetime.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) UpdateLoopIn(hash lntypes.Hash, time time.Time,
state loopdb.SwapStateData) error {
func (s *storeMock) UpdateLoopIn(ctx context.Context, hash lntypes.Hash,
time time.Time, state loopdb.SwapStateData) error {
updates, ok := s.loopInUpdates[hash]
if !ok {
@ -206,7 +209,9 @@ func (s *storeMock) UpdateLoopIn(hash lntypes.Hash, time time.Time,
// bucket.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) PutLiquidityParams(params []byte) error {
func (s *storeMock) PutLiquidityParams(ctx context.Context,
params []byte) error {
return nil
}
@ -214,7 +219,7 @@ func (s *storeMock) PutLiquidityParams(params []byte) error {
// the bucket.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) FetchLiquidityParams() ([]byte, error) {
func (s *storeMock) FetchLiquidityParams(ctx context.Context) ([]byte, error) {
return nil, nil
}
@ -298,3 +303,20 @@ func (s *storeMock) assertStoreFinished(expectedResult loopdb.SwapState) {
s.t.Fatalf("expected swap to be finished")
}
}
// BatchCreateLoopOut adds multiple initiated swaps to the store. The mock
// does not support batched inserts and always returns an error.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) BatchCreateLoopOut(ctx context.Context,
	swaps map[lntypes.Hash]*loopdb.LoopOutContract) error {

	return errors.New("not implemented")
}
// BatchCreateLoopIn adds multiple initiated loop in swaps to the store.
// The mock does not support batched inserts and always returns an error.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) BatchCreateLoopIn(ctx context.Context,
	swaps map[lntypes.Hash]*loopdb.LoopInContract) error {

	return errors.New("not implemented")
}
// BatchInsertUpdate inserts multiple swap updates into the store. The mock
// does not support batched inserts and always returns an error.
//
// NOTE: Part of the loopdb.SwapStore interface.
func (s *storeMock) BatchInsertUpdate(ctx context.Context,
	updateData map[lntypes.Hash][]loopdb.BatchInsertUpdateData) error {

	return errors.New("not implemented")
}

Loading…
Cancel
Save