mirror of https://github.com/lightninglabs/loop
commit
0781cafbfb
@ -0,0 +1,92 @@
|
|||||||
|
package loopd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/lightninglabs/lndclient"
|
||||||
|
"github.com/lightninglabs/loop/loopdb"
|
||||||
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// migrateBoltdb migrates the boltdb to sqlite.
func migrateBoltdb(ctx context.Context, cfg *Config) error {
	// First get the chain params.
	chainParams, err := lndclient.Network(cfg.Network).ChainParams()
	if err != nil {
		return err
	}

	// First open the bolt db.
	boltdb, err := loopdb.NewBoltSwapStore(cfg.DataDir, chainParams)
	if err != nil {
		return err
	}
	defer boltdb.Close()

	// Open the configured target database that the bolt contents will be
	// copied into.
	var db loopdb.SwapStore
	switch cfg.DatabaseBackend {
	case DatabaseBackendSqlite:
		log.Infof("Opening sqlite3 database at: %v",
			cfg.Sqlite.DatabaseFileName)
		db, err = loopdb.NewSqliteStore(
			cfg.Sqlite, chainParams,
		)

	case DatabaseBackendPostgres:
		log.Infof("Opening postgres database at: %v",
			cfg.Postgres.DSN(true))
		db, err = loopdb.NewPostgresStore(
			cfg.Postgres, chainParams,
		)

	default:
		return fmt.Errorf("unknown database backend: %s",
			cfg.DatabaseBackend)
	}
	if err != nil {
		return fmt.Errorf("unable to open database: %v", err)
	}

	defer db.Close()

	// Create a new migrator manager.
	migrator := loopdb.NewMigratorManager(boltdb, db)

	// Run the migration.
	err = migrator.RunMigrations(ctx)
	if err != nil {
		return err
	}

	// If the migration was successful we'll rename the bolt db to
	// loop.db.bk so it isn't picked up and migrated again on the next
	// startup.
	err = os.Rename(
		filepath.Join(cfg.DataDir, "loop.db"),
		filepath.Join(cfg.DataDir, "loop.db.bk"),
	)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// needSqlMigration checks if the boltdb exists at it's default location
|
||||||
|
// and returns true if it does.
|
||||||
|
func needSqlMigration(cfg *Config) bool {
|
||||||
|
// First check if the data directory exists.
|
||||||
|
if !lnrpc.FileExists(cfg.DataDir) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now we'll check if the bolt db exists.
|
||||||
|
if !lnrpc.FileExists(filepath.Join(cfg.DataDir, "loop.db")) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// If both the folder and the bolt db exist, we'll return true.
|
||||||
|
return true
|
||||||
|
}
|
@ -0,0 +1,426 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/lightningnetwork/lnd/lntypes"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrLoopOutsNotEqual is returned when the migrated loop outs do not
	// match the originals.
	ErrLoopOutsNotEqual = errors.New("loop outs not equal")

	// ErrLoopInsNotEqual is returned when the migrated loop ins do not
	// match the originals.
	ErrLoopInsNotEqual = errors.New("loop ins not equal")

	// ErrLiquidityParamsNotEqual is returned when the migrated liquidity
	// parameters do not match the originals.
	ErrLiquidityParamsNotEqual = errors.New("liquidity params not equal")
)
|
||||||
|
|
||||||
|
// MigratorManager is a struct that handles migrating data from one SwapStore
// to another.
type MigratorManager struct {
	// fromStore is the source store the swap data is read from.
	fromStore SwapStore

	// toStore is the destination store the swap data is written to.
	toStore SwapStore
}

// NewMigratorManager creates a new MigratorManager.
func NewMigratorManager(fromStore SwapStore,
	toStore SwapStore) *MigratorManager {

	return &MigratorManager{
		fromStore: fromStore,
		toStore:   toStore,
	}
}
|
||||||
|
|
||||||
|
// RunMigrations runs the migrations from the fromStore to the toStore.
|
||||||
|
func (m *MigratorManager) RunMigrations(ctx context.Context) error {
|
||||||
|
log.Infof("Migrating loop outs...")
|
||||||
|
|
||||||
|
// Migrate loop outs.
|
||||||
|
err := m.migrateLoopOuts(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Checking loop outs...")
|
||||||
|
|
||||||
|
// Check that the loop outs are equal.
|
||||||
|
err = m.checkLoopOuts(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Migrating loop ins...")
|
||||||
|
|
||||||
|
// Migrate loop ins.
|
||||||
|
err = m.migrateLoopIns(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Checking loop ins...")
|
||||||
|
|
||||||
|
// Check that the loop ins are equal.
|
||||||
|
err = m.checkLoopIns(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Migrating liquidity parameters...")
|
||||||
|
|
||||||
|
// Migrate liquidity parameters.
|
||||||
|
err = m.migrateLiquidityParams(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Checking liquidity parameters...")
|
||||||
|
|
||||||
|
// Check that the liquidity parameters are equal.
|
||||||
|
err = m.checkLiquidityParams(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Migrations complete!")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// migrateLoopOuts migrates all loop outs from the fromStore to the toStore.
func (m *MigratorManager) migrateLoopOuts(ctx context.Context) error {
	// Fetch all loop outs from the fromStore.
	loopOuts, err := m.fromStore.FetchLoopOutSwaps(ctx)
	if err != nil {
		return err
	}

	// Collect contracts keyed by swap hash and all recorded state updates
	// per swap, so both can be written with single batch calls below.
	swapMap := make(map[lntypes.Hash]*LoopOutContract)
	updateMap := make(map[lntypes.Hash][]BatchInsertUpdateData)

	// For each loop out, create a new loop out in the toStore.
	for _, loopOut := range loopOuts {
		swapMap[loopOut.Hash] = loopOut.Contract

		for _, event := range loopOut.Events {
			updateMap[loopOut.Hash] = append(
				updateMap[loopOut.Hash],
				BatchInsertUpdateData{
					Time:  event.Time,
					State: event.SwapStateData,
				},
			)
		}
	}

	// Create the loop outs in the toStore.
	err = m.toStore.BatchCreateLoopOut(ctx, swapMap)
	if err != nil {
		return err
	}

	// Update the loop outs in the toStore.
	err = m.toStore.BatchInsertUpdate(
		ctx, updateMap,
	)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateLoopIns migrates all loop ins from the fromStore to the toStore.
func (m *MigratorManager) migrateLoopIns(ctx context.Context) error {
	// Fetch all loop ins from the fromStore.
	loopIns, err := m.fromStore.FetchLoopInSwaps(ctx)
	if err != nil {
		return err
	}

	// Collect contracts keyed by swap hash and all recorded state updates
	// per swap, so both can be written with single batch calls below.
	swapMap := make(map[lntypes.Hash]*LoopInContract)
	updateMap := make(map[lntypes.Hash][]BatchInsertUpdateData)

	// For each loop in, create a new loop in in the toStore.
	for _, loopIn := range loopIns {
		swapMap[loopIn.Hash] = loopIn.Contract

		for _, event := range loopIn.Events {
			updateMap[loopIn.Hash] = append(
				updateMap[loopIn.Hash],
				BatchInsertUpdateData{
					Time:  event.Time,
					State: event.SwapStateData,
				},
			)
		}
	}

	// Create the loop ins in the toStore.
	err = m.toStore.BatchCreateLoopIn(ctx, swapMap)
	if err != nil {
		return err
	}

	// Update the loop ins in the toStore.
	err = m.toStore.BatchInsertUpdate(
		ctx, updateMap,
	)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateLiquidityParams migrates the liquidity parameters from the fromStore
|
||||||
|
// to the toStore.
|
||||||
|
func (m *MigratorManager) migrateLiquidityParams(ctx context.Context) error {
|
||||||
|
// Fetch the liquidity parameters from the fromStore.
|
||||||
|
params, err := m.fromStore.FetchLiquidityParams(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put the liquidity parameters in the toStore.
|
||||||
|
err = m.toStore.PutLiquidityParams(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkLoopOuts checks that all loop outs in the toStore are the exact same as
// the loop outs in the fromStore.
func (m *MigratorManager) checkLoopOuts(ctx context.Context) error {
	// Fetch all loop outs from the fromStore.
	fromLoopOuts, err := m.fromStore.FetchLoopOutSwaps(ctx)
	if err != nil {
		return err
	}

	// Fetch all loop outs from the toStore.
	toLoopOuts, err := m.toStore.FetchLoopOutSwaps(ctx)
	if err != nil {
		return err
	}

	// Check that the number of loop outs is the same.
	if len(fromLoopOuts) != len(toLoopOuts) {
		return NewMigrationError(
			fmt.Errorf("from: %d, to: %d", len(fromLoopOuts), len(toLoopOuts)),
		)
	}

	// Sort both list of loop outs by hash so they can be compared
	// pairwise by index.
	sortLoopOuts(fromLoopOuts)
	sortLoopOuts(toLoopOuts)

	// Check that each loop out is the same.
	for i, fromLoopOut := range fromLoopOuts {
		toLoopOut := toLoopOuts[i]

		// First normalize the timestamps, since the two backends can
		// represent the same instant differently.
		err := equalizeLoopOut(fromLoopOut, toLoopOut)
		if err != nil {
			return NewMigrationError(err)
		}

		// With times normalized, require deep equality of the rest.
		err = equalValues(fromLoopOut, toLoopOut)
		if err != nil {
			return NewMigrationError(err)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// checkLoopIns checks that all loop ins in the toStore are the exact same as
// the loop ins in the fromStore.
func (m *MigratorManager) checkLoopIns(ctx context.Context) error {
	// Fetch all loop ins from the fromStore.
	fromLoopIns, err := m.fromStore.FetchLoopInSwaps(ctx)
	if err != nil {
		return err
	}

	// Fetch all loop ins from the toStore.
	toLoopIns, err := m.toStore.FetchLoopInSwaps(ctx)
	if err != nil {
		return err
	}

	// Check that the number of loop ins is the same.
	if len(fromLoopIns) != len(toLoopIns) {
		return NewMigrationError(
			fmt.Errorf("from: %d, to: %d", len(fromLoopIns), len(toLoopIns)),
		)
	}

	// Sort both list of loop ins by hash so they can be compared pairwise
	// by index.
	sortLoopIns(fromLoopIns)
	sortLoopIns(toLoopIns)

	// Check that each loop in is the same.
	for i, fromLoopIn := range fromLoopIns {
		toLoopIn := toLoopIns[i]

		// First normalize the timestamps, since the two backends can
		// represent the same instant differently.
		err := equalizeLoopIns(fromLoopIn, toLoopIn)
		if err != nil {
			return NewMigrationError(err)
		}

		// With times normalized, require deep equality of the rest.
		err = equalValues(fromLoopIn, toLoopIn)
		if err != nil {
			return NewMigrationError(err)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// checkLiquidityParams checks that the liquidity parameters in the toStore are
|
||||||
|
// the exact same as the liquidity parameters in the fromStore.
|
||||||
|
func (m *MigratorManager) checkLiquidityParams(ctx context.Context) error {
|
||||||
|
// Fetch the liquidity parameters from the fromStore.
|
||||||
|
fromParams, err := m.fromStore.FetchLiquidityParams(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch the liquidity parameters from the toStore.
|
||||||
|
toParams, err := m.toStore.FetchLiquidityParams(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the liquidity parameters are the same.
|
||||||
|
if !bytes.Equal(fromParams, toParams) {
|
||||||
|
return NewMigrationError(
|
||||||
|
fmt.Errorf("from: %v, to: %v", fromParams, toParams),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// equalizeLoopOut checks that the loop outs have the same time stored.
// Due to some weirdness with timezones between boltdb and sqlite we then
// set the times to the same value.
func equalizeLoopOut(fromLoopOut, toLoopOut *LoopOut) error {
	// Compare at unix-second precision, the resolution that survives both
	// storage backends.
	if fromLoopOut.Contract.InitiationTime.Unix() !=
		toLoopOut.Contract.InitiationTime.Unix() {
		return fmt.Errorf("initiation time mismatch")
	}

	// NOTE: this mutates toLoopOut so that a later deep-equality check
	// won't trip over differing timezone representations of the same
	// instant.
	toLoopOut.Contract.InitiationTime = fromLoopOut.Contract.InitiationTime

	if fromLoopOut.Contract.SwapPublicationDeadline.Unix() !=
		toLoopOut.Contract.SwapPublicationDeadline.Unix() {
		return fmt.Errorf("swap publication deadline mismatch")
	}

	toLoopOut.Contract.
		SwapPublicationDeadline = fromLoopOut.Contract.SwapPublicationDeadline

	// Apply the same second-precision comparison and normalization to
	// every event timestamp.
	for i, event := range fromLoopOut.Events {
		if event.Time.Unix() != toLoopOut.Events[i].Time.Unix() {
			return fmt.Errorf("event time mismatch")
		}
		toLoopOut.Events[i].Time = event.Time
	}

	return nil
}
|
||||||
|
|
||||||
|
// equalizeLoopIns checks that the loop ins have the same times stored, at
// unix-second precision, and then normalizes the times to identical values so
// a subsequent deep-equality check is not tripped up by timezone differences
// between the boltdb and sql backends.
func equalizeLoopIns(fromLoopIn, toLoopIn *LoopIn) error {
	if fromLoopIn.Contract.InitiationTime.Unix() !=
		toLoopIn.Contract.InitiationTime.Unix() {
		return fmt.Errorf("initiation time mismatch")
	}

	// NOTE: mutates toLoopIn so later deep-equality checks compare equal.
	toLoopIn.Contract.InitiationTime = fromLoopIn.Contract.InitiationTime

	// Apply the same second-precision comparison and normalization to
	// every event timestamp.
	for i, event := range fromLoopIn.Events {
		if event.Time.Unix() != toLoopIn.Events[i].Time.Unix() {
			return fmt.Errorf("event time mismatch")
		}
		toLoopIn.Events[i].Time = event.Time
	}

	return nil
}
|
||||||
|
|
||||||
|
// sortLoopOuts sorts a list of loop outs by hash.
|
||||||
|
func sortLoopOuts(loopOuts []*LoopOut) {
|
||||||
|
sort.Slice(loopOuts, func(i, j int) bool {
|
||||||
|
return bytes.Compare(loopOuts[i].Hash[:], loopOuts[j].Hash[:]) < 0
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortLoopIns sorts a list of loop ins by hash.
|
||||||
|
func sortLoopIns(loopIns []*LoopIn) {
|
||||||
|
sort.Slice(loopIns, func(i, j int) bool {
|
||||||
|
return bytes.Compare(loopIns[i].Hash[:], loopIns[j].Hash[:]) < 0
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// migrationError wraps an error that occurred during the migration sanity
// checks, so callers can distinguish check failures from other errors.
type migrationError struct {
	// Err is the underlying error that caused the check to fail.
	Err error
}

// Error implements the error interface.
func (e *migrationError) Error() string {
	return fmt.Sprintf("migrator error: %v", e.Err)
}

// Unwrap returns the wrapped error, enabling errors.Is/errors.As traversal.
func (e *migrationError) Unwrap() error {
	return e.Err
}

// Is reports whether the target is also a migrationError. Any two migration
// errors therefore match under errors.Is regardless of the wrapped cause.
func (e *migrationError) Is(target error) bool {
	_, ok := target.(*migrationError)
	return ok
}

// NewMigrationError wraps the given error in a migrationError.
func NewMigrationError(err error) *migrationError {
	return &migrationError{Err: err}
}
|
||||||
|
|
||||||
|
func equalValues(src interface{}, dst interface{}) error {
|
||||||
|
mt := &mockTesting{}
|
||||||
|
|
||||||
|
require.EqualValues(mt, src, dst)
|
||||||
|
if mt.fail || mt.failNow {
|
||||||
|
return fmt.Errorf(mt.format, mt.args)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func elementsMatch(src interface{}, dst interface{}) error {
|
||||||
|
mt := &mockTesting{}
|
||||||
|
|
||||||
|
require.ElementsMatch(mt, src, dst)
|
||||||
|
if mt.fail || mt.failNow {
|
||||||
|
return fmt.Errorf(mt.format, mt.args)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mockTesting is a minimal implementation of testify's TestingT interface
// that records failures instead of failing a real test, letting the equality
// helpers in this file convert assertions into plain errors.
type mockTesting struct {
	// failNow is set when FailNow is called.
	failNow bool
	// fail mirrors a non-fatal failure. NOTE(review): nothing in this
	// file ever sets it; it is only read — confirm this is intentional.
	fail bool
	// format and args capture the last failure message passed to Errorf.
	format string
	args   []interface{}
}

// FailNow records that a fatal failure was signalled.
func (m *mockTesting) FailNow() {
	m.failNow = true
}

// Errorf records the failure message for later retrieval.
func (m *mockTesting) Errorf(format string, args ...interface{}) {
	m.format = format
	m.args = args
}
|
@ -0,0 +1,38 @@
|
|||||||
|
//go:build test_migration
|
||||||
|
// +build test_migration
|
||||||
|
|
||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/btcsuite/btcd/btcutil"
|
||||||
|
"github.com/btcsuite/btcd/chaincfg"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// boltDbFile points at an on-disk boltdb fixture the migration test
	// reads from.
	boltDbFile = "../loopdb-kon"

	// addr is a taproot address used as a test fixture.
	addr = "bc1p4g493qcmzt79r87363fvyvq5sfz58q5gsz74g2c4ejqy5xnpcpesh3yq2y"

	// addrBtc is the decoded form of addr. The decode error is
	// deliberately ignored for this test-only fixture.
	addrBtc, _ = btcutil.DecodeAddress(addr, &chaincfg.MainNetParams)
)
|
||||||
|
|
||||||
|
// TestMigrationFromOnDiskBoltdb tests migrating from an on-disk boltdb to an
// sqlite database.
func TestMigrationFromOnDiskBoltdb(t *testing.T) {
	ctxb := context.Background()

	// Open a boltdbStore from the on-disk file. NOTE(review): the store
	// is opened with testnet3 params while the addr fixture above is a
	// mainnet address — confirm the fixture's expected network.
	boltDb, err := NewBoltSwapStore(boltDbFile, &chaincfg.TestNet3Params)
	require.NoError(t, err)

	// Create a new sqlite store for testing.
	sqlDB := NewTestDB(t)

	// Wire the two stores into a migrator: bolt as the source, sqlite as
	// the destination.
	migrator := NewMigratorManager(boltDb, sqlDB)

	// Run the migration.
	err = migrator.RunMigrations(ctxb)
	require.NoError(t, err)
}
|
@ -0,0 +1,147 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/golang-migrate/migrate/v4"
|
||||||
|
"github.com/golang-migrate/migrate/v4/database"
|
||||||
|
"github.com/golang-migrate/migrate/v4/source/httpfs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// applyMigrations executes all database migration files found in the given file
|
||||||
|
// system under the given path, using the passed database driver and database
|
||||||
|
// name.
|
||||||
|
func applyMigrations(fs fs.FS, driver database.Driver, path,
|
||||||
|
dbName string) error {
|
||||||
|
|
||||||
|
// With the migrate instance open, we'll create a new migration source
|
||||||
|
// using the embedded file system stored in sqlSchemas. The library
|
||||||
|
// we're using can't handle a raw file system interface, so we wrap it
|
||||||
|
// in this intermediate layer.
|
||||||
|
migrateFileServer, err := httpfs.New(http.FS(fs), path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, we'll run the migration with our driver above based on the
|
||||||
|
// open DB, and also the migration source stored in the file system
|
||||||
|
// above.
|
||||||
|
sqlMigrate, err := migrate.NewWithInstance(
|
||||||
|
"migrations", migrateFileServer, dbName, driver,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = sqlMigrate.Up()
|
||||||
|
if err != nil && err != migrate.ErrNoChange {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// replacerFS is an implementation of a fs.FS virtual file system that wraps an
// existing file system but does a search-and-replace operation on each file
// when it is opened.
type replacerFS struct {
	// parentFS is the wrapped file system every Open call is forwarded
	// to.
	parentFS fs.FS

	// replaces maps search terms to the text each occurrence is replaced
	// with.
	replaces map[string]string
}

// A compile-time assertion to make sure replacerFS implements the fs.FS
// interface.
var _ fs.FS = (*replacerFS)(nil)

// newReplacerFS creates a new replacer file system, wrapping the given parent
// virtual file system. Each file within the file system is undergoing a
// search-and-replace operation when it is opened, using the given map where the
// key denotes the search term and the value the term to replace each occurrence
// with.
func newReplacerFS(parent fs.FS, replaces map[string]string) *replacerFS {
	return &replacerFS{
		parentFS: parent,
		replaces: replaces,
	}
}

// Open opens a file in the virtual file system.
//
// NOTE: This is part of the fs.FS interface.
func (t *replacerFS) Open(name string) (fs.File, error) {
	f, err := t.parentFS.Open(name)
	if err != nil {
		return nil, err
	}

	stat, err := f.Stat()
	if err != nil {
		return nil, err
	}

	// Directories carry no content to rewrite, hand them back untouched.
	if stat.IsDir() {
		return f, err
	}

	// Wrap regular files so their contents are rewritten when read.
	return newReplacerFile(f, t.replaces)
}
|
||||||
|
|
||||||
|
// replacerFile is a fs.File whose contents are the parent file's contents
// after a search-and-replace pass.
type replacerFile struct {
	// parentFile is the original file backing this instance; it is kept
	// so Stat and Close can be forwarded to it.
	parentFile fs.File

	// buf holds the file's full contents after search-and-replace.
	buf bytes.Buffer
}

// A compile-time assertion to make sure replacerFile implements the fs.File
// interface.
var _ fs.File = (*replacerFile)(nil)

// newReplacerFile fully reads the parent file, applies every replacement in
// the given map to its contents, and returns a file serving the rewritten
// bytes.
func newReplacerFile(parent fs.File, replaces map[string]string) (*replacerFile,
	error) {

	content, err := io.ReadAll(parent)
	if err != nil {
		return nil, err
	}

	// Apply each replacement over the whole content.
	contentStr := string(content)
	for from, to := range replaces {
		contentStr = strings.Replace(contentStr, from, to, -1)
	}

	var buf bytes.Buffer
	_, err = buf.WriteString(contentStr)
	if err != nil {
		return nil, err
	}

	return &replacerFile{
		parentFile: parent,
		buf:        buf,
	}, nil
}

// Stat returns statistics/info about the file.
//
// NOTE: This is part of the fs.File interface.
func (t *replacerFile) Stat() (fs.FileInfo, error) {
	return t.parentFile.Stat()
}

// Read reads as many bytes as possible from the file into the given slice.
//
// NOTE: This is part of the fs.File interface.
func (t *replacerFile) Read(p []byte) (int, error) {
	return t.buf.Read(p)
}

// Close closes the underlying file.
//
// NOTE: This is part of the fs.File interface.
func (t *replacerFile) Close() error {
	// The parent file was fully read when this instance was created, but
	// it was never closed there, so release it now to avoid leaking the
	// handle.
	return t.parentFile.Close()
}
|
@ -0,0 +1,135 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/btcsuite/btcd/chaincfg"
|
||||||
|
postgres_migrate "github.com/golang-migrate/migrate/v4/database/postgres"
|
||||||
|
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||||
|
"github.com/lightninglabs/loop/loopdb/sqlc"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
dsnTemplate = "postgres://%v:%v@%v:%d/%v?sslmode=%v"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// DefaultPostgresFixtureLifetime is the default maximum time a Postgres
|
||||||
|
// test fixture is being kept alive. After that time the docker
|
||||||
|
// container will be terminated forcefully, even if the tests aren't
|
||||||
|
// fully executed yet. So this time needs to be chosen correctly to be
|
||||||
|
// longer than the longest expected individual test run time.
|
||||||
|
DefaultPostgresFixtureLifetime = 10 * time.Minute
|
||||||
|
)
|
||||||
|
|
||||||
|
// PostgresConfig holds the postgres database configuration.
type PostgresConfig struct {
	SkipMigrations     bool   `long:"skipmigrations" description:"Skip applying migrations on startup."`
	Host               string `long:"host" description:"Database server hostname."`
	Port               int    `long:"port" description:"Database server port."`
	User               string `long:"user" description:"Database user."`
	Password           string `long:"password" description:"Database user's password."`
	DBName             string `long:"dbname" description:"Database name to use."`
	MaxOpenConnections int32  `long:"maxconnections" description:"Max open connections to keep alive to the database server."`
	RequireSSL         bool   `long:"requiressl" description:"Whether to require using SSL (mode: require) when connecting to the server."`
}

// DSN returns the DSN (Data Source Name) used to connect to the database.
// When hidePassword is set, the password is masked so the result is safe to
// log.
func (s *PostgresConfig) DSN(hidePassword bool) string {
	// Default to no SSL unless the config explicitly requires it.
	var sslMode = "disable"
	if s.RequireSSL {
		sslMode = "require"
	}

	password := s.Password
	if hidePassword {
		// Placeholder used for logging the DSN safely.
		password = "****"
	}

	return fmt.Sprintf(dsnTemplate, s.User, password, s.Host, s.Port,
		s.DBName, sslMode)
}
|
||||||
|
|
||||||
|
// PostgresStore is a database store implementation that uses a Postgres
// backend.
type PostgresStore struct {
	// cfg is the postgres connection configuration.
	cfg *PostgresConfig

	*BaseDB
}

// NewPostgresStore creates a new store that is backed by a Postgres database
// backend.
func NewPostgresStore(cfg *PostgresConfig,
	network *chaincfg.Params) (*PostgresStore, error) {

	// Log the DSN with the password masked.
	log.Infof("Using SQL database '%s'", cfg.DSN(true))

	rawDb, err := sql.Open("pgx", cfg.DSN(false))
	if err != nil {
		return nil, err
	}

	if !cfg.SkipMigrations {
		// Now that the database is open, populate the database with
		// our set of schemas based on our embedded in-memory file
		// system.
		//
		// First, we'll need to open up a new migration instance for
		// our current target database: postgres.
		driver, err := postgres_migrate.WithInstance(
			rawDb, &postgres_migrate.Config{},
		)
		if err != nil {
			return nil, err
		}

		// The schema files are written for sqlite, so rewrite the
		// sqlite-specific type names into their postgres equivalents
		// before applying them.
		postgresFS := newReplacerFS(sqlSchemas, map[string]string{
			"BLOB":                "BYTEA",
			"INTEGER PRIMARY KEY": "SERIAL PRIMARY KEY",
		})

		err = applyMigrations(
			postgresFS, driver, "sqlc/migrations", cfg.DBName,
		)
		if err != nil {
			return nil, err
		}
	}

	queries := sqlc.New(rawDb)

	return &PostgresStore{
		cfg: cfg,
		BaseDB: &BaseDB{
			DB:      rawDb,
			Queries: queries,
			network: network,
		},
	}, nil
}
|
||||||
|
|
||||||
|
// NewTestPostgresDB is a helper function that creates a Postgres database for
// testing.
func NewTestPostgresDB(t *testing.T) *PostgresStore {
	t.Helper()

	t.Logf("Creating new Postgres DB for testing")

	// Spin up a throwaway dockerized postgres instance with a bounded
	// lifetime so stray containers can't linger forever.
	sqlFixture := NewTestPgFixture(t, DefaultPostgresFixtureLifetime)
	store, err := NewPostgresStore(
		sqlFixture.GetConfig(), &chaincfg.MainNetParams,
	)
	require.NoError(t, err)

	// Tear the container down once the test completes.
	t.Cleanup(func() {
		sqlFixture.TearDown(t)
	})

	return store
}
|
@ -0,0 +1,139 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/lib/pq"
|
||||||
|
"github.com/ory/dockertest/v3"
|
||||||
|
"github.com/ory/dockertest/v3/docker"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
testPgUser = "test"
|
||||||
|
testPgPass = "test"
|
||||||
|
testPgDBName = "test"
|
||||||
|
PostgresTag = "11"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestPgFixture is a test fixture that starts a Postgres 11 instance in a
// docker container.
type TestPgFixture struct {
	// db is an open connection to the containerized database.
	db *sql.DB
	// pool is the dockertest connection pool managing the container.
	pool *dockertest.Pool
	// resource is the running postgres container.
	resource *dockertest.Resource
	// host and port are where the container's postgres port is exposed.
	host string
	port int
}

// NewTestPgFixture constructs a new TestPgFixture starting up a docker
// container running Postgres 11. The started container will expire in after
// the passed duration.
func NewTestPgFixture(t *testing.T, expiry time.Duration) *TestPgFixture {
	// Use a sensible default on Windows (tcp/http) and linux/osx (socket)
	// by specifying an empty endpoint.
	pool, err := dockertest.NewPool("")
	require.NoError(t, err, "Could not connect to docker")

	// Pulls an image, creates a container based on it and runs it.
	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "postgres",
		Tag:        PostgresTag,
		Env: []string{
			fmt.Sprintf("POSTGRES_USER=%v", testPgUser),
			fmt.Sprintf("POSTGRES_PASSWORD=%v", testPgPass),
			fmt.Sprintf("POSTGRES_DB=%v", testPgDBName),
			"listen_addresses='*'",
		},
		Cmd: []string{
			"postgres",
			"-c", "log_statement=all",
			"-c", "log_destination=stderr",
		},
	}, func(config *docker.HostConfig) {
		// Set AutoRemove to true so that stopped container goes away
		// by itself.
		config.AutoRemove = true
		config.RestartPolicy = docker.RestartPolicy{Name: "no"}
	})
	require.NoError(t, err, "Could not start resource")

	// Split the mapped "host:port" endpoint back into its parts.
	hostAndPort := resource.GetHostPort("5432/tcp")
	parts := strings.Split(hostAndPort, ":")
	host := parts[0]
	port, err := strconv.ParseInt(parts[1], 10, 64)
	require.NoError(t, err)

	fixture := &TestPgFixture{
		host: host,
		port: int(port),
	}
	databaseURL := fixture.GetDSN()
	log.Infof("Connecting to Postgres fixture: %v\n", databaseURL)

	// Tell docker to hard kill the container in "expiry" seconds.
	require.NoError(t, resource.Expire(uint(expiry.Seconds())))

	// Exponential backoff-retry, because the application in the container
	// might not be ready to accept connections yet.
	pool.MaxWait = 120 * time.Second

	var testDB *sql.DB
	err = pool.Retry(func() error {
		testDB, err = sql.Open("postgres", databaseURL)
		if err != nil {
			return err
		}
		return testDB.Ping()
	})
	require.NoError(t, err, "Could not connect to docker")

	// Now fill in the rest of the fixture.
	fixture.db = testDB
	fixture.pool = pool
	fixture.resource = resource

	return fixture
}
|
||||||
|
|
||||||
|
// GetDSN returns the DSN (Data Source Name) for the started Postgres node.
|
||||||
|
func (f *TestPgFixture) GetDSN() string {
|
||||||
|
return f.GetConfig().DSN(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConfig returns the full config of the Postgres node.
|
||||||
|
func (f *TestPgFixture) GetConfig() *PostgresConfig {
|
||||||
|
return &PostgresConfig{
|
||||||
|
Host: f.host,
|
||||||
|
Port: f.port,
|
||||||
|
User: testPgUser,
|
||||||
|
Password: testPgPass,
|
||||||
|
DBName: testPgDBName,
|
||||||
|
RequireSSL: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TearDown stops the underlying docker container.
|
||||||
|
func (f *TestPgFixture) TearDown(t *testing.T) {
|
||||||
|
err := f.pool.Purge(f.resource)
|
||||||
|
require.NoError(t, err, "Could not purge resource")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearDB clears the database.
|
||||||
|
func (f *TestPgFixture) ClearDB(t *testing.T) {
|
||||||
|
dbConn, err := sql.Open("postgres", f.GetDSN())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = dbConn.ExecContext(
|
||||||
|
context.Background(),
|
||||||
|
`DROP SCHEMA IF EXISTS public CASCADE;
|
||||||
|
CREATE SCHEMA public;`,
|
||||||
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
@ -0,0 +1,8 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"embed"
|
||||||
|
)
|
||||||
|
|
||||||
|
// sqlSchemas embeds every "up" migration file so the schema can be applied
// at runtime without shipping the .sql files alongside the binary.
//
//go:embed sqlc/migrations/*.up.sql
var sqlSchemas embed.FS
|
@ -0,0 +1,734 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"errors"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/btcsuite/btcd/btcutil"
|
||||||
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||||
|
"github.com/lightninglabs/loop/loopdb/sqlc"
|
||||||
|
"github.com/lightningnetwork/lnd/keychain"
|
||||||
|
"github.com/lightningnetwork/lnd/lntypes"
|
||||||
|
"github.com/lightningnetwork/lnd/routing/route"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FetchLoopOutSwaps returns all swaps currently in the store.
|
||||||
|
func (s *BaseDB) FetchLoopOutSwaps(ctx context.Context) ([]*LoopOut,
|
||||||
|
error) {
|
||||||
|
|
||||||
|
var loopOuts []*LoopOut
|
||||||
|
|
||||||
|
err := s.ExecTx(ctx, NewSqlReadOpts(), func(*sqlc.Queries) error {
|
||||||
|
swaps, err := s.Queries.GetLoopOutSwaps(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopOuts = make([]*LoopOut, len(swaps))
|
||||||
|
|
||||||
|
for i, swap := range swaps {
|
||||||
|
updates, err := s.Queries.GetSwapUpdates(ctx, swap.SwapHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopOut, err := s.convertLoopOutRow(
|
||||||
|
sqlc.GetLoopOutSwapRow(swap), updates,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopOuts[i] = loopOut
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return loopOuts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchLoopOutSwap returns the loop out swap with the given hash.
|
||||||
|
func (s *BaseDB) FetchLoopOutSwap(ctx context.Context,
|
||||||
|
hash lntypes.Hash) (*LoopOut, error) {
|
||||||
|
|
||||||
|
var loopOut *LoopOut
|
||||||
|
|
||||||
|
err := s.ExecTx(ctx, NewSqlReadOpts(), func(*sqlc.Queries) error {
|
||||||
|
swap, err := s.Queries.GetLoopOutSwap(ctx, hash[:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
updates, err := s.Queries.GetSwapUpdates(ctx, swap.SwapHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopOut, err = s.convertLoopOutRow(
|
||||||
|
swap, updates,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return loopOut, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateLoopOut adds an initiated swap to the store.
|
||||||
|
func (s *BaseDB) CreateLoopOut(ctx context.Context, hash lntypes.Hash,
|
||||||
|
swap *LoopOutContract) error {
|
||||||
|
|
||||||
|
writeOpts := &SqliteTxOptions{}
|
||||||
|
return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
|
||||||
|
insertArgs := loopToInsertArgs(
|
||||||
|
hash, &swap.SwapContract,
|
||||||
|
)
|
||||||
|
|
||||||
|
// First we'll insert the swap itself.
|
||||||
|
err := tx.InsertSwap(ctx, insertArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
|
||||||
|
hash, &swap.SwapContract,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Next insert the htlc keys.
|
||||||
|
err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopOutInsertArgs := loopOutToInsertArgs(hash, swap)
|
||||||
|
|
||||||
|
// Next insert the loop out relevant data.
|
||||||
|
err = tx.InsertLoopOut(ctx, loopOutInsertArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchCreateLoopOut adds multiple initiated swaps to the store.
|
||||||
|
func (s *BaseDB) BatchCreateLoopOut(ctx context.Context,
|
||||||
|
swaps map[lntypes.Hash]*LoopOutContract) error {
|
||||||
|
|
||||||
|
writeOpts := &SqliteTxOptions{}
|
||||||
|
return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
|
||||||
|
for swapHash, swap := range swaps {
|
||||||
|
insertArgs := loopToInsertArgs(
|
||||||
|
swapHash, &swap.SwapContract,
|
||||||
|
)
|
||||||
|
|
||||||
|
// First we'll insert the swap itself.
|
||||||
|
err := tx.InsertSwap(ctx, insertArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
|
||||||
|
swapHash, &swap.SwapContract,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Next insert the htlc keys.
|
||||||
|
err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopOutInsertArgs := loopOutToInsertArgs(swapHash, swap)
|
||||||
|
|
||||||
|
// Next insert the loop out relevant data.
|
||||||
|
err = tx.InsertLoopOut(ctx, loopOutInsertArgs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateLoopOut stores a new event for a target loop out swap. This
// appends to the event log for a particular swap as it goes through
// the various stages in its lifetime.
func (s *BaseDB) UpdateLoopOut(ctx context.Context, hash lntypes.Hash,
	time time.Time, state SwapStateData) error {

	// Loop out and loop in updates share the same swap_updates table, so
	// both delegate to the common updateLoop helper.
	return s.updateLoop(ctx, hash, time, state)
}
|
||||||
|
|
||||||
|
// FetchLoopInSwaps returns all swaps currently in the store.
|
||||||
|
func (s *BaseDB) FetchLoopInSwaps(ctx context.Context) (
|
||||||
|
[]*LoopIn, error) {
|
||||||
|
|
||||||
|
var loopIns []*LoopIn
|
||||||
|
|
||||||
|
err := s.ExecTx(ctx, NewSqlReadOpts(), func(*sqlc.Queries) error {
|
||||||
|
swaps, err := s.Queries.GetLoopInSwaps(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopIns = make([]*LoopIn, len(swaps))
|
||||||
|
|
||||||
|
for i, swap := range swaps {
|
||||||
|
updates, err := s.Queries.GetSwapUpdates(ctx, swap.SwapHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopIn, err := s.convertLoopInRow(
|
||||||
|
swap, updates,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
loopIns[i] = loopIn
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return loopIns, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateLoopIn adds an initiated loop in swap to the store. The swap data is
// split over three tables: the generic swap, its htlc keys and the loop in
// specific fields, all inserted in one transaction.
func (s *BaseDB) CreateLoopIn(ctx context.Context, hash lntypes.Hash,
	swap *LoopInContract) error {

	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		insertArgs := loopToInsertArgs(
			hash, &swap.SwapContract,
		)

		// First we'll insert the swap itself.
		err := tx.InsertSwap(ctx, insertArgs)
		if err != nil {
			return err
		}
		htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
			hash, &swap.SwapContract,
		)

		// Next insert the htlc keys.
		err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
		if err != nil {
			return err
		}

		loopInInsertArgs := loopInToInsertArgs(hash, swap)

		// Next insert the loop in relevant data.
		err = tx.InsertLoopIn(ctx, loopInInsertArgs)
		if err != nil {
			return err
		}

		return nil
	})
}
|
||||||
|
|
||||||
|
// BatchCreateLoopIn adds multiple initiated loop in swaps to the store in a
// single database transaction.
func (s *BaseDB) BatchCreateLoopIn(ctx context.Context,
	swaps map[lntypes.Hash]*LoopInContract) error {

	writeOpts := &SqliteTxOptions{}
	return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
		for swapHash, swap := range swaps {
			insertArgs := loopToInsertArgs(
				swapHash, &swap.SwapContract,
			)

			// First we'll insert the swap itself.
			err := tx.InsertSwap(ctx, insertArgs)
			if err != nil {
				return err
			}

			htlcKeyInsertArgs := swapToHtlcKeysInsertArgs(
				swapHash, &swap.SwapContract,
			)

			// Next insert the htlc keys.
			err = tx.InsertHtlcKeys(ctx, htlcKeyInsertArgs)
			if err != nil {
				return err
			}

			loopInInsertArgs := loopInToInsertArgs(swapHash, swap)

			// Next insert the loop in relevant data.
			err = tx.InsertLoopIn(ctx, loopInInsertArgs)
			if err != nil {
				return err
			}
		}

		return nil
	})
}
|
||||||
|
|
||||||
|
// UpdateLoopIn stores a new event for a target loop in swap. This
// appends to the event log for a particular swap as it goes through
// the various stages in its lifetime.
func (s *BaseDB) UpdateLoopIn(ctx context.Context, hash lntypes.Hash,
	time time.Time, state SwapStateData) error {

	// Loop out and loop in updates share the same swap_updates table, so
	// both delegate to the common updateLoop helper.
	return s.updateLoop(ctx, hash, time, state)
}
|
||||||
|
|
||||||
|
// PutLiquidityParams writes the serialized `manager.Parameters` bytes
|
||||||
|
// into the bucket.
|
||||||
|
//
|
||||||
|
// NOTE: it's the caller's responsibility to encode the param. Atm,
|
||||||
|
// it's encoding using the proto package's `Marshal` method.
|
||||||
|
func (s *BaseDB) PutLiquidityParams(ctx context.Context,
|
||||||
|
params []byte) error {
|
||||||
|
|
||||||
|
err := s.Queries.UpsertLiquidityParams(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchLiquidityParams reads the serialized `manager.Parameters` bytes
|
||||||
|
// from the bucket.
|
||||||
|
//
|
||||||
|
// NOTE: it's the caller's responsibility to decode the param. Atm,
|
||||||
|
// it's decoding using the proto package's `Unmarshal` method.
|
||||||
|
func (s *BaseDB) FetchLiquidityParams(ctx context.Context) ([]byte,
|
||||||
|
error) {
|
||||||
|
|
||||||
|
var params []byte
|
||||||
|
params, err := s.Queries.FetchLiquidityParams(ctx)
|
||||||
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
|
return params, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return params, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A compile time assertion to ensure that BaseDB satisfies the
// SwapStore interface.
var _ SwapStore = (*BaseDB)(nil)
|
||||||
|
|
||||||
|
// updateLoop updates the swap with the given hash by inserting a new update
|
||||||
|
// in the swap_updates table.
|
||||||
|
func (s *BaseDB) updateLoop(ctx context.Context, hash lntypes.Hash,
|
||||||
|
time time.Time, state SwapStateData) error {
|
||||||
|
|
||||||
|
writeOpts := &SqliteTxOptions{}
|
||||||
|
return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
|
||||||
|
updateParams := sqlc.InsertSwapUpdateParams{
|
||||||
|
SwapHash: hash[:],
|
||||||
|
UpdateTimestamp: time.UTC(),
|
||||||
|
UpdateState: int32(state.State),
|
||||||
|
ServerCost: int64(state.Cost.Server),
|
||||||
|
OnchainCost: int64(state.Cost.Onchain),
|
||||||
|
OffchainCost: int64(state.Cost.Offchain),
|
||||||
|
}
|
||||||
|
|
||||||
|
if state.HtlcTxHash != nil {
|
||||||
|
updateParams.HtlcTxhash = state.HtlcTxHash.String()
|
||||||
|
}
|
||||||
|
// First we insert the swap update.
|
||||||
|
err := tx.InsertSwapUpdate(ctx, updateParams)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchInsertUpdate inserts multiple swap updates to the store.
|
||||||
|
func (s *BaseDB) BatchInsertUpdate(ctx context.Context,
|
||||||
|
updateData map[lntypes.Hash][]BatchInsertUpdateData) error {
|
||||||
|
|
||||||
|
writeOpts := &SqliteTxOptions{}
|
||||||
|
return s.ExecTx(ctx, writeOpts, func(tx *sqlc.Queries) error {
|
||||||
|
for swapHash, updates := range updateData {
|
||||||
|
for _, update := range updates {
|
||||||
|
updateParams := sqlc.InsertSwapUpdateParams{
|
||||||
|
SwapHash: swapHash[:],
|
||||||
|
UpdateTimestamp: update.Time.UTC(),
|
||||||
|
UpdateState: int32(update.State.State),
|
||||||
|
ServerCost: int64(update.State.Cost.Server),
|
||||||
|
OnchainCost: int64(update.State.Cost.Onchain),
|
||||||
|
OffchainCost: int64(update.State.Cost.Offchain),
|
||||||
|
}
|
||||||
|
|
||||||
|
if update.State.HtlcTxHash != nil {
|
||||||
|
updateParams.HtlcTxhash = update.State.HtlcTxHash.String()
|
||||||
|
}
|
||||||
|
// First we insert the swap update.
|
||||||
|
err := tx.InsertSwapUpdate(ctx, updateParams)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// loopToInsertArgs converts a SwapContract struct to the arguments needed to
|
||||||
|
// insert it into the database.
|
||||||
|
func loopToInsertArgs(hash lntypes.Hash,
|
||||||
|
swap *SwapContract) sqlc.InsertSwapParams {
|
||||||
|
|
||||||
|
return sqlc.InsertSwapParams{
|
||||||
|
SwapHash: hash[:],
|
||||||
|
Preimage: swap.Preimage[:],
|
||||||
|
InitiationTime: swap.InitiationTime.UTC(),
|
||||||
|
AmountRequested: int64(swap.AmountRequested),
|
||||||
|
CltvExpiry: swap.CltvExpiry,
|
||||||
|
MaxSwapFee: int64(swap.MaxSwapFee),
|
||||||
|
MaxMinerFee: int64(swap.MaxMinerFee),
|
||||||
|
InitiationHeight: swap.InitiationHeight,
|
||||||
|
ProtocolVersion: int32(swap.ProtocolVersion),
|
||||||
|
Label: swap.Label,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// loopOutToInsertArgs converts a LoopOutContract struct to the arguments
|
||||||
|
// needed to insert it into the database.
|
||||||
|
func loopOutToInsertArgs(hash lntypes.Hash,
|
||||||
|
loopOut *LoopOutContract) sqlc.InsertLoopOutParams {
|
||||||
|
return sqlc.InsertLoopOutParams{
|
||||||
|
SwapHash: hash[:],
|
||||||
|
DestAddress: loopOut.DestAddr.String(),
|
||||||
|
SwapInvoice: loopOut.SwapInvoice,
|
||||||
|
MaxSwapRoutingFee: int64(loopOut.MaxSwapRoutingFee),
|
||||||
|
SweepConfTarget: loopOut.SweepConfTarget,
|
||||||
|
HtlcConfirmations: int32(loopOut.HtlcConfirmations),
|
||||||
|
OutgoingChanSet: loopOut.OutgoingChanSet.String(),
|
||||||
|
PrepayInvoice: loopOut.PrepayInvoice,
|
||||||
|
MaxPrepayRoutingFee: int64(loopOut.MaxPrepayRoutingFee),
|
||||||
|
PublicationDeadline: loopOut.SwapPublicationDeadline.UTC(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// loopInToInsertArgs converts a LoopInContract struct to the arguments needed
|
||||||
|
// to insert it into the database.
|
||||||
|
func loopInToInsertArgs(hash lntypes.Hash,
|
||||||
|
loopIn *LoopInContract) sqlc.InsertLoopInParams {
|
||||||
|
|
||||||
|
loopInInsertParams := sqlc.InsertLoopInParams{
|
||||||
|
SwapHash: hash[:],
|
||||||
|
HtlcConfTarget: loopIn.HtlcConfTarget,
|
||||||
|
ExternalHtlc: loopIn.ExternalHtlc,
|
||||||
|
}
|
||||||
|
|
||||||
|
if loopIn.LastHop != nil {
|
||||||
|
loopInInsertParams.LastHop = loopIn.LastHop[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
return loopInInsertParams
|
||||||
|
}
|
||||||
|
|
||||||
|
// swapToHtlcKeysInsertArgs extracts the htlc keys from a SwapContract struct
|
||||||
|
// and converts them to the arguments needed to insert them into the database.
|
||||||
|
func swapToHtlcKeysInsertArgs(hash lntypes.Hash,
|
||||||
|
swap *SwapContract) sqlc.InsertHtlcKeysParams {
|
||||||
|
return sqlc.InsertHtlcKeysParams{
|
||||||
|
SwapHash: hash[:],
|
||||||
|
SenderScriptPubkey: swap.HtlcKeys.SenderScriptKey[:],
|
||||||
|
ReceiverScriptPubkey: swap.HtlcKeys.ReceiverScriptKey[:],
|
||||||
|
SenderInternalPubkey: swap.HtlcKeys.SenderInternalPubKey[:],
|
||||||
|
ReceiverInternalPubkey: swap.HtlcKeys.ReceiverInternalPubKey[:],
|
||||||
|
ClientKeyFamily: int32(
|
||||||
|
swap.HtlcKeys.ClientScriptKeyLocator.Family,
|
||||||
|
),
|
||||||
|
ClientKeyIndex: int32(
|
||||||
|
swap.HtlcKeys.ClientScriptKeyLocator.Index,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertLoopOutRow converts a database row containing a loop out swap to a
// LoopOut struct.
func (s *BaseDB) convertLoopOutRow(row sqlc.GetLoopOutSwapRow,
	updates []sqlc.SwapUpdate) (*LoopOut, error) {

	// Reassemble the htlc keys from their raw database columns.
	htlcKeys, err := fetchHtlcKeys(
		row.SenderScriptPubkey, row.ReceiverScriptPubkey,
		row.SenderInternalPubkey, row.ReceiverInternalPubkey,
		row.ClientKeyFamily, row.ClientKeyIndex,
	)
	if err != nil {
		return nil, err
	}

	preimage, err := lntypes.MakePreimage(row.Preimage)
	if err != nil {
		return nil, err
	}

	// Decode the sweep destination against the store's configured chain
	// parameters.
	destAddress, err := btcutil.DecodeAddress(row.DestAddress, s.network)
	if err != nil {
		return nil, err
	}

	swapHash, err := lntypes.MakeHash(row.SwapHash)
	if err != nil {
		return nil, err
	}

	loopOut := &LoopOut{
		Contract: &LoopOutContract{
			SwapContract: SwapContract{
				Preimage:         preimage,
				AmountRequested:  btcutil.Amount(row.AmountRequested),
				HtlcKeys:         htlcKeys,
				CltvExpiry:       row.CltvExpiry,
				MaxSwapFee:       btcutil.Amount(row.MaxSwapFee),
				MaxMinerFee:      btcutil.Amount(row.MaxMinerFee),
				InitiationHeight: row.InitiationHeight,
				InitiationTime:   row.InitiationTime,
				Label:            row.Label,
				ProtocolVersion:  ProtocolVersion(row.ProtocolVersion),
			},
			DestAddr:                destAddress,
			SwapInvoice:             row.SwapInvoice,
			MaxSwapRoutingFee:       btcutil.Amount(row.MaxSwapRoutingFee),
			SweepConfTarget:         row.SweepConfTarget,
			HtlcConfirmations:       uint32(row.HtlcConfirmations),
			PrepayInvoice:           row.PrepayInvoice,
			MaxPrepayRoutingFee:     btcutil.Amount(row.MaxPrepayRoutingFee),
			SwapPublicationDeadline: row.PublicationDeadline,
		},
		Loop: Loop{
			Hash: swapHash,
		},
	}

	// The outgoing channel set is stored as a comma separated string; an
	// empty string means no channel restriction was set.
	if row.OutgoingChanSet != "" {
		chanSet, err := convertOutgoingChanSet(row.OutgoingChanSet)
		if err != nil {
			return nil, err
		}

		loopOut.Contract.OutgoingChanSet = chanSet
	}

	// If we don't have any updates yet we can return early.
	if len(updates) == 0 {
		return loopOut, nil
	}

	events, err := getSwapEvents(updates)
	if err != nil {
		return nil, err
	}

	loopOut.Events = events

	return loopOut, nil
}
|
||||||
|
|
||||||
|
// convertLoopInRow converts a database row containing a loop in swap to a
// LoopIn struct.
func (s *BaseDB) convertLoopInRow(row sqlc.GetLoopInSwapsRow,
	updates []sqlc.SwapUpdate) (*LoopIn, error) {

	// Reassemble the htlc keys from their raw database columns.
	htlcKeys, err := fetchHtlcKeys(
		row.SenderScriptPubkey, row.ReceiverScriptPubkey,
		row.SenderInternalPubkey, row.ReceiverInternalPubkey,
		row.ClientKeyFamily, row.ClientKeyIndex,
	)
	if err != nil {
		return nil, err
	}

	preimage, err := lntypes.MakePreimage(row.Preimage)
	if err != nil {
		return nil, err
	}

	swapHash, err := lntypes.MakeHash(row.SwapHash)
	if err != nil {
		return nil, err
	}

	loopIn := &LoopIn{
		Contract: &LoopInContract{
			SwapContract: SwapContract{
				Preimage:         preimage,
				AmountRequested:  btcutil.Amount(row.AmountRequested),
				HtlcKeys:         htlcKeys,
				CltvExpiry:       row.CltvExpiry,
				MaxSwapFee:       btcutil.Amount(row.MaxSwapFee),
				MaxMinerFee:      btcutil.Amount(row.MaxMinerFee),
				InitiationHeight: row.InitiationHeight,
				InitiationTime:   row.InitiationTime,
				Label:            row.Label,
				ProtocolVersion:  ProtocolVersion(row.ProtocolVersion),
			},
			HtlcConfTarget: row.HtlcConfTarget,
			ExternalHtlc:   row.ExternalHtlc,
		},
		Loop: Loop{
			Hash: swapHash,
		},
	}

	// The last hop is optional; a nil column means it was never set.
	if row.LastHop != nil {
		lastHop, err := route.NewVertexFromBytes(row.LastHop)
		if err != nil {
			return nil, err
		}

		loopIn.Contract.LastHop = &lastHop
	}

	// If we don't have any updates yet we can return early.
	if len(updates) == 0 {
		return loopIn, nil
	}

	events, err := getSwapEvents(updates)
	if err != nil {
		return nil, err
	}

	loopIn.Events = events

	return loopIn, nil
}
|
||||||
|
|
||||||
|
// getSwapEvents returns a slice of LoopEvents for the swap.
|
||||||
|
func getSwapEvents(updates []sqlc.SwapUpdate) ([]*LoopEvent, error) {
|
||||||
|
events := make([]*LoopEvent, len(updates))
|
||||||
|
|
||||||
|
for i := 0; i < len(events); i++ {
|
||||||
|
events[i] = &LoopEvent{
|
||||||
|
SwapStateData: SwapStateData{
|
||||||
|
State: SwapState(updates[i].UpdateState),
|
||||||
|
Cost: SwapCost{
|
||||||
|
Server: btcutil.Amount(updates[i].ServerCost),
|
||||||
|
Onchain: btcutil.Amount(updates[i].OnchainCost),
|
||||||
|
Offchain: btcutil.Amount(updates[i].OffchainCost),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Time: updates[i].UpdateTimestamp.UTC(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if updates[i].HtlcTxhash != "" {
|
||||||
|
chainHash, err := chainhash.NewHashFromStr(updates[i].HtlcTxhash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
events[i].HtlcTxHash = chainHash
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return events, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertOutgoingChanSet converts a comma separated string of channel IDs into
|
||||||
|
// a ChannelSet.
|
||||||
|
func convertOutgoingChanSet(outgoingChanSet string) (ChannelSet, error) {
|
||||||
|
// Split the string into a slice of strings
|
||||||
|
chanStrings := strings.Split(outgoingChanSet, ",")
|
||||||
|
channels := make([]uint64, len(chanStrings))
|
||||||
|
|
||||||
|
// Iterate over the chanStrings slice and convert each string to ChannelID
|
||||||
|
for i, chanString := range chanStrings {
|
||||||
|
chanID, err := strconv.ParseInt(chanString, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
channels[i] = uint64(chanID)
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewChannelSet(channels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetchHtlcKeys converts the blob encoded htlc keys into a HtlcKeys struct.
func fetchHtlcKeys(senderScriptPubkey, receiverScriptPubkey,
	senderInternalPubkey, receiverInternalPubkey []byte,
	clientKeyFamily, clientKeyIndex int32) (HtlcKeys, error) {

	// The script keys are mandatory, so a malformed blob is an error.
	senderScriptKey, err := blobTo33ByteSlice(senderScriptPubkey)
	if err != nil {
		return HtlcKeys{}, err
	}

	receiverScriptKey, err := blobTo33ByteSlice(receiverScriptPubkey)
	if err != nil {
		return HtlcKeys{}, err
	}

	htlcKeys := HtlcKeys{
		SenderScriptKey:   senderScriptKey,
		ReceiverScriptKey: receiverScriptKey,
		ClientScriptKeyLocator: keychain.KeyLocator{
			Family: keychain.KeyFamily(clientKeyFamily),
			Index:  uint32(clientKeyIndex),
		},
	}

	// The internal pubkeys are optional and only converted when present.
	// Note that inside each block the []byte parameter is deliberately
	// shadowed by the converted [33]byte value of the same name.
	if senderInternalPubkey != nil {
		senderInternalPubkey, err := blobTo33ByteSlice(
			senderInternalPubkey,
		)
		if err != nil {
			return HtlcKeys{}, err
		}
		htlcKeys.SenderInternalPubKey = senderInternalPubkey
	}

	if receiverInternalPubkey != nil {
		receiverInternalPubkey, err := blobTo33ByteSlice(
			receiverInternalPubkey,
		)
		if err != nil {
			return HtlcKeys{}, err
		}
		htlcKeys.ReceiverInternalPubKey = receiverInternalPubkey
	}

	return htlcKeys, nil
}
|
||||||
|
|
||||||
|
// blobTo33ByteSlice converts a blob encoded 33 byte public key into a
// [33]byte.
func blobTo33ByteSlice(blob []byte) ([33]byte, error) {
	var key [33]byte

	// Only accept exactly 33 bytes; anything else is a corrupt record.
	if len(blob) != 33 {
		return key, errors.New("blob is not 33 bytes")
	}

	copy(key[:], blob)

	return key, nil
}
|
@ -0,0 +1,389 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"errors"
|
||||||
|
"math/rand"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||||
|
"github.com/lightninglabs/loop/loopdb/sqlc"
|
||||||
|
"github.com/lightninglabs/loop/test"
|
||||||
|
"github.com/lightningnetwork/lnd/keychain"
|
||||||
|
"github.com/lightningnetwork/lnd/routing/route"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
testTime1 = time.Date(2018, time.January, 9, 14, 54, 32, 3, time.UTC)
|
||||||
|
testTime2 = time.Date(2018, time.January, 9, 15, 02, 03, 5, time.UTC)
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSqliteLoopOutStore tests all the basic functionality of the current
// sqlite swap store.
func TestSqliteLoopOutStore(t *testing.T) {
	destAddr := test.GetDestAddr(t, 0)
	initiationTime := time.Date(2018, 11, 1, 0, 0, 0, 0, time.UTC)

	// Next, we'll make a new pending swap that we'll insert into the
	// database shortly.
	unrestrictedSwap := LoopOutContract{
		SwapContract: SwapContract{
			AmountRequested: 100,
			Preimage:        testPreimage,
			CltvExpiry:      144,
			HtlcKeys: HtlcKeys{
				SenderScriptKey:        senderKey,
				ReceiverScriptKey:      receiverKey,
				SenderInternalPubKey:   senderInternalKey,
				ReceiverInternalPubKey: receiverInternalKey,
				ClientScriptKeyLocator: keychain.KeyLocator{
					Family: 1,
					Index:  2,
				},
			},
			MaxMinerFee: 10,
			MaxSwapFee:  20,

			InitiationHeight: 99,

			InitiationTime:  initiationTime,
			ProtocolVersion: ProtocolVersionMuSig2,
		},
		MaxPrepayRoutingFee:     40,
		PrepayInvoice:           "prepayinvoice",
		DestAddr:                destAddr,
		SwapInvoice:             "swapinvoice",
		MaxSwapRoutingFee:       30,
		SweepConfTarget:         2,
		HtlcConfirmations:       2,
		SwapPublicationDeadline: initiationTime,
	}

	// Base case: no restriction on the set of outgoing channels.
	t.Run("no outgoing set", func(t *testing.T) {
		testSqliteLoopOutStore(t, &unrestrictedSwap)
	})

	// Same swap, but restricted to route out over two specific channels.
	restrictedSwap := unrestrictedSwap
	restrictedSwap.OutgoingChanSet = ChannelSet{1, 2}

	t.Run("two channel outgoing set", func(t *testing.T) {
		testSqliteLoopOutStore(t, &restrictedSwap)
	})

	// Same swap again, this time with a user supplied label attached.
	labelledSwap := unrestrictedSwap
	labelledSwap.Label = "test label"
	t.Run("labelled swap", func(t *testing.T) {
		testSqliteLoopOutStore(t, &labelledSwap)
	})
}
|
||||||
|
|
||||||
|
// testSqliteLoopOutStore tests the basic functionality of the current sqlite
// swap store for specific swap parameters: create, duplicate-create failure
// and state transitions through the update log.
func testSqliteLoopOutStore(t *testing.T, pendingSwap *LoopOutContract) {
	store := NewTestDB(t)

	ctxb := context.Background()

	// First, verify that an empty database has no active swaps.
	swaps, err := store.FetchLoopOutSwaps(ctxb)

	require.NoError(t, err)
	require.Empty(t, swaps)

	hash := pendingSwap.Preimage.Hash()

	// checkSwap is a test helper function that'll assert the state of a
	// swap.
	checkSwap := func(expectedState SwapState) {
		t.Helper()

		// The swap must be visible both via the bulk fetch and the
		// lookup by hash, and both must agree on its identity.
		swaps, err := store.FetchLoopOutSwaps(ctxb)
		require.NoError(t, err)

		require.Len(t, swaps, 1)

		swap, err := store.FetchLoopOutSwap(ctxb, hash)
		require.NoError(t, err)

		require.Equal(t, hash, swap.Hash)
		require.Equal(t, hash, swaps[0].Hash)

		swapContract := swap.Contract

		require.Equal(t, swapContract, pendingSwap)

		require.Equal(t, expectedState, swap.State().State)

		// The htlc tx hash is recorded together with the
		// preimage-revealed update, so it must round-trip.
		if expectedState == StatePreimageRevealed {
			require.NotNil(t, swap.State().HtlcTxHash)
		}
	}

	// If we create a new swap, then it should show up as being initialized
	// right after.
	err = store.CreateLoopOut(ctxb, hash, pendingSwap)
	require.NoError(t, err)

	checkSwap(StateInitiated)

	// Trying to make the same swap again should result in an error.
	err = store.CreateLoopOut(ctxb, hash, pendingSwap)
	require.Error(t, err)
	checkSwap(StateInitiated)

	// Next, we'll update to the next state of the pre-image being
	// revealed. The state should be reflected here again.
	err = store.UpdateLoopOut(
		ctxb, hash, testTime,
		SwapStateData{
			State:      StatePreimageRevealed,
			HtlcTxHash: &chainhash.Hash{1, 6, 2},
		},
	)
	require.NoError(t, err)

	checkSwap(StatePreimageRevealed)

	// Next, we'll update to the final state to ensure that the state is
	// properly updated.
	err = store.UpdateLoopOut(
		ctxb, hash, testTime,
		SwapStateData{
			State: StateFailInsufficientValue,
		},
	)
	require.NoError(t, err)
	checkSwap(StateFailInsufficientValue)

	err = store.Close()
	require.NoError(t, err)
}
|
||||||
|
|
||||||
|
// TestSQLliteLoopInStore tests all the basic functionality of the current
|
||||||
|
// sqlite swap store.
|
||||||
|
func TestSQLliteLoopInStore(t *testing.T) {
|
||||||
|
initiationTime := time.Date(2018, 11, 1, 0, 0, 0, 0, time.UTC)
|
||||||
|
|
||||||
|
// Next, we'll make a new pending swap that we'll insert into the
|
||||||
|
// database shortly.
|
||||||
|
lastHop := route.Vertex{1, 2, 3}
|
||||||
|
|
||||||
|
pendingSwap := LoopInContract{
|
||||||
|
SwapContract: SwapContract{
|
||||||
|
AmountRequested: 100,
|
||||||
|
Preimage: testPreimage,
|
||||||
|
CltvExpiry: 144,
|
||||||
|
HtlcKeys: HtlcKeys{
|
||||||
|
SenderScriptKey: senderKey,
|
||||||
|
ReceiverScriptKey: receiverKey,
|
||||||
|
SenderInternalPubKey: senderInternalKey,
|
||||||
|
ReceiverInternalPubKey: receiverInternalKey,
|
||||||
|
ClientScriptKeyLocator: keychain.KeyLocator{
|
||||||
|
Family: 1,
|
||||||
|
Index: 2,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
MaxMinerFee: 10,
|
||||||
|
MaxSwapFee: 20,
|
||||||
|
InitiationHeight: 99,
|
||||||
|
|
||||||
|
// Convert to/from unix to remove timezone, so that it
|
||||||
|
// doesn't interfere with DeepEqual.
|
||||||
|
InitiationTime: initiationTime,
|
||||||
|
ProtocolVersion: ProtocolVersionMuSig2,
|
||||||
|
},
|
||||||
|
HtlcConfTarget: 2,
|
||||||
|
LastHop: &lastHop,
|
||||||
|
ExternalHtlc: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("loop in", func(t *testing.T) {
|
||||||
|
testSqliteLoopInStore(t, pendingSwap)
|
||||||
|
})
|
||||||
|
|
||||||
|
labelledSwap := pendingSwap
|
||||||
|
labelledSwap.Label = "test label"
|
||||||
|
t.Run("loop in with label", func(t *testing.T) {
|
||||||
|
testSqliteLoopInStore(t, labelledSwap)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSqliteLoopInStore(t *testing.T, pendingSwap LoopInContract) {
|
||||||
|
store := NewTestDB(t)
|
||||||
|
|
||||||
|
ctxb := context.Background()
|
||||||
|
|
||||||
|
// First, verify that an empty database has no active swaps.
|
||||||
|
swaps, err := store.FetchLoopInSwaps(ctxb)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Empty(t, swaps)
|
||||||
|
|
||||||
|
hash := sha256.Sum256(testPreimage[:])
|
||||||
|
|
||||||
|
// checkSwap is a test helper function that'll assert the state of a
|
||||||
|
// swap.
|
||||||
|
checkSwap := func(expectedState SwapState) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
swaps, err := store.FetchLoopInSwaps(ctxb)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, swaps, 1)
|
||||||
|
|
||||||
|
swap := swaps[0].Contract
|
||||||
|
|
||||||
|
require.Equal(t, swap, &pendingSwap)
|
||||||
|
|
||||||
|
require.Equal(t, swaps[0].State().State, expectedState)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we create a new swap, then it should show up as being initialized
|
||||||
|
// right after.
|
||||||
|
err = store.CreateLoopIn(ctxb, hash, &pendingSwap)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
checkSwap(StateInitiated)
|
||||||
|
|
||||||
|
// Trying to make the same swap again should result in an error.
|
||||||
|
err = store.CreateLoopIn(ctxb, hash, &pendingSwap)
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
checkSwap(StateInitiated)
|
||||||
|
|
||||||
|
// Next, we'll update to the next state of the pre-image being
|
||||||
|
// revealed. The state should be reflected here again.
|
||||||
|
err = store.UpdateLoopIn(
|
||||||
|
ctxb, hash, testTime,
|
||||||
|
SwapStateData{
|
||||||
|
State: StatePreimageRevealed,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
checkSwap(StatePreimageRevealed)
|
||||||
|
|
||||||
|
// Next, we'll update to the final state to ensure that the state is
|
||||||
|
// properly updated.
|
||||||
|
err = store.UpdateLoopIn(
|
||||||
|
ctxb, hash, testTime,
|
||||||
|
SwapStateData{
|
||||||
|
State: StateFailInsufficientValue,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
require.NoError(t, err)
|
||||||
|
checkSwap(StateFailInsufficientValue)
|
||||||
|
|
||||||
|
err = store.Close()
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestLiquidityParams checks that reading and writing to liquidty bucket are
|
||||||
|
// as expected.
|
||||||
|
func TestSqliteLiquidityParams(t *testing.T) {
|
||||||
|
ctxb := context.Background()
|
||||||
|
|
||||||
|
store := NewTestDB(t)
|
||||||
|
|
||||||
|
// Test when there's no params saved before, an empty bytes is
|
||||||
|
// returned.
|
||||||
|
params, err := store.FetchLiquidityParams(ctxb)
|
||||||
|
require.NoError(t, err, "failed to fetch params")
|
||||||
|
require.Empty(t, params, "expect empty bytes")
|
||||||
|
require.Nil(t, params, "expected nil byte array")
|
||||||
|
|
||||||
|
params = []byte("test")
|
||||||
|
|
||||||
|
// Test we can save the params.
|
||||||
|
err = store.PutLiquidityParams(ctxb, params)
|
||||||
|
require.NoError(t, err, "failed to put params")
|
||||||
|
|
||||||
|
// Now fetch the db again should return the above saved bytes.
|
||||||
|
paramsRead, err := store.FetchLiquidityParams(ctxb)
|
||||||
|
require.NoError(t, err, "failed to fetch params")
|
||||||
|
require.Equal(t, params, paramsRead, "unexpected return value")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSqliteTypeConversion is a small test that checks that we can safely
|
||||||
|
// convert between the :one and :many types from sqlc.
|
||||||
|
func TestSqliteTypeConversion(t *testing.T) {
|
||||||
|
loopOutSwapRow := sqlc.GetLoopOutSwapRow{}
|
||||||
|
randomStruct(&loopOutSwapRow)
|
||||||
|
require.NotNil(t, loopOutSwapRow.DestAddress)
|
||||||
|
|
||||||
|
loopOutSwapsRow := sqlc.GetLoopOutSwapsRow(loopOutSwapRow)
|
||||||
|
require.EqualValues(t, loopOutSwapRow, loopOutSwapsRow)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// charset is the alphabet that randomString draws its characters from.
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// randomString returns a random string of the given length, composed only of
// characters from charset.
func randomString(length int) string {
	result := make([]byte, length)
	for i := 0; i < len(result); i++ {
		result[i] = charset[rand.Intn(len(charset))]
	}
	return string(result)
}
|
||||||
|
|
||||||
|
// randomBytes returns a freshly allocated slice of the given length filled
// with random byte values.
func randomBytes(length int) []byte {
	buf := make([]byte, length)
	for i := 0; i < len(buf); i++ {
		buf[i] = byte(rand.Intn(256))
	}
	return buf
}
|
||||||
|
|
||||||
|
func randomStruct(v interface{}) error {
|
||||||
|
val := reflect.ValueOf(v)
|
||||||
|
if val.Kind() != reflect.Ptr || val.Elem().Kind() != reflect.Struct {
|
||||||
|
return errors.New("Input should be a pointer to a struct type")
|
||||||
|
}
|
||||||
|
|
||||||
|
val = val.Elem()
|
||||||
|
for i := 0; i < val.NumField(); i++ {
|
||||||
|
field := val.Field(i)
|
||||||
|
|
||||||
|
switch field.Kind() {
|
||||||
|
case reflect.Int64:
|
||||||
|
if field.CanSet() {
|
||||||
|
field.SetInt(rand.Int63())
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
if field.CanSet() {
|
||||||
|
field.SetString(randomString(10))
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
if field.Type().Elem().Kind() == reflect.Uint8 {
|
||||||
|
if field.CanSet() {
|
||||||
|
field.SetBytes(randomBytes(32))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
if field.Type() == reflect.TypeOf(time.Time{}) {
|
||||||
|
if field.CanSet() {
|
||||||
|
field.Set(reflect.ValueOf(time.Now()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if field.Type() == reflect.TypeOf(route.Vertex{}) {
|
||||||
|
if field.CanSet() {
|
||||||
|
vertex, err := route.NewVertexFromBytes(
|
||||||
|
randomBytes(route.VertexSize),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
field.Set(reflect.ValueOf(vertex))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
@ -0,0 +1,31 @@
|
|||||||
|
// Code generated by sqlc. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// sqlc v1.17.2
|
||||||
|
|
||||||
|
package sqlc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DBTX interface {
|
||||||
|
ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
|
||||||
|
PrepareContext(context.Context, string) (*sql.Stmt, error)
|
||||||
|
QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
|
||||||
|
QueryRowContext(context.Context, string, ...interface{}) *sql.Row
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(db DBTX) *Queries {
|
||||||
|
return &Queries{db: db}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Queries struct {
|
||||||
|
db DBTX
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *Queries) WithTx(tx *sql.Tx) *Queries {
|
||||||
|
return &Queries{
|
||||||
|
db: tx,
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,35 @@
|
|||||||
|
// Code generated by sqlc. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// sqlc v1.17.2
|
||||||
|
// source: liquidity_params.sql
|
||||||
|
|
||||||
|
package sqlc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
)
|
||||||
|
|
||||||
|
const fetchLiquidityParams = `-- name: FetchLiquidityParams :one
|
||||||
|
SELECT params FROM liquidity_params WHERE id = 1
|
||||||
|
`
|
||||||
|
|
||||||
|
func (q *Queries) FetchLiquidityParams(ctx context.Context) ([]byte, error) {
|
||||||
|
row := q.db.QueryRowContext(ctx, fetchLiquidityParams)
|
||||||
|
var params []byte
|
||||||
|
err := row.Scan(¶ms)
|
||||||
|
return params, err
|
||||||
|
}
|
||||||
|
|
||||||
|
const upsertLiquidityParams = `-- name: UpsertLiquidityParams :exec
|
||||||
|
INSERT INTO liquidity_params (
|
||||||
|
id, params
|
||||||
|
) VALUES (
|
||||||
|
1, $1
|
||||||
|
) ON CONFLICT (id) DO UPDATE SET
|
||||||
|
params = excluded.params
|
||||||
|
`
|
||||||
|
|
||||||
|
func (q *Queries) UpsertLiquidityParams(ctx context.Context, params []byte) error {
|
||||||
|
_, err := q.db.ExecContext(ctx, upsertLiquidityParams, params)
|
||||||
|
return err
|
||||||
|
}
|
@ -0,0 +1,7 @@
|
|||||||
|
DROP INDEX IF EXISTS updates_swap_hash_idx;
|
||||||
|
|
||||||
|
DROP TABLE IF EXISTS swaps;
|
||||||
|
DROP TABLE IF EXISTS loopin_swaps;
|
||||||
|
DROP TABLE IF EXISTS loopout_swaps;
|
||||||
|
DROP TABLE IF EXISTS swap_updates;
|
||||||
|
DROP TABLE IF EXISTS htlc_keys;
|
@ -0,0 +1,166 @@
|
|||||||
|
-- swaps stores all base data that is shared between loop-outs and loop-ins,
|
||||||
|
-- as well as the updates.
|
||||||
|
CREATE TABLE swaps (
|
||||||
|
-- id is the autoincrementing primary key.
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
|
||||||
|
-- swap_hash is the randomly generated hash of the swap, which is used
|
||||||
|
-- as the swap identifier for the clients.
|
||||||
|
swap_hash BLOB NOT NULL UNIQUE,
|
||||||
|
|
||||||
|
-- preimage is the preimage for swap htlc.
|
||||||
|
preimage BLOB NOT NULL UNIQUE,
|
||||||
|
|
||||||
|
-- initiation_time is the creation time (when stored) of the contract.
|
||||||
|
initiation_time TIMESTAMP NOT NULL,
|
||||||
|
|
||||||
|
-- amount_requested is the requested swap amount in sats.
|
||||||
|
amount_requested BIGINT NOT NULL,
|
||||||
|
|
||||||
|
-- cltv_expiry defines the on-chain HTLC's CLTV. In specific,
|
||||||
|
-- * For loop in swap, this value must be greater than the off-chain
|
||||||
|
-- payment's CLTV.
|
||||||
|
-- * For loop out swap, this value must be smaller than the off-chain
|
||||||
|
-- payment's CLTV.
|
||||||
|
cltv_expiry INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- max_miner_fee is the maximum in on-chain fees that we are willing to
|
||||||
|
-- spend.
|
||||||
|
max_miner_fee BIGINT NOT NULL,
|
||||||
|
|
||||||
|
-- max_swap_fee is the maximum we are willing to pay the server for the
|
||||||
|
-- swap.
|
||||||
|
max_swap_fee BIGINT NOT NULL,
|
||||||
|
|
||||||
|
-- initiation_height is the block height at which the swap was initiated.
|
||||||
|
initiation_height INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- protocol_version is the protocol version that the swap was created with.
|
||||||
|
-- Note that this version is not upgraded if the client upgrades or
|
||||||
|
-- downgrades their protocol version mid-swap.
|
||||||
|
protocol_version INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- label contains an optional label for the swap.
|
||||||
|
label TEXT NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
-- swap_updates stores timestamps and states of swap updates.
|
||||||
|
CREATE TABLE swap_updates (
|
||||||
|
-- id is the autoincrementing primary key.
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
|
||||||
|
-- swap_id is the foreign key referencing the swap in the swaps table.
|
||||||
|
swap_hash BLOB NOT NULL,
|
||||||
|
|
||||||
|
-- update_timestamp is the timestamp the swap was updated at.
|
||||||
|
update_timestamp TIMESTAMP NOT NULL,
|
||||||
|
|
||||||
|
-- update_state is the state the swap was in at a given timestamp.
|
||||||
|
update_state INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- htlc_txhash is the hash of the transaction that creates the htlc.
|
||||||
|
htlc_txhash TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- server_cost is the amount paid to the server.
|
||||||
|
server_cost BIGINT NOT NULL DEFAULT 0,
|
||||||
|
|
||||||
|
-- onchain_cost is the amount paid to miners for the onchain tx.
|
||||||
|
onchain_cost BIGINT NOT NULL DEFAULT 0,
|
||||||
|
|
||||||
|
-- offchain_cost is the amount paid in routing fees.
|
||||||
|
offchain_cost BIGINT NOT NULL DEFAULT 0,
|
||||||
|
|
||||||
|
-- Foreign key constraint to ensure that swap_id references an existing swap.
|
||||||
|
FOREIGN KEY (swap_hash) REFERENCES swaps (swap_hash)
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
-- loopin_swaps stores the loop-in specific data.
|
||||||
|
CREATE TABLE loopin_swaps (
|
||||||
|
-- swap_hash points to the parent swap hash.
|
||||||
|
swap_hash BLOB PRIMARY KEY REFERENCES swaps(swap_hash),
|
||||||
|
|
||||||
|
-- htlc_conf_target specifies the targeted confirmation target for the
|
||||||
|
-- sweep transaction.
|
||||||
|
htlc_conf_target INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- last_hop is an optional parameter that specifies the last hop to be
|
||||||
|
-- used for a loop in swap.
|
||||||
|
last_hop BLOB,
|
||||||
|
|
||||||
|
-- external_htlc specifies whether the htlc is published by an external
|
||||||
|
-- source.
|
||||||
|
external_htlc BOOLEAN NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
-- loopout_swaps stores the loop-out specific data.
|
||||||
|
CREATE TABLE loopout_swaps (
|
||||||
|
-- swap_hash points to the parent swap hash.
|
||||||
|
swap_hash BLOB PRIMARY KEY REFERENCES swaps(swap_hash),
|
||||||
|
|
||||||
|
-- dest_address is the destination address of the loop out swap.
|
||||||
|
dest_address TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- SwapInvoice is the invoice that is to be paid by the client to
|
||||||
|
-- initiate the loop out swap.
|
||||||
|
swap_invoice TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- MaxSwapRoutingFee is the maximum off-chain fee in msat that may be
|
||||||
|
-- paid for the swap payment to the server.
|
||||||
|
max_swap_routing_fee BIGINT NOT NULL,
|
||||||
|
|
||||||
|
-- SweepConfTarget specifies the targeted confirmation target for the
|
||||||
|
-- client sweep tx.
|
||||||
|
sweep_conf_target INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- HtlcConfirmations is the number of confirmations we require the on
|
||||||
|
-- chain htlc to have before proceeding with the swap.
|
||||||
|
htlc_confirmations INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- OutgoingChanSet is the set of short ids of channels that may be used.
|
||||||
|
-- If empty, any channel may be used.
|
||||||
|
outgoing_chan_set TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- PrepayInvoice is the invoice that the client should pay to the
|
||||||
|
-- server that will be returned if the swap is complete.
|
||||||
|
prepay_invoice TEXT NOT NULL,
|
||||||
|
|
||||||
|
-- MaxPrepayRoutingFee is the maximum off-chain fee in msat that may be
|
||||||
|
-- paid for the prepayment to the server.
|
||||||
|
max_prepay_routing_fee BIGINT NOT NULL,
|
||||||
|
|
||||||
|
-- SwapPublicationDeadline is a timestamp that the server commits to
|
||||||
|
-- have the on-chain swap published by. It is set by the client to
|
||||||
|
-- allow the server to delay the publication in exchange for possibly
|
||||||
|
-- lower fees.
|
||||||
|
publication_deadline TIMESTAMP NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
-- htlc_keys stores public and private keys used when constructing swap HTLCs.
|
||||||
|
CREATE TABLE htlc_keys (
|
||||||
|
-- swap_hash points to the parent swap hash.
|
||||||
|
swap_hash BLOB PRIMARY KEY REFERENCES swaps(swap_hash),
|
||||||
|
|
||||||
|
-- sender_script_pubkey is the sender's script pubkey used in the HTLC.
|
||||||
|
sender_script_pubkey BLOB NOT NULL,
|
||||||
|
|
||||||
|
-- receiver_script_pubkey is the receiver's script pubkey used in the HTLC.
|
||||||
|
receiver_script_pubkey BLOB NOT NULL,
|
||||||
|
|
||||||
|
-- sender_internal_pubkey is the public key for the sender_internal_key.
|
||||||
|
sender_internal_pubkey BLOB,
|
||||||
|
|
||||||
|
-- receiver_internal_pubkey is the public key for the receiver_internal_key.
|
||||||
|
receiver_internal_pubkey BLOB,
|
||||||
|
|
||||||
|
-- client_key_family is the family of key being identified.
|
||||||
|
client_key_family INTEGER NOT NULL,
|
||||||
|
|
||||||
|
-- client_key_index is the precise index of the key being identified.
|
||||||
|
client_key_index INTEGER NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS updates_swap_hash_idx ON swap_updates(swap_hash);
|
@ -0,0 +1 @@
|
|||||||
|
DROP TABLE IF EXISTS liquidity_params;
|
@ -0,0 +1,6 @@
|
|||||||
|
-- liquidity_params stores the liquidity parameters for autoloop as a single row
|
||||||
|
-- with a blob column, which is the serialized proto request.
|
||||||
|
CREATE TABLE liquidity_params (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
params BLOB
|
||||||
|
);
|
@ -0,0 +1,69 @@
|
|||||||
|
// Code generated by sqlc. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// sqlc v1.17.2
|
||||||
|
|
||||||
|
package sqlc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type HtlcKey struct {
|
||||||
|
SwapHash []byte
|
||||||
|
SenderScriptPubkey []byte
|
||||||
|
ReceiverScriptPubkey []byte
|
||||||
|
SenderInternalPubkey []byte
|
||||||
|
ReceiverInternalPubkey []byte
|
||||||
|
ClientKeyFamily int32
|
||||||
|
ClientKeyIndex int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type LiquidityParam struct {
|
||||||
|
ID int32
|
||||||
|
Params []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type LoopinSwap struct {
|
||||||
|
SwapHash []byte
|
||||||
|
HtlcConfTarget int32
|
||||||
|
LastHop []byte
|
||||||
|
ExternalHtlc bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type LoopoutSwap struct {
|
||||||
|
SwapHash []byte
|
||||||
|
DestAddress string
|
||||||
|
SwapInvoice string
|
||||||
|
MaxSwapRoutingFee int64
|
||||||
|
SweepConfTarget int32
|
||||||
|
HtlcConfirmations int32
|
||||||
|
OutgoingChanSet string
|
||||||
|
PrepayInvoice string
|
||||||
|
MaxPrepayRoutingFee int64
|
||||||
|
PublicationDeadline time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type Swap struct {
|
||||||
|
ID int32
|
||||||
|
SwapHash []byte
|
||||||
|
Preimage []byte
|
||||||
|
InitiationTime time.Time
|
||||||
|
AmountRequested int64
|
||||||
|
CltvExpiry int32
|
||||||
|
MaxMinerFee int64
|
||||||
|
MaxSwapFee int64
|
||||||
|
InitiationHeight int32
|
||||||
|
ProtocolVersion int32
|
||||||
|
Label string
|
||||||
|
}
|
||||||
|
|
||||||
|
type SwapUpdate struct {
|
||||||
|
ID int32
|
||||||
|
SwapHash []byte
|
||||||
|
UpdateTimestamp time.Time
|
||||||
|
UpdateState int32
|
||||||
|
HtlcTxhash string
|
||||||
|
ServerCost int64
|
||||||
|
OnchainCost int64
|
||||||
|
OffchainCost int64
|
||||||
|
}
|
@ -0,0 +1,26 @@
|
|||||||
|
// Code generated by sqlc. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// sqlc v1.17.2
|
||||||
|
|
||||||
|
package sqlc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Querier interface {
|
||||||
|
FetchLiquidityParams(ctx context.Context) ([]byte, error)
|
||||||
|
GetLoopInSwap(ctx context.Context, swapHash []byte) (GetLoopInSwapRow, error)
|
||||||
|
GetLoopInSwaps(ctx context.Context) ([]GetLoopInSwapsRow, error)
|
||||||
|
GetLoopOutSwap(ctx context.Context, swapHash []byte) (GetLoopOutSwapRow, error)
|
||||||
|
GetLoopOutSwaps(ctx context.Context) ([]GetLoopOutSwapsRow, error)
|
||||||
|
GetSwapUpdates(ctx context.Context, swapHash []byte) ([]SwapUpdate, error)
|
||||||
|
InsertHtlcKeys(ctx context.Context, arg InsertHtlcKeysParams) error
|
||||||
|
InsertLoopIn(ctx context.Context, arg InsertLoopInParams) error
|
||||||
|
InsertLoopOut(ctx context.Context, arg InsertLoopOutParams) error
|
||||||
|
InsertSwap(ctx context.Context, arg InsertSwapParams) error
|
||||||
|
InsertSwapUpdate(ctx context.Context, arg InsertSwapUpdateParams) error
|
||||||
|
UpsertLiquidityParams(ctx context.Context, params []byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ Querier = (*Queries)(nil)
|
@ -0,0 +1,10 @@
|
|||||||
|
-- name: UpsertLiquidityParams :exec
|
||||||
|
INSERT INTO liquidity_params (
|
||||||
|
id, params
|
||||||
|
) VALUES (
|
||||||
|
1, $1
|
||||||
|
) ON CONFLICT (id) DO UPDATE SET
|
||||||
|
params = excluded.params;
|
||||||
|
|
||||||
|
-- name: FetchLiquidityParams :one
|
||||||
|
SELECT params FROM liquidity_params WHERE id = 1;
|
@ -0,0 +1,133 @@
|
|||||||
|
-- name: GetLoopOutSwaps :many
|
||||||
|
SELECT
|
||||||
|
swaps.*,
|
||||||
|
loopout_swaps.*,
|
||||||
|
htlc_keys.*
|
||||||
|
FROM
|
||||||
|
swaps
|
||||||
|
JOIN
|
||||||
|
loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
|
||||||
|
JOIN
|
||||||
|
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
|
||||||
|
ORDER BY
|
||||||
|
swaps.id;
|
||||||
|
|
||||||
|
-- name: GetLoopOutSwap :one
|
||||||
|
SELECT
|
||||||
|
swaps.*,
|
||||||
|
loopout_swaps.*,
|
||||||
|
htlc_keys.*
|
||||||
|
FROM
|
||||||
|
swaps
|
||||||
|
JOIN
|
||||||
|
loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
|
||||||
|
JOIN
|
||||||
|
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
|
||||||
|
WHERE
|
||||||
|
swaps.swap_hash = $1;
|
||||||
|
|
||||||
|
-- name: GetLoopInSwaps :many
|
||||||
|
SELECT
|
||||||
|
swaps.*,
|
||||||
|
loopin_swaps.*,
|
||||||
|
htlc_keys.*
|
||||||
|
FROM
|
||||||
|
swaps
|
||||||
|
JOIN
|
||||||
|
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
|
||||||
|
JOIN
|
||||||
|
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
|
||||||
|
ORDER BY
|
||||||
|
swaps.id;
|
||||||
|
|
||||||
|
-- name: GetLoopInSwap :one
|
||||||
|
SELECT
|
||||||
|
swaps.*,
|
||||||
|
loopin_swaps.*,
|
||||||
|
htlc_keys.*
|
||||||
|
FROM
|
||||||
|
swaps
|
||||||
|
JOIN
|
||||||
|
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
|
||||||
|
JOIN
|
||||||
|
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
|
||||||
|
WHERE
|
||||||
|
swaps.swap_hash = $1;
|
||||||
|
|
||||||
|
-- name: GetSwapUpdates :many
|
||||||
|
SELECT
|
||||||
|
*
|
||||||
|
FROM
|
||||||
|
swap_updates
|
||||||
|
WHERE
|
||||||
|
swap_hash = $1
|
||||||
|
ORDER BY
|
||||||
|
id ASC;
|
||||||
|
|
||||||
|
-- name: InsertSwap :exec
|
||||||
|
INSERT INTO swaps (
|
||||||
|
swap_hash,
|
||||||
|
preimage,
|
||||||
|
initiation_time,
|
||||||
|
amount_requested,
|
||||||
|
cltv_expiry,
|
||||||
|
max_miner_fee,
|
||||||
|
max_swap_fee,
|
||||||
|
initiation_height,
|
||||||
|
protocol_version,
|
||||||
|
label
|
||||||
|
) VALUES (
|
||||||
|
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
|
||||||
|
);
|
||||||
|
|
||||||
|
-- name: InsertSwapUpdate :exec
|
||||||
|
INSERT INTO swap_updates (
|
||||||
|
swap_hash,
|
||||||
|
update_timestamp,
|
||||||
|
update_state,
|
||||||
|
htlc_txhash,
|
||||||
|
server_cost,
|
||||||
|
onchain_cost,
|
||||||
|
offchain_cost
|
||||||
|
) VALUES (
|
||||||
|
$1, $2, $3, $4, $5, $6, $7
|
||||||
|
);
|
||||||
|
|
||||||
|
-- name: InsertLoopOut :exec
|
||||||
|
INSERT INTO loopout_swaps (
|
||||||
|
swap_hash,
|
||||||
|
dest_address,
|
||||||
|
swap_invoice,
|
||||||
|
max_swap_routing_fee,
|
||||||
|
sweep_conf_target,
|
||||||
|
htlc_confirmations,
|
||||||
|
outgoing_chan_set,
|
||||||
|
prepay_invoice,
|
||||||
|
max_prepay_routing_fee,
|
||||||
|
publication_deadline
|
||||||
|
) VALUES (
|
||||||
|
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
|
||||||
|
);
|
||||||
|
|
||||||
|
-- name: InsertLoopIn :exec
|
||||||
|
INSERT INTO loopin_swaps (
|
||||||
|
swap_hash,
|
||||||
|
htlc_conf_target,
|
||||||
|
last_hop,
|
||||||
|
external_htlc
|
||||||
|
) VALUES (
|
||||||
|
$1, $2, $3, $4
|
||||||
|
);
|
||||||
|
|
||||||
|
-- name: InsertHtlcKeys :exec
|
||||||
|
INSERT INTO htlc_keys(
|
||||||
|
swap_hash,
|
||||||
|
sender_script_pubkey,
|
||||||
|
receiver_script_pubkey,
|
||||||
|
sender_internal_pubkey,
|
||||||
|
receiver_internal_pubkey,
|
||||||
|
client_key_family,
|
||||||
|
client_key_index
|
||||||
|
) VALUES (
|
||||||
|
$1, $2, $3, $4, $5, $6, $7
|
||||||
|
);
|
@ -0,0 +1,584 @@
|
|||||||
|
// Code generated by sqlc. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// sqlc v1.17.2
|
||||||
|
// source: swaps.sql
|
||||||
|
|
||||||
|
package sqlc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const getLoopInSwap = `-- name: GetLoopInSwap :one
|
||||||
|
SELECT
|
||||||
|
swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
|
||||||
|
loopin_swaps.swap_hash, loopin_swaps.htlc_conf_target, loopin_swaps.last_hop, loopin_swaps.external_htlc,
|
||||||
|
htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
|
||||||
|
FROM
|
||||||
|
swaps
|
||||||
|
JOIN
|
||||||
|
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
|
||||||
|
JOIN
|
||||||
|
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
|
||||||
|
WHERE
|
||||||
|
swaps.swap_hash = $1
|
||||||
|
`
|
||||||
|
|
||||||
|
type GetLoopInSwapRow struct {
|
||||||
|
ID int32
|
||||||
|
SwapHash []byte
|
||||||
|
Preimage []byte
|
||||||
|
InitiationTime time.Time
|
||||||
|
AmountRequested int64
|
||||||
|
CltvExpiry int32
|
||||||
|
MaxMinerFee int64
|
||||||
|
MaxSwapFee int64
|
||||||
|
InitiationHeight int32
|
||||||
|
ProtocolVersion int32
|
||||||
|
Label string
|
||||||
|
SwapHash_2 []byte
|
||||||
|
HtlcConfTarget int32
|
||||||
|
LastHop []byte
|
||||||
|
ExternalHtlc bool
|
||||||
|
SwapHash_3 []byte
|
||||||
|
SenderScriptPubkey []byte
|
||||||
|
ReceiverScriptPubkey []byte
|
||||||
|
SenderInternalPubkey []byte
|
||||||
|
ReceiverInternalPubkey []byte
|
||||||
|
ClientKeyFamily int32
|
||||||
|
ClientKeyIndex int32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *Queries) GetLoopInSwap(ctx context.Context, swapHash []byte) (GetLoopInSwapRow, error) {
|
||||||
|
row := q.db.QueryRowContext(ctx, getLoopInSwap, swapHash)
|
||||||
|
var i GetLoopInSwapRow
|
||||||
|
err := row.Scan(
|
||||||
|
&i.ID,
|
||||||
|
&i.SwapHash,
|
||||||
|
&i.Preimage,
|
||||||
|
&i.InitiationTime,
|
||||||
|
&i.AmountRequested,
|
||||||
|
&i.CltvExpiry,
|
||||||
|
&i.MaxMinerFee,
|
||||||
|
&i.MaxSwapFee,
|
||||||
|
&i.InitiationHeight,
|
||||||
|
&i.ProtocolVersion,
|
||||||
|
&i.Label,
|
||||||
|
&i.SwapHash_2,
|
||||||
|
&i.HtlcConfTarget,
|
||||||
|
&i.LastHop,
|
||||||
|
&i.ExternalHtlc,
|
||||||
|
&i.SwapHash_3,
|
||||||
|
&i.SenderScriptPubkey,
|
||||||
|
&i.ReceiverScriptPubkey,
|
||||||
|
&i.SenderInternalPubkey,
|
||||||
|
&i.ReceiverInternalPubkey,
|
||||||
|
&i.ClientKeyFamily,
|
||||||
|
&i.ClientKeyIndex,
|
||||||
|
)
|
||||||
|
return i, err
|
||||||
|
}
|
||||||
|
|
||||||
|
const getLoopInSwaps = `-- name: GetLoopInSwaps :many
|
||||||
|
SELECT
|
||||||
|
swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
|
||||||
|
loopin_swaps.swap_hash, loopin_swaps.htlc_conf_target, loopin_swaps.last_hop, loopin_swaps.external_htlc,
|
||||||
|
htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
|
||||||
|
FROM
|
||||||
|
swaps
|
||||||
|
JOIN
|
||||||
|
loopin_swaps ON swaps.swap_hash = loopin_swaps.swap_hash
|
||||||
|
JOIN
|
||||||
|
htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
|
||||||
|
ORDER BY
|
||||||
|
swaps.id
|
||||||
|
`
|
||||||
|
|
||||||
|
type GetLoopInSwapsRow struct {
|
||||||
|
ID int32
|
||||||
|
SwapHash []byte
|
||||||
|
Preimage []byte
|
||||||
|
InitiationTime time.Time
|
||||||
|
AmountRequested int64
|
||||||
|
CltvExpiry int32
|
||||||
|
MaxMinerFee int64
|
||||||
|
MaxSwapFee int64
|
||||||
|
InitiationHeight int32
|
||||||
|
ProtocolVersion int32
|
||||||
|
Label string
|
||||||
|
SwapHash_2 []byte
|
||||||
|
HtlcConfTarget int32
|
||||||
|
LastHop []byte
|
||||||
|
ExternalHtlc bool
|
||||||
|
SwapHash_3 []byte
|
||||||
|
SenderScriptPubkey []byte
|
||||||
|
ReceiverScriptPubkey []byte
|
||||||
|
SenderInternalPubkey []byte
|
||||||
|
ReceiverInternalPubkey []byte
|
||||||
|
ClientKeyFamily int32
|
||||||
|
ClientKeyIndex int32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *Queries) GetLoopInSwaps(ctx context.Context) ([]GetLoopInSwapsRow, error) {
|
||||||
|
rows, err := q.db.QueryContext(ctx, getLoopInSwaps)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
var items []GetLoopInSwapsRow
|
||||||
|
for rows.Next() {
|
||||||
|
var i GetLoopInSwapsRow
|
||||||
|
if err := rows.Scan(
|
||||||
|
&i.ID,
|
||||||
|
&i.SwapHash,
|
||||||
|
&i.Preimage,
|
||||||
|
&i.InitiationTime,
|
||||||
|
&i.AmountRequested,
|
||||||
|
&i.CltvExpiry,
|
||||||
|
&i.MaxMinerFee,
|
||||||
|
&i.MaxSwapFee,
|
||||||
|
&i.InitiationHeight,
|
||||||
|
&i.ProtocolVersion,
|
||||||
|
&i.Label,
|
||||||
|
&i.SwapHash_2,
|
||||||
|
&i.HtlcConfTarget,
|
||||||
|
&i.LastHop,
|
||||||
|
&i.ExternalHtlc,
|
||||||
|
&i.SwapHash_3,
|
||||||
|
&i.SenderScriptPubkey,
|
||||||
|
&i.ReceiverScriptPubkey,
|
||||||
|
&i.SenderInternalPubkey,
|
||||||
|
&i.ReceiverInternalPubkey,
|
||||||
|
&i.ClientKeyFamily,
|
||||||
|
&i.ClientKeyIndex,
|
||||||
|
); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
items = append(items, i)
|
||||||
|
}
|
||||||
|
if err := rows.Close(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := rows.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return items, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getLoopOutSwap selects a single loop-out swap by swap hash, joining the
// base swaps row with its loopout_swaps fields and htlc_keys.
const getLoopOutSwap = `-- name: GetLoopOutSwap :one
SELECT
    swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
    loopout_swaps.swap_hash, loopout_swaps.dest_address, loopout_swaps.swap_invoice, loopout_swaps.max_swap_routing_fee, loopout_swaps.sweep_conf_target, loopout_swaps.htlc_confirmations, loopout_swaps.outgoing_chan_set, loopout_swaps.prepay_invoice, loopout_swaps.max_prepay_routing_fee, loopout_swaps.publication_deadline,
    htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
FROM
    swaps
JOIN
    loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
JOIN
    htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
WHERE
    swaps.swap_hash = $1
`

// GetLoopOutSwapRow is the scan target for getLoopOutSwap. SwapHash_2 and
// SwapHash_3 hold the duplicated swap_hash columns selected from the joined
// loopout_swaps and htlc_keys tables respectively.
type GetLoopOutSwapRow struct {
	ID                     int32
	SwapHash               []byte
	Preimage               []byte
	InitiationTime         time.Time
	AmountRequested        int64
	CltvExpiry             int32
	MaxMinerFee            int64
	MaxSwapFee             int64
	InitiationHeight       int32
	ProtocolVersion        int32
	Label                  string
	SwapHash_2             []byte
	DestAddress            string
	SwapInvoice            string
	MaxSwapRoutingFee      int64
	SweepConfTarget        int32
	HtlcConfirmations      int32
	OutgoingChanSet        string
	PrepayInvoice          string
	MaxPrepayRoutingFee    int64
	PublicationDeadline    time.Time
	SwapHash_3             []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// GetLoopOutSwap runs the getLoopOutSwap query for the given swap hash and
// scans the single matching row.
func (q *Queries) GetLoopOutSwap(ctx context.Context, swapHash []byte) (GetLoopOutSwapRow, error) {
	row := q.db.QueryRowContext(ctx, getLoopOutSwap, swapHash)
	var i GetLoopOutSwapRow
	err := row.Scan(
		&i.ID,
		&i.SwapHash,
		&i.Preimage,
		&i.InitiationTime,
		&i.AmountRequested,
		&i.CltvExpiry,
		&i.MaxMinerFee,
		&i.MaxSwapFee,
		&i.InitiationHeight,
		&i.ProtocolVersion,
		&i.Label,
		&i.SwapHash_2,
		&i.DestAddress,
		&i.SwapInvoice,
		&i.MaxSwapRoutingFee,
		&i.SweepConfTarget,
		&i.HtlcConfirmations,
		&i.OutgoingChanSet,
		&i.PrepayInvoice,
		&i.MaxPrepayRoutingFee,
		&i.PublicationDeadline,
		&i.SwapHash_3,
		&i.SenderScriptPubkey,
		&i.ReceiverScriptPubkey,
		&i.SenderInternalPubkey,
		&i.ReceiverInternalPubkey,
		&i.ClientKeyFamily,
		&i.ClientKeyIndex,
	)
	return i, err
}
|
||||||
|
|
||||||
|
// getLoopOutSwaps selects every loop-out swap, joining the base swaps rows
// with their loopout_swaps fields and htlc_keys, ordered by the swaps id.
const getLoopOutSwaps = `-- name: GetLoopOutSwaps :many
SELECT
    swaps.id, swaps.swap_hash, swaps.preimage, swaps.initiation_time, swaps.amount_requested, swaps.cltv_expiry, swaps.max_miner_fee, swaps.max_swap_fee, swaps.initiation_height, swaps.protocol_version, swaps.label,
    loopout_swaps.swap_hash, loopout_swaps.dest_address, loopout_swaps.swap_invoice, loopout_swaps.max_swap_routing_fee, loopout_swaps.sweep_conf_target, loopout_swaps.htlc_confirmations, loopout_swaps.outgoing_chan_set, loopout_swaps.prepay_invoice, loopout_swaps.max_prepay_routing_fee, loopout_swaps.publication_deadline,
    htlc_keys.swap_hash, htlc_keys.sender_script_pubkey, htlc_keys.receiver_script_pubkey, htlc_keys.sender_internal_pubkey, htlc_keys.receiver_internal_pubkey, htlc_keys.client_key_family, htlc_keys.client_key_index
FROM
    swaps
JOIN
    loopout_swaps ON swaps.swap_hash = loopout_swaps.swap_hash
JOIN
    htlc_keys ON swaps.swap_hash = htlc_keys.swap_hash
ORDER BY
    swaps.id
`

// GetLoopOutSwapsRow is the scan target for getLoopOutSwaps. SwapHash_2 and
// SwapHash_3 hold the duplicated swap_hash columns selected from the joined
// loopout_swaps and htlc_keys tables respectively.
type GetLoopOutSwapsRow struct {
	ID                     int32
	SwapHash               []byte
	Preimage               []byte
	InitiationTime         time.Time
	AmountRequested        int64
	CltvExpiry             int32
	MaxMinerFee            int64
	MaxSwapFee             int64
	InitiationHeight       int32
	ProtocolVersion        int32
	Label                  string
	SwapHash_2             []byte
	DestAddress            string
	SwapInvoice            string
	MaxSwapRoutingFee      int64
	SweepConfTarget        int32
	HtlcConfirmations      int32
	OutgoingChanSet        string
	PrepayInvoice          string
	MaxPrepayRoutingFee    int64
	PublicationDeadline    time.Time
	SwapHash_3             []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// GetLoopOutSwaps executes the getLoopOutSwaps query and scans every result
// row, returning all loop-out swaps ordered by id.
func (q *Queries) GetLoopOutSwaps(ctx context.Context) ([]GetLoopOutSwapsRow, error) {
	rows, err := q.db.QueryContext(ctx, getLoopOutSwaps)
	if err != nil {
		return nil, err
	}
	// Deferred close is a safety net; Close is also called explicitly
	// below so its error can be surfaced.
	defer rows.Close()
	var items []GetLoopOutSwapsRow
	for rows.Next() {
		var i GetLoopOutSwapsRow
		// Scan targets must match the column order of the SELECT in
		// getLoopOutSwaps exactly.
		if err := rows.Scan(
			&i.ID,
			&i.SwapHash,
			&i.Preimage,
			&i.InitiationTime,
			&i.AmountRequested,
			&i.CltvExpiry,
			&i.MaxMinerFee,
			&i.MaxSwapFee,
			&i.InitiationHeight,
			&i.ProtocolVersion,
			&i.Label,
			&i.SwapHash_2,
			&i.DestAddress,
			&i.SwapInvoice,
			&i.MaxSwapRoutingFee,
			&i.SweepConfTarget,
			&i.HtlcConfirmations,
			&i.OutgoingChanSet,
			&i.PrepayInvoice,
			&i.MaxPrepayRoutingFee,
			&i.PublicationDeadline,
			&i.SwapHash_3,
			&i.SenderScriptPubkey,
			&i.ReceiverScriptPubkey,
			&i.SenderInternalPubkey,
			&i.ReceiverInternalPubkey,
			&i.ClientKeyFamily,
			&i.ClientKeyIndex,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||||
|
|
||||||
|
// getSwapUpdates selects all state updates recorded for one swap, ordered by
// insertion id (ascending, i.e. oldest first).
const getSwapUpdates = `-- name: GetSwapUpdates :many
SELECT
    id, swap_hash, update_timestamp, update_state, htlc_txhash, server_cost, onchain_cost, offchain_cost
FROM
    swap_updates
WHERE
    swap_hash = $1
ORDER BY
    id ASC
`

// GetSwapUpdates returns every recorded update for the swap identified by
// swapHash, oldest first.
func (q *Queries) GetSwapUpdates(ctx context.Context, swapHash []byte) ([]SwapUpdate, error) {
	rows, err := q.db.QueryContext(ctx, getSwapUpdates, swapHash)
	if err != nil {
		return nil, err
	}
	// Deferred close is a safety net; Close is also called explicitly
	// below so its error can be surfaced.
	defer rows.Close()
	var items []SwapUpdate
	for rows.Next() {
		var i SwapUpdate
		if err := rows.Scan(
			&i.ID,
			&i.SwapHash,
			&i.UpdateTimestamp,
			&i.UpdateState,
			&i.HtlcTxhash,
			&i.ServerCost,
			&i.OnchainCost,
			&i.OffchainCost,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||||
|
|
||||||
|
// insertHtlcKeys inserts the HTLC key material for one swap.
const insertHtlcKeys = `-- name: InsertHtlcKeys :exec
INSERT INTO htlc_keys(
    swap_hash,
    sender_script_pubkey,
    receiver_script_pubkey,
    sender_internal_pubkey,
    receiver_internal_pubkey,
    client_key_family,
    client_key_index
) VALUES (
    $1, $2, $3, $4, $5, $6, $7
)
`

// InsertHtlcKeysParams carries the column values for insertHtlcKeys, in the
// same order as the query's placeholders.
type InsertHtlcKeysParams struct {
	SwapHash               []byte
	SenderScriptPubkey     []byte
	ReceiverScriptPubkey   []byte
	SenderInternalPubkey   []byte
	ReceiverInternalPubkey []byte
	ClientKeyFamily        int32
	ClientKeyIndex         int32
}

// InsertHtlcKeys stores the HTLC keys for a swap, keyed by its swap hash.
func (q *Queries) InsertHtlcKeys(ctx context.Context, arg InsertHtlcKeysParams) error {
	_, err := q.db.ExecContext(ctx, insertHtlcKeys,
		arg.SwapHash,
		arg.SenderScriptPubkey,
		arg.ReceiverScriptPubkey,
		arg.SenderInternalPubkey,
		arg.ReceiverInternalPubkey,
		arg.ClientKeyFamily,
		arg.ClientKeyIndex,
	)
	return err
}
|
||||||
|
|
||||||
|
// insertLoopIn inserts the loop-in specific fields for one swap.
const insertLoopIn = `-- name: InsertLoopIn :exec
INSERT INTO loopin_swaps (
    swap_hash,
    htlc_conf_target,
    last_hop,
    external_htlc
) VALUES (
    $1, $2, $3, $4
)
`

// InsertLoopInParams carries the column values for insertLoopIn, in the same
// order as the query's placeholders.
type InsertLoopInParams struct {
	SwapHash       []byte
	HtlcConfTarget int32
	LastHop        []byte
	ExternalHtlc   bool
}

// InsertLoopIn stores the loop-in specific data for a swap, keyed by its
// swap hash.
func (q *Queries) InsertLoopIn(ctx context.Context, arg InsertLoopInParams) error {
	_, err := q.db.ExecContext(ctx, insertLoopIn,
		arg.SwapHash,
		arg.HtlcConfTarget,
		arg.LastHop,
		arg.ExternalHtlc,
	)
	return err
}
|
||||||
|
|
||||||
|
// insertLoopOut inserts the loop-out specific fields for one swap.
const insertLoopOut = `-- name: InsertLoopOut :exec
INSERT INTO loopout_swaps (
    swap_hash,
    dest_address,
    swap_invoice,
    max_swap_routing_fee,
    sweep_conf_target,
    htlc_confirmations,
    outgoing_chan_set,
    prepay_invoice,
    max_prepay_routing_fee,
    publication_deadline
) VALUES (
    $1, $2, $3, $4, $5, $6, $7, $8, $9, $10
)
`

// InsertLoopOutParams carries the column values for insertLoopOut, in the
// same order as the query's placeholders.
type InsertLoopOutParams struct {
	SwapHash            []byte
	DestAddress         string
	SwapInvoice         string
	MaxSwapRoutingFee   int64
	SweepConfTarget     int32
	HtlcConfirmations   int32
	OutgoingChanSet     string
	PrepayInvoice       string
	MaxPrepayRoutingFee int64
	PublicationDeadline time.Time
}

// InsertLoopOut stores the loop-out specific data for a swap, keyed by its
// swap hash.
func (q *Queries) InsertLoopOut(ctx context.Context, arg InsertLoopOutParams) error {
	_, err := q.db.ExecContext(ctx, insertLoopOut,
		arg.SwapHash,
		arg.DestAddress,
		arg.SwapInvoice,
		arg.MaxSwapRoutingFee,
		arg.SweepConfTarget,
		arg.HtlcConfirmations,
		arg.OutgoingChanSet,
		arg.PrepayInvoice,
		arg.MaxPrepayRoutingFee,
		arg.PublicationDeadline,
	)
	return err
}
|
||||||
|
|
||||||
|
// insertSwap inserts the base row shared by all swap types.
const insertSwap = `-- name: InsertSwap :exec
INSERT INTO swaps (
    swap_hash,
    preimage,
    initiation_time,
    amount_requested,
    cltv_expiry,
    max_miner_fee,
    max_swap_fee,
    initiation_height,
    protocol_version,
    label
) VALUES (
    $1, $2, $3, $4, $5, $6, $7, $8, $9, $10
)
`

// InsertSwapParams carries the column values for insertSwap, in the same
// order as the query's placeholders.
type InsertSwapParams struct {
	SwapHash         []byte
	Preimage         []byte
	InitiationTime   time.Time
	AmountRequested  int64
	CltvExpiry       int32
	MaxMinerFee      int64
	MaxSwapFee       int64
	InitiationHeight int32
	ProtocolVersion  int32
	Label            string
}

// InsertSwap stores the base swap row shared by loop-in and loop-out swaps.
func (q *Queries) InsertSwap(ctx context.Context, arg InsertSwapParams) error {
	_, err := q.db.ExecContext(ctx, insertSwap,
		arg.SwapHash,
		arg.Preimage,
		arg.InitiationTime,
		arg.AmountRequested,
		arg.CltvExpiry,
		arg.MaxMinerFee,
		arg.MaxSwapFee,
		arg.InitiationHeight,
		arg.ProtocolVersion,
		arg.Label,
	)
	return err
}
|
||||||
|
|
||||||
|
// insertSwapUpdate appends one state update row for a swap.
const insertSwapUpdate = `-- name: InsertSwapUpdate :exec
INSERT INTO swap_updates (
    swap_hash,
    update_timestamp,
    update_state,
    htlc_txhash,
    server_cost,
    onchain_cost,
    offchain_cost
) VALUES (
    $1, $2, $3, $4, $5, $6, $7
)
`

// InsertSwapUpdateParams carries the column values for insertSwapUpdate, in
// the same order as the query's placeholders.
type InsertSwapUpdateParams struct {
	SwapHash        []byte
	UpdateTimestamp time.Time
	UpdateState     int32
	HtlcTxhash      string
	ServerCost      int64
	OnchainCost     int64
	OffchainCost    int64
}

// InsertSwapUpdate records a new state update for the swap identified by
// arg.SwapHash.
func (q *Queries) InsertSwapUpdate(ctx context.Context, arg InsertSwapUpdateParams) error {
	_, err := q.db.ExecContext(ctx, insertSwapUpdate,
		arg.SwapHash,
		arg.UpdateTimestamp,
		arg.UpdateState,
		arg.HtlcTxhash,
		arg.ServerCost,
		arg.OnchainCost,
		arg.OffchainCost,
	)
	return err
}
|
@ -0,0 +1,71 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/jackc/pgconn"
|
||||||
|
"github.com/jackc/pgerrcode"
|
||||||
|
"modernc.org/sqlite"
|
||||||
|
sqlite3 "modernc.org/sqlite/lib"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MapSQLError attempts to interpret a given error as a database agnostic SQL
|
||||||
|
// error.
|
||||||
|
func MapSQLError(err error) error {
|
||||||
|
// Attempt to interpret the error as a sqlite error.
|
||||||
|
var sqliteErr *sqlite.Error
|
||||||
|
if errors.As(err, &sqliteErr) {
|
||||||
|
return parseSqliteError(sqliteErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt to interpret the error as a postgres error.
|
||||||
|
var pqErr *pgconn.PgError
|
||||||
|
if errors.As(err, &pqErr) {
|
||||||
|
return parsePostgresError(pqErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return original error if it could not be classified as a database
|
||||||
|
// specific error.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseSqliteError attempts to parse a sqlite error as a database agnostic
// SQL error. (The original comment named parsePostgresError here, which was
// a copy-paste mistake.)
func parseSqliteError(sqliteErr *sqlite.Error) error {
	switch sqliteErr.Code() {
	// Handle unique constraint violation error.
	case sqlite3.SQLITE_CONSTRAINT_UNIQUE:
		return &ErrSqlUniqueConstraintViolation{
			DbError: sqliteErr,
		}

	// Any other sqlite code is wrapped but left unclassified.
	default:
		return fmt.Errorf("unknown sqlite error: %w", sqliteErr)
	}
}
|
||||||
|
|
||||||
|
// parsePostgresError attempts to parse a postgres error as a database agnostic
// SQL error.
func parsePostgresError(pqErr *pgconn.PgError) error {
	switch pqErr.Code {
	// Handle unique constraint violation error.
	case pgerrcode.UniqueViolation:
		return &ErrSqlUniqueConstraintViolation{
			DbError: pqErr,
		}

	// Any other postgres code is wrapped but left unclassified.
	default:
		return fmt.Errorf("unknown postgres error: %w", pqErr)
	}
}
|
||||||
|
|
||||||
|
// ErrSqlUniqueConstraintViolation is a database agnostic representation of a
// SQL unique constraint violation, wrapping the backend specific error that
// triggered it.
type ErrSqlUniqueConstraintViolation struct {
	// DbError is the underlying backend specific error.
	DbError error
}

// Error returns a human readable description that includes the wrapped
// backend error.
func (e ErrSqlUniqueConstraintViolation) Error() string {
	return fmt.Sprintf("sql unique constraint violation: %v", e.DbError)
}
|
@ -0,0 +1,221 @@
|
|||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/btcsuite/btcd/chaincfg"
|
||||||
|
sqlite_migrate "github.com/golang-migrate/migrate/v4/database/sqlite"
|
||||||
|
"github.com/lightninglabs/loop/loopdb/sqlc"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
_ "modernc.org/sqlite" // Register relevant drivers.
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// sqliteOptionPrefix is the string prefix sqlite uses to set various
	// options. This is used in the following format:
	//   * sqliteOptionPrefix || option_name = option_value.
	// For example: _pragma=foreign_keys=on (see NewSqliteStore).
	sqliteOptionPrefix = "_pragma"
)
|
||||||
|
|
||||||
|
// SqliteConfig holds all the config arguments needed to interact with our
// sqlite DB.
type SqliteConfig struct {
	// SkipMigrations, if true, causes the schema migrations NOT to be
	// applied on startup. By default (false) the tables are created or
	// updated on start up if they don't already exist. (The previous
	// comment described the inverse of the flag's behavior.)
	SkipMigrations bool `long:"skipmigrations" description:"Skip applying migrations on startup."`

	// DatabaseFileName is the full file path where the database file can be
	// found.
	DatabaseFileName string `long:"dbfile" description:"The full path to the database."`
}
|
||||||
|
|
||||||
|
// SqliteSwapStore is a sqlite3 based database for the loop daemon. It embeds
// BaseDB for the shared query/transaction functionality.
type SqliteSwapStore struct {
	// cfg is the sqlite config the store was opened with.
	cfg *SqliteConfig

	*BaseDB
}
|
||||||
|
|
||||||
|
// NewSqliteStore attempts to open a new sqlite database based on the passed
// config, optionally applying all schema migrations before handing back the
// store.
func NewSqliteStore(cfg *SqliteConfig, network *chaincfg.Params) (*SqliteSwapStore, error) {
	// The set of pragma options are accepted using query options. We
	// enforce foreign key constraints, enable WAL journaling and set a
	// busy timeout so concurrent access doesn't fail immediately.
	pragmaOptions := []struct {
		name  string
		value string
	}{
		{
			name:  "foreign_keys",
			value: "on",
		},
		{
			name:  "journal_mode",
			value: "WAL",
		},
		{
			// Busy timeout in milliseconds.
			name:  "busy_timeout",
			value: "5000",
		},
	}
	sqliteOptions := make(url.Values)
	for _, option := range pragmaOptions {
		sqliteOptions.Add(
			sqliteOptionPrefix,
			fmt.Sprintf("%v=%v", option.name, option.value),
		)
	}

	// Construct the DSN which is just the database file name, appended
	// with the series of pragma options as a query URL string. For more
	// details on the formatting here, see the modernc.org/sqlite docs:
	// https://pkg.go.dev/modernc.org/sqlite#Driver.Open.
	dsn := fmt.Sprintf(
		"%v?%v", cfg.DatabaseFileName, sqliteOptions.Encode(),
	)
	db, err := sql.Open("sqlite", dsn)
	if err != nil {
		return nil, err
	}

	if !cfg.SkipMigrations {
		// Now that the database is open, populate the database with
		// our set of schemas based on our embedded in-memory file
		// system.
		//
		// First, we'll need to open up a new migration instance for
		// our current target database: sqlite.
		driver, err := sqlite_migrate.WithInstance(
			db, &sqlite_migrate.Config{},
		)
		if err != nil {
			return nil, err
		}

		err = applyMigrations(
			sqlSchemas, driver, "sqlc/migrations", "sqlc",
		)
		if err != nil {
			return nil, err
		}
	}

	queries := sqlc.New(db)

	return &SqliteSwapStore{
		cfg: cfg,
		BaseDB: &BaseDB{
			DB:      db,
			Queries: queries,
			network: network,
		},
	}, nil
}
|
||||||
|
|
||||||
|
// NewTestSqliteDB is a helper function that creates an SQLite database for
// testing. The database file lives in a per-test temp dir, migrations are
// applied, and the connection is closed automatically via t.Cleanup.
func NewTestSqliteDB(t *testing.T) *SqliteSwapStore {
	t.Helper()

	t.Logf("Creating new SQLite DB for testing")

	dbFileName := filepath.Join(t.TempDir(), "tmp.db")
	sqlDB, err := NewSqliteStore(&SqliteConfig{
		DatabaseFileName: dbFileName,
		SkipMigrations:   false,
	}, &chaincfg.MainNetParams)
	require.NoError(t, err)

	t.Cleanup(func() {
		require.NoError(t, sqlDB.DB.Close())
	})

	return sqlDB
}
|
||||||
|
|
||||||
|
// BaseDB is the base database struct that each implementation can embed to
// gain some common functionality.
type BaseDB struct {
	// network is the chain parameters the store was opened with.
	network *chaincfg.Params

	// The raw database handle.
	*sql.DB

	// The generated sqlc query methods.
	*sqlc.Queries
}
|
||||||
|
|
||||||
|
// BeginTx wraps the normal sql specific BeginTx method with the TxOptions
|
||||||
|
// interface. This interface is then mapped to the concrete sql tx options
|
||||||
|
// struct.
|
||||||
|
func (db *BaseDB) BeginTx(ctx context.Context,
|
||||||
|
opts TxOptions) (*sql.Tx, error) {
|
||||||
|
|
||||||
|
sqlOptions := sql.TxOptions{
|
||||||
|
ReadOnly: opts.ReadOnly(),
|
||||||
|
}
|
||||||
|
return db.DB.BeginTx(ctx, &sqlOptions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecTx is a wrapper for txBody to abstract the creation and commit of a db
// transaction. The db transaction is embedded in a `*sqlc.Queries` that
// txBody needs to use when executing each one of the queries that need to be
// applied atomically. (The previous comment said `*postgres.Queries`, which
// does not match the callback's actual parameter type.)
func (db *BaseDB) ExecTx(ctx context.Context, txOptions TxOptions,
	txBody func(*sqlc.Queries) error) error {

	// Create the db transaction.
	tx, err := db.BeginTx(ctx, txOptions)
	if err != nil {
		return err
	}

	// Rollback is safe to call even if the tx is already closed, so if
	// the tx commits successfully, this is a no-op.
	defer tx.Rollback() //nolint: errcheck

	// Run the caller's queries against the transaction-bound Queries.
	if err := txBody(db.Queries.WithTx(tx)); err != nil {
		return err
	}

	// Commit transaction.
	if err = tx.Commit(); err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// TxOptions represents a set of options one can use to control what type of
// database transaction is created. A transaction can either be read-only or
// read-write.
type TxOptions interface {
	// ReadOnly returns true if the transaction should be read only.
	ReadOnly() bool
}
|
||||||
|
|
||||||
|
// SqliteTxOptions defines the set of db txn options understood by the
// store's BeginTx/ExecTx helpers. It implements the TxOptions interface.
type SqliteTxOptions struct {
	// readOnly governs if a read only transaction is needed or not.
	readOnly bool
}

// NewSqlReadOpts returns a new SqliteTxOptions instance that triggers a
// read-only transaction. (The previous doc comment referenced a
// non-existent NewKeyStoreReadOpts/KeyStoreTxOptions.)
func NewSqlReadOpts() *SqliteTxOptions {
	return &SqliteTxOptions{
		readOnly: true,
	}
}

// ReadOnly returns true if the transaction should be read only.
//
// NOTE: This implements the TxOptions interface.
func (r *SqliteTxOptions) ReadOnly() bool {
	return r.readOnly
}
|
@ -0,0 +1,13 @@
|
|||||||
|
//go:build test_db_postgres
|
||||||
|
// +build test_db_postgres
|
||||||
|
|
||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewTestDB is a helper function that creates a Postgres database for
// testing. This variant is compiled in only when the test_db_postgres build
// tag is set; it delegates to NewTestPostgresDB.
func NewTestDB(t *testing.T) *PostgresStore {
	return NewTestPostgresDB(t)
}
|
@ -0,0 +1,13 @@
|
|||||||
|
//go:build !test_db_postgres
|
||||||
|
// +build !test_db_postgres
|
||||||
|
|
||||||
|
package loopdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewTestDB is a helper function that creates an SQLite database for
// testing. This variant is the default (compiled when the test_db_postgres
// build tag is NOT set); it delegates to NewTestSqliteDB.
func NewTestDB(t *testing.T) *SqliteSwapStore {
	return NewTestSqliteDB(t)
}
|
@ -0,0 +1,20 @@
|
|||||||
|
#!/bin/bash

# Regenerate the Go sql models and queries with sqlc, run inside a pinned
# docker image so the output is reproducible across machines.
set -e

# Directory of the script file, independent of where it's called from.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Use the user's cache directories.
# NOTE(review): these variables are captured but not referenced below —
# confirm whether they were meant to be mounted into the container.
GOCACHE=$(go env GOCACHE)
GOMODCACHE=$(go env GOMODCACHE)

echo "Generating sql models and queries in go..."

# Run sqlc as the invoking user so generated files aren't root-owned, with
# the repository root mounted at /build.
docker run \
  --rm \
  --user "$UID:$(id -g)" \
  -e UID=$UID \
  -v "$DIR/../:/build" \
  -w /build \
  kjconroy/sqlc:1.17.2 generate
@ -0,0 +1,10 @@
|
|||||||
|
# sqlc code generation configuration (v2 config format).
version: "2"
sql:
  # Queries are written against the postgresql engine; see the loopdb
  # package for how the generated code is wired to the stores.
  - engine: "postgresql"
    schema: "loopdb/sqlc/migrations"
    queries: "loopdb/sqlc/queries"
    gen:
      go:
        # Generated Go code is emitted into the loopdb/sqlc package.
        out: loopdb/sqlc
        package: sqlc
        # Also emit the Querier interface for the generated methods.
        emit_interface: true
Loading…
Reference in New Issue