Upgrade xorm to v1.1.1 (#16339)

parent 32fd11395b
commit 760af187ba
go.mod

@@ -139,7 +139,7 @@ require (
 	mvdan.cc/xurls/v2 v2.2.0
 	strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
 	xorm.io/builder v0.3.9
-	xorm.io/xorm v1.1.0
+	xorm.io/xorm v1.1.1
 )

 replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1

go.sum

@@ -1615,5 +1615,5 @@ xorm.io/builder v0.3.8/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
 xorm.io/builder v0.3.9 h1:Sd65/LdWyO7LR8+Cbd+e7mm3sK/7U9k0jS3999IDHMc=
 xorm.io/builder v0.3.9/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
 xorm.io/xorm v1.0.6/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
-xorm.io/xorm v1.1.0 h1:mkEsQXLauZajiOld2cB2PkFcUZKePepPgs1bC1dw8RA=
-xorm.io/xorm v1.1.0/go.mod h1:EDzNHMuCVZNszkIRSLL2nI0zX+nQE8RstAVranlSfqI=
+xorm.io/xorm v1.1.1 h1:cc1yot5rhoBucfk2lgZPZPEuI/9QsVvHuQpjI0wmcf8=
+xorm.io/xorm v1.1.1/go.mod h1:Cb0DKYTHbyECMaSfgRnIZp5aiUgQozxcJJ0vzcLGJSg=

@@ -1037,7 +1037,7 @@ strk.kbt.io/projects/go/libravatar
 # xorm.io/builder v0.3.9
 ## explicit
 xorm.io/builder
-# xorm.io/xorm v1.1.0
+# xorm.io/xorm v1.1.1
 ## explicit
 xorm.io/xorm
 xorm.io/xorm/caches

@ -1,190 +1,65 @@
|
||||||
---
|
|
||||||
kind: pipeline
|
|
||||||
name: testing
|
|
||||||
steps:
|
|
||||||
- name: restore-cache
|
|
||||||
image: meltwater/drone-cache
|
|
||||||
pull: always
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-vet
|
|
||||||
image: golang:1.15
|
|
||||||
environment:
|
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
commands:
|
|
||||||
- make vet
|
|
||||||
- make fmt-check
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
when:
|
|
||||||
event:
|
|
||||||
- push
|
|
||||||
- pull_request
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache
|
|
||||||
pull: true
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
---
|
|
||||||
kind: pipeline
|
|
||||||
name: test-sqlite
|
|
||||||
depends_on:
|
|
||||||
- testing
|
|
||||||
steps:
|
|
||||||
- name: restore-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
pull: always
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-sqlite3
|
|
||||||
image: golang:1.15
|
|
||||||
environment:
|
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
commands:
|
|
||||||
- make test-sqlite3
|
|
||||||
- TEST_CACHE_ENABLE=true make test-sqlite3
|
|
||||||
- TEST_QUOTE_POLICY=reserved make test-sqlite3
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-sqlite
|
|
||||||
image: golang:1.15
|
|
||||||
environment:
|
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
commands:
|
|
||||||
- make test-sqlite
|
|
||||||
- TEST_CACHE_ENABLE=true make test-sqlite
|
|
||||||
- TEST_QUOTE_POLICY=reserved make test-sqlite
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
pull: true
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
name: test-mysql
|
name: test-mysql
|
||||||
depends_on:
|
environment:
|
||||||
- testing
|
GO111MODULE: "on"
|
||||||
|
GOPROXY: "https://goproxy.io"
|
||||||
|
CGO_ENABLED: 1
|
||||||
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: restore-cache
|
- name: test-vet
|
||||||
image: meltwater/drone-cache
|
image: golang:1.15
|
||||||
pull: always
|
pull: always
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
path: /go
|
path: /go/pkg/mod
|
||||||
|
commands:
|
||||||
|
- make vet
|
||||||
|
- name: test-sqlite3
|
||||||
|
image: golang:1.15
|
||||||
|
volumes:
|
||||||
|
- name: cache
|
||||||
|
path: /go/pkg/mod
|
||||||
|
depends_on:
|
||||||
|
- test-vet
|
||||||
|
commands:
|
||||||
|
- make fmt-check
|
||||||
|
- make test
|
||||||
|
- make test-sqlite3
|
||||||
|
- TEST_CACHE_ENABLE=true make test-sqlite3
|
||||||
|
- TEST_QUOTE_POLICY=reserved make test-sqlite3
|
||||||
|
- make test-sqlite
|
||||||
|
- TEST_CACHE_ENABLE=true make test-sqlite
|
||||||
|
- TEST_QUOTE_POLICY=reserved make test-sqlite
|
||||||
- name: test-mysql
|
- name: test-mysql
|
||||||
image: golang:1.15
|
image: golang:1.15
|
||||||
|
pull: never
|
||||||
|
volumes:
|
||||||
|
- name: cache
|
||||||
|
path: /go/pkg/mod
|
||||||
|
depends_on:
|
||||||
|
- test-vet
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_MYSQL_HOST: mysql
|
TEST_MYSQL_HOST: mysql
|
||||||
TEST_MYSQL_CHARSET: utf8
|
TEST_MYSQL_CHARSET: utf8
|
||||||
TEST_MYSQL_DBNAME: xorm_test
|
TEST_MYSQL_DBNAME: xorm_test
|
||||||
TEST_MYSQL_USERNAME: root
|
TEST_MYSQL_USERNAME: root
|
||||||
TEST_MYSQL_PASSWORD:
|
TEST_MYSQL_PASSWORD:
|
||||||
commands:
|
commands:
|
||||||
- make test
|
|
||||||
- make test-mysql
|
|
||||||
- TEST_CACHE_ENABLE=true make test-mysql
|
- TEST_CACHE_ENABLE=true make test-mysql
|
||||||
- TEST_QUOTE_POLICY=reserved make test-mysql
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-mysql-utf8mb4
|
- name: test-mysql-utf8mb4
|
||||||
image: golang:1.15
|
image: golang:1.15
|
||||||
|
pull: never
|
||||||
|
volumes:
|
||||||
|
- name: cache
|
||||||
|
path: /go/pkg/mod
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-mysql
|
- test-mysql
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_MYSQL_HOST: mysql
|
TEST_MYSQL_HOST: mysql
|
||||||
TEST_MYSQL_CHARSET: utf8mb4
|
TEST_MYSQL_CHARSET: utf8mb4
|
||||||
TEST_MYSQL_DBNAME: xorm_test
|
TEST_MYSQL_DBNAME: xorm_test
|
||||||
|
@ -192,62 +67,15 @@ steps:
|
||||||
TEST_MYSQL_PASSWORD:
|
TEST_MYSQL_PASSWORD:
|
||||||
commands:
|
commands:
|
||||||
- make test-mysql
|
- make test-mysql
|
||||||
- TEST_CACHE_ENABLE=true make test-mysql
|
|
||||||
- TEST_QUOTE_POLICY=reserved make test-mysql
|
- TEST_QUOTE_POLICY=reserved make test-mysql
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-mymysql
|
|
||||||
pull: default
|
|
||||||
image: golang:1.15
|
|
||||||
depends_on:
|
|
||||||
- test-mysql-utf8mb4
|
|
||||||
environment:
|
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_MYSQL_HOST: mysql:3306
|
|
||||||
TEST_MYSQL_DBNAME: xorm_test
|
|
||||||
TEST_MYSQL_USERNAME: root
|
|
||||||
TEST_MYSQL_PASSWORD:
|
|
||||||
commands:
|
|
||||||
- make test-mymysql
|
|
||||||
- TEST_CACHE_ENABLE=true make test-mymysql
|
|
||||||
- TEST_QUOTE_POLICY=reserved make test-mymysql
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache
|
|
||||||
depends_on:
|
|
||||||
- test-mysql
|
|
||||||
- test-mysql-utf8mb4
|
|
||||||
- test-mymysql
|
|
||||||
pull: true
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
temp: {}
|
host:
|
||||||
|
path: /tmp/cache
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- name: mysql
|
- name: mysql
|
||||||
pull: default
|
|
||||||
image: mysql:5.7
|
image: mysql:5.7
|
||||||
environment:
|
environment:
|
||||||
MYSQL_ALLOW_EMPTY_PASSWORD: yes
|
MYSQL_ALLOW_EMPTY_PASSWORD: yes
|
||||||
|
@ -258,32 +86,18 @@ kind: pipeline
|
||||||
name: test-mysql8
|
name: test-mysql8
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-mysql
|
- test-mysql
|
||||||
- test-sqlite
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: restore-cache
|
|
||||||
image: meltwater/drone-cache
|
|
||||||
pull: always
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-mysql8
|
- name: test-mysql8
|
||||||
image: golang:1.15
|
image: golang:1.15
|
||||||
|
pull: never
|
||||||
|
volumes:
|
||||||
|
- name: cache
|
||||||
|
path: /go/pkg/mod
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_MYSQL_HOST: mysql8
|
TEST_MYSQL_HOST: mysql8
|
||||||
TEST_MYSQL_CHARSET: utf8mb4
|
TEST_MYSQL_CHARSET: utf8mb4
|
||||||
TEST_MYSQL_DBNAME: xorm_test
|
TEST_MYSQL_DBNAME: xorm_test
|
||||||
|
@ -293,35 +107,14 @@ steps:
|
||||||
- make test-mysql
|
- make test-mysql
|
||||||
- TEST_CACHE_ENABLE=true make test-mysql
|
- TEST_CACHE_ENABLE=true make test-mysql
|
||||||
- TEST_QUOTE_POLICY=reserved make test-mysql
|
- TEST_QUOTE_POLICY=reserved make test-mysql
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
pull: true
|
|
||||||
depends_on:
|
|
||||||
- test-mysql8
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
temp: {}
|
host:
|
||||||
|
path: /tmp/cache
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- name: mysql8
|
- name: mysql8
|
||||||
pull: default
|
|
||||||
image: mysql:8.0
|
image: mysql:8.0
|
||||||
environment:
|
environment:
|
||||||
MYSQL_ALLOW_EMPTY_PASSWORD: yes
|
MYSQL_ALLOW_EMPTY_PASSWORD: yes
|
||||||
|
@ -332,31 +125,18 @@ kind: pipeline
|
||||||
name: test-mariadb
|
name: test-mariadb
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-mysql8
|
- test-mysql8
|
||||||
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: restore-cache
|
|
||||||
image: meltwater/drone-cache
|
|
||||||
pull: always
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-mariadb
|
- name: test-mariadb
|
||||||
image: golang:1.15
|
image: golang:1.15
|
||||||
|
pull: never
|
||||||
|
volumes:
|
||||||
|
- name: cache
|
||||||
|
path: /go/pkg/mod
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_MYSQL_HOST: mariadb
|
TEST_MYSQL_HOST: mariadb
|
||||||
TEST_MYSQL_CHARSET: utf8mb4
|
TEST_MYSQL_CHARSET: utf8mb4
|
||||||
TEST_MYSQL_DBNAME: xorm_test
|
TEST_MYSQL_DBNAME: xorm_test
|
||||||
|
@ -366,35 +146,14 @@ steps:
|
||||||
- make test-mysql
|
- make test-mysql
|
||||||
- TEST_CACHE_ENABLE=true make test-mysql
|
- TEST_CACHE_ENABLE=true make test-mysql
|
||||||
- TEST_QUOTE_POLICY=reserved make test-mysql
|
- TEST_QUOTE_POLICY=reserved make test-mysql
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
depends_on:
|
|
||||||
- test-mariadb
|
|
||||||
pull: true
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
temp: {}
|
host:
|
||||||
|
path: /tmp/cache
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- name: mariadb
|
- name: mariadb
|
||||||
pull: default
|
|
||||||
image: mariadb:10.4
|
image: mariadb:10.4
|
||||||
environment:
|
environment:
|
||||||
MYSQL_ALLOW_EMPTY_PASSWORD: yes
|
MYSQL_ALLOW_EMPTY_PASSWORD: yes
|
||||||
|
@ -405,32 +164,18 @@ kind: pipeline
|
||||||
name: test-postgres
|
name: test-postgres
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-mariadb
|
- test-mariadb
|
||||||
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: restore-cache
|
- name: test-postgres
|
||||||
image: meltwater/drone-cache
|
pull: never
|
||||||
pull: always
|
image: golang:1.15
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
path: /go
|
path: /go/pkg/mod
|
||||||
|
|
||||||
- name: test-postgres
|
|
||||||
pull: default
|
|
||||||
image: golang:1.15
|
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_PGSQL_HOST: pgsql
|
TEST_PGSQL_HOST: pgsql
|
||||||
TEST_PGSQL_DBNAME: xorm_test
|
TEST_PGSQL_DBNAME: xorm_test
|
||||||
TEST_PGSQL_USERNAME: postgres
|
TEST_PGSQL_USERNAME: postgres
|
||||||
|
@ -438,60 +183,31 @@ steps:
|
||||||
commands:
|
commands:
|
||||||
- make test-postgres
|
- make test-postgres
|
||||||
- TEST_CACHE_ENABLE=true make test-postgres
|
- TEST_CACHE_ENABLE=true make test-postgres
|
||||||
- TEST_QUOTE_POLICY=reserved make test-postgres
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: test-postgres-schema
|
- name: test-postgres-schema
|
||||||
pull: default
|
pull: never
|
||||||
image: golang:1.15
|
image: golang:1.15
|
||||||
|
volumes:
|
||||||
|
- name: cache
|
||||||
|
path: /go/pkg/mod
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-postgres
|
- test-postgres
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_PGSQL_HOST: pgsql
|
TEST_PGSQL_HOST: pgsql
|
||||||
TEST_PGSQL_SCHEMA: xorm
|
TEST_PGSQL_SCHEMA: xorm
|
||||||
TEST_PGSQL_DBNAME: xorm_test
|
TEST_PGSQL_DBNAME: xorm_test
|
||||||
TEST_PGSQL_USERNAME: postgres
|
TEST_PGSQL_USERNAME: postgres
|
||||||
TEST_PGSQL_PASSWORD: postgres
|
TEST_PGSQL_PASSWORD: postgres
|
||||||
commands:
|
commands:
|
||||||
- make test-postgres
|
|
||||||
- TEST_CACHE_ENABLE=true make test-postgres
|
|
||||||
- TEST_QUOTE_POLICY=reserved make test-postgres
|
- TEST_QUOTE_POLICY=reserved make test-postgres
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
pull: true
|
|
||||||
depends_on:
|
|
||||||
- test-postgres-schema
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
temp: {}
|
host:
|
||||||
|
path: /tmp/cache
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- name: pgsql
|
- name: pgsql
|
||||||
pull: default
|
|
||||||
image: postgres:9.5
|
image: postgres:9.5
|
||||||
environment:
|
environment:
|
||||||
POSTGRES_DB: xorm_test
|
POSTGRES_DB: xorm_test
|
||||||
|
@ -503,32 +219,18 @@ kind: pipeline
|
||||||
name: test-mssql
|
name: test-mssql
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-postgres
|
- test-postgres
|
||||||
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: restore-cache
|
- name: test-mssql
|
||||||
image: meltwater/drone-cache
|
pull: never
|
||||||
pull: always
|
image: golang:1.15
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
path: /go
|
path: /go/pkg/mod
|
||||||
|
|
||||||
- name: test-mssql
|
|
||||||
pull: default
|
|
||||||
image: golang:1.15
|
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_MSSQL_HOST: mssql
|
TEST_MSSQL_HOST: mssql
|
||||||
TEST_MSSQL_DBNAME: xorm_test
|
TEST_MSSQL_DBNAME: xorm_test
|
||||||
TEST_MSSQL_USERNAME: sa
|
TEST_MSSQL_USERNAME: sa
|
||||||
|
@ -538,70 +240,38 @@ steps:
|
||||||
- TEST_CACHE_ENABLE=true make test-mssql
|
- TEST_CACHE_ENABLE=true make test-mssql
|
||||||
- TEST_QUOTE_POLICY=reserved make test-mssql
|
- TEST_QUOTE_POLICY=reserved make test-mssql
|
||||||
- TEST_MSSQL_DEFAULT_VARCHAR=NVARCHAR TEST_MSSQL_DEFAULT_CHAR=NCHAR make test-mssql
|
- TEST_MSSQL_DEFAULT_VARCHAR=NVARCHAR TEST_MSSQL_DEFAULT_CHAR=NCHAR make test-mssql
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
pull: true
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
temp: {}
|
host:
|
||||||
|
path: /tmp/cache
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- name: mssql
|
- name: mssql
|
||||||
pull: default
|
pull: always
|
||||||
image: microsoft/mssql-server-linux:latest
|
image: mcr.microsoft.com/mssql/server:latest
|
||||||
environment:
|
environment:
|
||||||
ACCEPT_EULA: Y
|
ACCEPT_EULA: Y
|
||||||
SA_PASSWORD: yourStrong(!)Password
|
SA_PASSWORD: yourStrong(!)Password
|
||||||
MSSQL_PID: Developer
|
MSSQL_PID: Standard
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
name: test-tidb
|
name: test-tidb
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-mssql
|
- test-mssql
|
||||||
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: restore-cache
|
- name: test-tidb
|
||||||
image: meltwater/drone-cache
|
pull: never
|
||||||
pull: always
|
image: golang:1.15
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
path: /go
|
path: /go/pkg/mod
|
||||||
|
|
||||||
- name: test-tidb
|
|
||||||
pull: default
|
|
||||||
image: golang:1.15
|
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_TIDB_HOST: "tidb:4000"
|
TEST_TIDB_HOST: "tidb:4000"
|
||||||
TEST_TIDB_DBNAME: xorm_test
|
TEST_TIDB_DBNAME: xorm_test
|
||||||
TEST_TIDB_USERNAME: root
|
TEST_TIDB_USERNAME: root
|
||||||
|
@ -610,33 +280,14 @@ steps:
|
||||||
- make test-tidb
|
- make test-tidb
|
||||||
- TEST_CACHE_ENABLE=true make test-tidb
|
- TEST_CACHE_ENABLE=true make test-tidb
|
||||||
- TEST_QUOTE_POLICY=reserved make test-tidb
|
- TEST_QUOTE_POLICY=reserved make test-tidb
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
pull: true
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
temp: {}
|
host:
|
||||||
|
path: /tmp/cache
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- name: tidb
|
- name: tidb
|
||||||
pull: default
|
|
||||||
image: pingcap/tidb:v3.0.3
|
image: pingcap/tidb:v3.0.3
|
||||||
|
|
||||||
---
|
---
|
||||||
|
@ -644,32 +295,18 @@ kind: pipeline
|
||||||
name: test-cockroach
|
name: test-cockroach
|
||||||
depends_on:
|
depends_on:
|
||||||
- test-tidb
|
- test-tidb
|
||||||
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: restore-cache
|
- name: test-cockroach
|
||||||
image: meltwater/drone-cache
|
pull: never
|
||||||
pull: always
|
image: golang:1.15
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
restore: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
path: /go
|
path: /go/pkg/mod
|
||||||
|
|
||||||
- name: test-cockroach
|
|
||||||
pull: default
|
|
||||||
image: golang:1.15
|
|
||||||
environment:
|
environment:
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
GOMODCACHE: '/drone/src/pkg.mod'
|
|
||||||
GOCACHE: '/drone/src/pkg.build'
|
|
||||||
TEST_COCKROACH_HOST: "cockroach:26257"
|
TEST_COCKROACH_HOST: "cockroach:26257"
|
||||||
TEST_COCKROACH_DBNAME: xorm_test
|
TEST_COCKROACH_DBNAME: xorm_test
|
||||||
TEST_COCKROACH_USERNAME: root
|
TEST_COCKROACH_USERNAME: root
|
||||||
|
@ -678,33 +315,14 @@ steps:
|
||||||
- sleep 10
|
- sleep 10
|
||||||
- make test-cockroach
|
- make test-cockroach
|
||||||
- TEST_CACHE_ENABLE=true make test-cockroach
|
- TEST_CACHE_ENABLE=true make test-cockroach
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
- name: rebuild-cache
|
|
||||||
image: meltwater/drone-cache:dev
|
|
||||||
pull: true
|
|
||||||
settings:
|
|
||||||
backend: "filesystem"
|
|
||||||
rebuild: true
|
|
||||||
cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
|
|
||||||
archive_format: "gzip"
|
|
||||||
filesystem_cache_root: "/go"
|
|
||||||
mount:
|
|
||||||
- pkg.mod
|
|
||||||
- pkg.build
|
|
||||||
volumes:
|
|
||||||
- name: cache
|
|
||||||
path: /go
|
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: cache
|
- name: cache
|
||||||
temp: {}
|
host:
|
||||||
|
path: /tmp/cache
|
||||||
|
|
||||||
services:
|
services:
|
||||||
- name: cockroach
|
- name: cockroach
|
||||||
pull: default
|
|
||||||
image: cockroachdb/cockroach:v19.2.4
|
image: cockroachdb/cockroach:v19.2.4
|
||||||
commands:
|
commands:
|
||||||
- /cockroach/cockroach start --insecure
|
- /cockroach/cockroach start --insecure
|
||||||
|
@ -713,8 +331,6 @@ services:
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
name: merge_coverage
|
name: merge_coverage
|
||||||
depends_on:
|
depends_on:
|
||||||
- testing
|
|
||||||
- test-sqlite
|
|
||||||
- test-mysql
|
- test-mysql
|
||||||
- test-mysql8
|
- test-mysql8
|
||||||
- test-mariadb
|
- test-mariadb
|
||||||
|
@ -722,18 +338,12 @@ depends_on:
|
||||||
- test-mssql
|
- test-mssql
|
||||||
- test-tidb
|
- test-tidb
|
||||||
- test-cockroach
|
- test-cockroach
|
||||||
|
trigger:
|
||||||
|
ref:
|
||||||
|
- refs/heads/master
|
||||||
|
- refs/pull/*/head
|
||||||
steps:
|
steps:
|
||||||
- name: merge_coverage
|
- name: merge_coverage
|
||||||
pull: default
|
|
||||||
image: golang:1.15
|
image: golang:1.15
|
||||||
environment:
|
|
||||||
GO111MODULE: "on"
|
|
||||||
GOPROXY: "https://goproxy.io"
|
|
||||||
commands:
|
commands:
|
||||||
- make coverage
|
- make coverage
|
||||||
when:
|
|
||||||
branch:
|
|
||||||
- master
|
|
||||||
event:
|
|
||||||
- push
|
|
||||||
- pull_request
|
|
||||||
|
|
|
@@ -36,4 +36,5 @@ test.db.sql
 *coverage.out
 test.db
 integrations/*.sql
 integrations/test_sqlite*
+cover.out

@@ -8,20 +8,22 @@ warningCode = 1
 [rule.context-as-argument]
 [rule.context-keys-type]
 [rule.dot-imports]
+[rule.empty-lines]
+[rule.errorf]
 [rule.error-return]
 [rule.error-strings]
 [rule.error-naming]
 [rule.exported]
 [rule.if-return]
 [rule.increment-decrement]
-[rule.var-naming]
-  arguments = [["ID", "UID", "UUID", "URL", "JSON"], []]
-[rule.var-declaration]
+[rule.indent-error-flow]
 [rule.package-comments]
 [rule.range]
 [rule.receiver-naming]
+[rule.struct-tag]
 [rule.time-naming]
 [rule.unexported-return]
-[rule.indent-error-flow]
-[rule.errorf]
-[rule.struct-tag]
+[rule.unnecessary-stmt]
+[rule.var-declaration]
+[rule.var-naming]
+  arguments = [["ID", "UID", "UUID", "URL", "JSON"], []]

@@ -3,6 +3,36 @@
 This changelog goes through all the changes that have been made in each release
 without substantial changes to our git log.

+## [1.1.1](https://gitea.com/xorm/xorm/releases/tag/1.1.1) - 2021-07-03
+
+* BUGFIXES
+  * Ignore comments when deciding when to replace question marks. #1954 (#1955)
+  * Fix bug didn't reset statement on update (#1939)
+  * Fix create table with struct missing columns (#1938)
+  * Fix #929 (#1936)
+  * Fix exist (#1921)
+* ENHANCEMENTS
+  * Improve get field value of bean (#1961)
+  * refactor splitTag function (#1960)
+  * Fix #1663 (#1952)
+  * fix pg GetColumns missing comment (#1949)
+  * Support build flag jsoniter to replace default json (#1916)
+  * refactor exprParam (#1825)
+  * Add DBVersion (#1723)
+* TESTING
+  * Add test to confirm #1247 resolved (#1951)
+  * Add test for dump table with default value (#1950)
+  * Test for #1486 (#1942)
+  * Add sync tests to confirm #539 is gone (#1937)
+  * test for unsigned int32 (#1923)
+  * Add tests for array store (#1922)
+* BUILD
+  * Remove mymysql from ci (#1928)
+* MISC
+  * fix lint (#1953)
+  * Compitable with cockroach (#1930)
+  * Replace goracle with godror (#1914)
+
 ## [1.1.0](https://gitea.com/xorm/xorm/releases/tag/1.1.0) - 2021-05-14

 * FEATURES

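Of the 1.1.1 entries above, "Add DBVersion (#1723)" is the most visible new API for callers of the vendored library; the hunks further down add a Version method to every dialect and expose it on the engine. A minimal usage sketch follows — the driver and DSN are placeholders, not taken from this commit:

package main

import (
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
	"xorm.io/xorm"
)

func main() {
	// Placeholder DSN; point it at any database xorm supports.
	engine, err := xorm.NewEngine("mysql", "root:@tcp(127.0.0.1:3306)/xorm_test")
	if err != nil {
		log.Fatal(err)
	}
	// DBVersion is the engine-level wrapper added in this release; it returns
	// a *schemas.Version with Number, Level and Edition fields.
	v, err := engine.DBVersion()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Number, v.Level, v.Edition)
}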
@@ -175,7 +175,10 @@ func convertAssign(dest, src interface{}) error {
 		return nil
 	}

-	dpv := reflect.ValueOf(dest)
+	return convertAssignV(reflect.ValueOf(dest), src)
+}
+
+func convertAssignV(dpv reflect.Value, src interface{}) error {
 	if dpv.Kind() != reflect.Ptr {
 		return errors.New("destination not a pointer")
 	}
@@ -183,9 +186,7 @@ func convertAssign(dest, src interface{}) error {
 		return errNilPtr
 	}

-	if !sv.IsValid() {
-		sv = reflect.ValueOf(src)
-	}
+	var sv = reflect.ValueOf(src)

 	dv := reflect.Indirect(dpv)
 	if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
@@ -244,7 +245,7 @@ func convertAssign(dest, src interface{}) error {
 		return nil
 	}

-	return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
+	return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dpv.Interface())
 }

 func asKind(vv reflect.Value, tp reflect.Type) (interface{}, error) {
@@ -375,48 +376,3 @@ func str2PK(s string, tp reflect.Type) (interface{}, error) {
 	}
 	return v.Interface(), nil
 }
-
-func int64ToIntValue(id int64, tp reflect.Type) reflect.Value {
-	var v interface{}
-	kind := tp.Kind()
-
-	if kind == reflect.Ptr {
-		kind = tp.Elem().Kind()
-	}
-
-	switch kind {
-	case reflect.Int16:
-		temp := int16(id)
-		v = &temp
-	case reflect.Int32:
-		temp := int32(id)
-		v = &temp
-	case reflect.Int:
-		temp := int(id)
-		v = &temp
-	case reflect.Int64:
-		temp := id
-		v = &temp
-	case reflect.Uint16:
-		temp := uint16(id)
-		v = &temp
-	case reflect.Uint32:
-		temp := uint32(id)
-		v = &temp
-	case reflect.Uint64:
-		temp := uint64(id)
-		v = &temp
-	case reflect.Uint:
-		temp := uint(id)
-		v = &temp
-	}
-
-	if tp.Kind() == reflect.Ptr {
-		return reflect.ValueOf(v).Convert(tp)
-	}
-	return reflect.ValueOf(v).Elem().Convert(tp)
-}
-
-func int64ToInt(id int64, tp reflect.Type) interface{} {
-	return int64ToIntValue(id, tp).Interface()
-}

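Net effect of the hunks above: convertAssign now just wraps the new convertAssignV, which takes an already-built reflect.Value, and the unused int64ToIntValue/int64ToInt helpers are dropped. A package-internal, test-style sketch of the assignable-type path (a hypothetical test file; both functions are unexported, so this only illustrates the call shape):

package xorm // assumed to match convert.go's package; adjust if it differs

import (
	"reflect"
	"testing"
)

func TestConvertAssignV_Assignable(t *testing.T) {
	var dst int64
	// int64 into *int64 takes the sv.Type().AssignableTo(dv.Type()) branch.
	if err := convertAssignV(reflect.ValueOf(&dst), int64(42)); err != nil {
		t.Fatal(err)
	}
	if dst != 42 {
		t.Fatalf("got %d, want 42", dst)
	}
}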
@@ -44,6 +44,7 @@ type Dialect interface {
 	URI() *URI
 	SQLType(*schemas.Column) string
 	FormatBytes(b []byte) string
+	Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error)

 	IsReserved(string) bool
 	Quoter() schemas.Quoter
@@ -217,7 +218,7 @@ func regDrvsNDialects() bool {
 		"sqlite3": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }},
 		"sqlite": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }},
 		"oci8": {"oracle", func() Driver { return &oci8Driver{} }, func() Dialect { return &oracle{} }},
-		"goracle": {"oracle", func() Driver { return &goracleDriver{} }, func() Dialect { return &oracle{} }},
+		"godror": {"oracle", func() Driver { return &godrorDriver{} }, func() Dialect { return &oracle{} }},
 	}

 	for driverName, v := range providedDrvsNDialects {

@@ -23,13 +23,45 @@ type SeqFilter struct {
 func convertQuestionMark(sql, prefix string, start int) string {
 	var buf strings.Builder
 	var beginSingleQuote bool
+	var isLineComment bool
+	var isComment bool
+	var isMaybeLineComment bool
+	var isMaybeComment bool
+	var isMaybeCommentEnd bool
 	var index = start
 	for _, c := range sql {
-		if !beginSingleQuote && c == '?' {
+		if !beginSingleQuote && !isLineComment && !isComment && c == '?' {
 			buf.WriteString(fmt.Sprintf("%s%v", prefix, index))
 			index++
 		} else {
-			if c == '\'' {
+			if isMaybeLineComment {
+				if c == '-' {
+					isLineComment = true
+				}
+				isMaybeLineComment = false
+			} else if isMaybeComment {
+				if c == '*' {
+					isComment = true
+				}
+				isMaybeComment = false
+			} else if isMaybeCommentEnd {
+				if c == '/' {
+					isComment = false
+				}
+				isMaybeCommentEnd = false
+			} else if isLineComment {
+				if c == '\n' {
+					isLineComment = false
+				}
+			} else if isComment {
+				if c == '*' {
+					isMaybeCommentEnd = true
+				}
+			} else if !beginSingleQuote && c == '-' {
+				isMaybeLineComment = true
+			} else if !beginSingleQuote && c == '/' {
+				isMaybeComment = true
+			} else if c == '\'' {
 				beginSingleQuote = !beginSingleQuote
 			}
 			buf.WriteRune(c)

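The new state flags make the placeholder rewrite skip "--" line comments and "/* */" block comments, which is the "Ignore comments when deciding when to replace question marks" fix from the changelog. A package-internal sketch of the expected behaviour for a "$"-style sequence filter (a hypothetical test file; the expected strings follow from the state machine above):

package dialects // assumed package of filter.go

import "testing"

func TestConvertQuestionMarkSkipsComments(t *testing.T) {
	cases := map[string]string{
		// placeholders inside comments are left untouched
		"SELECT ?, /* ? */ ?":            "SELECT $1, /* ? */ $2",
		"SELECT ? FROM t -- where x = ?": "SELECT $1 FROM t -- where x = ?",
		// single-quoted question marks were already skipped before this change
		"SELECT '?', ?": "SELECT '?', $1",
	}
	for in, want := range cases {
		if got := convertQuestionMark(in, "$", 1); got != want {
			t.Errorf("convertQuestionMark(%q) = %q, want %q", in, got, want)
		}
	}
}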
@@ -253,6 +253,31 @@ func (db *mssql) SetParams(params map[string]string) {
 	}
 }

+func (db *mssql) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+	rows, err := queryer.QueryContext(ctx,
+		"SELECT SERVERPROPERTY('productversion'), SERVERPROPERTY ('productlevel') AS ProductLevel, SERVERPROPERTY ('edition') AS ProductEdition")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var version, level, edition string
+	if !rows.Next() {
+		return nil, errors.New("unknow version")
+	}
+
+	if err := rows.Scan(&version, &level, &edition); err != nil {
+		return nil, err
+	}
+
+	// MSSQL: Microsoft SQL Server 2017 (RTM-CU13) (KB4466404) - 14.0.3048.4 (X64) Nov 30 2018 12:57:58 Copyright (C) 2017 Microsoft Corporation Developer Edition (64-bit) on Linux (Ubuntu 16.04.5 LTS)
+	return &schemas.Version{
+		Number:  version,
+		Level:   level,
+		Edition: edition,
+	}, nil
+}
+
 func (db *mssql) SQLType(c *schemas.Column) string {
 	var res string
 	switch t := c.SQLType.Name; t {
@@ -284,7 +309,7 @@ func (db *mssql) SQLType(c *schemas.Column) string {
 	case schemas.TimeStampz:
 		res = "DATETIMEOFFSET"
 		c.Length = 7
-	case schemas.MediumInt, schemas.UnsignedInt:
+	case schemas.MediumInt:
 		res = schemas.Int
 	case schemas.Text, schemas.MediumText, schemas.TinyText, schemas.LongText, schemas.Json:
 		res = db.defaultVarchar + "(MAX)"
@@ -296,7 +321,7 @@ func (db *mssql) SQLType(c *schemas.Column) string {
 	case schemas.TinyInt:
 		res = schemas.TinyInt
 		c.Length = 0
-	case schemas.BigInt, schemas.UnsignedBigInt:
+	case schemas.BigInt, schemas.UnsignedBigInt, schemas.UnsignedInt:
 		res = schemas.BigInt
 		c.Length = 0
 	case schemas.NVarchar:

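Besides the new Version query, the SQLType hunks move schemas.UnsignedInt from the INT case to the BIGINT case, so an unsigned 32-bit Go field gets a column wide enough for its full range. A hedged sketch of what that means at sync time, assuming xorm's usual mapping of uint32 to its UnsignedInt SQL type (the model and DSN below are made up):

package main

import (
	"log"

	_ "github.com/denisenkom/go-mssqldb"
	"xorm.io/xorm"
)

// Stats is a made-up model: with the vendored v1.1.1, Counter is created as
// BIGINT on MSSQL (and on PostgreSQL, per the matching hunk below) instead of INT.
type Stats struct {
	Id      int64
	Counter uint32
}

func main() {
	// Placeholder DSN.
	engine, err := xorm.NewEngine("mssql", "sqlserver://sa:MyPassw0rd@127.0.0.1:1433?database=xorm_test")
	if err != nil {
		log.Fatal(err)
	}
	if err := engine.Sync2(new(Stats)); err != nil {
		log.Fatal(err)
	}
}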
@@ -188,6 +188,43 @@ func (db *mysql) Init(uri *URI) error {
 	return db.Base.Init(db, uri)
 }

+func (db *mysql) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+	rows, err := queryer.QueryContext(ctx, "SELECT @@VERSION")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var version string
+	if !rows.Next() {
+		return nil, errors.New("Unknow version")
+	}
+
+	if err := rows.Scan(&version); err != nil {
+		return nil, err
+	}
+
+	fields := strings.Split(version, "-")
+	if len(fields) == 3 && fields[1] == "TiDB" {
+		// 5.7.25-TiDB-v3.0.3
+		return &schemas.Version{
+			Number:  strings.TrimPrefix(fields[2], "v"),
+			Level:   fields[0],
+			Edition: fields[1],
+		}, nil
+	}
+
+	var edition string
+	if len(fields) == 2 {
+		edition = fields[1]
+	}
+
+	return &schemas.Version{
+		Number:  fields[0],
+		Edition: edition,
+	}, nil
+}
+
 func (db *mysql) SetParams(params map[string]string) {
 	rowFormat, ok := params["rowFormat"]
 	if ok {

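The MySQL implementation above splits the SELECT @@VERSION banner on "-"; the banners below are illustrative, but the resulting schemas.Version values follow directly from that code:

// "8.0.25"                       -> Version{Number: "8.0.25"}                                   (no dash: plain MySQL)
// "10.4.19-MariaDB" (two fields) -> Version{Number: "10.4.19", Edition: "MariaDB"}
// "5.7.25-TiDB-v3.0.3"           -> Version{Number: "3.0.3", Level: "5.7.25", Edition: "TiDB"}
// any other shape                -> Version{Number: fields[0]} with Edition left empty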
@@ -515,6 +515,26 @@ func (db *oracle) Init(uri *URI) error {
 	return db.Base.Init(db, uri)
 }

+func (db *oracle) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+	rows, err := queryer.QueryContext(ctx, "select * from v$version where banner like 'Oracle%'")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var version string
+	if !rows.Next() {
+		return nil, errors.New("unknow version")
+	}
+
+	if err := rows.Scan(&version); err != nil {
+		return nil, err
+	}
+	return &schemas.Version{
+		Number: version,
+	}, nil
+}
+
 func (db *oracle) SQLType(c *schemas.Column) string {
 	var res string
 	switch t := c.SQLType.Name; t {
@@ -802,10 +822,10 @@ func (db *oracle) Filters() []Filter {
 	}
 }

-type goracleDriver struct {
+type godrorDriver struct {
 }

-func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*URI, error) {
+func (cfg *godrorDriver) Parse(driverName, dataSourceName string) (*URI, error) {
 	db := &URI{DBType: schemas.ORACLE}
 	dsnPattern := regexp.MustCompile(
 		`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]

@@ -788,6 +788,42 @@ func (db *postgres) Init(uri *URI) error {
 	return db.Base.Init(db, uri)
 }

+func (db *postgres) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+	rows, err := queryer.QueryContext(ctx, "SELECT version()")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var version string
+	if !rows.Next() {
+		return nil, errors.New("Unknow version")
+	}
+
+	if err := rows.Scan(&version); err != nil {
+		return nil, err
+	}
+
+	// Postgres: 9.5.22 on x86_64-pc-linux-gnu (Debian 9.5.22-1.pgdg90+1), compiled by gcc (Debian 6.3.0-18+deb9u1) 6.3.0 20170516, 64-bit
+	// CockroachDB CCL v19.2.4 (x86_64-unknown-linux-gnu, built
+	if strings.HasPrefix(version, "CockroachDB") {
+		versions := strings.Split(strings.TrimPrefix(version, "CockroachDB CCL "), " ")
+		return &schemas.Version{
+			Number:  strings.TrimPrefix(versions[0], "v"),
+			Edition: "CockroachDB",
+		}, nil
+	} else if strings.HasPrefix(version, "PostgreSQL") {
+		versions := strings.Split(strings.TrimPrefix(version, "PostgreSQL "), " on ")
+		return &schemas.Version{
+			Number:  versions[0],
+			Level:   versions[1],
+			Edition: "PostgreSQL",
+		}, nil
+	}
+
+	return nil, errors.New("unknow database version")
+}
+
 func (db *postgres) getSchema() string {
 	if db.uri.Schema != "" {
 		return db.uri.Schema
@@ -838,12 +874,12 @@ func (db *postgres) SQLType(c *schemas.Column) string {
 	case schemas.Bit:
 		res = schemas.Boolean
 		return res
-	case schemas.MediumInt, schemas.Int, schemas.Integer, schemas.UnsignedInt:
+	case schemas.MediumInt, schemas.Int, schemas.Integer:
 		if c.IsAutoIncrement {
 			return schemas.Serial
 		}
 		return schemas.Integer
-	case schemas.BigInt, schemas.UnsignedBigInt:
+	case schemas.BigInt, schemas.UnsignedBigInt, schemas.UnsignedInt:
 		if c.IsAutoIncrement {
 			return schemas.BigSerial
 		}
@@ -1008,12 +1044,13 @@ func (db *postgres) IsColumnExist(queryer core.Queryer, ctx context.Context, tab

 func (db *postgres) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) {
 	args := []interface{}{tableName}
-	s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length,
+	s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, description,
 CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey,
 CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey
 FROM pg_attribute f
 JOIN pg_class c ON c.oid = f.attrelid JOIN pg_type t ON t.oid = f.atttypid
 LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
+LEFT JOIN pg_description de ON f.attrelid=de.objoid AND f.attnum=de.objsubid
 LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
 LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
 LEFT JOIN pg_class AS g ON p.confrelid = g.oid
@@ -1042,9 +1079,9 @@ WHERE n.nspname= s.table_schema AND c.relkind = 'r'::char AND c.relname = $1%s A
 		col.Indexes = make(map[string]int)

 		var colName, isNullable, dataType string
-		var maxLenStr, colDefault *string
+		var maxLenStr, colDefault, description *string
 		var isPK, isUnique bool
-		err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &isPK, &isUnique)
+		err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &description, &isPK, &isUnique)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -1090,6 +1127,10 @@ WHERE n.nspname= s.table_schema AND c.relkind = 'r'::char AND c.relname = $1%s A
 			col.DefaultIsEmpty = true
 		}

+		if description != nil {
+			col.Comment = *description
+		}
+
 		if isPK {
 			col.IsPrimaryKey = true
 		}
@@ -1221,7 +1262,8 @@ func (db *postgres) GetIndexes(queryer core.Queryer, ctx context.Context, tableN
 			continue
 		}
 		indexName = strings.Trim(indexName, `" `)
-		if strings.HasSuffix(indexName, "_pkey") {
+		// ignore primary index
+		if strings.HasSuffix(indexName, "_pkey") || strings.EqualFold(indexName, "primary") {
 			continue
 		}
 		if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") {
@@ -1241,7 +1283,9 @@ func (db *postgres) GetIndexes(queryer core.Queryer, ctx context.Context, tableN

 		index := &schemas.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)}
 		for _, colName := range colNames {
-			index.Cols = append(index.Cols, strings.TrimSpace(strings.Replace(colName, `"`, "", -1)))
+			col := strings.TrimSpace(strings.Replace(colName, `"`, "", -1))
+			fields := strings.Split(col, " ")
+			index.Cols = append(index.Cols, fields[0])
 		}
 		index.IsRegular = isRegular
 		indexes[index.Name] = index

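Two things worth calling out in the PostgreSQL hunks: GetColumns now joins pg_description, so column comments land in col.Comment, and Version distinguishes CockroachDB from PostgreSQL by the SELECT version() banner. The expected parse results, derived from the code and the example banners in its comments:

// "PostgreSQL 9.5.22 on x86_64-pc-linux-gnu, compiled by gcc ..." ->
//     Version{Number: "9.5.22", Level: "x86_64-pc-linux-gnu, compiled by gcc ...", Edition: "PostgreSQL"}
// "CockroachDB CCL v19.2.4 (x86_64-unknown-linux-gnu, built ...)" ->
//     Version{Number: "19.2.4", Edition: "CockroachDB"}
// any other banner -> error ("unknow database version")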
@@ -160,6 +160,27 @@ func (db *sqlite3) Init(uri *URI) error {
 	return db.Base.Init(db, uri)
 }

+func (db *sqlite3) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+	rows, err := queryer.QueryContext(ctx, "SELECT sqlite_version()")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var version string
+	if !rows.Next() {
+		return nil, errors.New("Unknow version")
+	}
+
+	if err := rows.Scan(&version); err != nil {
+		return nil, err
+	}
+	return &schemas.Version{
+		Number:  version,
+		Edition: "sqlite",
+	}, nil
+}
+
 func (db *sqlite3) SetQuotePolicy(quotePolicy QuotePolicy) {
 	switch quotePolicy {
 	case QuotePolicyNone:

@@ -444,7 +444,7 @@ func (engine *Engine) DumpTables(tables []*schemas.Table, w io.Writer, tp ...sch
 	return engine.dumpTables(tables, w, tp...)
 }

-func formatColumnValue(dstDialect dialects.Dialect, d interface{}, col *schemas.Column) string {
+func formatColumnValue(dbLocation *time.Location, dstDialect dialects.Dialect, d interface{}, col *schemas.Column) string {
 	if d == nil {
 		return "NULL"
 	}
@@ -473,10 +473,8 @@ func formatColumnValue(dstDialect dialects.Dialect, d interface{}, col *schemas.

 		return "'" + strings.Replace(v, "'", "''", -1) + "'"
 	} else if col.SQLType.IsTime() {
-		if dstDialect.URI().DBType == schemas.MSSQL && col.SQLType.Name == schemas.DateTime {
-			if t, ok := d.(time.Time); ok {
-				return "'" + t.UTC().Format("2006-01-02 15:04:05") + "'"
-			}
+		if t, ok := d.(time.Time); ok {
+			return "'" + t.In(dbLocation).Format("2006-01-02 15:04:05") + "'"
 		}
 		var v = fmt.Sprintf("%s", d)
 		if strings.HasSuffix(v, " +0000 UTC") {
@@ -652,12 +650,8 @@ func (engine *Engine) dumpTables(tables []*schemas.Table, w io.Writer, tp ...sch
 				return errors.New("unknown column error")
 			}

-			fields := strings.Split(col.FieldName, ".")
-			field := dataStruct
-			for _, fieldName := range fields {
-				field = field.FieldByName(fieldName)
-			}
-			temp += "," + formatColumnValue(dstDialect, field.Interface(), col)
+			field := dataStruct.FieldByIndex(col.FieldIndex)
+			temp += "," + formatColumnValue(engine.DatabaseTZ, dstDialect, field.Interface(), col)
 		}
 		_, err = io.WriteString(w, temp[1:]+");\n")
 		if err != nil {
@@ -684,7 +678,7 @@ func (engine *Engine) dumpTables(tables []*schemas.Table, w io.Writer, tp ...sch
 				return errors.New("unknow column error")
 			}

-			temp += "," + formatColumnValue(dstDialect, d, col)
+			temp += "," + formatColumnValue(engine.DatabaseTZ, dstDialect, d, col)
 		}
 		_, err = io.WriteString(w, temp[1:]+");\n")
 		if err != nil {
@@ -925,15 +919,9 @@ func (engine *Engine) Having(conditions string) *Session {
 	return session.Having(conditions)
 }

-// Table table struct
-type Table struct {
-	*schemas.Table
-	Name string
-}
-
-// IsValid if table is valid
-func (t *Table) IsValid() bool {
-	return t.Table != nil && len(t.Name) > 0
+// DBVersion returns the database version
+func (engine *Engine) DBVersion() (*schemas.Version, error) {
+	return engine.dialect.Version(engine.defaultContext, engine.db)
 }

 // TableInfo get table info according to bean's content

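In the engine hunks, formatColumnValue now receives the engine's DatabaseTZ and formats every time column in that location (previously only MSSQL DATETIME was special-cased to UTC), and dump rows resolve fields via col.FieldIndex instead of walking FieldName. A usage sketch for the dump path; DBMetas is assumed to be the usual xorm call for listing mapped tables, and the driver/DSN are placeholders:

package main

import (
	"log"
	"os"
	"time"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", "./test.db")
	if err != nil {
		log.Fatal(err)
	}
	// Times in the dump are rendered in DatabaseTZ rather than forced to UTC.
	engine.DatabaseTZ = time.UTC

	f, err := os.Create("dump.sql")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tables, err := engine.DBMetas() // assumed standard API for listing mapped tables
	if err != nil {
		log.Fatal(err)
	}
	if err := engine.DumpTables(tables, f); err != nil {
		log.Fatal(err)
	}
}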
@@ -5,6 +5,7 @@ go 1.13
 require (
 	github.com/denisenkom/go-mssqldb v0.9.0
 	github.com/go-sql-driver/mysql v1.5.0
+	github.com/json-iterator/go v1.1.11
 	github.com/lib/pq v1.7.0
 	github.com/mattn/go-sqlite3 v1.14.6
 	github.com/stretchr/testify v1.4.0

@@ -1,7 +1,8 @@
 gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
 gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/denisenkom/go-mssqldb v0.9.0 h1:RSohk2RsiZqLZ0zCjtfn3S4Gp4exhpBWHyQ7D0yGjAk=
 github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
@@ -18,8 +19,11 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pO
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
@@ -28,6 +32,10 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
 github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -83,6 +83,7 @@ type EngineInterface interface {
 	Context(context.Context) *Session
 	CreateTables(...interface{}) error
 	DBMetas() ([]*schemas.Table, error)
+	DBVersion() (*schemas.Version, error)
 	Dialect() dialects.Dialect
 	DriverName() string
 	DropTables(...interface{}) error
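DBVersion is now part of both Engine and EngineInterface. A minimal sketch of calling it; the driver and DSN below are invented for illustration:

package main

import (
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
	"xorm.io/xorm"
)

func main() {
	engine, err := xorm.NewEngine("mysql", "user:pass@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	// The dialect reports the server version; Number is comparable,
	// Level and Edition are filled in where the backend provides them.
	v, err := engine.DBVersion()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Number, v.Level, v.Edition)
}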
@@ -0,0 +1,28 @@
+// Copyright 2021 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build jsoniter
+
+package json
+
+import (
+	jsoniter "github.com/json-iterator/go"
+)
+
+func init() {
+	DefaultJSONHandler = JSONiter{}
+}
+
+// JSONiter implements JSONInterface via jsoniter
+type JSONiter struct{}
+
+// Marshal implements JSONInterface
+func (JSONiter) Marshal(v interface{}) ([]byte, error) {
+	return jsoniter.Marshal(v)
+}
+
+// Unmarshal implements JSONInterface
+func (JSONiter) Unmarshal(data []byte, v interface{}) error {
+	return jsoniter.Unmarshal(data, v)
+}
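The file above only compiles under the jsoniter build tag, so switching xorm's JSON codec to json-iterator is an opt-in, build-time choice, e.g. go build -tags jsoniter ./... ; without the tag the standard encoding/json handler stays in place.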
@@ -0,0 +1,94 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+import (
+	"fmt"
+	"strings"
+
+	"xorm.io/builder"
+	"xorm.io/xorm/schemas"
+)
+
+// ErrUnsupportedExprType represents an error with unsupported express type
+type ErrUnsupportedExprType struct {
+	tp string
+}
+
+func (err ErrUnsupportedExprType) Error() string {
+	return fmt.Sprintf("Unsupported expression type: %v", err.tp)
+}
+
+// Expr represents an SQL express
+type Expr struct {
+	ColName string
+	Arg     interface{}
+}
+
+// WriteArgs writes args to the writer
+func (expr *Expr) WriteArgs(w *builder.BytesWriter) error {
+	switch arg := expr.Arg.(type) {
+	case *builder.Builder:
+		if _, err := w.WriteString("("); err != nil {
+			return err
+		}
+		if err := arg.WriteTo(w); err != nil {
+			return err
+		}
+		if _, err := w.WriteString(")"); err != nil {
+			return err
+		}
+	case string:
+		if arg == "" {
+			arg = "''"
+		}
+		if _, err := w.WriteString(fmt.Sprintf("%v", arg)); err != nil {
+			return err
+		}
+	default:
+		if _, err := w.WriteString("?"); err != nil {
+			return err
+		}
+		w.Append(arg)
+	}
+	return nil
+}
+
+type exprParams []Expr
+
+func (exprs exprParams) ColNames() []string {
+	var cols = make([]string, 0, len(exprs))
+	for _, expr := range exprs {
+		cols = append(cols, expr.ColName)
+	}
+	return cols
+}
+
+func (exprs *exprParams) Add(name string, arg interface{}) {
+	*exprs = append(*exprs, Expr{name, arg})
+}
+
+func (exprs exprParams) IsColExist(colName string) bool {
+	for _, expr := range exprs {
+		if strings.EqualFold(schemas.CommonQuoter.Trim(expr.ColName), schemas.CommonQuoter.Trim(colName)) {
+			return true
+		}
+	}
+	return false
+}
+
+func (exprs exprParams) WriteArgs(w *builder.BytesWriter) error {
+	for i, expr := range exprs {
+		if err := expr.WriteArgs(w); err != nil {
+			return err
+		}
+		if i != len(exprs)-1 {
+			if _, err := w.WriteString(","); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
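exprParams above is internal to the statements package; it is fed through the public Incr, Decr and SetExpr builders. An illustrative sketch, with made-up table and column names:

package example

import "xorm.io/xorm"

// bumpVisits sketches how the new expression parameters are used:
// Incr appends Expr{"visits", 1} and SetExpr appends
// Expr{"updated", "CURRENT_TIMESTAMP"}, which later render as
// "visits = visits + ?" and "updated=CURRENT_TIMESTAMP".
func bumpVisits(engine *xorm.Engine, id int64) (int64, error) {
	return engine.Table("page").
		Where("id = ?", id).
		Incr("visits").
		SetExpr("updated", "CURRENT_TIMESTAMP").
		Update(map[string]interface{}{})
}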
@@ -1,127 +0,0 @@
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package statements
-
-import (
-	"fmt"
-	"strings"
-
-	"xorm.io/builder"
-	"xorm.io/xorm/schemas"
-)
-
-// ErrUnsupportedExprType represents an error with unsupported express type
-type ErrUnsupportedExprType struct {
-	tp string
-}
-
-func (err ErrUnsupportedExprType) Error() string {
-	return fmt.Sprintf("Unsupported expression type: %v", err.tp)
-}
-
-type exprParam struct {
-	colName string
-	arg     interface{}
-}
-
-type exprParams struct {
-	ColNames []string
-	Args     []interface{}
-}
-
-func (exprs *exprParams) Len() int {
-	return len(exprs.ColNames)
-}
-
-func (exprs *exprParams) addParam(colName string, arg interface{}) {
-	exprs.ColNames = append(exprs.ColNames, colName)
-	exprs.Args = append(exprs.Args, arg)
-}
-
-func (exprs *exprParams) IsColExist(colName string) bool {
-	for _, name := range exprs.ColNames {
-		if strings.EqualFold(schemas.CommonQuoter.Trim(name), schemas.CommonQuoter.Trim(colName)) {
-			return true
-		}
-	}
-	return false
-}
-
-func (exprs *exprParams) getByName(colName string) (exprParam, bool) {
-	for i, name := range exprs.ColNames {
-		if strings.EqualFold(name, colName) {
-			return exprParam{name, exprs.Args[i]}, true
-		}
-	}
-	return exprParam{}, false
-}
-
-func (exprs *exprParams) WriteArgs(w *builder.BytesWriter) error {
-	for i, expr := range exprs.Args {
-		switch arg := expr.(type) {
-		case *builder.Builder:
-			if _, err := w.WriteString("("); err != nil {
-				return err
-			}
-			if err := arg.WriteTo(w); err != nil {
-				return err
-			}
-			if _, err := w.WriteString(")"); err != nil {
-				return err
-			}
-		case string:
-			if arg == "" {
-				arg = "''"
-			}
-			if _, err := w.WriteString(fmt.Sprintf("%v", arg)); err != nil {
-				return err
-			}
-		default:
-			if _, err := w.WriteString("?"); err != nil {
-				return err
-			}
-			w.Append(arg)
-		}
-		if i != len(exprs.Args)-1 {
-			if _, err := w.WriteString(","); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (exprs *exprParams) writeNameArgs(w *builder.BytesWriter) error {
-	for i, colName := range exprs.ColNames {
-		if _, err := w.WriteString(colName); err != nil {
-			return err
-		}
-		if _, err := w.WriteString("="); err != nil {
-			return err
-		}
-
-		switch arg := exprs.Args[i].(type) {
-		case *builder.Builder:
-			if _, err := w.WriteString("("); err != nil {
-				return err
-			}
-			if err := arg.WriteTo(w); err != nil {
-				return err
-			}
-			if _, err := w.WriteString("("); err != nil {
-				return err
-			}
-		default:
-			w.Append(exprs.Args[i])
-		}
-
-		if i+1 != len(exprs.ColNames) {
-			if _, err := w.WriteString(","); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
@@ -17,7 +17,7 @@ func (statement *Statement) writeInsertOutput(buf *strings.Builder, table *schem
 		if _, err := buf.WriteString(" OUTPUT Inserted."); err != nil {
 			return err
 		}
-		if _, err := buf.WriteString(table.AutoIncrement); err != nil {
+		if err := statement.dialect.Quoter().QuoteTo(buf, table.AutoIncrement); err != nil {
 			return err
 		}
 	}
@@ -59,7 +59,7 @@ func (statement *Statement) GenInsertSQL(colNames []string, args []interface{})
 		return "", nil, err
 	}
 
-	if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames...), ","); err != nil {
+	if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames()...), ","); err != nil {
 		return "", nil, err
 	}
 
@@ -79,7 +79,7 @@ func (statement *Statement) GenInsertSQL(colNames []string, args []interface{})
 			return "", nil, err
 		}
 
-		if len(exprs.Args) > 0 {
+		if len(exprs) > 0 {
 			if _, err := buf.WriteString(","); err != nil {
 				return "", nil, err
 			}
@@ -112,7 +112,7 @@ func (statement *Statement) GenInsertSQL(colNames []string, args []interface{})
 			return "", nil, err
 		}
 
-		if len(exprs.Args) > 0 {
+		if len(exprs) > 0 {
 			if _, err := buf.WriteString(","); err != nil {
 				return "", nil, err
 			}
@@ -152,7 +152,7 @@ func (statement *Statement) GenInsertMapSQL(columns []string, args []interface{}
 		return "", nil, err
 	}
 
-	if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames...), ","); err != nil {
+	if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames()...), ","); err != nil {
 		return "", nil, err
 	}
 
@@ -166,7 +166,7 @@ func (statement *Statement) GenInsertMapSQL(columns []string, args []interface{}
 			return "", nil, err
 		}
 
-		if len(exprs.Args) > 0 {
+		if len(exprs) > 0 {
 			if _, err := buf.WriteString(","); err != nil {
 				return "", nil, err
 			}
@@ -190,7 +190,7 @@ func (statement *Statement) GenInsertMapSQL(columns []string, args []interface{}
 			return "", nil, err
 		}
 
-		if len(exprs.Args) > 0 {
+		if len(exprs) > 0 {
 			if _, err := buf.WriteString(","); err != nil {
 				return "", nil, err
 			}
@@ -106,10 +106,13 @@ func (statement *Statement) GenSumSQL(bean interface{}, columns ...string) (stri
 
 // GenGetSQL generates Get SQL
 func (statement *Statement) GenGetSQL(bean interface{}) (string, []interface{}, error) {
-	v := rValue(bean)
-	isStruct := v.Kind() == reflect.Struct
-	if isStruct {
-		statement.SetRefBean(bean)
+	var isStruct bool
+	if bean != nil {
+		v := rValue(bean)
+		isStruct = v.Kind() == reflect.Struct
+		if isStruct {
+			statement.SetRefBean(bean)
+		}
 	}
 
 	var columnStr = statement.ColumnStr()
@@ -181,11 +184,22 @@ func (statement *Statement) GenCountSQL(beans ...interface{}) (string, []interfa
 			selectSQL = "count(*)"
 		}
 	}
-	sqlStr, condArgs, err := statement.genSelectSQL(selectSQL, false, false)
+	var subQuerySelect string
+	if statement.GroupByStr != "" {
+		subQuerySelect = statement.GroupByStr
+	} else {
+		subQuerySelect = selectSQL
+	}
+
+	sqlStr, condArgs, err := statement.genSelectSQL(subQuerySelect, false, false)
 	if err != nil {
 		return "", nil, err
 	}
 
+	if statement.GroupByStr != "" {
+		sqlStr = fmt.Sprintf("SELECT %s FROM (%s) sub", selectSQL, sqlStr)
+	}
+
 	return sqlStr, append(statement.joinArgs, condArgs...), nil
 }
 
@@ -329,12 +343,25 @@ func (statement *Statement) GenExistSQL(bean ...interface{}) (string, []interfac
 	var args []interface{}
 	var joinStr string
 	var err error
-	if len(bean) == 0 {
-		tableName := statement.TableName()
-		if len(tableName) <= 0 {
-			return "", nil, ErrTableNotFound
+	var b interface{}
+	if len(bean) > 0 {
+		b = bean[0]
+		beanValue := reflect.ValueOf(bean[0])
+		if beanValue.Kind() != reflect.Ptr {
+			return "", nil, errors.New("needs a pointer")
 		}
 
+		if beanValue.Elem().Kind() == reflect.Struct {
+			if err := statement.SetRefBean(bean[0]); err != nil {
+				return "", nil, err
+			}
+		}
+	}
+	tableName := statement.TableName()
+	if len(tableName) <= 0 {
+		return "", nil, ErrTableNotFound
+	}
+	if statement.RefTable == nil {
 		tableName = statement.quote(tableName)
 		if len(statement.JoinStr) > 0 {
 			joinStr = statement.JoinStr
@@ -365,22 +392,8 @@ func (statement *Statement) GenExistSQL(bean ...interface{}) (string, []interfac
 			args = []interface{}{}
 		}
 	} else {
-		beanValue := reflect.ValueOf(bean[0])
-		if beanValue.Kind() != reflect.Ptr {
-			return "", nil, errors.New("needs a pointer")
-		}
-
-		if beanValue.Elem().Kind() == reflect.Struct {
-			if err := statement.SetRefBean(bean[0]); err != nil {
-				return "", nil, err
-			}
-		}
-
-		if len(statement.TableName()) <= 0 {
-			return "", nil, ErrTableNotFound
-		}
 		statement.Limit(1)
-		sqlStr, args, err = statement.GenGetSQL(bean[0])
+		sqlStr, args, err = statement.GenGetSQL(b)
 		if err != nil {
 			return "", nil, err
 		}
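When a GROUP BY is present, the count statement is now wrapped in a subquery so the outer count(*) counts groups rather than grouped rows. A rough sketch of the effect; table and column names are invented:

package example

import "xorm.io/xorm"

// countBuyers sketches the new behaviour: with GroupBy set, the generated SQL
// has roughly the shape
//   SELECT count(*) FROM (SELECT user_id FROM orders GROUP BY user_id) sub
// so the result is the number of groups.
func countBuyers(engine *xorm.Engine) (int64, error) {
	return engine.Table("orders").GroupBy("user_id").Count()
}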
@@ -208,20 +208,18 @@ func (statement *Statement) quote(s string) string {
 
 // And add Where & and statement
 func (statement *Statement) And(query interface{}, args ...interface{}) *Statement {
-	switch query.(type) {
+	switch qr := query.(type) {
 	case string:
-		cond := builder.Expr(query.(string), args...)
+		cond := builder.Expr(qr, args...)
 		statement.cond = statement.cond.And(cond)
 	case map[string]interface{}:
-		queryMap := query.(map[string]interface{})
-		newMap := make(map[string]interface{})
-		for k, v := range queryMap {
-			newMap[statement.quote(k)] = v
+		cond := make(builder.Eq)
+		for k, v := range qr {
+			cond[statement.quote(k)] = v
 		}
-		statement.cond = statement.cond.And(builder.Eq(newMap))
-	case builder.Cond:
-		cond := query.(builder.Cond)
 		statement.cond = statement.cond.And(cond)
+	case builder.Cond:
+		statement.cond = statement.cond.And(qr)
 		for _, v := range args {
 			if vv, ok := v.(builder.Cond); ok {
 				statement.cond = statement.cond.And(vv)
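Map conditions passed to And (and to Or, in the next hunk) now have their keys run through statement.quote, so reserved-word column names come out quoted in the generated SQL. A sketch with an invented column name:

package example

import "xorm.io/xorm"

// countByKey sketches the quoted map condition: the "key" column is emitted
// as a quoted identifier (for example `key` = ? on MySQL) rather than bare.
func countByKey(engine *xorm.Engine, k string) (int64, error) {
	return engine.Table("settings").
		Where(map[string]interface{}{"key": k}).
		Count()
}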
@ -236,23 +234,25 @@ func (statement *Statement) And(query interface{}, args ...interface{}) *Stateme
|
||||||
|
|
||||||
// Or add Where & Or statement
|
// Or add Where & Or statement
|
||||||
func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement {
|
func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement {
|
||||||
switch query.(type) {
|
switch qr := query.(type) {
|
||||||
case string:
|
case string:
|
||||||
cond := builder.Expr(query.(string), args...)
|
cond := builder.Expr(qr, args...)
|
||||||
statement.cond = statement.cond.Or(cond)
|
statement.cond = statement.cond.Or(cond)
|
||||||
case map[string]interface{}:
|
case map[string]interface{}:
|
||||||
cond := builder.Eq(query.(map[string]interface{}))
|
cond := make(builder.Eq)
|
||||||
|
for k, v := range qr {
|
||||||
|
cond[statement.quote(k)] = v
|
||||||
|
}
|
||||||
statement.cond = statement.cond.Or(cond)
|
statement.cond = statement.cond.Or(cond)
|
||||||
case builder.Cond:
|
case builder.Cond:
|
||||||
cond := query.(builder.Cond)
|
statement.cond = statement.cond.Or(qr)
|
||||||
statement.cond = statement.cond.Or(cond)
|
|
||||||
for _, v := range args {
|
for _, v := range args {
|
||||||
if vv, ok := v.(builder.Cond); ok {
|
if vv, ok := v.(builder.Cond); ok {
|
||||||
statement.cond = statement.cond.Or(vv)
|
statement.cond = statement.cond.Or(vv)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
// TODO: not support condition type
|
statement.LastError = ErrConditionType
|
||||||
}
|
}
|
||||||
return statement
|
return statement
|
||||||
}
|
}
|
||||||
|
@ -324,9 +324,9 @@ func (statement *Statement) TableName() string {
|
||||||
// Incr Generate "Update ... Set column = column + arg" statement
|
// Incr Generate "Update ... Set column = column + arg" statement
|
||||||
func (statement *Statement) Incr(column string, arg ...interface{}) *Statement {
|
func (statement *Statement) Incr(column string, arg ...interface{}) *Statement {
|
||||||
if len(arg) > 0 {
|
if len(arg) > 0 {
|
||||||
statement.IncrColumns.addParam(column, arg[0])
|
statement.IncrColumns.Add(column, arg[0])
|
||||||
} else {
|
} else {
|
||||||
statement.IncrColumns.addParam(column, 1)
|
statement.IncrColumns.Add(column, 1)
|
||||||
}
|
}
|
||||||
return statement
|
return statement
|
||||||
}
|
}
|
||||||
|
@ -334,9 +334,9 @@ func (statement *Statement) Incr(column string, arg ...interface{}) *Statement {
|
||||||
// Decr Generate "Update ... Set column = column - arg" statement
|
// Decr Generate "Update ... Set column = column - arg" statement
|
||||||
func (statement *Statement) Decr(column string, arg ...interface{}) *Statement {
|
func (statement *Statement) Decr(column string, arg ...interface{}) *Statement {
|
||||||
if len(arg) > 0 {
|
if len(arg) > 0 {
|
||||||
statement.DecrColumns.addParam(column, arg[0])
|
statement.DecrColumns.Add(column, arg[0])
|
||||||
} else {
|
} else {
|
||||||
statement.DecrColumns.addParam(column, 1)
|
statement.DecrColumns.Add(column, 1)
|
||||||
}
|
}
|
||||||
return statement
|
return statement
|
||||||
}
|
}
|
||||||
|
@ -344,9 +344,9 @@ func (statement *Statement) Decr(column string, arg ...interface{}) *Statement {
|
||||||
// SetExpr Generate "Update ... Set column = {expression}" statement
|
// SetExpr Generate "Update ... Set column = {expression}" statement
|
||||||
func (statement *Statement) SetExpr(column string, expression interface{}) *Statement {
|
func (statement *Statement) SetExpr(column string, expression interface{}) *Statement {
|
||||||
if e, ok := expression.(string); ok {
|
if e, ok := expression.(string); ok {
|
||||||
statement.ExprColumns.addParam(column, statement.dialect.Quoter().Replace(e))
|
statement.ExprColumns.Add(column, statement.dialect.Quoter().Replace(e))
|
||||||
} else {
|
} else {
|
||||||
statement.ExprColumns.addParam(column, expression)
|
statement.ExprColumns.Add(column, expression)
|
||||||
}
|
}
|
||||||
return statement
|
return statement
|
||||||
}
|
}
|
||||||
|
@ -734,6 +734,8 @@ func (statement *Statement) buildConds2(table *schemas.Table, bean interface{},
|
||||||
//engine.logger.Warn(err)
|
//engine.logger.Warn(err)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
|
} else if fieldValuePtr == nil {
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if col.IsDeleted && !unscoped { // tag "deleted" is enabled
|
if col.IsDeleted && !unscoped { // tag "deleted" is enabled
|
||||||
|
@ -976,7 +978,7 @@ func (statement *Statement) joinColumns(cols []*schemas.Column, includeTableName
|
||||||
|
|
||||||
// CondDeleted returns the conditions whether a record is soft deleted.
|
// CondDeleted returns the conditions whether a record is soft deleted.
|
||||||
func (statement *Statement) CondDeleted(col *schemas.Column) builder.Cond {
|
func (statement *Statement) CondDeleted(col *schemas.Column) builder.Cond {
|
||||||
var colName = col.Name
|
var colName = statement.quote(col.Name)
|
||||||
if statement.JoinStr != "" {
|
if statement.JoinStr != "" {
|
||||||
var prefix string
|
var prefix string
|
||||||
if statement.TableAlias != "" {
|
if statement.TableAlias != "" {
|
||||||
|
|
|
@ -88,6 +88,9 @@ func (statement *Statement) BuildUpdates(tableValue reflect.Value,
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
if fieldValuePtr == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
fieldValue := *fieldValuePtr
|
fieldValue := *fieldValuePtr
|
||||||
fieldType := reflect.TypeOf(fieldValue.Interface())
|
fieldType := reflect.TypeOf(fieldValue.Interface())
|
||||||
|
|
|
@ -132,7 +132,6 @@ func (s *SimpleLogger) Error(v ...interface{}) {
|
||||||
if s.level <= LOG_ERR {
|
if s.level <= LOG_ERR {
|
||||||
s.ERR.Output(2, fmt.Sprintln(v...))
|
s.ERR.Output(2, fmt.Sprintln(v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errorf implement ILogger
|
// Errorf implement ILogger
|
||||||
|
@ -140,7 +139,6 @@ func (s *SimpleLogger) Errorf(format string, v ...interface{}) {
|
||||||
if s.level <= LOG_ERR {
|
if s.level <= LOG_ERR {
|
||||||
s.ERR.Output(2, fmt.Sprintf(format, v...))
|
s.ERR.Output(2, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Debug implement ILogger
|
// Debug implement ILogger
|
||||||
|
@ -148,7 +146,6 @@ func (s *SimpleLogger) Debug(v ...interface{}) {
|
||||||
if s.level <= LOG_DEBUG {
|
if s.level <= LOG_DEBUG {
|
||||||
s.DEBUG.Output(2, fmt.Sprintln(v...))
|
s.DEBUG.Output(2, fmt.Sprintln(v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Debugf implement ILogger
|
// Debugf implement ILogger
|
||||||
|
@ -156,7 +153,6 @@ func (s *SimpleLogger) Debugf(format string, v ...interface{}) {
|
||||||
if s.level <= LOG_DEBUG {
|
if s.level <= LOG_DEBUG {
|
||||||
s.DEBUG.Output(2, fmt.Sprintf(format, v...))
|
s.DEBUG.Output(2, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Info implement ILogger
|
// Info implement ILogger
|
||||||
|
@ -164,7 +160,6 @@ func (s *SimpleLogger) Info(v ...interface{}) {
|
||||||
if s.level <= LOG_INFO {
|
if s.level <= LOG_INFO {
|
||||||
s.INFO.Output(2, fmt.Sprintln(v...))
|
s.INFO.Output(2, fmt.Sprintln(v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Infof implement ILogger
|
// Infof implement ILogger
|
||||||
|
@ -172,7 +167,6 @@ func (s *SimpleLogger) Infof(format string, v ...interface{}) {
|
||||||
if s.level <= LOG_INFO {
|
if s.level <= LOG_INFO {
|
||||||
s.INFO.Output(2, fmt.Sprintf(format, v...))
|
s.INFO.Output(2, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Warn implement ILogger
|
// Warn implement ILogger
|
||||||
|
@ -180,7 +174,6 @@ func (s *SimpleLogger) Warn(v ...interface{}) {
|
||||||
if s.level <= LOG_WARNING {
|
if s.level <= LOG_WARNING {
|
||||||
s.WARN.Output(2, fmt.Sprintln(v...))
|
s.WARN.Output(2, fmt.Sprintln(v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Warnf implement ILogger
|
// Warnf implement ILogger
|
||||||
|
@ -188,7 +181,6 @@ func (s *SimpleLogger) Warnf(format string, v ...interface{}) {
|
||||||
if s.level <= LOG_WARNING {
|
if s.level <= LOG_WARNING {
|
||||||
s.WARN.Output(2, fmt.Sprintf(format, v...))
|
s.WARN.Output(2, fmt.Sprintf(format, v...))
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Level implement ILogger
|
// Level implement ILogger
|
||||||
|
@ -199,7 +191,6 @@ func (s *SimpleLogger) Level() LogLevel {
|
||||||
// SetLevel implement ILogger
|
// SetLevel implement ILogger
|
||||||
func (s *SimpleLogger) SetLevel(l LogLevel) {
|
func (s *SimpleLogger) SetLevel(l LogLevel) {
|
||||||
s.level = l
|
s.level = l
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShowSQL implement ILogger
|
// ShowSQL implement ILogger
|
||||||
|
|
|
@ -0,0 +1,67 @@
|
||||||
|
// Copyright 2021 The Xorm Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package xorm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
|
||||||
|
"xorm.io/xorm/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (engine *Engine) row2mapStr(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string]string, error) {
|
||||||
|
var scanResults = make([]interface{}, len(fields))
|
||||||
|
for i := 0; i < len(fields); i++ {
|
||||||
|
var s sql.NullString
|
||||||
|
scanResults[i] = &s
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := rows.Scan(scanResults...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make(map[string]string, len(fields))
|
||||||
|
for ii, key := range fields {
|
||||||
|
s := scanResults[ii].(*sql.NullString)
|
||||||
|
result[key] = s.String
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (engine *Engine) row2mapBytes(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string][]byte, error) {
|
||||||
|
var scanResults = make([]interface{}, len(fields))
|
||||||
|
for i := 0; i < len(fields); i++ {
|
||||||
|
var s sql.NullString
|
||||||
|
scanResults[i] = &s
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := rows.Scan(scanResults...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make(map[string][]byte, len(fields))
|
||||||
|
for ii, key := range fields {
|
||||||
|
s := scanResults[ii].(*sql.NullString)
|
||||||
|
result[key] = []byte(s.String)
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (engine *Engine) row2sliceStr(rows *core.Rows, types []*sql.ColumnType, fields []string) ([]string, error) {
|
||||||
|
results := make([]string, 0, len(fields))
|
||||||
|
var scanResults = make([]interface{}, len(fields))
|
||||||
|
for i := 0; i < len(fields); i++ {
|
||||||
|
var s sql.NullString
|
||||||
|
scanResults[i] = &s
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := rows.Scan(scanResults...); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(fields); i++ {
|
||||||
|
results = append(results, scanResults[i].(*sql.NullString).String)
|
||||||
|
}
|
||||||
|
return results, nil
|
||||||
|
}
|
|
@ -6,10 +6,8 @@ package schemas
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -24,7 +22,8 @@ const (
|
||||||
type Column struct {
|
type Column struct {
|
||||||
Name string
|
Name string
|
||||||
TableName string
|
TableName string
|
||||||
FieldName string // Avaiable only when parsed from a struct
|
FieldName string // Available only when parsed from a struct
|
||||||
|
FieldIndex []int // Available only when parsed from a struct
|
||||||
SQLType SQLType
|
SQLType SQLType
|
||||||
IsJSON bool
|
IsJSON bool
|
||||||
Length int
|
Length int
|
||||||
|
@ -83,41 +82,17 @@ func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) {
|
||||||
|
|
||||||
// ValueOfV returns column's filed of struct's value accept reflevt value
|
// ValueOfV returns column's filed of struct's value accept reflevt value
|
||||||
func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) {
|
func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) {
|
||||||
var fieldValue reflect.Value
|
var v = *dataStruct
|
||||||
fieldPath := strings.Split(col.FieldName, ".")
|
for _, i := range col.FieldIndex {
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
if dataStruct.Type().Kind() == reflect.Map {
|
if v.IsNil() {
|
||||||
keyValue := reflect.ValueOf(fieldPath[len(fieldPath)-1])
|
v.Set(reflect.New(v.Type().Elem()))
|
||||||
fieldValue = dataStruct.MapIndex(keyValue)
|
|
||||||
return &fieldValue, nil
|
|
||||||
} else if dataStruct.Type().Kind() == reflect.Interface {
|
|
||||||
structValue := reflect.ValueOf(dataStruct.Interface())
|
|
||||||
dataStruct = &structValue
|
|
||||||
}
|
|
||||||
|
|
||||||
level := len(fieldPath)
|
|
||||||
fieldValue = dataStruct.FieldByName(fieldPath[0])
|
|
||||||
for i := 0; i < level-1; i++ {
|
|
||||||
if !fieldValue.IsValid() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if fieldValue.Kind() == reflect.Struct {
|
|
||||||
fieldValue = fieldValue.FieldByName(fieldPath[i+1])
|
|
||||||
} else if fieldValue.Kind() == reflect.Ptr {
|
|
||||||
if fieldValue.IsNil() {
|
|
||||||
fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
|
|
||||||
}
|
}
|
||||||
fieldValue = fieldValue.Elem().FieldByName(fieldPath[i+1])
|
v = v.Elem()
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("field %v is not valid", col.FieldName)
|
|
||||||
}
|
}
|
||||||
|
v = v.FieldByIndex([]int{i})
|
||||||
}
|
}
|
||||||
|
return &v, nil
|
||||||
if !fieldValue.IsValid() {
|
|
||||||
return nil, fmt.Errorf("field %v is not valid", col.FieldName)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &fieldValue, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConvertID converts id content to suitable type according column type
|
// ConvertID converts id content to suitable type according column type
|
||||||
|
|
|
@ -5,7 +5,6 @@
|
||||||
package schemas
|
package schemas
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -159,24 +158,8 @@ func (table *Table) IDOfV(rv reflect.Value) (PK, error) {
|
||||||
for i, col := range table.PKColumns() {
|
for i, col := range table.PKColumns() {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
fieldName := col.FieldName
|
pkField := v.FieldByIndex(col.FieldIndex)
|
||||||
for {
|
|
||||||
parts := strings.SplitN(fieldName, ".", 2)
|
|
||||||
if len(parts) == 1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
v = v.FieldByName(parts[0])
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
if v.Kind() != reflect.Struct {
|
|
||||||
return nil, fmt.Errorf("Unsupported read value of column %s from field %s", col.Name, col.FieldName)
|
|
||||||
}
|
|
||||||
fieldName = parts[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
pkField := v.FieldByName(fieldName)
|
|
||||||
switch pkField.Kind() {
|
switch pkField.Kind() {
|
||||||
case reflect.String:
|
case reflect.String:
|
||||||
pk[i], err = col.ConvertID(pkField.String())
|
pk[i], err = col.ConvertID(pkField.String())
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
// Copyright 2021 The Xorm Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package schemas
|
||||||
|
|
||||||
|
// Version represents a database version
|
||||||
|
type Version struct {
|
||||||
|
Number string // the version number which could be compared
|
||||||
|
Level string
|
||||||
|
Edition string
|
||||||
|
}
|
|
@ -375,6 +375,9 @@ func (session *Session) getField(dataStruct *reflect.Value, key string, table *s
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if fieldValue == nil {
|
||||||
|
return nil, ErrFieldIsNotValid{key, table.Name}
|
||||||
|
}
|
||||||
|
|
||||||
if !fieldValue.IsValid() || !fieldValue.CanSet() {
|
if !fieldValue.IsValid() || !fieldValue.CanSet() {
|
||||||
return nil, ErrFieldIsNotValid{key, table.Name}
|
return nil, ErrFieldIsNotValid{key, table.Name}
|
||||||
|
|
|
@ -35,27 +35,20 @@ func (session *Session) str2Time(col *schemas.Column, data string) (outTime time
|
||||||
sd, err := strconv.ParseInt(sdata, 10, 64)
|
sd, err := strconv.ParseInt(sdata, 10, 64)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
x = time.Unix(sd, 0)
|
x = time.Unix(sd, 0)
|
||||||
//session.engine.logger.Debugf("time(0) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
|
||||||
} else {
|
|
||||||
//session.engine.logger.Debugf("time(0) err key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
|
||||||
}
|
}
|
||||||
} else if len(sdata) > 19 && strings.Contains(sdata, "-") {
|
} else if len(sdata) > 19 && strings.Contains(sdata, "-") {
|
||||||
x, err = time.ParseInLocation(time.RFC3339Nano, sdata, parseLoc)
|
x, err = time.ParseInLocation(time.RFC3339Nano, sdata, parseLoc)
|
||||||
session.engine.logger.Debugf("time(1) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
session.engine.logger.Debugf("time(1) key[%v]: %+v | sdata: [%v]\n", col.Name, x, sdata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
x, err = time.ParseInLocation("2006-01-02 15:04:05.999999999", sdata, parseLoc)
|
x, err = time.ParseInLocation("2006-01-02 15:04:05.999999999", sdata, parseLoc)
|
||||||
//session.engine.logger.Debugf("time(2) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
x, err = time.ParseInLocation("2006-01-02 15:04:05.9999999 Z07:00", sdata, parseLoc)
|
x, err = time.ParseInLocation("2006-01-02 15:04:05.9999999 Z07:00", sdata, parseLoc)
|
||||||
//session.engine.logger.Debugf("time(3) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
|
||||||
}
|
}
|
||||||
} else if len(sdata) == 19 && strings.Contains(sdata, "-") {
|
} else if len(sdata) == 19 && strings.Contains(sdata, "-") {
|
||||||
x, err = time.ParseInLocation("2006-01-02 15:04:05", sdata, parseLoc)
|
x, err = time.ParseInLocation("2006-01-02 15:04:05", sdata, parseLoc)
|
||||||
//session.engine.logger.Debugf("time(4) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
|
||||||
} else if len(sdata) == 10 && sdata[4] == '-' && sdata[7] == '-' {
|
} else if len(sdata) == 10 && sdata[4] == '-' && sdata[7] == '-' {
|
||||||
x, err = time.ParseInLocation("2006-01-02", sdata, parseLoc)
|
x, err = time.ParseInLocation("2006-01-02", sdata, parseLoc)
|
||||||
//session.engine.logger.Debugf("time(5) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
|
||||||
} else if col.SQLType.Name == schemas.Time {
|
} else if col.SQLType.Name == schemas.Time {
|
||||||
if strings.Contains(sdata, " ") {
|
if strings.Contains(sdata, " ") {
|
||||||
ssd := strings.Split(sdata, " ")
|
ssd := strings.Split(sdata, " ")
|
||||||
|
@ -69,7 +62,6 @@ func (session *Session) str2Time(col *schemas.Column, data string) (outTime time
|
||||||
|
|
||||||
st := fmt.Sprintf("2006-01-02 %v", sdata)
|
st := fmt.Sprintf("2006-01-02 %v", sdata)
|
||||||
x, err = time.ParseInLocation("2006-01-02 15:04:05", st, parseLoc)
|
x, err = time.ParseInLocation("2006-01-02 15:04:05", st, parseLoc)
|
||||||
//session.engine.logger.Debugf("time(6) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
|
|
||||||
} else {
|
} else {
|
||||||
outErr = fmt.Errorf("unsupported time format %v", sdata)
|
outErr = fmt.Errorf("unsupported time format %v", sdata)
|
||||||
return
|
return
|
||||||
|
|
|
@ -11,6 +11,7 @@ import (
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"xorm.io/xorm/internal/utils"
|
"xorm.io/xorm/internal/utils"
|
||||||
"xorm.io/xorm/schemas"
|
"xorm.io/xorm/schemas"
|
||||||
|
@ -374,9 +375,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
|
||||||
return 1, nil
|
return 1, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
aiValue.Set(int64ToIntValue(id, aiValue.Type()))
|
return 1, convertAssignV(aiValue.Addr(), id)
|
||||||
|
|
||||||
return 1, nil
|
|
||||||
} else if len(table.AutoIncrement) > 0 && (session.engine.dialect.URI().DBType == schemas.POSTGRES ||
|
} else if len(table.AutoIncrement) > 0 && (session.engine.dialect.URI().DBType == schemas.POSTGRES ||
|
||||||
session.engine.dialect.URI().DBType == schemas.MSSQL) {
|
session.engine.dialect.URI().DBType == schemas.MSSQL) {
|
||||||
res, err := session.queryBytes(sqlStr, args...)
|
res, err := session.queryBytes(sqlStr, args...)
|
||||||
|
@ -416,9 +415,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
|
||||||
return 1, nil
|
return 1, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
aiValue.Set(int64ToIntValue(id, aiValue.Type()))
|
return 1, convertAssignV(aiValue.Addr(), id)
|
||||||
|
|
||||||
return 1, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := session.exec(sqlStr, args...)
|
res, err := session.exec(sqlStr, args...)
|
||||||
|
@ -458,7 +455,9 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
|
||||||
return res.RowsAffected()
|
return res.RowsAffected()
|
||||||
}
|
}
|
||||||
|
|
||||||
aiValue.Set(int64ToIntValue(id, aiValue.Type()))
|
if err := convertAssignV(aiValue.Addr(), id); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
return res.RowsAffected()
|
return res.RowsAffected()
|
||||||
}
|
}
|
||||||
|
@ -499,6 +498,16 @@ func (session *Session) genInsertColumns(bean interface{}) ([]string, []interfac
|
||||||
}
|
}
|
||||||
|
|
||||||
if col.IsDeleted {
|
if col.IsDeleted {
|
||||||
|
colNames = append(colNames, col.Name)
|
||||||
|
if !col.Nullable {
|
||||||
|
if col.SQLType.IsNumeric() {
|
||||||
|
args = append(args, 0)
|
||||||
|
} else {
|
||||||
|
args = append(args, time.Time{}.Format("2006-01-02 15:04:05"))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
args = append(args, nil)
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -75,69 +75,18 @@ func value2String(rawValue *reflect.Value) (str string, err error) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func row2mapStr(rows *core.Rows, fields []string) (resultsMap map[string]string, err error) {
|
func (session *Session) rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error) {
|
||||||
result := make(map[string]string)
|
|
||||||
scanResultContainers := make([]interface{}, len(fields))
|
|
||||||
for i := 0; i < len(fields); i++ {
|
|
||||||
var scanResultContainer interface{}
|
|
||||||
scanResultContainers[i] = &scanResultContainer
|
|
||||||
}
|
|
||||||
if err := rows.Scan(scanResultContainers...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for ii, key := range fields {
|
|
||||||
rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii]))
|
|
||||||
// if row is null then as empty string
|
|
||||||
if rawValue.Interface() == nil {
|
|
||||||
result[key] = ""
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if data, err := value2String(&rawValue); err == nil {
|
|
||||||
result[key] = data
|
|
||||||
} else {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func row2sliceStr(rows *core.Rows, fields []string) (results []string, err error) {
|
|
||||||
result := make([]string, 0, len(fields))
|
|
||||||
scanResultContainers := make([]interface{}, len(fields))
|
|
||||||
for i := 0; i < len(fields); i++ {
|
|
||||||
var scanResultContainer interface{}
|
|
||||||
scanResultContainers[i] = &scanResultContainer
|
|
||||||
}
|
|
||||||
if err := rows.Scan(scanResultContainers...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(fields); i++ {
|
|
||||||
rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[i]))
|
|
||||||
// if row is null then as empty string
|
|
||||||
if rawValue.Interface() == nil {
|
|
||||||
result = append(result, "")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if data, err := value2String(&rawValue); err == nil {
|
|
||||||
result = append(result, data)
|
|
||||||
} else {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error) {
|
|
||||||
fields, err := rows.Columns()
|
fields, err := rows.Columns()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
types, err := rows.ColumnTypes()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
result, err := row2mapStr(rows, fields)
|
result, err := session.engine.row2mapStr(rows, types, fields)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -147,13 +96,18 @@ func rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error)
|
||||||
return resultsSlice, nil
|
return resultsSlice, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) {
|
func (session *Session) rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) {
|
||||||
fields, err := rows.Columns()
|
fields, err := rows.Columns()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
types, err := rows.ColumnTypes()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
record, err := row2sliceStr(rows, fields)
|
record, err := session.engine.row2sliceStr(rows, types, fields)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -180,7 +134,7 @@ func (session *Session) QueryString(sqlOrArgs ...interface{}) ([]map[string]stri
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
return rows2Strings(rows)
|
return session.rows2Strings(rows)
|
||||||
}
|
}
|
||||||
|
|
||||||
// QuerySliceString runs a raw sql and return records as [][]string
|
// QuerySliceString runs a raw sql and return records as [][]string
|
||||||
|
@ -200,7 +154,7 @@ func (session *Session) QuerySliceString(sqlOrArgs ...interface{}) ([][]string,
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
return rows2SliceString(rows)
|
return session.rows2SliceString(rows)
|
||||||
}
|
}
|
||||||
|
|
||||||
func row2mapInterface(rows *core.Rows, fields []string) (resultsMap map[string]interface{}, err error) {
|
func row2mapInterface(rows *core.Rows, fields []string) (resultsMap map[string]interface{}, err error) {
|
||||||
|
|
|
@ -79,41 +79,17 @@ func value2Bytes(rawValue *reflect.Value) ([]byte, error) {
|
||||||
return []byte(str), nil
|
return []byte(str), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func row2map(rows *core.Rows, fields []string) (resultsMap map[string][]byte, err error) {
|
func (session *Session) rows2maps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) {
|
||||||
result := make(map[string][]byte)
|
|
||||||
scanResultContainers := make([]interface{}, len(fields))
|
|
||||||
for i := 0; i < len(fields); i++ {
|
|
||||||
var scanResultContainer interface{}
|
|
||||||
scanResultContainers[i] = &scanResultContainer
|
|
||||||
}
|
|
||||||
if err := rows.Scan(scanResultContainers...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for ii, key := range fields {
|
|
||||||
rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii]))
|
|
||||||
//if row is null then ignore
|
|
||||||
if rawValue.Interface() == nil {
|
|
||||||
result[key] = []byte{}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if data, err := value2Bytes(&rawValue); err == nil {
|
|
||||||
result[key] = data
|
|
||||||
} else {
|
|
||||||
return nil, err // !nashtsai! REVIEW, should return err or just error log?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func rows2maps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) {
|
|
||||||
fields, err := rows.Columns()
|
fields, err := rows.Columns()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
types, err := rows.ColumnTypes()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
result, err := row2map(rows, fields)
|
result, err := session.engine.row2mapBytes(rows, types, fields)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -130,7 +106,7 @@ func (session *Session) queryBytes(sqlStr string, args ...interface{}) ([]map[st
|
||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
return rows2maps(rows)
|
return session.rows2maps(rows)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, error) {
|
func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, error) {
|
||||||
|
|
|
@ -17,6 +17,11 @@ import (
|
||||||
"xorm.io/xorm/schemas"
|
"xorm.io/xorm/schemas"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// enumerated all errors
|
||||||
|
var (
|
||||||
|
ErrNoColumnsTobeUpdated = errors.New("no columns found to be updated")
|
||||||
|
)
|
||||||
|
|
||||||
func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error {
|
func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error {
|
||||||
if table == nil ||
|
if table == nil ||
|
||||||
session.tx != nil {
|
session.tx != nil {
|
||||||
|
@ -144,6 +149,8 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
|
||||||
defer session.Close()
|
defer session.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defer session.resetStatement()
|
||||||
|
|
||||||
if session.statement.LastError != nil {
|
if session.statement.LastError != nil {
|
||||||
return 0, session.statement.LastError
|
return 0, session.statement.LastError
|
||||||
}
|
}
|
||||||
|
@ -224,35 +231,35 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
|
||||||
|
|
||||||
// for update action to like "column = column + ?"
|
// for update action to like "column = column + ?"
|
||||||
incColumns := session.statement.IncrColumns
|
incColumns := session.statement.IncrColumns
|
||||||
for i, colName := range incColumns.ColNames {
|
for _, expr := range incColumns {
|
||||||
colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" + ?")
|
colNames = append(colNames, session.engine.Quote(expr.ColName)+" = "+session.engine.Quote(expr.ColName)+" + ?")
|
||||||
args = append(args, incColumns.Args[i])
|
args = append(args, expr.Arg)
|
||||||
}
|
}
|
||||||
// for update action to like "column = column - ?"
|
// for update action to like "column = column - ?"
|
||||||
decColumns := session.statement.DecrColumns
|
decColumns := session.statement.DecrColumns
|
||||||
for i, colName := range decColumns.ColNames {
|
for _, expr := range decColumns {
|
||||||
colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" - ?")
|
colNames = append(colNames, session.engine.Quote(expr.ColName)+" = "+session.engine.Quote(expr.ColName)+" - ?")
|
||||||
args = append(args, decColumns.Args[i])
|
args = append(args, expr.Arg)
|
||||||
}
|
}
|
||||||
// for update action to like "column = expression"
|
// for update action to like "column = expression"
|
||||||
exprColumns := session.statement.ExprColumns
|
exprColumns := session.statement.ExprColumns
|
||||||
for i, colName := range exprColumns.ColNames {
|
for _, expr := range exprColumns {
|
||||||
switch tp := exprColumns.Args[i].(type) {
|
switch tp := expr.Arg.(type) {
|
||||||
case string:
|
case string:
|
||||||
if len(tp) == 0 {
|
if len(tp) == 0 {
|
||||||
tp = "''"
|
tp = "''"
|
||||||
}
|
}
|
||||||
colNames = append(colNames, session.engine.Quote(colName)+"="+tp)
|
colNames = append(colNames, session.engine.Quote(expr.ColName)+"="+tp)
|
||||||
case *builder.Builder:
|
case *builder.Builder:
|
||||||
subQuery, subArgs, err := session.statement.GenCondSQL(tp)
|
subQuery, subArgs, err := session.statement.GenCondSQL(tp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
colNames = append(colNames, session.engine.Quote(colName)+"=("+subQuery+")")
|
colNames = append(colNames, session.engine.Quote(expr.ColName)+"=("+subQuery+")")
|
||||||
args = append(args, subArgs...)
|
args = append(args, subArgs...)
|
||||||
default:
|
default:
|
||||||
colNames = append(colNames, session.engine.Quote(colName)+"=?")
|
colNames = append(colNames, session.engine.Quote(expr.ColName)+"=?")
|
||||||
args = append(args, exprColumns.Args[i])
|
args = append(args, expr.Arg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -273,15 +280,12 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
 			k = ct.Elem().Kind()
 		}
 		if k == reflect.Struct {
-			var refTable = session.statement.RefTable
-			if refTable == nil {
-				refTable, err = session.engine.TableInfo(condiBean[0])
-				if err != nil {
-					return 0, err
-				}
+			condTable, err := session.engine.TableInfo(condiBean[0])
+			if err != nil {
+				return 0, err
 			}
-			var err error
-			autoCond, err = session.statement.BuildConds(refTable, condiBean[0], true, true, false, true, false)
+			autoCond, err = session.statement.BuildConds(condTable, condiBean[0], true, true, false, true, false)
 			if err != nil {
 				return 0, err
 			}
@@ -329,7 +333,7 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
 	}

 	if len(colNames) <= 0 {
-		return 0, errors.New("No content found to be updated")
+		return 0, ErrNoColumnsTobeUpdated
 	}

 	condSQL, condArgs, err = session.statement.GenCondSQL(cond)
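The hunk above swaps an ad-hoc errors.New for the package-level sentinel ErrNoColumnsTobeUpdated, so callers can test for the condition instead of matching an error string. A hedged sketch of what that check could look like from application code — it assumes an already initialized *xorm.Engine and a hypothetical mapped User struct; comparing with errors.Is or == makes no difference here because the sentinel is returned unwrapped:

package example

import (
	"errors"
	"log"

	"xorm.io/xorm"
)

// User is a hypothetical mapped struct used only for this sketch.
type User struct {
	Id   int64
	Name string
}

// renameUser updates a single column and treats "nothing to update" as a no-op.
func renameUser(engine *xorm.Engine, id int64, name string) error {
	_, err := engine.ID(id).Cols("name").Update(&User{Name: name})
	if errors.Is(err, xorm.ErrNoColumnsTobeUpdated) {
		log.Printf("user %d: no columns to update", id)
		return nil
	}
	return err
}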
@@ -450,7 +454,6 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
 			// FIXME: if bean is a map type, it will panic because map cannot be as map key
 			session.afterUpdateBeans[bean] = &afterClosures
 		}
-
 	} else {
 		if _, ok := interface{}(bean).(AfterUpdateProcessor); ok {
 			session.afterUpdateBeans[bean] = nil
@@ -7,11 +7,11 @@ package tags
 import (
 	"encoding/gob"
 	"errors"
-	"fmt"
 	"reflect"
 	"strings"
 	"sync"
 	"time"
+	"unicode"

 	"xorm.io/xorm/caches"
 	"xorm.io/xorm/convert"
@@ -22,7 +22,7 @@ import (

 var (
 	// ErrUnsupportedType represents an unsupported type error
-	ErrUnsupportedType = errors.New("Unsupported type")
+	ErrUnsupportedType = errors.New("unsupported type")
 )

 // Parser represents a parser for xorm tag
@@ -124,6 +124,147 @@ func addIndex(indexName string, table *schemas.Table, col *schemas.Column, index
 	}
 }

+var ErrIgnoreField = errors.New("field will be ignored")
+
+func (parser *Parser) parseFieldWithNoTag(fieldIndex int, field reflect.StructField, fieldValue reflect.Value) (*schemas.Column, error) {
+	var sqlType schemas.SQLType
+	if fieldValue.CanAddr() {
+		if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok {
+			sqlType = schemas.SQLType{Name: schemas.Text}
+		}
+	}
+	if _, ok := fieldValue.Interface().(convert.Conversion); ok {
+		sqlType = schemas.SQLType{Name: schemas.Text}
+	} else {
+		sqlType = schemas.Type2SQLType(field.Type)
+	}
+	col := schemas.NewColumn(parser.columnMapper.Obj2Table(field.Name),
+		field.Name, sqlType, sqlType.DefaultLength,
+		sqlType.DefaultLength2, true)
+	col.FieldIndex = []int{fieldIndex}
+
+	if field.Type.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) {
+		col.IsAutoIncrement = true
+		col.IsPrimaryKey = true
+		col.Nullable = false
+	}
+	return col, nil
+}
+
+func (parser *Parser) parseFieldWithTags(table *schemas.Table, fieldIndex int, field reflect.StructField, fieldValue reflect.Value, tags []tag) (*schemas.Column, error) {
+	var col = &schemas.Column{
+		FieldName:       field.Name,
+		FieldIndex:      []int{fieldIndex},
+		Nullable:        true,
+		IsPrimaryKey:    false,
+		IsAutoIncrement: false,
+		MapType:         schemas.TWOSIDES,
+		Indexes:         make(map[string]int),
+		DefaultIsEmpty:  true,
+	}
+
+	var ctx = Context{
+		table:      table,
+		col:        col,
+		fieldValue: fieldValue,
+		indexNames: make(map[string]int),
+		parser:     parser,
+	}
+
+	for j, tag := range tags {
+		if ctx.ignoreNext {
+			ctx.ignoreNext = false
+			continue
+		}
+
+		ctx.tag = tag
+		ctx.tagUname = strings.ToUpper(tag.name)
+
+		if j > 0 {
+			ctx.preTag = strings.ToUpper(tags[j-1].name)
+		}
+		if j < len(tags)-1 {
+			ctx.nextTag = tags[j+1].name
+		} else {
+			ctx.nextTag = ""
+		}
+
+		if h, ok := parser.handlers[ctx.tagUname]; ok {
+			if err := h(&ctx); err != nil {
+				return nil, err
+			}
+		} else {
+			if strings.HasPrefix(ctx.tag.name, "'") && strings.HasSuffix(ctx.tag.name, "'") {
+				col.Name = ctx.tag.name[1 : len(ctx.tag.name)-1]
+			} else {
+				col.Name = ctx.tag.name
+			}
+		}
+
+		if ctx.hasCacheTag {
+			if parser.cacherMgr.GetDefaultCacher() != nil {
+				parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher())
+			} else {
+				parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000))
+			}
+		}
+		if ctx.hasNoCacheTag {
+			parser.cacherMgr.SetCacher(table.Name, nil)
+		}
+	}
+
+	if col.SQLType.Name == "" {
+		col.SQLType = schemas.Type2SQLType(field.Type)
+	}
+	parser.dialect.SQLType(col)
+	if col.Length == 0 {
+		col.Length = col.SQLType.DefaultLength
+	}
+	if col.Length2 == 0 {
+		col.Length2 = col.SQLType.DefaultLength2
+	}
+	if col.Name == "" {
+		col.Name = parser.columnMapper.Obj2Table(field.Name)
+	}
+
+	if ctx.isUnique {
+		ctx.indexNames[col.Name] = schemas.UniqueType
+	} else if ctx.isIndex {
+		ctx.indexNames[col.Name] = schemas.IndexType
+	}
+
+	for indexName, indexType := range ctx.indexNames {
+		addIndex(indexName, table, col, indexType)
+	}
+
+	return col, nil
+}
+
+func (parser *Parser) parseField(table *schemas.Table, fieldIndex int, field reflect.StructField, fieldValue reflect.Value) (*schemas.Column, error) {
+	var (
+		tag       = field.Tag
+		ormTagStr = strings.TrimSpace(tag.Get(parser.identifier))
+	)
+	if ormTagStr == "-" {
+		return nil, ErrIgnoreField
+	}
+	if ormTagStr == "" {
+		return parser.parseFieldWithNoTag(fieldIndex, field, fieldValue)
+	}
+	tags, err := splitTag(ormTagStr)
+	if err != nil {
+		return nil, err
+	}
+	return parser.parseFieldWithTags(table, fieldIndex, field, fieldValue, tags)
+}
+
+func isNotTitle(n string) bool {
+	for _, c := range n {
+		return unicode.IsLower(c)
+	}
+	return true
+}
+
 // Parse parses a struct as a table information
 func (parser *Parser) Parse(v reflect.Value) (*schemas.Table, error) {
 	t := v.Type()
@@ -139,185 +280,26 @@ func (parser *Parser) Parse(v reflect.Value) (*schemas.Table, error) {
 	table.Type = t
 	table.Name = names.GetTableName(parser.tableMapper, v)

-	var idFieldColName string
-	var hasCacheTag, hasNoCacheTag bool
-
 	for i := 0; i < t.NumField(); i++ {
-		tag := t.Field(i).Tag
-		ormTagStr := tag.Get(parser.identifier)
-		var col *schemas.Column
-		fieldValue := v.Field(i)
-		fieldType := fieldValue.Type()
-
-		if ormTagStr != "" {
-			col = &schemas.Column{
-				FieldName:       t.Field(i).Name,
-				Nullable:        true,
-				IsPrimaryKey:    false,
-				IsAutoIncrement: false,
-				MapType:         schemas.TWOSIDES,
-				Indexes:         make(map[string]int),
-				DefaultIsEmpty:  true,
-			}
-			tags := splitTag(ormTagStr)
-
-			if len(tags) > 0 {
-				if tags[0] == "-" {
-					continue
-				}
-
-				var ctx = Context{
-					table:      table,
-					col:        col,
-					fieldValue: fieldValue,
-					indexNames: make(map[string]int),
-					parser:     parser,
-				}
-
-				if strings.HasPrefix(strings.ToUpper(tags[0]), "EXTENDS") {
-					pStart := strings.Index(tags[0], "(")
-					if pStart > -1 && strings.HasSuffix(tags[0], ")") {
-						var tagPrefix = strings.TrimFunc(tags[0][pStart+1:len(tags[0])-1], func(r rune) bool {
-							return r == '\'' || r == '"'
-						})
-
-						ctx.params = []string{tagPrefix}
-					}
-
-					if err := ExtendsTagHandler(&ctx); err != nil {
-						return nil, err
-					}
-					continue
-				}
-
-				for j, key := range tags {
-					if ctx.ignoreNext {
-						ctx.ignoreNext = false
-						continue
-					}
-
-					k := strings.ToUpper(key)
-					ctx.tagName = k
-					ctx.params = []string{}
-
-					pStart := strings.Index(k, "(")
-					if pStart == 0 {
-						return nil, errors.New("( could not be the first character")
-					}
-					if pStart > -1 {
-						if !strings.HasSuffix(k, ")") {
-							return nil, fmt.Errorf("field %s tag %s cannot match ) character", col.FieldName, key)
-						}
-
-						ctx.tagName = k[:pStart]
-						ctx.params = strings.Split(key[pStart+1:len(k)-1], ",")
-					}
-
-					if j > 0 {
-						ctx.preTag = strings.ToUpper(tags[j-1])
-					}
-					if j < len(tags)-1 {
-						ctx.nextTag = tags[j+1]
-					} else {
-						ctx.nextTag = ""
-					}
-
-					if h, ok := parser.handlers[ctx.tagName]; ok {
-						if err := h(&ctx); err != nil {
-							return nil, err
-						}
-					} else {
-						if strings.HasPrefix(key, "'") && strings.HasSuffix(key, "'") {
-							col.Name = key[1 : len(key)-1]
-						} else {
-							col.Name = key
-						}
-					}
-
-					if ctx.hasCacheTag {
-						hasCacheTag = true
-					}
-					if ctx.hasNoCacheTag {
-						hasNoCacheTag = true
-					}
-				}
-
-				if col.SQLType.Name == "" {
-					col.SQLType = schemas.Type2SQLType(fieldType)
-				}
-				parser.dialect.SQLType(col)
-				if col.Length == 0 {
-					col.Length = col.SQLType.DefaultLength
-				}
-				if col.Length2 == 0 {
-					col.Length2 = col.SQLType.DefaultLength2
-				}
-				if col.Name == "" {
-					col.Name = parser.columnMapper.Obj2Table(t.Field(i).Name)
-				}
-
-				if ctx.isUnique {
-					ctx.indexNames[col.Name] = schemas.UniqueType
-				} else if ctx.isIndex {
-					ctx.indexNames[col.Name] = schemas.IndexType
-				}
-
-				for indexName, indexType := range ctx.indexNames {
-					addIndex(indexName, table, col, indexType)
-				}
-			}
-		} else if fieldValue.CanSet() {
-			var sqlType schemas.SQLType
-			if fieldValue.CanAddr() {
-				if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok {
-					sqlType = schemas.SQLType{Name: schemas.Text}
-				}
-			}
-			if _, ok := fieldValue.Interface().(convert.Conversion); ok {
-				sqlType = schemas.SQLType{Name: schemas.Text}
-			} else {
-				sqlType = schemas.Type2SQLType(fieldType)
-			}
-			col = schemas.NewColumn(parser.columnMapper.Obj2Table(t.Field(i).Name),
-				t.Field(i).Name, sqlType, sqlType.DefaultLength,
-				sqlType.DefaultLength2, true)
-
-			if fieldType.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) {
-				idFieldColName = col.Name
-			}
-		} else {
+		var field = t.Field(i)
+		if isNotTitle(field.Name) {
 			continue
 		}
-		if col.IsAutoIncrement {
-			col.Nullable = false
+		col, err := parser.parseField(table, i, field, v.Field(i))
+		if err == ErrIgnoreField {
+			continue
+		} else if err != nil {
+			return nil, err
 		}

 		table.AddColumn(col)

 	} // end for

-	if idFieldColName != "" && len(table.PrimaryKeys) == 0 {
-		col := table.GetColumn(idFieldColName)
-		col.IsPrimaryKey = true
-		col.IsAutoIncrement = true
-		col.Nullable = false
-		table.PrimaryKeys = append(table.PrimaryKeys, col.Name)
-		table.AutoIncrement = col.Name
-	}
-
-	if hasCacheTag {
-		if parser.cacherMgr.GetDefaultCacher() != nil { // !nash! use engine's cacher if provided
-			//engine.logger.Info("enable cache on table:", table.Name)
-			parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher())
-		} else {
-			//engine.logger.Info("enable LRU cache on table:", table.Name)
-			parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000))
-		}
-	}
-	if hasNoCacheTag {
-		//engine.logger.Info("disable cache on table:", table.Name)
-		parser.cacherMgr.SetCacher(table.Name, nil)
+	deletedColumn := table.DeletedColumn()
+	// check columns
+	if deletedColumn != nil {
+		deletedColumn.Nullable = true
 	}

 	return table, nil
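With the rewrite above, Parse no longer carries per-tag logic: unexported fields are skipped via isNotTitle, and everything else is delegated to parseField, which returns ErrIgnoreField for fields tagged "-" (and, as the tag.go hunk further down shows, for extends fields as well). A hypothetical struct illustrating the paths a field can take through that dispatch:

// Illustration only; the struct and its fields are made up.
type Article struct {
	// No tag, kind int64, named "Id": parseFieldWithNoTag marks it as the
	// auto-increment primary key.
	Id int64

	// Has a tag: parseFieldWithTags runs the handlers; the quoted token
	// only sets the column name.
	Title string `xorm:"'title' VARCHAR(128)"`

	// Unexported: isNotTitle reports true and Parse skips it entirely.
	cache map[string]string

	// Tagged "-": parseField returns ErrIgnoreField, so no column is built.
	Raw []byte `xorm:"-"`
}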
@@ -14,30 +14,74 @@ import (
 	"xorm.io/xorm/schemas"
 )

-func splitTag(tag string) (tags []string) {
-	tag = strings.TrimSpace(tag)
-	var hasQuote = false
-	var lastIdx = 0
-	for i, t := range tag {
-		if t == '\'' {
-			hasQuote = !hasQuote
-		} else if t == ' ' {
-			if lastIdx < i && !hasQuote {
-				tags = append(tags, strings.TrimSpace(tag[lastIdx:i]))
-				lastIdx = i + 1
+type tag struct {
+	name   string
+	params []string
+}
+
+func splitTag(tagStr string) ([]tag, error) {
+	tagStr = strings.TrimSpace(tagStr)
+	var (
+		inQuote    bool
+		inBigQuote bool
+		lastIdx    int
+		curTag     tag
+		paramStart int
+		tags       []tag
+	)
+	for i, t := range tagStr {
+		switch t {
+		case '\'':
+			inQuote = !inQuote
+		case ' ':
+			if !inQuote && !inBigQuote {
+				if lastIdx < i {
+					if curTag.name == "" {
+						curTag.name = tagStr[lastIdx:i]
+					}
+					tags = append(tags, curTag)
+					lastIdx = i + 1
+					curTag = tag{}
+				} else if lastIdx == i {
+					lastIdx = i + 1
+				}
+			} else if inBigQuote && !inQuote {
+				paramStart = i + 1
+			}
+		case ',':
+			if !inQuote && !inBigQuote {
+				return nil, fmt.Errorf("comma[%d] of %s should be in quote or big quote", i, tagStr)
+			}
+			if !inQuote && inBigQuote {
+				curTag.params = append(curTag.params, strings.TrimSpace(tagStr[paramStart:i]))
+				paramStart = i + 1
+			}
+		case '(':
+			inBigQuote = true
+			if !inQuote {
+				curTag.name = tagStr[lastIdx:i]
+				paramStart = i + 1
+			}
+		case ')':
+			inBigQuote = false
+			if !inQuote {
+				curTag.params = append(curTag.params, tagStr[paramStart:i])
 			}
 		}
 	}
-	if lastIdx < len(tag) {
-		tags = append(tags, strings.TrimSpace(tag[lastIdx:]))
+	if lastIdx < len(tagStr) {
+		if curTag.name == "" {
+			curTag.name = tagStr[lastIdx:]
+		}
+		tags = append(tags, curTag)
 	}
-	return
+	return tags, nil
 }
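splitTag now returns structured tag values (a name plus optional params) and reports malformed input as an error rather than silently splitting on spaces; a comma is only accepted inside quotes or parentheses. A sketch of a table-driven check for the behaviour implied by the code above — splitTag and tag are unexported, so this would only compile inside xorm's tags package with "reflect" and "testing" imported, and the expected values are inferred from the state machine rather than copied from xorm's own tests:

func TestSplitTagSketch(t *testing.T) {
	cases := []struct {
		in   string
		want []tag
	}{
		// A parenthesised parameter list fills params; quotes are kept on
		// the name and only stripped later when the column name is assigned.
		{"varchar(64)", []tag{{name: "varchar", params: []string{"64"}}}},
		{"notnull default(1) 'user_name'", []tag{
			{name: "notnull"},
			{name: "default", params: []string{"1"}},
			{name: "'user_name'"},
		}},
	}
	for _, c := range cases {
		got, err := splitTag(c.in)
		if err != nil {
			t.Fatalf("splitTag(%q): %v", c.in, err)
		}
		if !reflect.DeepEqual(got, c.want) {
			t.Fatalf("splitTag(%q) = %#v, want %#v", c.in, got, c.want)
		}
	}
}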

 // Context represents a context for xorm tag parse.
 type Context struct {
-	tagName         string
-	params          []string
+	tag
+	tagUname        string
 	preTag, nextTag string
 	table           *schemas.Table
 	col             *schemas.Column
@@ -76,6 +120,7 @@ var (
 		"CACHE":   CacheTagHandler,
 		"NOCACHE": NoCacheTagHandler,
 		"COMMENT": CommentTagHandler,
+		"EXTENDS": ExtendsTagHandler,
 	}
 )

@@ -124,6 +169,7 @@ func NotNullTagHandler(ctx *Context) error {
 // AutoIncrTagHandler describes autoincr tag handler
 func AutoIncrTagHandler(ctx *Context) error {
 	ctx.col.IsAutoIncrement = true
+	ctx.col.Nullable = false
 	/*
 		if len(ctx.params) > 0 {
 			autoStartInt, err := strconv.Atoi(ctx.params[0])
@@ -192,6 +238,7 @@ func UpdatedTagHandler(ctx *Context) error {
 // DeletedTagHandler describes deleted tag handler
 func DeletedTagHandler(ctx *Context) error {
 	ctx.col.IsDeleted = true
+	ctx.col.Nullable = true
 	return nil
 }

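The two one-line additions above make the handlers fix up nullability themselves: autoincr now forces the column to NOT NULL, while deleted forces it to be nullable, presumably so the soft-delete marker can stay NULL for rows that have not been deleted. A hypothetical model using both tags (time would need to be imported):

// Hypothetical model; table and column names are made up.
type Comment struct {
	Id        int64     `xorm:"pk autoincr"` // forced NOT NULL by AutoIncrTagHandler
	Body      string    `xorm:"TEXT"`
	DeletedAt time.Time `xorm:"deleted"` // forced nullable by DeletedTagHandler
}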
@@ -225,41 +272,44 @@ func CommentTagHandler(ctx *Context) error {

 // SQLTypeTagHandler describes SQL Type tag handler
 func SQLTypeTagHandler(ctx *Context) error {
-	ctx.col.SQLType = schemas.SQLType{Name: ctx.tagName}
-	if strings.EqualFold(ctx.tagName, "JSON") {
+	ctx.col.SQLType = schemas.SQLType{Name: ctx.tagUname}
+	if ctx.tagUname == "JSON" {
 		ctx.col.IsJSON = true
 	}
-	if len(ctx.params) > 0 {
-		if ctx.tagName == schemas.Enum {
-			ctx.col.EnumOptions = make(map[string]int)
-			for k, v := range ctx.params {
-				v = strings.TrimSpace(v)
-				v = strings.Trim(v, "'")
-				ctx.col.EnumOptions[v] = k
+	if len(ctx.params) == 0 {
+		return nil
+	}
+
+	switch ctx.tagUname {
+	case schemas.Enum:
+		ctx.col.EnumOptions = make(map[string]int)
+		for k, v := range ctx.params {
+			v = strings.TrimSpace(v)
+			v = strings.Trim(v, "'")
+			ctx.col.EnumOptions[v] = k
+		}
+	case schemas.Set:
+		ctx.col.SetOptions = make(map[string]int)
+		for k, v := range ctx.params {
+			v = strings.TrimSpace(v)
+			v = strings.Trim(v, "'")
+			ctx.col.SetOptions[v] = k
+		}
+	default:
+		var err error
+		if len(ctx.params) == 2 {
+			ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+			if err != nil {
+				return err
 			}
-		} else if ctx.tagName == schemas.Set {
-			ctx.col.SetOptions = make(map[string]int)
-			for k, v := range ctx.params {
-				v = strings.TrimSpace(v)
-				v = strings.Trim(v, "'")
-				ctx.col.SetOptions[v] = k
+			ctx.col.Length2, err = strconv.Atoi(ctx.params[1])
+			if err != nil {
+				return err
 			}
-		} else {
-			var err error
-			if len(ctx.params) == 2 {
-				ctx.col.Length, err = strconv.Atoi(ctx.params[0])
-				if err != nil {
-					return err
-				}
-				ctx.col.Length2, err = strconv.Atoi(ctx.params[1])
-				if err != nil {
-					return err
-				}
-			} else if len(ctx.params) == 1 {
-				ctx.col.Length, err = strconv.Atoi(ctx.params[0])
-				if err != nil {
-					return err
-				}
+		} else if len(ctx.params) == 1 {
+			ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+			if err != nil {
+				return err
 			}
 		}
 	}
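For context on the branches above: SQLTypeTagHandler receives the upper-cased tag name in ctx.tagUname and the parenthesised parameters from splitTag in ctx.params, so ENUM/SET option lists and one- or two-number lengths are all handled in one place. A hypothetical struct showing tags that would exercise each branch:

// Hypothetical model; names are made up.
type Product struct {
	Name   string  `xorm:"VARCHAR(64)"`          // one param: Length = 64
	Price  float64 `xorm:"DECIMAL(10,2)"`        // two params: Length = 10, Length2 = 2
	Status string  `xorm:"ENUM('draft','live')"` // quoted params become EnumOptions keys
}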
@@ -289,11 +339,12 @@ func ExtendsTagHandler(ctx *Context) error {
 		}
 		for _, col := range parentTable.Columns() {
 			col.FieldName = fmt.Sprintf("%v.%v", ctx.col.FieldName, col.FieldName)
+			col.FieldIndex = append(ctx.col.FieldIndex, col.FieldIndex...)

 			var tagPrefix = ctx.col.FieldName
 			if len(ctx.params) > 0 {
 				col.Nullable = isPtr
-				tagPrefix = ctx.params[0]
+				tagPrefix = strings.Trim(ctx.params[0], "'")
 				if col.IsPrimaryKey {
 					col.Name = ctx.col.FieldName
 					col.IsPrimaryKey = false
@@ -315,7 +366,7 @@ func ExtendsTagHandler(ctx *Context) error {
 	default:
 		//TODO: warning
 	}
-	return nil
+	return ErrIgnoreField
 }

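The extends changes above do two things: each embedded column's FieldIndex now records the full path into the outer struct, and the handler returns ErrIgnoreField so the embedded field itself no longer produces a column of its own. A hypothetical pair of structs using the tag (time would need to be imported):

// Hypothetical models; only the extends wiring matters here.
type Times struct {
	CreatedAt time.Time `xorm:"created"`
	UpdatedAt time.Time `xorm:"updated"`
}

type Post struct {
	Id    int64  `xorm:"pk autoincr"`
	Title string
	Times `xorm:"extends"` // Times' columns are flattened into the post table
}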
 // CacheTagHandler describes cache tag handler