Compare commits

72 Commits

Author SHA1 Message Date
liuyuanchuang
7df6587fd6 chore: update docker compose 2026-03-06 11:22:41 +08:00
liuyuanchuang
94988790f8 feat: update callback url 2026-03-06 11:10:44 +08:00
liuyuanchuang
45dcef5702 feat: add proxy 2026-03-06 11:03:41 +08:00
liuyuanchuang
ed7232e5c0 feat google oauth 2026-03-06 10:28:56 +08:00
liuyuanchuang
8852ee5a3a Merge branch 'test' 2026-02-13 18:04:26 +08:00
liuyuanchuang
a7b73b0928 Merge branch 'master' of https://code.texpixel.com/YogeLiu/doc_ai_backed 2026-02-13 18:04:21 +08:00
liuyuanchuang
e35f3ed684 fix: update bucket 2026-02-12 19:45:29 +08:00
liuyuanchuang
aed09d4341 feat: update bucket 2026-02-12 19:41:53 +08:00
liuyuanchuang
a0cf063ff9 Merge branch 'master' into test 2026-02-12 19:40:51 +08:00
liuyuanchuang
323b712c18 fix: rm skd file 2026-02-12 19:40:23 +08:00
6786d174a6 Merge pull request 'feat: add mml' (#4) from test into master
Reviewed-on: #4
2026-02-05 13:48:14 +08:00
liuyuanchuang
de6b5d3960 Merge branch 'master' into test 2026-02-05 10:44:39 +08:00
liuyuanchuang
81c2767423 feat: add mml from backend 2026-02-05 10:44:11 +08:00
liuyuanchuang
a59fbd0edd refact: update script 2026-01-27 23:46:52 +08:00
liuyuanchuang
d1a56a2ab3 fix: panic 2026-01-27 23:23:42 +08:00
liuyuanchuang
41df42dea4 feat: decode uid 2026-01-27 22:28:13 +08:00
liuyuanchuang
be3e82fc2e feat: decode user_id 2026-01-27 22:26:25 +08:00
liuyuanchuang
9e01ee79f1 Merge branch 'master' into test 2026-01-27 22:22:48 +08:00
liuyuanchuang
52c9e48a0f fix: rm router db 2026-01-27 22:22:06 +08:00
liuyuanchuang
9b7657cd73 Merge branch 'master' into test 2026-01-27 22:20:27 +08:00
liuyuanchuang
a04eedc423 feat: add track point 2026-01-27 22:20:07 +08:00
liuyuanchuang
a5f1ad153e refactor: update package path 2026-01-27 21:56:21 +08:00
liuyuanchuang
db3beeddb9 Merge branch 'master' of https://code.texpixel.com/YogeLiu/doc_ai_backed into test 2026-01-27 17:40:47 +08:00
eabfd83fdf feat: add scrip; 2026-01-27 17:40:15 +08:00
97c3617731 feat: replace export url 2026-01-25 09:10:54 +08:00
ece026bea2 feat: add new path for recognize 2026-01-25 09:10:54 +08:00
b9124451d2 feat: update default init env 2026-01-25 09:08:51 +08:00
2e158d3fee feat: add new path for recognize 2025-12-31 17:53:12 +08:00
be1047618e Merge branch 'master' into test 2025-12-27 22:22:15 +08:00
3293f1f8a5 fix: downgrade error 2025-12-27 22:21:34 +08:00
ff6795b469 feat: convert markdown to mml 2025-12-27 22:06:48 +08:00
cb461f0134 feat: update req 2025-12-26 21:31:47 +08:00
7c4dfaba54 feat: modify field 2025-12-26 17:27:35 +08:00
5ee1cea0d7 feat: add gls 2025-12-26 17:11:59 +08:00
a538bd6680 fix: modify ip 2025-12-26 16:41:36 +08:00
cd221719cf fix: http req 2025-12-26 16:38:04 +08:00
d0c0d2cbc3 fix: query by task-no 2025-12-26 16:28:49 +08:00
930d782f18 feat: add api for export 2025-12-26 16:24:34 +08:00
bdd21c4b0f Merge branch 'master' into test 2025-12-26 15:48:25 +08:00
0aaafdbaa3 feat: add file export 2025-12-26 15:48:14 +08:00
68a1755a83 Merge branch 'master' into test 2025-12-25 14:02:33 +08:00
bb7403f700 feat: add baidu api 2025-12-25 14:02:29 +08:00
3a86f811d0 feat: add log for time 2025-12-23 22:32:29 +08:00
28295f825b feat: update http retry 2025-12-23 21:12:44 +08:00
e0904f5bfb feat: add mml 2025-12-20 22:57:53 +08:00
073808eb30 feat: add mml 2025-12-20 22:57:14 +08:00
7be0d705fe Merge pull request 'feat: add mathpixel' (#3) from test into master
Reviewed-on: #3
2025-12-20 22:53:23 +08:00
770c334083 fix: update app key 2025-12-20 22:48:02 +08:00
08d5e37d0e fix: udpate app_id 2025-12-20 22:15:56 +08:00
203c2b64c0 feat: add mathpixel 2025-12-20 21:42:58 +08:00
aa7fb1c7ca feat: rm uname 2025-12-19 17:06:38 +08:00
ae2b58149d Merge pull request 'test' (#2) from test into master
Reviewed-on: #2
2025-12-19 16:40:46 +08:00
9e088879c2 feat: update dockerfile 2025-12-19 16:39:35 +08:00
be00a91637 feat: check login for list 2025-12-19 13:59:47 +08:00
4bbbb99634 build: update dockerfile 2025-12-19 10:50:02 +08:00
4bb59ecf7e feat: update vlm url 2025-12-19 09:55:26 +08:00
5a1983f08b feat: update oss download url 2025-12-18 15:14:42 +08:00
8a6da5b627 feat: add list api 2025-12-18 12:39:50 +08:00
d06f2d9df1 feat: update docker 2025-12-17 21:40:18 +08:00
b1a3b7cd17 Merge pull request 'feat: add user register' (#1) from feature/user_login into master
Reviewed-on: #1
2025-12-17 20:45:41 +08:00
f0449bab25 feat: add user register 2025-12-17 20:43:08 +08:00
liuyuanchuang
f86898ae5f build: update dockerfile 2025-12-16 11:32:36 +08:00
liuyuanchuang
a9db8576eb build: update dockerfile 2025-12-16 11:24:11 +08:00
9ceb5fe92a feat: update url 2025-12-15 23:32:07 +08:00
liuyuanchuang
50922641a9 feat: update ocr model 2025-12-11 19:51:51 +08:00
liuyuanchuang
904ea3d146 feat: update token 2025-12-11 19:43:30 +08:00
liuyuanchuang
696919611c feat: use siliconflow model 2025-12-11 19:39:35 +08:00
liuyuanchuang
ea0f5d8765 refact: update dockerfile 2025-12-11 11:22:56 +08:00
0bc77f61e2 feat: update dockerfile 2025-12-10 23:17:24 +08:00
083142491f refact: update oss config 2025-12-10 22:23:05 +08:00
liuyuanchuang
4bd8cef372 refact: modify db config 2025-12-10 20:01:56 +08:00
liuyuanchuang
89b55edf9f build: rm vendor 2025-12-10 19:33:20 +08:00
2402 changed files with 2717 additions and 1049517 deletions

6
.gitignore vendored
View File

@@ -4,4 +4,8 @@
*.cursorrules *.cursorrules
*png *png
/upload /upload
document_ai texpixel
/vendor
dev_deploy.sh
speed_take.sh

View File

@@ -1,16 +1,21 @@
# Build stage # Build stage
FROM registry.cn-beijing.aliyuncs.com/bitwsd/golang AS builder FROM crpi-8s2ierii2xan4klg.cn-beijing.personal.cr.aliyuncs.com/texpixel/golang:1.20-alpine AS builder
WORKDIR /app WORKDIR /app
# Copy source code # Copy source code
COPY . . COPY . .
ENV GOPROXY=https://goproxy.cn,direct
ENV GOSUMDB=off
# Build binary # Build binary
RUN CGO_ENABLED=0 GOOS=linux go build -mod=vendor -o main ./main.go RUN go mod download && \
CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o doc_ai ./main.go
# Runtime stage # Runtime stage
FROM registry.cn-beijing.aliyuncs.com/bitwsd/alpine FROM crpi-8s2ierii2xan4klg.cn-beijing.personal.cr.aliyuncs.com/texpixel/alpine:latest
# Set timezone # Set timezone
RUN apk add --no-cache tzdata && \ RUN apk add --no-cache tzdata && \
@@ -21,7 +26,7 @@ RUN apk add --no-cache tzdata && \
WORKDIR /app WORKDIR /app
# Copy binary from builder # Copy binary from builder
COPY --from=builder /app/main . COPY --from=builder /app/doc_ai .
# Copy config files # Copy config files
COPY config/config_*.yaml ./config/ COPY config/config_*.yaml ./config/
@@ -34,7 +39,7 @@ RUN mkdir -p /data/formula && \
EXPOSE 8024 EXPOSE 8024
# Set entrypoint # Set entrypoint
ENTRYPOINT ["./main"] ENTRYPOINT ["./doc_ai"]
# Default command (can be overridden) # Default command (can be overridden)
CMD ["-env", "prod"] CMD ["-env", "prod"]

View File

@@ -1,19 +1,65 @@
package api package api
import ( import (
"gitea.com/bitwsd/document_ai/api/v1/formula" "gitea.com/texpixel/document_ai/api/v1/analytics"
"gitea.com/bitwsd/document_ai/api/v1/oss" "gitea.com/texpixel/document_ai/api/v1/formula"
"gitea.com/bitwsd/document_ai/api/v1/task" "gitea.com/texpixel/document_ai/api/v1/oss"
"gitea.com/bitwsd/document_ai/api/v1/user" "gitea.com/texpixel/document_ai/api/v1/task"
"gitea.com/texpixel/document_ai/api/v1/user"
"gitea.com/texpixel/document_ai/pkg/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
func SetupRouter(engine *gin.RouterGroup) { func SetupRouter(engine *gin.RouterGroup) {
v1 := engine.Group("/v1") v1 := engine.Group("/v1")
{ {
formula.SetupRouter(v1) formulaRouter := v1.Group("/formula", common.GetAuthMiddleware())
oss.SetupRouter(v1) {
task.SetupRouter(v1) endpoint := formula.NewFormulaEndpoint()
user.SetupRouter(v1) formulaRouter.POST("/recognition", endpoint.CreateTask)
formulaRouter.POST("/ai_enhance", endpoint.AIEnhanceRecognition)
formulaRouter.GET("/recognition/:task_no", endpoint.GetTaskStatus)
formulaRouter.POST("/test_process_mathpix_task", endpoint.TestProcessMathpixTask)
}
taskRouter := v1.Group("/task", common.GetAuthMiddleware())
{
endpoint := task.NewTaskEndpoint()
taskRouter.POST("/evaluate", endpoint.EvaluateTask)
taskRouter.GET("/list", common.MustAuthMiddleware(), endpoint.GetTaskList)
taskRouter.POST("/export", endpoint.ExportTask)
}
ossRouter := v1.Group("/oss", common.GetAuthMiddleware())
{
endpoint := oss.NewOSSEndpoint()
ossRouter.POST("/signature", endpoint.GetPostObjectSignature)
ossRouter.POST("/signature_url", endpoint.GetSignatureURL)
ossRouter.POST("/file/upload", endpoint.UploadFile)
}
userEndpoint := user.NewUserEndpoint()
userRouter := v1.Group("/user")
{
userRouter.POST("/sms", userEndpoint.SendVerificationCode)
userRouter.POST("/register", userEndpoint.RegisterByEmail)
userRouter.POST("/login", userEndpoint.LoginByEmail)
userRouter.GET("/oauth/google/url", userEndpoint.GetGoogleOAuthUrl)
userRouter.POST("/oauth/google/callback", userEndpoint.GoogleOAuthCallback)
}
userAuthRouter := v1.Group("/user", common.GetAuthMiddleware())
{
userAuthRouter.GET("/info", common.MustAuthMiddleware(), userEndpoint.GetUserInfo)
}
// 数据埋点路由
analyticsRouter := v1.Group("/analytics", common.GetAuthMiddleware())
{
analyticsHandler := analytics.NewAnalyticsHandler()
analyticsRouter.POST("/track", analyticsHandler.TrackEvent)
}
} }
} }

View File

@@ -0,0 +1,50 @@
package analytics
import (
"net/http"
"gitea.com/texpixel/document_ai/internal/model/analytics"
"gitea.com/texpixel/document_ai/internal/service"
"gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/texpixel/document_ai/pkg/log"
"github.com/gin-gonic/gin"
)
// AnalyticsHandler serves the analytics (event-tracking) HTTP endpoints,
// delegating all persistence/business logic to service.AnalyticsService.
type AnalyticsHandler struct {
analyticsService *service.AnalyticsService
}
// NewAnalyticsHandler returns a handler wired to a freshly constructed
// AnalyticsService.
func NewAnalyticsHandler() *AnalyticsHandler {
return &AnalyticsHandler{
analyticsService: service.NewAnalyticsService(),
}
}
// TrackEvent records a single analytics event.
// @Summary Record a single tracking event
// @Description Records a user-behavior tracking event
// @Tags Analytics
// @Accept json
// @Produce json
// @Param request body analytics.TrackEventRequest true "event payload"
// @Success 200 {object} common.Response
// @Router /api/v1/analytics/track [post]
// Note: like the rest of this API, failures are reported with HTTP 200 and
// a business error code in the response body.
func (h *AnalyticsHandler) TrackEvent(c *gin.Context) {
var req analytics.TrackEventRequest
if err := c.ShouldBindJSON(&req); err != nil {
log.Error(c.Request.Context(), "bind request failed", "error", err)
c.JSON(http.StatusOK, common.ErrorResponse(c, common.CodeParamError, "invalid request"))
return
}
// Overwrite any client-supplied user ID with the one from the auth
// context so events cannot be attributed to another user.
userID := common.GetUserIDFromContext(c)
req.UserID = userID
if err := h.analyticsService.TrackEvent(c.Request.Context(), &req); err != nil {
log.Error(c.Request.Context(), "track event failed", "error", err)
c.JSON(http.StatusOK, common.ErrorResponse(c, common.CodeSystemError, "failed to track event"))
return
}
c.JSON(http.StatusOK, common.SuccessResponse(c, "success"))
}

View File

@@ -3,12 +3,14 @@ package formula
import ( import (
"net/http" "net/http"
"path/filepath" "path/filepath"
"strings"
"gitea.com/bitwsd/document_ai/internal/model/formula" "gitea.com/texpixel/document_ai/internal/model/formula"
"gitea.com/bitwsd/document_ai/internal/service" "gitea.com/texpixel/document_ai/internal/service"
"gitea.com/bitwsd/document_ai/internal/storage/dao" "gitea.com/texpixel/document_ai/internal/storage/dao"
"gitea.com/bitwsd/document_ai/pkg/common" "gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/bitwsd/document_ai/pkg/utils" "gitea.com/texpixel/document_ai/pkg/constant"
"gitea.com/texpixel/document_ai/pkg/utils"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
@@ -36,17 +38,20 @@ func NewFormulaEndpoint() *FormulaEndpoint {
// @Router /v1/formula/recognition [post] // @Router /v1/formula/recognition [post]
func (endpoint *FormulaEndpoint) CreateTask(ctx *gin.Context) { func (endpoint *FormulaEndpoint) CreateTask(ctx *gin.Context) {
var req formula.CreateFormulaRecognitionRequest var req formula.CreateFormulaRecognitionRequest
uid := ctx.GetInt64(constant.ContextUserID)
if err := ctx.BindJSON(&req); err != nil { if err := ctx.BindJSON(&req); err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "Invalid parameters")) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "Invalid parameters"))
return return
} }
req.UserID = uid
if !utils.InArray(req.TaskType, []string{string(dao.TaskTypeFormula), string(dao.TaskTypeFormula)}) { if !utils.InArray(req.TaskType, []string{string(dao.TaskTypeFormula), string(dao.TaskTypeFormula)}) {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "Invalid task type")) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "Invalid task type"))
return return
} }
fileExt := filepath.Ext(req.FileName) fileExt := strings.ToLower(filepath.Ext(req.FileName))
if !utils.InArray(fileExt, []string{".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".webp"}) { if !utils.InArray(fileExt, []string{".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff", ".webp"}) {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "Invalid file type")) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "Invalid file type"))
return return
@@ -116,3 +121,20 @@ func (endpoint *FormulaEndpoint) AIEnhanceRecognition(c *gin.Context) {
c.JSON(http.StatusOK, common.SuccessResponse(c, nil)) c.JSON(http.StatusOK, common.SuccessResponse(c, nil))
} }
// TestProcessMathpixTask triggers Mathpix processing for the task whose
// numeric "task_id" is posted in the JSON body.
// NOTE(review): the name suggests a debug/testing hook, and it is routed in
// SetupRouter alongside production endpoints — confirm it should be exposed
// outside development environments.
func (endpoint *FormulaEndpoint) TestProcessMathpixTask(c *gin.Context) {
postData := make(map[string]int)
if err := c.BindJSON(&postData); err != nil {
c.JSON(http.StatusOK, common.ErrorResponse(c, common.CodeParamError, "Invalid parameters"))
return
}
// Decoding into map[string]int means a missing "task_id" key silently
// yields 0 instead of a parameter error.
taskID := postData["task_id"]
err := endpoint.recognitionService.TestProcessMathpixTask(c, int64(taskID))
if err != nil {
c.JSON(http.StatusOK, common.ErrorResponse(c, common.CodeSystemError, err.Error()))
return
}
c.JSON(http.StatusOK, common.SuccessResponse(c, nil))
}

View File

@@ -1,12 +0,0 @@
package formula
import (
"github.com/gin-gonic/gin"
)
func SetupRouter(engine *gin.RouterGroup) {
endpoint := NewFormulaEndpoint()
engine.POST("/formula/recognition", endpoint.CreateTask)
engine.POST("/formula/ai_enhance", endpoint.AIEnhanceRecognition)
engine.GET("/formula/recognition/:task_no", endpoint.GetTaskStatus)
}

View File

@@ -5,19 +5,26 @@ import (
"net/http" "net/http"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"time" "time"
"gitea.com/bitwsd/document_ai/config" "gitea.com/texpixel/document_ai/config"
"gitea.com/bitwsd/document_ai/internal/storage/dao" "gitea.com/texpixel/document_ai/internal/storage/dao"
"gitea.com/bitwsd/document_ai/pkg/common" "gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/bitwsd/document_ai/pkg/constant" "gitea.com/texpixel/document_ai/pkg/oss"
"gitea.com/bitwsd/document_ai/pkg/oss" "gitea.com/texpixel/document_ai/pkg/utils"
"gitea.com/bitwsd/document_ai/pkg/utils"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"gorm.io/gorm" "gorm.io/gorm"
) )
func GetPostObjectSignature(ctx *gin.Context) { type OSSEndpoint struct {
}
func NewOSSEndpoint() *OSSEndpoint {
return &OSSEndpoint{}
}
func (h *OSSEndpoint) GetPostObjectSignature(ctx *gin.Context) {
policyToken, err := oss.GetPolicyToken() policyToken, err := oss.GetPolicyToken()
if err != nil { if err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, err.Error())) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, err.Error()))
@@ -36,8 +43,7 @@ func GetPostObjectSignature(ctx *gin.Context) {
// @Success 200 {object} common.Response{data=map[string]string{"sign_url":string, "repeat":bool, "path":string}} "Signed URL generated successfully" // @Success 200 {object} common.Response{data=map[string]string{"sign_url":string, "repeat":bool, "path":string}} "Signed URL generated successfully"
// @Failure 200 {object} common.Response "Error response" // @Failure 200 {object} common.Response "Error response"
// @Router /signature_url [get] // @Router /signature_url [get]
func GetSignatureURL(ctx *gin.Context) { func (h *OSSEndpoint) GetSignatureURL(ctx *gin.Context) {
userID := ctx.GetInt64(constant.ContextUserID)
type Req struct { type Req struct {
FileHash string `json:"file_hash" binding:"required"` FileHash string `json:"file_hash" binding:"required"`
FileName string `json:"file_name" binding:"required"` FileName string `json:"file_name" binding:"required"`
@@ -50,7 +56,7 @@ func GetSignatureURL(ctx *gin.Context) {
} }
taskDao := dao.NewRecognitionTaskDao() taskDao := dao.NewRecognitionTaskDao()
sess := dao.DB.WithContext(ctx) sess := dao.DB.WithContext(ctx)
task, err := taskDao.GetTaskByFileURL(sess, userID, req.FileHash) task, err := taskDao.GetTaskByFileURL(sess, req.FileHash)
if err != nil && err != gorm.ErrRecordNotFound { if err != nil && err != gorm.ErrRecordNotFound {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeDBError, "failed to get task")) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeDBError, "failed to get task"))
return return
@@ -59,7 +65,7 @@ func GetSignatureURL(ctx *gin.Context) {
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, gin.H{"sign_url": "", "repeat": true, "path": task.FileURL})) ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, gin.H{"sign_url": "", "repeat": true, "path": task.FileURL}))
return return
} }
extend := filepath.Ext(req.FileName) extend := strings.ToLower(filepath.Ext(req.FileName))
if extend == "" { if extend == "" {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "invalid file name")) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, "invalid file name"))
return return
@@ -77,7 +83,7 @@ func GetSignatureURL(ctx *gin.Context) {
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, gin.H{"sign_url": url, "repeat": false, "path": path})) ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, gin.H{"sign_url": url, "repeat": false, "path": path}))
} }
func UploadFile(ctx *gin.Context) { func (h *OSSEndpoint) UploadFile(ctx *gin.Context) {
if err := os.MkdirAll(config.GlobalConfig.UploadDir, 0755); err != nil { if err := os.MkdirAll(config.GlobalConfig.UploadDir, 0755); err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, "Failed to create upload directory")) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, "Failed to create upload directory"))
return return

View File

@@ -1,12 +0,0 @@
package oss
import "github.com/gin-gonic/gin"
func SetupRouter(parent *gin.RouterGroup) {
router := parent.Group("oss")
{
router.POST("/signature", GetPostObjectSignature)
router.POST("/signature_url", GetSignatureURL)
router.POST("/file/upload", UploadFile)
}
}

View File

@@ -3,10 +3,10 @@ package task
import ( import (
"net/http" "net/http"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/internal/model/task"
"gitea.com/bitwsd/document_ai/internal/model/task" "gitea.com/texpixel/document_ai/internal/service"
"gitea.com/bitwsd/document_ai/internal/service" "gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/bitwsd/document_ai/pkg/common" "gitea.com/texpixel/document_ai/pkg/log"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
@@ -43,6 +43,8 @@ func (h *TaskEndpoint) GetTaskList(c *gin.Context) {
return return
} }
req.UserID = common.GetUserIDFromContext(c)
if req.Page <= 0 { if req.Page <= 0 {
req.Page = 1 req.Page = 1
} }
@@ -59,3 +61,31 @@ func (h *TaskEndpoint) GetTaskList(c *gin.Context) {
c.JSON(http.StatusOK, common.SuccessResponse(c, resp)) c.JSON(http.StatusOK, common.SuccessResponse(c, resp))
} }
func (h *TaskEndpoint) ExportTask(c *gin.Context) {
var req task.ExportTaskRequest
if err := c.ShouldBindJSON(&req); err != nil {
log.Error(c, "func", "ExportTask", "msg", "Invalid parameters", "error", err)
c.JSON(http.StatusOK, common.ErrorResponse(c, common.CodeParamError, "Invalid parameters"))
return
}
fileData, contentType, err := h.taskService.ExportTask(c, &req)
if err != nil {
c.JSON(http.StatusOK, common.ErrorResponse(c, common.CodeSystemError, "导出任务失败"))
return
}
// set filename based on export type
var filename string
switch req.Type {
case "pdf":
filename = "texpixel_export.pdf"
case "docx":
filename = "texpixel_export.docx"
default:
filename = "texpixel_export"
}
c.Header("Content-Disposition", "attachment; filename="+filename)
c.Data(http.StatusOK, contentType, fileData)
}

View File

@@ -1,11 +0,0 @@
package task
import (
"github.com/gin-gonic/gin"
)
func SetupRouter(engine *gin.RouterGroup) {
endpoint := NewTaskEndpoint()
engine.POST("/task/evaluate", endpoint.EvaluateTask)
engine.GET("/task/list", endpoint.GetTaskList)
}

View File

@@ -1,15 +1,17 @@
package user package user
import ( import (
"fmt"
"net/http" "net/http"
"net/url"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/config"
"gitea.com/bitwsd/document_ai/config" model "gitea.com/texpixel/document_ai/internal/model/user"
model "gitea.com/bitwsd/document_ai/internal/model/user" "gitea.com/texpixel/document_ai/internal/service"
"gitea.com/bitwsd/document_ai/internal/service" "gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/bitwsd/document_ai/pkg/common" "gitea.com/texpixel/document_ai/pkg/constant"
"gitea.com/bitwsd/document_ai/pkg/constant" "gitea.com/texpixel/document_ai/pkg/jwt"
"gitea.com/bitwsd/document_ai/pkg/jwt" "gitea.com/texpixel/document_ai/pkg/log"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
@@ -55,12 +57,15 @@ func (h *UserEndpoint) LoginByPhoneCode(ctx *gin.Context) {
if config.GlobalConfig.Server.IsDebug() { if config.GlobalConfig.Server.IsDebug() {
uid := 1 uid := 1
token, err := jwt.CreateToken(jwt.User{UserId: int64(uid)}) tokenResult, err := jwt.CreateToken(jwt.User{UserId: int64(uid)})
if err != nil { if err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeUnauthorized, common.CodeUnauthorizedMsg)) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeUnauthorized, common.CodeUnauthorizedMsg))
return return
} }
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.PhoneLoginResponse{Token: token})) ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.PhoneLoginResponse{
Token: tokenResult.Token,
ExpiresAt: tokenResult.ExpiresAt,
}))
return return
} }
@@ -70,13 +75,16 @@ func (h *UserEndpoint) LoginByPhoneCode(ctx *gin.Context) {
return return
} }
token, err := jwt.CreateToken(jwt.User{UserId: uid}) tokenResult, err := jwt.CreateToken(jwt.User{UserId: uid})
if err != nil { if err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeUnauthorized, common.CodeUnauthorizedMsg)) ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeUnauthorized, common.CodeUnauthorizedMsg))
return return
} }
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.PhoneLoginResponse{Token: token})) ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.PhoneLoginResponse{
Token: tokenResult.Token,
ExpiresAt: tokenResult.ExpiresAt,
}))
} }
func (h *UserEndpoint) GetUserInfo(ctx *gin.Context) { func (h *UserEndpoint) GetUserInfo(ctx *gin.Context) {
@@ -103,3 +111,129 @@ func (h *UserEndpoint) GetUserInfo(ctx *gin.Context) {
Status: status, Status: status,
})) }))
} }
// RegisterByEmail creates a new account from the email/password JSON body
// and, on success, immediately issues a JWT so the client is logged in
// without a separate login round trip.
func (h *UserEndpoint) RegisterByEmail(ctx *gin.Context) {
req := model.EmailRegisterRequest{}
if err := ctx.ShouldBindJSON(&req); err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, common.CodeParamErrorMsg))
return
}
uid, err := h.userService.RegisterByEmail(ctx, req.Email, req.Password)
if err != nil {
// Business errors (e.g. duplicate email) carry their own code and
// message; anything else is logged and masked as a system error.
if bizErr, ok := err.(*common.BusinessError); ok {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, int(bizErr.Code), bizErr.Message))
return
}
log.Error(ctx, "func", "RegisterByEmail", "msg", "注册失败", "error", err)
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
tokenResult, err := jwt.CreateToken(jwt.User{UserId: uid})
if err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.EmailRegisterResponse{
Token: tokenResult.Token,
ExpiresAt: tokenResult.ExpiresAt,
}))
}
// LoginByEmail authenticates an email/password pair and returns a JWT with
// its expiry on success.
func (h *UserEndpoint) LoginByEmail(ctx *gin.Context) {
req := model.EmailLoginRequest{}
if err := ctx.ShouldBindJSON(&req); err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, common.CodeParamErrorMsg))
return
}
uid, err := h.userService.LoginByEmail(ctx, req.Email, req.Password)
if err != nil {
// Business errors (e.g. wrong credentials) pass through with their
// own code; unexpected failures are logged and masked.
if bizErr, ok := err.(*common.BusinessError); ok {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, int(bizErr.Code), bizErr.Message))
return
}
log.Error(ctx, "func", "LoginByEmail", "msg", "登录失败", "error", err)
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
tokenResult, err := jwt.CreateToken(jwt.User{UserId: uid})
if err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.EmailLoginResponse{
Token: tokenResult.Token,
ExpiresAt: tokenResult.ExpiresAt,
}))
}
// GetGoogleOAuthUrl builds the Google OAuth2 consent-screen URL for the
// redirect_uri and state passed in the query string and returns it; the
// frontend performs the actual browser redirect.
func (h *UserEndpoint) GetGoogleOAuthUrl(ctx *gin.Context) {
req := model.GoogleAuthUrlRequest{}
if err := ctx.ShouldBindQuery(&req); err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, common.CodeParamErrorMsg))
return
}
googleConfig := config.GlobalConfig.Google
if googleConfig.ClientID == "" {
log.Error(ctx, "func", "GetGoogleOAuthUrl", "msg", "Google OAuth not configured")
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
// "%%20" renders as %20 through Sprintf, i.e. the space-separated scopes
// "openid email profile". redirect_uri and state are client-supplied;
// NOTE(review): Google rejects redirect URIs not registered for the
// client ID, but confirm the frontend generates and validates state for
// CSRF protection.
authURL := fmt.Sprintf(
"https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&redirect_uri=%s&response_type=code&scope=openid%%20email%%20profile&state=%s",
url.QueryEscape(googleConfig.ClientID),
url.QueryEscape(req.RedirectURI),
url.QueryEscape(req.State),
)
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.GoogleAuthUrlResponse{
AuthURL: authURL,
}))
}
// GoogleOAuthCallback completes the OAuth2 flow: it exchanges the
// authorization code for the Google user's info, finds or creates the
// matching local account, and returns a freshly issued JWT.
func (h *UserEndpoint) GoogleOAuthCallback(ctx *gin.Context) {
req := model.GoogleOAuthCallbackRequest{}
if err := ctx.ShouldBindJSON(&req); err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeParamError, common.CodeParamErrorMsg))
return
}
// Both client ID and secret are required for the code exchange.
googleConfig := config.GlobalConfig.Google
if googleConfig.ClientID == "" || googleConfig.ClientSecret == "" {
log.Error(ctx, "func", "GoogleOAuthCallback", "msg", "Google OAuth not configured")
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
// The redirect URI must match the one used when the code was requested,
// so it is passed back in by the client.
userInfo, err := h.userService.ExchangeGoogleCodeAndGetUserInfo(ctx, googleConfig.ClientID, googleConfig.ClientSecret, req.Code, req.RedirectURI)
if err != nil {
log.Error(ctx, "func", "GoogleOAuthCallback", "msg", "exchange code failed", "error", err)
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
uid, err := h.userService.FindOrCreateGoogleUser(ctx, userInfo)
if err != nil {
log.Error(ctx, "func", "GoogleOAuthCallback", "msg", "find or create user failed", "error", err)
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeSystemError, common.CodeSystemErrorMsg))
return
}
tokenResult, err := jwt.CreateToken(jwt.User{UserId: uid})
if err != nil {
ctx.JSON(http.StatusOK, common.ErrorResponse(ctx, common.CodeUnauthorized, common.CodeUnauthorizedMsg))
return
}
ctx.JSON(http.StatusOK, common.SuccessResponse(ctx, model.GoogleOAuthCallbackResponse{
Token: tokenResult.Token,
ExpiresAt: tokenResult.ExpiresAt,
}))
}

View File

@@ -1,16 +0,0 @@
package user
import (
"gitea.com/bitwsd/document_ai/pkg/common"
"github.com/gin-gonic/gin"
)
func SetupRouter(router *gin.RouterGroup) {
userEndpoint := NewUserEndpoint()
userRouter := router.Group("/user")
{
userRouter.POST("/get/sms", userEndpoint.SendVerificationCode)
userRouter.POST("/login/phone", userEndpoint.LoginByPhoneCode)
userRouter.GET("/info", common.GetAuthMiddleware(), userEndpoint.GetUserInfo)
}
}

73
cmd/migrate/README.md Normal file
View File

@@ -0,0 +1,73 @@
# 数据迁移工具
用于将测试数据库的数据迁移到生产数据库,避免ID冲突,并使用事务确保数据一致性。
## 功能特性
- ✅ 自动避免ID冲突(使用数据库自增ID)
- ✅ 使用事务确保每个任务和结果数据的一致性
- ✅ 自动跳过已存在的任务(基于task_uuid)
- ✅ 保留原始时间戳
- ✅ 处理NULL值
- ✅ 详细的日志输出和统计信息
## 使用方法
### 基本用法
```bash
# 从dev环境迁移到prod环境
go run cmd/migrate/main.go -test-env=dev -prod-env=prod
# 从prod环境迁移到dev环境测试反向迁移
go run cmd/migrate/main.go -test-env=prod -prod-env=dev
```
### 参数说明
- `-test-env`: 测试环境配置文件名dev/prod默认值dev
- `-prod-env`: 生产环境配置文件名dev/prod默认值prod
### 编译后使用
```bash
# 编译
go build -o migrate cmd/migrate/main.go
# 运行
./migrate -test-env=dev -prod-env=prod
```
## 工作原理
1. **连接数据库**:同时连接测试数据库和生产数据库
2. **读取数据**从测试数据库读取所有任务和结果数据LEFT JOIN
3. **检查重复**:基于`task_uuid`检查生产数据库中是否已存在
4. **事务迁移**:为每个任务创建独立事务:
- 创建任务记录自动生成新ID
- 如果存在结果数据创建结果记录关联新任务ID
- 提交事务或回滚
5. **统计报告**:输出迁移统计信息
## 注意事项
1. **配置文件**:确保`config/config_dev.yaml``config/config_prod.yaml`存在且配置正确
2. **数据库权限**:确保数据库用户有读写权限
3. **网络连接**:确保能同时连接到两个数据库
4. **数据备份**:迁移前建议备份生产数据库
5. **ID冲突**:脚本会自动处理ID冲突(使用数据库自增ID),不会覆盖现有数据
## 输出示例
```
从测试数据库读取到 100 条任务记录
[1/100] 创建任务成功: task_uuid=xxx, 新ID=1001
[1/100] 创建结果成功: task_id=1001
[2/100] 跳过已存在的任务: task_uuid=yyy, id=1002
...
迁移完成统计:
成功: 95 条
跳过: 3 条
失败: 2 条
数据迁移完成!
```

255
cmd/migrate/main.go Normal file
View File

@@ -0,0 +1,255 @@
package main
import (
"context"
"flag"
"fmt"
"log"
"time"
"gitea.com/texpixel/document_ai/config"
"gitea.com/texpixel/document_ai/internal/storage/dao"
"github.com/spf13/viper"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// main migrates recognition data from a source ("test") database to a
// target ("prod") database. The two environments are selected via the
// -test-env and -prod-env flags, which name config_<env>.yaml files.
func main() {
// Parse flags selecting which config files to load for each side.
testEnv := flag.String("test-env", "dev", "测试环境配置 (dev/prod)")
prodEnv := flag.String("prod-env", "prod", "生产环境配置 (dev/prod)")
flag.Parse()
// Load the source-environment database config.
testConfigPath := fmt.Sprintf("./config/config_%s.yaml", *testEnv)
testConfig, err := loadDatabaseConfig(testConfigPath)
if err != nil {
log.Fatalf("加载测试环境配置失败: %v", err)
}
// Connect to the source database. The DSN hardcodes the Asia/Shanghai
// timezone so timestamps are interpreted identically on both sides.
testDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Asia%%2FShanghai",
testConfig.Username, testConfig.Password, testConfig.Host, testConfig.Port, testConfig.DBName)
testDB, err := gorm.Open(mysql.Open(testDSN), &gorm.Config{
// Info level logs every SQL statement — verbose, but acceptable for
// a one-off migration tool.
Logger: logger.Default.LogMode(logger.Info),
})
if err != nil {
log.Fatalf("连接测试数据库失败: %v", err)
}
// Load the target-environment database config.
prodConfigPath := fmt.Sprintf("./config/config_%s.yaml", *prodEnv)
prodConfig, err := loadDatabaseConfig(prodConfigPath)
if err != nil {
log.Fatalf("加载生产环境配置失败: %v", err)
}
// Connect to the target database.
prodDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Asia%%2FShanghai",
prodConfig.Username, prodConfig.Password, prodConfig.Host, prodConfig.Port, prodConfig.DBName)
prodDB, err := gorm.Open(mysql.Open(prodDSN), &gorm.Config{
Logger: logger.Default.LogMode(logger.Info),
})
if err != nil {
log.Fatalf("连接生产数据库失败: %v", err)
}
// Run the migration; any error aborts the process with a nonzero exit.
if err := migrateData(testDB, prodDB); err != nil {
log.Fatalf("数据迁移失败: %v", err)
}
log.Println("数据迁移完成!")
}
// migrateData copies every recognition task — together with its optional
// result row — from testDB into prodDB.
//
// Each task is migrated inside its own transaction so that one failing task
// does not affect the others. Tasks whose task_uuid already exists in the
// production database are skipped. New tasks receive fresh auto-generated
// primary keys; result rows are re-pointed at the new task ID, and the
// original created_at/updated_at timestamps are preserved on both tables.
// Returns an error only if the initial read from testDB fails; per-task
// failures are logged and counted instead.
func migrateData(testDB, prodDB *gorm.DB) error {
	_ = context.Background() // keeps the context import in use; reserved for future cancellation support

	// Flattened projection of one task LEFT JOINed with its (possibly
	// missing) result. Result columns are pointers because the join may
	// yield NULLs when a task has no result row.
	type TaskWithResult struct {
		// Task columns.
		TaskID        int64     `gorm:"column:id"`
		UserID        int64     `gorm:"column:user_id"`
		TaskUUID      string    `gorm:"column:task_uuid"`
		FileName      string    `gorm:"column:file_name"`
		FileHash      string    `gorm:"column:file_hash"`
		FileURL       string    `gorm:"column:file_url"`
		TaskType      string    `gorm:"column:task_type"`
		Status        int       `gorm:"column:status"`
		CompletedAt   time.Time `gorm:"column:completed_at"`
		Remark        string    `gorm:"column:remark"`
		IP            string    `gorm:"column:ip"`
		TaskCreatedAt time.Time `gorm:"column:created_at"`
		TaskUpdatedAt time.Time `gorm:"column:updated_at"`
		// Result columns (nullable).
		ResultID        *int64     `gorm:"column:result_id"`
		ResultTaskID    *int64     `gorm:"column:result_task_id"`
		ResultTaskType  *string    `gorm:"column:result_task_type"`
		Latex           *string    `gorm:"column:latex"`
		Markdown        *string    `gorm:"column:markdown"`
		MathML          *string    `gorm:"column:mathml"`
		ResultCreatedAt *time.Time `gorm:"column:result_created_at"`
		ResultUpdatedAt *time.Time `gorm:"column:result_updated_at"`
	}
	var tasksWithResults []TaskWithResult
	query := `
		SELECT 
			t.id,
			t.user_id,
			t.task_uuid,
			t.file_name,
			t.file_hash,
			t.file_url,
			t.task_type,
			t.status,
			t.completed_at,
			t.remark,
			t.ip,
			t.created_at,
			t.updated_at,
			r.id as result_id,
			r.task_id as result_task_id,
			r.task_type as result_task_type,
			r.latex,
			r.markdown,
			r.mathml,
			r.created_at as result_created_at,
			r.updated_at as result_updated_at
		FROM recognition_tasks t
		LEFT JOIN recognition_results r ON t.id = r.task_id
		ORDER BY t.id
	`
	if err := testDB.Raw(query).Scan(&tasksWithResults).Error; err != nil {
		// Wrap with %w so callers can inspect the underlying driver error.
		return fmt.Errorf("读取测试数据失败: %w", err)
	}
	log.Printf("从测试数据库读取到 %d 条任务记录", len(tasksWithResults))
	successCount := 0
	skipCount := 0
	errorCount := 0
	// One transaction per task: a failure rolls back only that task (and its
	// result), never the whole migration.
	for i, item := range tasksWithResults {
		tx := prodDB.Begin()
		// Idempotency check: skip tasks already present in production.
		var existingTask dao.RecognitionTask
		err := tx.Where("task_uuid = ?", item.TaskUUID).First(&existingTask).Error
		if err == nil {
			log.Printf("[%d/%d] 跳过已存在的任务: task_uuid=%s, id=%d", i+1, len(tasksWithResults), item.TaskUUID, existingTask.ID)
			tx.Rollback()
			skipCount++
			continue
		}
		// NOTE(review): direct sentinel comparison; errors.Is(err,
		// gorm.ErrRecordNotFound) would also match wrapped errors — confirm
		// whether this gorm version ever wraps it before changing.
		if err != gorm.ErrRecordNotFound {
			log.Printf("[%d/%d] 检查任务是否存在时出错: task_uuid=%s, error=%v", i+1, len(tasksWithResults), item.TaskUUID, err)
			tx.Rollback()
			errorCount++
			continue
		}
		// Insert without an explicit ID so the database assigns a new one.
		newTask := &dao.RecognitionTask{
			UserID:      item.UserID,
			TaskUUID:    item.TaskUUID,
			FileName:    item.FileName,
			FileHash:    item.FileHash,
			FileURL:     item.FileURL,
			TaskType:    dao.TaskType(item.TaskType),
			Status:      dao.TaskStatus(item.Status),
			CompletedAt: item.CompletedAt,
			Remark:      item.Remark,
			IP:          item.IP,
		}
		// Preserve the source timestamps instead of letting gorm set them.
		newTask.CreatedAt = item.TaskCreatedAt
		newTask.UpdatedAt = item.TaskUpdatedAt
		if err := tx.Create(newTask).Error; err != nil {
			log.Printf("[%d/%d] 创建任务失败: task_uuid=%s, error=%v", i+1, len(tasksWithResults), item.TaskUUID, err)
			tx.Rollback()
			errorCount++
			continue
		}
		log.Printf("[%d/%d] 创建任务成功: task_uuid=%s, 新ID=%d", i+1, len(tasksWithResults), item.TaskUUID, newTask.ID)
		// A non-nil ResultID means the LEFT JOIN matched a result row.
		if item.ResultID != nil {
			// Nullable text columns default to "" when NULL.
			latex := ""
			if item.Latex != nil {
				latex = *item.Latex
			}
			markdown := ""
			if item.Markdown != nil {
				markdown = *item.Markdown
			}
			mathml := ""
			if item.MathML != nil {
				mathml = *item.MathML
			}
			newResult := dao.RecognitionResult{
				TaskID:   newTask.ID, // re-point at the freshly generated task ID
				TaskType: dao.TaskType(item.TaskType),
				Latex:    latex,
				Markdown: markdown,
				MathML:   mathml,
			}
			// Preserve the source timestamps when present.
			if item.ResultCreatedAt != nil {
				newResult.CreatedAt = *item.ResultCreatedAt
			}
			if item.ResultUpdatedAt != nil {
				newResult.UpdatedAt = *item.ResultUpdatedAt
			}
			if err := tx.Create(&newResult).Error; err != nil {
				log.Printf("[%d/%d] 创建结果失败: task_id=%d, error=%v", i+1, len(tasksWithResults), newTask.ID, err)
				tx.Rollback() // roll back the whole transaction, task included
				errorCount++
				continue
			}
			log.Printf("[%d/%d] 创建结果成功: task_id=%d", i+1, len(tasksWithResults), newTask.ID)
		}
		if err := tx.Commit().Error; err != nil {
			log.Printf("[%d/%d] 提交事务失败: task_uuid=%s, error=%v", i+1, len(tasksWithResults), item.TaskUUID, err)
			errorCount++
			continue
		}
		successCount++
	}
	log.Printf("迁移完成统计:")
	log.Printf("  成功: %d 条", successCount)
	log.Printf("  跳过: %d 条", skipCount)
	log.Printf("  失败: %d 条", errorCount)
	return nil
}
// loadDatabaseConfig reads the YAML file at configPath and unmarshals its
// "database" section into a config.DatabaseConfig. It returns the zero
// config together with the first error encountered while reading or
// decoding the file.
func loadDatabaseConfig(configPath string) (config.DatabaseConfig, error) {
	var dbConfig config.DatabaseConfig
	parser := viper.New()
	parser.SetConfigFile(configPath)
	if err := parser.ReadInConfig(); err != nil {
		return config.DatabaseConfig{}, err
	}
	if err := parser.UnmarshalKey("database", &dbConfig); err != nil {
		return config.DatabaseConfig{}, err
	}
	return dbConfig, nil
}

View File

@@ -1,18 +1,37 @@
package config package config
import ( import (
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/pkg/log"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
type Config struct { type Config struct {
Log log.LogConfig `mapstructure:"log"` Log log.LogConfig `mapstructure:"log"`
Server ServerConfig `mapstructure:"server"` Server ServerConfig `mapstructure:"server"`
Database DatabaseConfig `mapstructure:"database"` Database DatabaseConfig `mapstructure:"database"`
Redis RedisConfig `mapstructure:"redis"` Redis RedisConfig `mapstructure:"redis"`
UploadDir string `mapstructure:"upload_dir"` UploadDir string `mapstructure:"upload_dir"`
Limit LimitConfig `mapstructure:"limit"` Limit LimitConfig `mapstructure:"limit"`
Aliyun AliyunConfig `mapstructure:"aliyun"` Aliyun AliyunConfig `mapstructure:"aliyun"`
Mathpix MathpixConfig `mapstructure:"mathpix"`
BaiduOCR BaiduOCRConfig `mapstructure:"baidu_ocr"`
Google GoogleOAuthConfig `mapstructure:"google"`
}
type BaiduOCRConfig struct {
Token string `mapstructure:"token"`
}
type GoogleOAuthConfig struct {
ClientID string `mapstructure:"client_id"`
ClientSecret string `mapstructure:"client_secret"`
RedirectURI string `mapstructure:"redirect_uri"`
Proxy string `mapstructure:"proxy"`
}
type MathpixConfig struct {
AppID string `mapstructure:"app_id"`
AppKey string `mapstructure:"app_key"`
} }
type LimitConfig struct { type LimitConfig struct {

View File

@@ -4,16 +4,16 @@ server:
database: database:
driver: mysql driver: mysql
host: 182.92.150.161 host: localhost
port: 3006 port: 3006
username: root username: root
password: yoge@coder%%%123321! password: texpixel#pwd123!
dbname: doc_ai dbname: doc_ai
max_idle: 10 max_idle: 10
max_open: 100 max_open: 100
redis: redis:
addr: 182.92.150.161:6379 addr: localhost:6079
password: yoge@123321! password: yoge@123321!
db: 0 db: 0
@@ -22,7 +22,7 @@ limit:
log: log:
appName: document_ai appName: document_ai
level: info # debug, info, warn, error level: info
format: console # json, console format: console # json, console
outputPath: ./logs/app.log # 日志文件路径 outputPath: ./logs/app.log # 日志文件路径
maxSize: 2 # 单个日志文件最大尺寸单位MB maxSize: 2 # 单个日志文件最大尺寸单位MB
@@ -30,7 +30,6 @@ log:
maxBackups: 1 # 保留的旧日志文件最大数量 maxBackups: 1 # 保留的旧日志文件最大数量
compress: false # 是否压缩旧日志 compress: false # 是否压缩旧日志
aliyun: aliyun:
sms: sms:
access_key_id: "LTAI5tB9ur4ExCF4dYPq7hLz" access_key_id: "LTAI5tB9ur4ExCF4dYPq7hLz"
@@ -39,8 +38,21 @@ aliyun:
template_code: "SMS_291510729" template_code: "SMS_291510729"
oss: oss:
endpoint: oss-cn-beijing.aliyuncs.com endpoint: static.texpixel.com
inner_endpoint: oss-cn-beijing-internal.aliyuncs.com inner_endpoint: oss-cn-beijing-internal.aliyuncs.com
access_key_id: LTAI5tKogxeiBb4gJGWEePWN access_key_id: LTAI5t8qXhow6NCdYDtu1saF
access_key_secret: l4oCxtt5iLSQ1DAs40guTzKUfrxXwq access_key_secret: qZ2SwYsNCEBckCVSOszH31yYwXU44A
bucket_name: bitwsd-doc-ai bucket_name: texpixel-doc1
mathpix:
app_id: "ocr_eede6f_ea9b5c"
app_key: "fb72d251e33ac85c929bfd4eec40d78368d08d82fb2ee1cffb04a8bb967d1db5"
baidu_ocr:
token: "e3a47bd2438f1f38840c203fc5939d17a54482d1"
google:
client_id: "404402221037-nqdsk11bkpk5a7oh396mrg1ieh28u6q1.apps.googleusercontent.com"
client_secret: "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"
redirect_uri: "https://app.cloud.texpixel.com:10443/auth/google/callback"
proxy: "http://localhost:7890"

View File

@@ -4,21 +4,21 @@ server:
database: database:
driver: mysql driver: mysql
host: rm-bp1uh3e1qop18gz4wto.mysql.rds.aliyuncs.com host: 172.31.134.12
port: 3306 port: 3006
username: root username: root
password: bitwsdttestESAadb12@3341 password: yoge@coder%%%123321!
dbname: doc_ai dbname: doc_ai
max_idle: 10 max_idle: 10
max_open: 100 max_open: 30
redis: redis:
addr: 172.31.32.138:6379 addr: 172.31.134.12:6399
password: bitwsd@8912WE! password: bitwsd@8912WE!
db: 0 db: 0
limit: limit:
formula_recognition: 2 formula_recognition: 10
log: log:
appName: document_ai appName: document_ai
@@ -38,8 +38,21 @@ aliyun:
template_code: "SMS_291510729" template_code: "SMS_291510729"
oss: oss:
endpoint: oss-cn-beijing.aliyuncs.com endpoint: static.texpixel.com
inner_endpoint: oss-cn-beijing-internal.aliyuncs.com inner_endpoint: oss-cn-beijing-internal.aliyuncs.com
access_key_id: LTAI5tKogxeiBb4gJGWEePWN access_key_id: LTAI5t8qXhow6NCdYDtu1saF
access_key_secret: l4oCxtt5iLSQ1DAs40guTzKUfrxXwq access_key_secret: qZ2SwYsNCEBckCVSOszH31yYwXU44A
bucket_name: bitwsd-doc-ai bucket_name: texpixel-doc1
mathpix:
app_id: "ocr_eede6f_ea9b5c"
app_key: "fb72d251e33ac85c929bfd4eec40d78368d08d82fb2ee1cffb04a8bb967d1db5"
baidu_ocr:
token: "e3a47bd2438f1f38840c203fc5939d17a54482d1"
google:
client_id: "404402221037-nqdsk11bkpk5a7oh396mrg1ieh28u6q1.apps.googleusercontent.com"
client_secret: "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"
redirect_uri: "https://texpixel.com/auth/google/callback"
proxy: "http://100.115.184.74:7890"

13
deploy_dev.sh Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
git push origin test
ssh ubuntu << 'ENDSSH'
cd /home/yoge/Dev/doc_ai_backed
git checkout test
git pull origin test
docker compose -f docker-compose.infra.yml up -d
docker compose down
docker image rm doc_ai_backed-doc_ai:latest
docker compose up -d
ENDSSH

10
deploy_prod.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
docker build -t crpi-8s2ierii2xan4klg.cn-beijing.personal.cr.aliyuncs.com/texpixel/doc_ai_backend:latest . && docker push crpi-8s2ierii2xan4klg.cn-beijing.personal.cr.aliyuncs.com/texpixel/doc_ai_backend:latest
ssh ecs << 'ENDSSH'
docker stop doc_ai doc_ai_backend 2>/dev/null || true
docker rm doc_ai doc_ai_backend 2>/dev/null || true
docker pull crpi-8s2ierii2xan4klg.cn-beijing.personal.cr.aliyuncs.com/texpixel/doc_ai_backend:latest
docker run -d --name doc_ai -p 8024:8024 --restart unless-stopped crpi-8s2ierii2xan4klg.cn-beijing.personal.cr.aliyuncs.com/texpixel/doc_ai_backend:latest -env=prod
ENDSSH

32
docker-compose.infra.yml Normal file
View File

@@ -0,0 +1,32 @@
services:
mysql:
image: mysql:8.0
container_name: mysql
environment:
MYSQL_ROOT_PASSWORD: texpixel#pwd123!
MYSQL_DATABASE: doc_ai
MYSQL_USER: texpixel
MYSQL_PASSWORD: texpixel#pwd123!
ports:
- "3006:3306"
volumes:
- mysql_data:/var/lib/mysql
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-uroot", "-ptexpixel#pwd123!"]
interval: 5s
timeout: 5s
retries: 10
start_period: 30s
restart: always
redis:
image: redis:latest
container_name: redis
command: redis-server --requirepass "yoge@123321!"
ports:
- "6079:6379"
restart: always
volumes:
mysql_data:
driver: local

View File

@@ -1,36 +1,10 @@
version: '3.8'
services: services:
mysql: doc_ai:
image: mysql:8.0 build: .
container_name: mysql container_name: doc_ai
environment: network_mode: host
MYSQL_ROOT_PASSWORD: 123456 # 设置root用户密码
MYSQL_DATABASE: document_ai # 设置默认数据库名
MYSQL_USER: bitwsd_document # 设置数据库用户名
MYSQL_PASSWORD: 123456 # 设置数据库用户密码
ports:
- "3306:3306" # 映射宿主机的3306端口到容器内的3306
volumes: volumes:
- mysql_data:/var/lib/mysql # 持久化MySQL数据 - ./config:/app/config
networks: - ./logs:/app/logs
- backend command: ["-env", "dev"]
restart: always restart: always
redis:
image: redis:latest
container_name: redis
ports:
- "6379:6379" # 映射宿主机的6379端口到容器内的6379
networks:
- backend
restart: always
volumes:
mysql_data:
# 持久化MySQL数据卷
driver: local
networks:
backend:
driver: bridge

19
go.mod
View File

@@ -1,9 +1,8 @@
module gitea.com/bitwsd/document_ai module gitea.com/texpixel/document_ai
go 1.20 go 1.20
require ( require (
gitea.com/bitwsd/core v0.0.0-20241128075635-8d72a929b914
github.com/alibabacloud-go/darabonba-openapi v0.2.1 github.com/alibabacloud-go/darabonba-openapi v0.2.1
github.com/alibabacloud-go/dysmsapi-20170525/v2 v2.0.18 github.com/alibabacloud-go/dysmsapi-20170525/v2 v2.0.18
github.com/alibabacloud-go/tea v1.1.19 github.com/alibabacloud-go/tea v1.1.19
@@ -12,15 +11,21 @@ require (
github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/gin-gonic/gin v1.10.0 github.com/gin-gonic/gin v1.10.0
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/jtolds/gls v4.20.0+incompatible
github.com/redis/go-redis/v9 v9.7.0 github.com/redis/go-redis/v9 v9.7.0
github.com/rs/zerolog v1.33.0
github.com/spf13/viper v1.19.0 github.com/spf13/viper v1.19.0
golang.org/x/crypto v0.23.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gorm.io/datatypes v1.2.7
gorm.io/driver/mysql v1.5.7 gorm.io/driver/mysql v1.5.7
gorm.io/gorm v1.25.12 gorm.io/gorm v1.30.0
) )
require github.com/go-sql-driver/mysql v1.7.0 // indirect require github.com/go-sql-driver/mysql v1.8.1 // indirect
require ( require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect
github.com/alibabacloud-go/endpoint-util v1.1.0 // indirect github.com/alibabacloud-go/endpoint-util v1.1.0 // indirect
@@ -41,6 +46,7 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect github.com/go-playground/validator/v10 v10.20.0 // indirect
github.com/goccy/go-json v0.10.2 // indirect github.com/goccy/go-json v0.10.2 // indirect
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect github.com/jinzhu/now v1.1.5 // indirect
@@ -54,7 +60,6 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/rs/zerolog v1.33.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect
@@ -68,14 +73,12 @@ require (
go.uber.org/atomic v1.9.0 // indirect go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect
golang.org/x/arch v0.8.0 // indirect golang.org/x/arch v0.8.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/net v0.25.0 // indirect golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.20.0 // indirect golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect golang.org/x/text v0.20.0 // indirect
golang.org/x/time v0.5.0 // indirect golang.org/x/time v0.5.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )

41
go.sum
View File

@@ -1,5 +1,5 @@
gitea.com/bitwsd/core v0.0.0-20241128075635-8d72a929b914 h1:3aRCeiuq/PWMr2yjEN9Y5NusfmpdMKiO4i/5tM5qc34= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
gitea.com/bitwsd/core v0.0.0-20241128075635-8d72a929b914/go.mod h1:hbEUo3t/AFGCnQbxwdG4oiw2IHdlRgK02cqd0yicP1Y= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo=
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
github.com/alibabacloud-go/darabonba-openapi v0.1.18/go.mod h1:PB4HffMhJVmAgNKNq3wYbTUlFvPgxJpTzd1F5pTuUsc= github.com/alibabacloud-go/darabonba-openapi v0.1.18/go.mod h1:PB4HffMhJVmAgNKNq3wYbTUlFvPgxJpTzd1F5pTuUsc=
@@ -33,9 +33,7 @@ github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9
github.com/aliyun/credentials-go v1.1.2 h1:qU1vwGIBb3UJ8BwunHDRFtAhS6jnQLnde/yk0+Ih2GY= github.com/aliyun/credentials-go v1.1.2 h1:qU1vwGIBb3UJ8BwunHDRFtAhS6jnQLnde/yk0+Ih2GY=
github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
@@ -53,13 +51,11 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
@@ -69,27 +65,33 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -97,17 +99,16 @@ github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
@@ -118,6 +119,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -134,11 +137,9 @@ github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -209,6 +210,7 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -221,8 +223,8 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -247,10 +249,15 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/datatypes v1.2.7 h1:ww9GAhF1aGXZY3EB3cJPJ7//JiuQo7DlQA7NNlVaTdk=
gorm.io/datatypes v1.2.7/go.mod h1:M2iO+6S3hhi4nAyYe444Pcb0dcIiOMJ7QHaUXxyiNZY=
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo= gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU=
gorm.io/driver/sqlserver v1.6.0 h1:VZOBQVsVhkHU/NzNhRJKoANt5pZGQAS1Bwc6m6dgfnc=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

View File

@@ -0,0 +1,34 @@
package analytics
import "time"
// TrackEventRequest is the payload for recording a single analytics event.
type TrackEventRequest struct {
	// TaskNo is the associated task number.
	// NOTE(review): it is required by the binding but AnalyticsService.TrackEvent
	// does not persist it — confirm whether it should be stored on the event.
	TaskNo string `json:"task_no" binding:"required"`
	// UserID identifies the user that triggered the event; 0 when anonymous.
	UserID int64 `json:"user_id"`
	// EventName is the event identifier.
	EventName string `json:"event_name" binding:"required"`
	// Properties holds free-form event attributes (JSON-encoded before storage).
	Properties map[string]interface{} `json:"properties"`
	// DeviceInfo holds client device metadata.
	DeviceInfo map[string]interface{} `json:"device_info"`
	// MetaData holds additional metadata.
	MetaData map[string]interface{} `json:"meta_data"`
}

// BatchTrackEventRequest is the payload for recording multiple events at once.
type BatchTrackEventRequest struct {
	// Events is the batch; between 1 and 100 entries per request.
	Events []TrackEventRequest `json:"events" binding:"required,min=1,max=100"`
}

// QueryEventsRequest is the paginated event-query request.
// Filters are optional except pagination; see AnalyticsService.QueryEvents
// for how filter combinations are dispatched.
type QueryEventsRequest struct {
	// UserID filters by user when non-nil.
	UserID *int64 `json:"user_id" form:"user_id"`
	// EventName filters by event identifier when non-empty.
	EventName string `json:"event_name" form:"event_name"`
	// StartTime/EndTime bound the query window; both must be set to take effect.
	StartTime *time.Time `json:"start_time" form:"start_time"`
	EndTime   *time.Time `json:"end_time" form:"end_time"`
	// Page is the 1-based page number.
	Page int `json:"page" form:"page" binding:"required,min=1"`
	// PageSize is the number of rows per page (1-100).
	PageSize int `json:"page_size" form:"page_size" binding:"required,min=1,max=100"`
}

// EventStatsRequest asks for per-event aggregate statistics in a time window.
type EventStatsRequest struct {
	StartTime time.Time `json:"start_time" form:"start_time" binding:"required"`
	EndTime   time.Time `json:"end_time" form:"end_time" binding:"required"`
}

View File

@@ -0,0 +1,36 @@
package analytics
import "time"
// EventResponse is a single analytics event as returned to API clients,
// with the stored JSON columns decoded back into maps.
type EventResponse struct {
	ID         int64                  `json:"id"`
	UserID     int64                  `json:"user_id"`
	EventName  string                 `json:"event_name"`
	Properties map[string]interface{} `json:"properties"`
	DeviceInfo map[string]interface{} `json:"device_info"`
	MetaData   map[string]interface{} `json:"meta_data"`
	CreatedAt  time.Time              `json:"created_at"`
}

// EventListResponse is a paginated page of events.
type EventListResponse struct {
	Events []*EventResponse `json:"events"`
	// Total is the total row count matching the filter, not the page size.
	Total int64 `json:"total"`
	Page  int   `json:"page"`
	Size  int   `json:"size"`
}

// EventStatsResponse is the aggregate for one event name.
type EventStatsResponse struct {
	EventName string `json:"event_name"`
	// Count is the number of occurrences in the window.
	Count int64 `json:"count"`
	// UniqueUsers is the number of distinct users that emitted the event.
	UniqueUsers int64 `json:"unique_users"`
}

// EventStatsListResponse is the full statistics reply, echoing the window.
type EventStatsListResponse struct {
	Stats     []*EventStatsResponse `json:"stats"`
	StartTime time.Time             `json:"start_time"`
	EndTime   time.Time             `json:"end_time"`
}

View File

@@ -5,6 +5,7 @@ type CreateFormulaRecognitionRequest struct {
FileHash string `json:"file_hash" binding:"required"` // file hash FileHash string `json:"file_hash" binding:"required"` // file hash
FileName string `json:"file_name" binding:"required"` // file name FileName string `json:"file_name" binding:"required"` // file name
TaskType string `json:"task_type" binding:"required,oneof=FORMULA"` // task type TaskType string `json:"task_type" binding:"required,oneof=FORMULA"` // task type
UserID int64 `json:"user_id"` // user id
} }
type GetRecognitionStatusRequest struct { type GetRecognitionStatusRequest struct {

View File

@@ -6,8 +6,24 @@ type CreateTaskResponse struct {
} }
type GetFormulaTaskResponse struct { type GetFormulaTaskResponse struct {
TaskNo string `json:"task_no"` TaskNo string `json:"task_no"`
Status int `json:"status"` Status int `json:"status"`
Count int `json:"count"` Count int `json:"count"`
Latex string `json:"latex"` Latex string `json:"latex"`
Markdown string `json:"markdown"`
MathML string `json:"mathml"`
MML string `json:"mml"`
}
// FormulaRecognitionResponse 公式识别服务返回的响应
type FormulaRecognitionResponse struct {
Result string `json:"result"`
}
// ImageOCRResponse 图片OCR接口返回的响应
type ImageOCRResponse struct {
Markdown string `json:"markdown"` // Markdown 格式内容
Latex string `json:"latex"` // LaTeX 格式内容 (无公式时为空)
MathML string `json:"mathml"` // MathML 格式(无公式时为空)
MML string `json:"mml"` // MML 格式(无公式时为空)
} }

View File

@@ -11,26 +11,28 @@ type TaskListRequest struct {
TaskType string `json:"task_type" form:"task_type" binding:"required"` TaskType string `json:"task_type" form:"task_type" binding:"required"`
Page int `json:"page" form:"page"` Page int `json:"page" form:"page"`
PageSize int `json:"page_size" form:"page_size"` PageSize int `json:"page_size" form:"page_size"`
} UserID int64 `json:"-"`
type PdfInfo struct {
PageCount int `json:"page_count"`
PageWidth int `json:"page_width"`
PageHeight int `json:"page_height"`
} }
type TaskListDTO struct { type TaskListDTO struct {
TaskID string `json:"task_id"` TaskID string `json:"task_id"`
FileName string `json:"file_name"` FileName string `json:"file_name"`
Status string `json:"status"` Status int `json:"status"`
Path string `json:"path"` OriginURL string `json:"origin_url"`
TaskType string `json:"task_type"` TaskType string `json:"task_type"`
CreatedAt string `json:"created_at"` CreatedAt string `json:"created_at"`
PdfInfo PdfInfo `json:"pdf_info"` Latex string `json:"latex"`
Markdown string `json:"markdown"`
MathML string `json:"mathml"`
MML string `json:"mml"`
} }
type TaskListResponse struct { type TaskListResponse struct {
TaskList []*TaskListDTO `json:"task_list"` TaskList []*TaskListDTO `json:"task_list"`
HasMore bool `json:"has_more"` Total int64 `json:"total"`
NextPage int `json:"next_page"` }
type ExportTaskRequest struct {
TaskNo string `json:"task_no" binding:"required"`
Type string `json:"type" binding:"required,oneof=pdf docx"`
} }

View File

@@ -14,7 +14,8 @@ type PhoneLoginRequest struct {
} }
type PhoneLoginResponse struct { type PhoneLoginResponse struct {
Token string `json:"token"` Token string `json:"token"`
ExpiresAt int64 `json:"expires_at"`
} }
type UserInfoResponse struct { type UserInfoResponse struct {
@@ -22,3 +23,51 @@ type UserInfoResponse struct {
Phone string `json:"phone"` Phone string `json:"phone"`
Status int `json:"status"` // 0: not login, 1: login Status int `json:"status"` // 0: not login, 1: login
} }
// EmailRegisterRequest is the payload for registering with email + password.
type EmailRegisterRequest struct {
	Email    string `json:"email" binding:"required,email"`
	Password string `json:"password" binding:"required,min=6"` // at least 6 characters
}

// EmailRegisterResponse returns the session token issued on registration.
type EmailRegisterResponse struct {
	Token     string `json:"token"`
	ExpiresAt int64  `json:"expires_at"` // token expiry (presumably a Unix timestamp — confirm against issuer)
}

// EmailLoginRequest is the payload for email + password login.
type EmailLoginRequest struct {
	Email    string `json:"email" binding:"required,email"`
	Password string `json:"password" binding:"required"`
}

// EmailLoginResponse returns the session token issued on login.
type EmailLoginResponse struct {
	Token     string `json:"token"`
	ExpiresAt int64  `json:"expires_at"`
}

// GoogleAuthUrlRequest asks for a Google OAuth authorization URL.
type GoogleAuthUrlRequest struct {
	RedirectURI string `form:"redirect_uri" binding:"required"`
	State       string `form:"state" binding:"required"` // opaque CSRF state echoed back in the callback
}

// GoogleAuthUrlResponse carries the authorization URL to redirect the user to.
type GoogleAuthUrlResponse struct {
	AuthURL string `json:"auth_url"`
}

// GoogleOAuthCallbackRequest is the payload the frontend posts after Google
// redirects back with an authorization code.
type GoogleOAuthCallbackRequest struct {
	Code        string `json:"code" binding:"required"`
	State       string `json:"state" binding:"required"`
	RedirectURI string `json:"redirect_uri" binding:"required"`
}

// GoogleOAuthCallbackResponse returns the session token issued after the
// OAuth code exchange succeeds.
type GoogleOAuthCallbackResponse struct {
	Token     string `json:"token"`
	ExpiresAt int64  `json:"expires_at"`
}

// GoogleUserInfo mirrors the profile fields returned by Google's OAuth
// userinfo endpoint.
type GoogleUserInfo struct {
	ID            string `json:"id"`
	Email         string `json:"email"`
	Name          string `json:"name"`
	Picture       string `json:"picture"`
	VerifiedEmail bool   `json:"verified_email"`
}

View File

@@ -0,0 +1,232 @@
package service
import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"time"

	"gitea.com/texpixel/document_ai/internal/model/analytics"
	"gitea.com/texpixel/document_ai/internal/storage/dao"
	"gitea.com/texpixel/document_ai/pkg/log"
	"gorm.io/datatypes"
)
// AnalyticsService implements the business logic for recording, querying,
// aggregating, and cleaning up analytics (tracking) events.
type AnalyticsService struct {
	eventDao *dao.AnalyticsEventDao // persistence layer for analytics events
}

// NewAnalyticsService constructs an AnalyticsService with a fresh event DAO.
func NewAnalyticsService() *AnalyticsService {
	return &AnalyticsService{
		eventDao: dao.NewAnalyticsEventDao(),
	}
}
// TrackEvent records a single analytics event.
// The Properties/DeviceInfo/MetaData maps are JSON-encoded into the
// corresponding datatypes.JSON columns; a marshal or insert failure aborts
// the event and returns a wrapped error so callers can inspect the cause.
// NOTE(review): req.TaskNo is required by the request binding but is never
// persisted here — confirm whether it should be stored on the event.
func (s *AnalyticsService) TrackEvent(ctx context.Context, req *analytics.TrackEventRequest) error {
	propertiesJSON, err := json.Marshal(req.Properties)
	if err != nil {
		log.Error(ctx, "marshal properties failed", "error", err)
		return fmt.Errorf("invalid properties format: %w", err)
	}
	deviceInfoJSON, err := json.Marshal(req.DeviceInfo)
	if err != nil {
		log.Error(ctx, "marshal device_info failed", "error", err)
		return fmt.Errorf("invalid device_info format: %w", err)
	}
	metaDataJSON, err := json.Marshal(req.MetaData)
	if err != nil {
		log.Error(ctx, "marshal meta_data failed", "error", err)
		return fmt.Errorf("invalid meta_data format: %w", err)
	}

	event := &dao.AnalyticsEvent{
		UserID:     req.UserID,
		EventName:  req.EventName,
		Properties: datatypes.JSON(propertiesJSON),
		DeviceInfo: datatypes.JSON(deviceInfoJSON),
		MetaData:   datatypes.JSON(metaDataJSON),
		CreatedAt:  time.Now(),
	}
	if err := s.eventDao.Create(dao.DB.WithContext(ctx), event); err != nil {
		log.Error(ctx, "create analytics event failed", "error", err)
		return fmt.Errorf("failed to track event: %w", err)
	}

	log.Info(ctx, "event tracked successfully",
		"event_id", event.ID,
		"user_id", req.UserID,
		"event_name", req.EventName)
	return nil
}
// BatchTrackEvents persists a batch of analytics events in a single insert.
// Events whose JSON payloads fail to marshal are skipped (logged, not fatal);
// the call errors only when no event survives or the batch insert fails, in
// which case the DB cause is wrapped into the returned error.
func (s *AnalyticsService) BatchTrackEvents(ctx context.Context, req *analytics.BatchTrackEventRequest) error {
	events := make([]*dao.AnalyticsEvent, 0, len(req.Events))
	for _, eventReq := range req.Events {
		propertiesJSON, err := json.Marshal(eventReq.Properties)
		if err != nil {
			log.Error(ctx, "marshal properties failed", "error", err)
			continue // best-effort: drop the malformed event, keep the rest
		}
		deviceInfoJSON, err := json.Marshal(eventReq.DeviceInfo)
		if err != nil {
			log.Error(ctx, "marshal device_info failed", "error", err)
			continue
		}
		metaDataJSON, err := json.Marshal(eventReq.MetaData)
		if err != nil {
			log.Error(ctx, "marshal meta_data failed", "error", err)
			continue
		}
		events = append(events, &dao.AnalyticsEvent{
			UserID:     eventReq.UserID,
			EventName:  eventReq.EventName,
			Properties: datatypes.JSON(propertiesJSON),
			DeviceInfo: datatypes.JSON(deviceInfoJSON),
			MetaData:   datatypes.JSON(metaDataJSON),
			CreatedAt:  time.Now(),
		})
	}
	if len(events) == 0 {
		return fmt.Errorf("no valid events to track")
	}

	if err := s.eventDao.BatchCreate(dao.DB.WithContext(ctx), events); err != nil {
		log.Error(ctx, "batch create analytics events failed", "error", err)
		return fmt.Errorf("failed to batch track events: %w", err)
	}
	log.Info(ctx, "batch events tracked successfully", "count", len(events))
	return nil
}
// QueryEvents looks up analytics events by user, event name, or time range
// and returns one paginated page plus the total match count.
func (s *AnalyticsService) QueryEvents(ctx context.Context, req *analytics.QueryEventsRequest) (*analytics.EventListResponse, error) {
	sess := dao.DB.WithContext(ctx)

	var (
		rows  []*dao.AnalyticsEvent
		total int64
		err   error
	)
	// Dispatch on the filter combination. User filters take precedence over
	// a pure time-range query; with no usable filter the request is rejected.
	switch {
	case req.UserID != nil && req.EventName != "":
		rows, total, err = s.eventDao.GetUserEventsByName(sess, *req.UserID, req.EventName, req.Page, req.PageSize)
	case req.UserID != nil:
		rows, total, err = s.eventDao.GetUserEvents(sess, *req.UserID, req.Page, req.PageSize)
	case req.EventName != "":
		rows, total, err = s.eventDao.GetEventsByName(sess, req.EventName, req.Page, req.PageSize)
	case req.StartTime != nil && req.EndTime != nil:
		rows, total, err = s.eventDao.GetEventsByTimeRange(sess, *req.StartTime, *req.EndTime, req.Page, req.PageSize)
	default:
		return nil, fmt.Errorf("invalid query parameters")
	}
	if err != nil {
		log.Error(ctx, "query events failed", "error", err)
		return nil, fmt.Errorf("failed to query events")
	}

	// decode is best-effort: an empty or corrupt JSON column yields a nil map
	// rather than failing the whole query.
	decode := func(raw datatypes.JSON) map[string]interface{} {
		if len(raw) == 0 {
			return nil
		}
		var m map[string]interface{}
		_ = json.Unmarshal(raw, &m)
		return m
	}

	items := make([]*analytics.EventResponse, 0, len(rows))
	for _, row := range rows {
		items = append(items, &analytics.EventResponse{
			ID:         row.ID,
			UserID:     row.UserID,
			EventName:  row.EventName,
			Properties: decode(row.Properties),
			DeviceInfo: decode(row.DeviceInfo),
			MetaData:   decode(row.MetaData),
			CreatedAt:  row.CreatedAt,
		})
	}

	return &analytics.EventListResponse{
		Events: items,
		Total:  total,
		Page:   req.Page,
		Size:   req.PageSize,
	}, nil
}
// GetEventStats aggregates per-event counts and unique-user counts for the
// requested time window.
// The previous implementation used unchecked type assertions
// (result["count"].(int64) etc.), which panic whenever the SQL driver returns
// a different type for aggregate columns (e.g. []byte or uint64 from MySQL);
// values are now converted defensively, falling back to the zero value.
func (s *AnalyticsService) GetEventStats(ctx context.Context, req *analytics.EventStatsRequest) (*analytics.EventStatsListResponse, error) {
	results, err := s.eventDao.GetEventStats(dao.DB.WithContext(ctx), req.StartTime, req.EndTime)
	if err != nil {
		log.Error(ctx, "get event stats failed", "error", err)
		return nil, fmt.Errorf("failed to get event stats: %w", err)
	}

	// toInt64 normalizes the numeric representations drivers may hand back.
	toInt64 := func(v interface{}) int64 {
		switch n := v.(type) {
		case int64:
			return n
		case uint64:
			return int64(n)
		case int:
			return int64(n)
		case float64:
			return int64(n)
		case []byte:
			parsed, _ := strconv.ParseInt(string(n), 10, 64)
			return parsed
		case string:
			parsed, _ := strconv.ParseInt(n, 10, 64)
			return parsed
		default:
			return 0
		}
	}

	stats := make([]*analytics.EventStatsResponse, 0, len(results))
	for _, result := range results {
		name, _ := result["event_name"].(string) // empty string on unexpected type instead of panicking
		stats = append(stats, &analytics.EventStatsResponse{
			EventName:   name,
			Count:       toInt64(result["count"]),
			UniqueUsers: toInt64(result["unique_users"]),
		})
	}
	return &analytics.EventStatsListResponse{
		Stats:     stats,
		StartTime: req.StartTime,
		EndTime:   req.EndTime,
	}, nil
}
// CountUserEvents returns the total number of events recorded for userID.
func (s *AnalyticsService) CountUserEvents(ctx context.Context, userID int64) (int64, error) {
	count, err := s.eventDao.CountUserEvents(dao.DB.WithContext(ctx), userID)
	if err == nil {
		return count, nil
	}
	log.Error(ctx, "count user events failed", "error", err, "user_id", userID)
	return 0, fmt.Errorf("failed to count user events")
}
// CountEventsByName returns how many times the named event has been recorded.
func (s *AnalyticsService) CountEventsByName(ctx context.Context, eventName string) (int64, error) {
	count, err := s.eventDao.CountEventsByName(dao.DB.WithContext(ctx), eventName)
	if err == nil {
		return count, nil
	}
	log.Error(ctx, "count events by name failed", "error", err, "event_name", eventName)
	return 0, fmt.Errorf("failed to count events")
}
// CleanOldEvents deletes events older than retentionDays days. Intended to
// be invoked from a periodic maintenance job.
func (s *AnalyticsService) CleanOldEvents(ctx context.Context, retentionDays int) error {
	cutoff := time.Now().AddDate(0, 0, -retentionDays)
	if err := s.eventDao.DeleteOldEvents(dao.DB.WithContext(ctx), cutoff); err != nil {
		log.Error(ctx, "clean old events failed", "error", err, "before_time", cutoff)
		return fmt.Errorf("failed to clean old events")
	}
	log.Info(ctx, "old events cleaned successfully", "retention_days", retentionDays)
	return nil
}

View File

@@ -7,21 +7,23 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"mime/multipart"
"net/http" "net/http"
"strings" "strings"
"time" "time"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/config"
"gitea.com/bitwsd/document_ai/config" "gitea.com/texpixel/document_ai/internal/model/formula"
"gitea.com/bitwsd/document_ai/internal/model/formula" "gitea.com/texpixel/document_ai/internal/storage/cache"
"gitea.com/bitwsd/document_ai/internal/storage/cache" "gitea.com/texpixel/document_ai/internal/storage/dao"
"gitea.com/bitwsd/document_ai/internal/storage/dao" "gitea.com/texpixel/document_ai/pkg/log"
"gitea.com/bitwsd/document_ai/pkg/common" "gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/bitwsd/document_ai/pkg/constant" "gitea.com/texpixel/document_ai/pkg/constant"
"gitea.com/bitwsd/document_ai/pkg/httpclient" "gitea.com/texpixel/document_ai/pkg/httpclient"
"gitea.com/bitwsd/document_ai/pkg/oss" "gitea.com/texpixel/document_ai/pkg/oss"
"gitea.com/bitwsd/document_ai/pkg/utils" "gitea.com/texpixel/document_ai/pkg/requestid"
"gitea.com/texpixel/document_ai/pkg/utils"
"gorm.io/gorm" "gorm.io/gorm"
) )
@@ -105,6 +107,7 @@ func (s *RecognitionService) CreateRecognitionTask(ctx context.Context, req *for
sess := dao.DB.WithContext(ctx) sess := dao.DB.WithContext(ctx)
taskDao := dao.NewRecognitionTaskDao() taskDao := dao.NewRecognitionTaskDao()
task := &dao.RecognitionTask{ task := &dao.RecognitionTask{
UserID: req.UserID,
TaskUUID: utils.NewUUID(), TaskUUID: utils.NewUUID(),
TaskType: dao.TaskType(req.TaskType), TaskType: dao.TaskType(req.TaskType),
Status: dao.TaskStatusPending, Status: dao.TaskStatusPending,
@@ -165,8 +168,21 @@ func (s *RecognitionService) GetFormualTask(ctx context.Context, taskNo string)
log.Error(ctx, "func", "GetFormualTask", "msg", "查询任务结果失败", "error", err, "task_no", taskNo) log.Error(ctx, "func", "GetFormualTask", "msg", "查询任务结果失败", "error", err, "task_no", taskNo)
return nil, common.NewError(common.CodeDBError, "查询任务结果失败", err) return nil, common.NewError(common.CodeDBError, "查询任务结果失败", err)
} }
latex := taskRet.NewContentCodec().GetContent().(string)
return &formula.GetFormulaTaskResponse{TaskNo: taskNo, Latex: latex, Status: int(task.Status)}, nil // 构建 Markdown 格式
markdown := taskRet.Markdown
if markdown == "" {
markdown = fmt.Sprintf("$$%s$$", taskRet.Latex)
}
return &formula.GetFormulaTaskResponse{
TaskNo: taskNo,
Latex: taskRet.Latex,
Markdown: markdown,
MathML: taskRet.MathML,
MML: taskRet.MML,
Status: int(task.Status),
}, nil
} }
func (s *RecognitionService) handleFormulaRecognition(ctx context.Context, taskID int64) error { func (s *RecognitionService) handleFormulaRecognition(ctx context.Context, taskID int64) error {
@@ -200,13 +216,230 @@ func (s *RecognitionService) processVLFormula(ctx context.Context, taskID int64)
log.Info(ctx, "func", "processVLFormulaQueue", "msg", "获取任务成功", "task_id", taskID) log.Info(ctx, "func", "processVLFormulaQueue", "msg", "获取任务成功", "task_id", taskID)
// 处理具体任务 // 处理具体任务
if err := s.processVLFormulaTask(ctx, taskID, task.FileURL); err != nil { if err := s.processVLFormulaTask(ctx, taskID, task.FileURL, utils.ModelVLQwen3VL32BInstruct); err != nil {
log.Error(ctx, "func", "processVLFormulaQueue", "msg", "处理任务失败", "error", err) log.Error(ctx, "func", "processVLFormulaQueue", "msg", "处理任务失败", "error", err)
return return
} }
log.Info(ctx, "func", "processVLFormulaQueue", "msg", "处理任务成功", "task_id", taskID) log.Info(ctx, "func", "processVLFormulaQueue", "msg", "处理任务成功", "task_id", taskID)
} }
// MathpixRequest is the full request body for the Mathpix /v3/text API.
type MathpixRequest struct {
	// Src is the image source: a URL or base64-encoded data.
	Src string `json:"src"`
	// Metadata holds arbitrary key/value pairs attached to the request.
	Metadata map[string]interface{} `json:"metadata"`
	// Tags label the request so results can be identified later.
	Tags []string `json:"tags"`
	// Async requests asynchronous processing.
	Async bool `json:"async"`
	// Callback configures delivery of async results.
	Callback *MathpixCallback `json:"callback"`
	// Formats selects output formats: text, data, html, latex_styled.
	Formats []string `json:"formats"`
	// DataOptions tunes the "data" output format.
	DataOptions *MathpixDataOptions `json:"data_options,omitempty"`
	// IncludeDetectedAlphabets asks for the detected alphabets in the reply.
	IncludeDetectedAlphabets *bool `json:"include_detected_alphabets,omitempty"`
	// AlphabetsAllowed restricts which alphabets may be recognized.
	AlphabetsAllowed *MathpixAlphabetsAllowed `json:"alphabets_allowed,omitempty"`
	// Region limits OCR to a sub-rectangle of the image.
	Region *MathpixRegion `json:"region,omitempty"`
	// EnableBlueHsvFilter enables the blue-HSV filtering mode.
	EnableBlueHsvFilter bool `json:"enable_blue_hsv_filter"`
	// ConfidenceThreshold is the overall confidence threshold.
	ConfidenceThreshold float64 `json:"confidence_threshold"`
	// ConfidenceRateThreshold is the symbol-level confidence threshold (default 0.75).
	ConfidenceRateThreshold float64 `json:"confidence_rate_threshold"`
	// IncludeEquationTags includes equation tags in the output.
	IncludeEquationTags bool `json:"include_equation_tags"`
	// IncludeLineData returns per-line recognition info.
	IncludeLineData bool `json:"include_line_data"`
	// IncludeWordData returns per-word recognition info.
	IncludeWordData bool `json:"include_word_data"`
	// IncludeSmiles enables chemical-structure OCR.
	IncludeSmiles bool `json:"include_smiles"`
	// IncludeInchi returns InChI data.
	IncludeInchi bool `json:"include_inchi"`
	// IncludeGeometryData returns geometry-figure data.
	IncludeGeometryData bool `json:"include_geometry_data"`
	// IncludeDiagramText extracts text found inside diagrams.
	IncludeDiagramText bool `json:"include_diagram_text"`
	// IncludePageInfo includes page information (default true).
	IncludePageInfo *bool `json:"include_page_info,omitempty"`
	// AutoRotateConfidenceThreshold is the auto-rotation confidence threshold (default 0.99).
	AutoRotateConfidenceThreshold float64 `json:"auto_rotate_confidence_threshold"`
	// RmSpaces removes superfluous spaces (default true).
	RmSpaces *bool `json:"rm_spaces,omitempty"`
	// RmFonts strips font commands (default false).
	RmFonts bool `json:"rm_fonts"`
	// IdiomaticEqnArrays uses aligned/gathered/cases instead of array (default false).
	IdiomaticEqnArrays bool `json:"idiomatic_eqn_arrays"`
	// IdiomaticBraces removes unnecessary braces (default false).
	IdiomaticBraces bool `json:"idiomatic_braces"`
	// NumbersDefaultToMath always treats digits as math mode (default false).
	NumbersDefaultToMath bool `json:"numbers_default_to_math"`
	// MathFontsDefaultToMath always treats math fonts as math mode (default false).
	MathFontsDefaultToMath bool `json:"math_fonts_default_to_math"`
	// MathInlineDelimiters are the inline math delimiters, default ["\\(", "\\)"].
	MathInlineDelimiters []string `json:"math_inline_delimiters"`
	// MathDisplayDelimiters are the display math delimiters, default ["\\[", "\\]"].
	MathDisplayDelimiters []string `json:"math_display_delimiters"`
	// EnableTablesFallback enables advanced table handling (default false).
	EnableTablesFallback bool `json:"enable_tables_fallback"`
	// FullwidthPunctuation forces fullwidth punctuation; nil means auto-detect.
	FullwidthPunctuation *bool `json:"fullwidth_punctuation,omitempty"`
}
// MathpixCallback configures where async Mathpix results are delivered.
type MathpixCallback struct {
	URL     string            `json:"url"`     // callback endpoint
	Headers map[string]string `json:"headers"` // extra headers sent with the callback
}

// MathpixDataOptions selects which representations appear in the "data" output.
type MathpixDataOptions struct {
	IncludeAsciimath bool `json:"include_asciimath"`
	IncludeMathml    bool `json:"include_mathml"`
	IncludeLatex     bool `json:"include_latex"`
	IncludeTsv       bool `json:"include_tsv"`
}

// MathpixAlphabetsAllowed whitelists the alphabets recognition may use.
type MathpixAlphabetsAllowed struct {
	En bool `json:"en"` // English
	Hi bool `json:"hi"` // Hindi
	Zh bool `json:"zh"` // Chinese
	Ja bool `json:"ja"` // Japanese
	Ko bool `json:"ko"` // Korean
	Ru bool `json:"ru"` // Russian
	Th bool `json:"th"` // Thai
	Vi bool `json:"vi"` // Vietnamese
}

// MathpixRegion is a pixel-coordinate sub-rectangle of the input image.
type MathpixRegion struct {
	TopLeftX int `json:"top_left_x"`
	TopLeftY int `json:"top_left_y"`
	Width    int `json:"width"`
	Height   int `json:"height"`
}
// MathpixResponse is the full response body of the Mathpix /v3/text API.
type MathpixResponse struct {
	// RequestID identifies the request for debugging.
	RequestID string `json:"request_id"`
	// Text is the Mathpix-Markdown formatted result.
	Text string `json:"text"`
	// LatexStyled is styled LaTeX, returned only for single-formula images.
	LatexStyled string `json:"latex_styled"`
	// Confidence is the overall confidence in [0,1].
	Confidence float64 `json:"confidence"`
	// ConfidenceRate is the confidence rate in [0,1].
	ConfidenceRate float64 `json:"confidence_rate"`
	// LineData carries per-line recognition details.
	LineData []map[string]interface{} `json:"line_data"`
	// WordData carries per-word recognition details.
	WordData []map[string]interface{} `json:"word_data"`
	// Data lists alternate representations (see GetMathML / GetAsciiMath).
	Data []MathpixDataItem `json:"data"`
	// HTML is the HTML rendering of the result.
	HTML string `json:"html"`
	// DetectedAlphabets lists which alphabets were detected.
	DetectedAlphabets []map[string]interface{} `json:"detected_alphabets"`
	// IsPrinted reports printed content.
	IsPrinted bool `json:"is_printed"`
	// IsHandwritten reports handwritten content.
	IsHandwritten bool `json:"is_handwritten"`
	// AutoRotateConfidence is the confidence of the auto-rotation decision.
	AutoRotateConfidence float64 `json:"auto_rotate_confidence"`
	// GeometryData carries geometry-figure details.
	GeometryData []map[string]interface{} `json:"geometry_data"`
	// AutoRotateDegrees is the applied rotation, one of {0, 90, -90, 180}.
	AutoRotateDegrees int `json:"auto_rotate_degrees"`
	// ImageWidth is the input image width in pixels.
	ImageWidth int `json:"image_width"`
	// ImageHeight is the input image height in pixels.
	ImageHeight int `json:"image_height"`
	// Error is the error message, empty on success.
	Error string `json:"error"`
	// ErrorInfo carries structured error details.
	ErrorInfo *MathpixErrorInfo `json:"error_info"`
	// Version is the API version that produced this response.
	Version string `json:"version"`
}

// MathpixDataItem is one alternate representation of the recognized content,
// keyed by Type (e.g. "mathml", "asciimath").
type MathpixDataItem struct {
	Type  string `json:"type"`
	Value string `json:"value"`
}

// MathpixErrorInfo is the structured error detail attached to failures.
type MathpixErrorInfo struct {
	ID      string `json:"id"`
	Message string `json:"message"`
}
// BaiduOCRRequest is the request body for Baidu's OCR layout-parsing API.
type BaiduOCRRequest struct {
	// File is the base64-encoded file content.
	File string `json:"file"`
	// FileType selects the payload kind: 0 = PDF, 1 = image.
	FileType int `json:"fileType"`
	// UseDocOrientationClassify enables document orientation classification.
	UseDocOrientationClassify bool `json:"useDocOrientationClassify"`
	// UseDocUnwarping enables document de-warping correction.
	UseDocUnwarping bool `json:"useDocUnwarping"`
	// UseChartRecognition enables chart recognition.
	UseChartRecognition bool `json:"useChartRecognition"`
}

// BaiduOCRResponse is the top-level Baidu OCR layout-parsing response.
type BaiduOCRResponse struct {
	ErrorCode int             `json:"errorCode"`
	ErrorMsg  string          `json:"errorMsg"`
	Result    *BaiduOCRResult `json:"result"`
}

// BaiduOCRResult is the result envelope of a Baidu OCR response.
type BaiduOCRResult struct {
	LayoutParsingResults []BaiduLayoutParsingResult `json:"layoutParsingResults"`
}

// BaiduLayoutParsingResult is the layout-parsing result for one page.
type BaiduLayoutParsingResult struct {
	Markdown     BaiduMarkdownResult `json:"markdown"`
	OutputImages map[string]string   `json:"outputImages"`
}

// BaiduMarkdownResult is the markdown rendering of a parsed page, with any
// referenced images keyed by name.
type BaiduMarkdownResult struct {
	Text   string            `json:"text"`
	Images map[string]string `json:"images"`
}
// GetMathML returns the MathML representation from the response data items,
// or the empty string when none is present.
func (r *MathpixResponse) GetMathML() string {
	for i := range r.Data {
		if r.Data[i].Type == "mathml" {
			return r.Data[i].Value
		}
	}
	return ""
}
// GetAsciiMath returns the AsciiMath representation from the response data
// items, or the empty string when none is present.
func (r *MathpixResponse) GetAsciiMath() string {
	for i := range r.Data {
		if r.Data[i].Type == "asciimath" {
			return r.Data[i].Value
		}
	}
	return ""
}
func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int64, fileURL string) (err error) { func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int64, fileURL string) (err error) {
// 为整个任务处理添加超时控制 // 为整个任务处理添加超时控制
ctx, cancel := context.WithTimeout(ctx, 45*time.Second) ctx, cancel := context.WithTimeout(ctx, 45*time.Second)
@@ -263,19 +496,12 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
return err return err
} }
downloadURL, err := oss.GetDownloadURL(ctx, fileURL)
if err != nil {
log.Error(ctx, "func", "processFormulaTask", "msg", "获取下载URL失败", "error", err)
return err
}
// 将图片转为base64编码 // 将图片转为base64编码
base64Image := base64.StdEncoding.EncodeToString(imageData) base64Image := base64.StdEncoding.EncodeToString(imageData)
// 创建JSON请求 // 创建JSON请求
requestData := map[string]string{ requestData := map[string]string{
"image_base64": base64Image, "image_base64": base64Image,
"img_url": downloadURL,
} }
jsonData, err := json.Marshal(requestData) jsonData, err := json.Marshal(requestData)
@@ -287,8 +513,8 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
// 设置Content-Type头为application/json // 设置Content-Type头为application/json
headers := map[string]string{"Content-Type": "application/json", utils.RequestIDHeaderKey: utils.GetRequestIDFromContext(ctx)} headers := map[string]string{"Content-Type": "application/json", utils.RequestIDHeaderKey: utils.GetRequestIDFromContext(ctx)}
// 发送请求时会使用带超时的context // 发送请求到新的 OCR 接口
resp, err := s.httpClient.RequestWithRetry(ctx, http.MethodPost, s.getURL(ctx), bytes.NewReader(jsonData), headers) resp, err := s.httpClient.RequestWithRetry(ctx, http.MethodPost, "https://cloud.texpixel.com:10443/doc_process/v1/image/ocr", bytes.NewReader(jsonData), headers)
if err != nil { if err != nil {
if ctx.Err() == context.DeadlineExceeded { if ctx.Err() == context.DeadlineExceeded {
log.Error(ctx, "func", "processFormulaTask", "msg", "请求超时") log.Error(ctx, "func", "processFormulaTask", "msg", "请求超时")
@@ -299,30 +525,38 @@ func (s *RecognitionService) processFormulaTask(ctx context.Context, taskID int6
} }
defer resp.Body.Close() defer resp.Body.Close()
log.Info(ctx, "func", "processFormulaTask", "msg", "请求成功", "resp", resp.Body) log.Info(ctx, "func", "processFormulaTask", "msg", "请求成功")
body := &bytes.Buffer{} body := &bytes.Buffer{}
if _, err = body.ReadFrom(resp.Body); err != nil { if _, err = body.ReadFrom(resp.Body); err != nil {
log.Error(ctx, "func", "processFormulaTask", "msg", "读取响应体失败", "error", err) log.Error(ctx, "func", "processFormulaTask", "msg", "读取响应体失败", "error", err)
return err return err
} }
katex := utils.ToKatex(body.String()) log.Info(ctx, "func", "processFormulaTask", "msg", "响应内容", "body", body.String())
content := &dao.FormulaRecognitionContent{Latex: katex}
b, _ := json.Marshal(content) // 解析 JSON 响应
// Save recognition result var ocrResp formula.ImageOCRResponse
result := &dao.RecognitionResult{ if err := json.Unmarshal(body.Bytes(), &ocrResp); err != nil {
log.Error(ctx, "func", "processFormulaTask", "msg", "解析响应JSON失败", "error", err)
return err
}
err = resultDao.Create(tx, dao.RecognitionResult{
TaskID: taskID, TaskID: taskID,
TaskType: dao.TaskTypeFormula, TaskType: dao.TaskTypeFormula,
Content: b, Latex: ocrResp.Latex,
} Markdown: ocrResp.Markdown,
if err := resultDao.Create(tx, *result); err != nil { MathML: ocrResp.MathML,
MML: ocrResp.MML,
})
if err != nil {
log.Error(ctx, "func", "processFormulaTask", "msg", "保存任务结果失败", "error", err) log.Error(ctx, "func", "processFormulaTask", "msg", "保存任务结果失败", "error", err)
return err return err
} }
isSuccess = true isSuccess = true
return nil return nil
} }
func (s *RecognitionService) processVLFormulaTask(ctx context.Context, taskID int64, fileURL string) error { func (s *RecognitionService) processVLFormulaTask(ctx context.Context, taskID int64, fileURL string, model string) error {
isSuccess := false isSuccess := false
defer func() { defer func() {
if !isSuccess { if !isSuccess {
@@ -349,28 +583,11 @@ func (s *RecognitionService) processVLFormulaTask(ctx context.Context, taskID in
log.Error(ctx, "func", "processVLFormulaTask", "msg", "读取图片数据失败", "error", err) log.Error(ctx, "func", "processVLFormulaTask", "msg", "读取图片数据失败", "error", err)
return err return err
} }
prompt := ` prompt := `Please perform OCR on the image and output only LaTeX code.`
Please perform OCR on the image and output only LaTeX code.
Important instructions:
* "The image contains mathematical formulas, no plain text."
* "Preserve all layout, symbols, subscripts, summations, parentheses, etc., exactly as shown."
* "Use \[ ... \] or align environments to represent multiline math expressions."
* "Use adaptive symbols such as \left and \right where applicable."
* "Do not include any extra commentary, template answers, or unrelated equations."
* "Only output valid LaTeX code based on the actual content of the image, and not change the original mathematical expression."
* "The output result must be can render by better-react-mathjax."
`
base64Image := base64.StdEncoding.EncodeToString(imageData) base64Image := base64.StdEncoding.EncodeToString(imageData)
requestBody := formula.VLFormulaRequest{ requestBody := formula.VLFormulaRequest{
Model: "Qwen/Qwen2.5-VL-32B-Instruct", Model: model,
Stream: false, Stream: false,
MaxTokens: 512, MaxTokens: 512,
Temperature: 0.1, Temperature: 0.1,
@@ -439,39 +656,21 @@ Important instructions:
} }
resultDao := dao.NewRecognitionResultDao() resultDao := dao.NewRecognitionResultDao()
var formulaRes *dao.FormulaRecognitionContent
result, err := resultDao.GetByTaskID(dao.DB.WithContext(ctx), taskID) result, err := resultDao.GetByTaskID(dao.DB.WithContext(ctx), taskID)
if err != nil { if err != nil {
log.Error(ctx, "func", "processVLFormulaTask", "msg", "获取任务结果失败", "error", err) log.Error(ctx, "func", "processVLFormulaTask", "msg", "获取任务结果失败", "error", err)
return err return err
} }
if result == nil { if result == nil {
formulaRes = &dao.FormulaRecognitionContent{EnhanceLatex: latex} formulaRes := &dao.RecognitionResult{TaskID: taskID, TaskType: dao.TaskTypeFormula, Latex: latex}
b, err := formulaRes.Encode() err = resultDao.Create(dao.DB.WithContext(ctx), *formulaRes)
if err != nil {
log.Error(ctx, "func", "processVLFormulaTask", "msg", "编码任务结果失败", "error", err)
return err
}
err = resultDao.Create(dao.DB.WithContext(ctx), dao.RecognitionResult{TaskID: taskID, TaskType: dao.TaskTypeFormula, Content: b})
if err != nil { if err != nil {
log.Error(ctx, "func", "processVLFormulaTask", "msg", "创建任务结果失败", "error", err) log.Error(ctx, "func", "processVLFormulaTask", "msg", "创建任务结果失败", "error", err)
return err return err
} }
} else { } else {
formulaRes = result.NewContentCodec().(*dao.FormulaRecognitionContent) result.Latex = latex
err = formulaRes.Decode() err = resultDao.Update(dao.DB.WithContext(ctx), result.ID, map[string]interface{}{"latex": latex})
if err != nil {
log.Error(ctx, "func", "processVLFormulaTask", "msg", "解码任务结果失败", "error", err)
return err
}
formulaRes.EnhanceLatex = latex
b, err := formulaRes.Encode()
if err != nil {
log.Error(ctx, "func", "processVLFormulaTask", "msg", "编码任务结果失败", "error", err)
return err
}
err = resultDao.Update(dao.DB.WithContext(ctx), result.ID, map[string]interface{}{"content": b})
if err != nil { if err != nil {
log.Error(ctx, "func", "processVLFormulaTask", "msg", "更新任务结果失败", "error", err) log.Error(ctx, "func", "processVLFormulaTask", "msg", "更新任务结果失败", "error", err)
return err return err
@@ -515,26 +714,408 @@ func (s *RecognitionService) processOneTask(ctx context.Context) {
} }
ctx = context.WithValue(ctx, utils.RequestIDKey, task.TaskUUID) ctx = context.WithValue(ctx, utils.RequestIDKey, task.TaskUUID)
log.Info(ctx, "func", "processFormulaQueue", "msg", "获取任务成功", "task_id", taskID)
// 处理具体任务 // 使用 gls 设置 request_id确保在整个任务处理过程中可用
if err := s.processFormulaTask(ctx, taskID, task.FileURL); err != nil { requestid.SetRequestID(task.TaskUUID, func() {
log.Error(ctx, "func", "processFormulaQueue", "msg", "处理任务失败", "error", err) log.Info(ctx, "func", "processFormulaQueue", "msg", "获取任务成功", "task_id", taskID)
return
}
log.Info(ctx, "func", "processFormulaQueue", "msg", "处理任务成功", "task_id", taskID) err = s.processFormulaTask(ctx, taskID, task.FileURL)
if err != nil {
log.Error(ctx, "func", "processFormulaQueue", "msg", "处理任务失败", "error", err)
return
}
log.Info(ctx, "func", "processFormulaQueue", "msg", "处理任务成功", "task_id", taskID)
})
} }
func (s *RecognitionService) getURL(ctx context.Context) string { // processMathpixTask 使用 Mathpix API 处理公式识别任务(用于增强识别)
return "http://cloud.srcstar.com:8045/formula/predict" func (s *RecognitionService) processMathpixTask(ctx context.Context, taskID int64, fileURL string) error {
count, err := cache.IncrURLCount(ctx) isSuccess := false
logDao := dao.NewRecognitionLogDao()
defer func() {
if !isSuccess {
err := dao.NewRecognitionTaskDao().Update(dao.DB.WithContext(ctx), map[string]interface{}{"id": taskID}, map[string]interface{}{"status": dao.TaskStatusFailed})
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "更新任务状态失败", "error", err)
}
return
}
err := dao.NewRecognitionTaskDao().Update(dao.DB.WithContext(ctx), map[string]interface{}{"id": taskID}, map[string]interface{}{"status": dao.TaskStatusCompleted})
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "更新任务状态失败", "error", err)
}
}()
// 下载图片
imageUrl, err := oss.GetDownloadURL(ctx, fileURL)
if err != nil { if err != nil {
log.Error(ctx, "func", "getURL", "msg", "获取URL计数失败", "error", err) log.Error(ctx, "func", "processMathpixTask", "msg", "获取图片URL失败", "error", err)
return "http://cloud.srcstar.com:8026/formula/predict" return err
} }
if count%2 == 0 {
return "http://cloud.srcstar.com:8026/formula/predict" // 创建 Mathpix API 请求
mathpixReq := MathpixRequest{
Src: imageUrl,
Formats: []string{
"text",
"latex_styled",
"data",
"html",
},
DataOptions: &MathpixDataOptions{
IncludeMathml: true,
IncludeAsciimath: true,
IncludeLatex: true,
IncludeTsv: true,
},
MathInlineDelimiters: []string{"$", "$"},
MathDisplayDelimiters: []string{"$$", "$$"},
RmSpaces: &[]bool{true}[0],
} }
return "https://cloud.texpixel.com:1080/formula/predict"
jsonData, err := json.Marshal(mathpixReq)
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "JSON编码失败", "error", err)
return err
}
headers := map[string]string{
"Content-Type": "application/json",
"app_id": config.GlobalConfig.Mathpix.AppID,
"app_key": config.GlobalConfig.Mathpix.AppKey,
}
endpoint := "https://api.mathpix.com/v3/text"
startTime := time.Now()
log.Info(ctx, "func", "processMathpixTask", "msg", "MathpixApi_Start", "start_time", startTime)
resp, err := s.httpClient.RequestWithRetry(ctx, http.MethodPost, endpoint, bytes.NewReader(jsonData), headers)
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "Mathpix API 请求失败", "error", err)
return err
}
defer resp.Body.Close()
log.Info(ctx, "func", "processMathpixTask", "msg", "MathpixApi_End", "end_time", time.Now(), "duration", time.Since(startTime))
body := &bytes.Buffer{}
if _, err = body.ReadFrom(resp.Body); err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "读取响应体失败", "error", err)
return err
}
// 创建日志记录
recognitionLog := &dao.RecognitionLog{
TaskID: taskID,
Provider: dao.ProviderMathpix,
RequestBody: string(jsonData),
ResponseBody: body.String(),
}
// 解析响应
var mathpixResp MathpixResponse
if err := json.Unmarshal(body.Bytes(), &mathpixResp); err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "解析响应失败", "error", err)
return err
}
// 检查错误
if mathpixResp.Error != "" {
errMsg := mathpixResp.Error
if mathpixResp.ErrorInfo != nil {
errMsg = fmt.Sprintf("%s: %s", mathpixResp.ErrorInfo.ID, mathpixResp.ErrorInfo.Message)
}
log.Error(ctx, "func", "processMathpixTask", "msg", "Mathpix API 返回错误", "error", errMsg)
return fmt.Errorf("mathpix error: %s", errMsg)
}
// 保存日志
err = logDao.Create(dao.DB.WithContext(ctx), recognitionLog)
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "保存日志失败", "error", err)
}
// 更新或创建识别结果
resultDao := dao.NewRecognitionResultDao()
result, err := resultDao.GetByTaskID(dao.DB.WithContext(ctx), taskID)
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "获取任务结果失败", "error", err)
return err
}
log.Info(ctx, "func", "processMathpixTask", "msg", "saveLog", "end_time", time.Now(), "duration", time.Since(startTime))
if result == nil {
// 创建新结果
err = resultDao.Create(dao.DB.WithContext(ctx), dao.RecognitionResult{
TaskID: taskID,
TaskType: dao.TaskTypeFormula,
Latex: mathpixResp.LatexStyled,
Markdown: mathpixResp.Text,
MathML: mathpixResp.GetMathML(),
})
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "创建任务结果失败", "error", err)
return err
}
} else {
// 更新现有结果
err = resultDao.Update(dao.DB.WithContext(ctx), result.ID, map[string]interface{}{
"latex": mathpixResp.LatexStyled,
"markdown": mathpixResp.Text,
"mathml": mathpixResp.GetMathML(),
})
if err != nil {
log.Error(ctx, "func", "processMathpixTask", "msg", "更新任务结果失败", "error", err)
return err
}
}
isSuccess = true
return nil
}
// processBaiduOCRTask runs layout parsing / OCR for one recognition task via
// the Baidu OCR HTTP API: it downloads the file from OSS, submits it as
// base64, records a request/response log, merges the per-page markdown,
// converts it to LaTeX/MathML (best-effort) and upserts the recognition
// result. The deferred block always finalizes the task status.
func (s *RecognitionService) processBaiduOCRTask(ctx context.Context, taskID int64, fileURL string) error {
	isSuccess := false
	logDao := dao.NewRecognitionLogDao()
	// Finalize the task status on every exit path: failed unless isSuccess
	// was flipped at the very end of the happy path.
	defer func() {
		if !isSuccess {
			err := dao.NewRecognitionTaskDao().Update(dao.DB.WithContext(ctx), map[string]interface{}{"id": taskID}, map[string]interface{}{"status": dao.TaskStatusFailed})
			if err != nil {
				log.Error(ctx, "func", "processBaiduOCRTask", "msg", "更新任务状态失败", "error", err)
			}
			return
		}
		err := dao.NewRecognitionTaskDao().Update(dao.DB.WithContext(ctx), map[string]interface{}{"id": taskID}, map[string]interface{}{"status": dao.TaskStatusCompleted})
		if err != nil {
			log.Error(ctx, "func", "processBaiduOCRTask", "msg", "更新任务状态失败", "error", err)
		}
	}()

	// Download the source file from OSS.
	reader, err := oss.DownloadFile(ctx, fileURL)
	if err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "从OSS下载文件失败", "error", err)
		return err
	}
	defer reader.Close()

	// Read the whole file into memory (the API takes a base64 body).
	fileBytes, err := io.ReadAll(reader)
	if err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "读取文件内容失败", "error", err)
		return err
	}

	// Base64-encode the payload.
	fileData := base64.StdEncoding.EncodeToString(fileBytes)

	// Derive fileType from the extension: 0 = PDF, 1 = image (default).
	fileType := 1
	lowerFileURL := strings.ToLower(fileURL)
	if strings.HasSuffix(lowerFileURL, ".pdf") {
		fileType = 0
	}

	// Build the Baidu OCR layout-parsing request.
	baiduReq := BaiduOCRRequest{
		File:                      fileData,
		FileType:                  fileType,
		UseDocOrientationClassify: false,
		UseDocUnwarping:           false,
		UseChartRecognition:       false,
	}

	jsonData, err := json.Marshal(baiduReq)
	if err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "JSON编码失败", "error", err)
		return err
	}

	headers := map[string]string{
		"Content-Type":  "application/json",
		"Authorization": fmt.Sprintf("token %s", config.GlobalConfig.BaiduOCR.Token),
	}

	endpoint := "https://j5veh2l2r6ubk6cb.aistudio-app.com/layout-parsing"
	startTime := time.Now()
	log.Info(ctx, "func", "processBaiduOCRTask", "msg", "BaiduOCRApi_Start", "start_time", startTime)
	resp, err := s.httpClient.RequestWithRetry(ctx, http.MethodPost, endpoint, bytes.NewReader(jsonData), headers)
	if err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "百度 OCR API 请求失败", "error", err)
		return err
	}
	defer resp.Body.Close()
	log.Info(ctx, "func", "processBaiduOCRTask", "msg", "BaiduOCRApi_End", "end_time", time.Now(), "duration", time.Since(startTime))

	body := &bytes.Buffer{}
	if _, err = body.ReadFrom(resp.Body); err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "读取响应体失败", "error", err)
		return err
	}

	// Log record: the request body omits the base64 payload to save storage,
	// keeping only the options and the original file size.
	requestLogData := map[string]interface{}{
		"fileType":                  fileType,
		"useDocOrientationClassify": false,
		"useDocUnwarping":           false,
		"useChartRecognition":       false,
		"fileSize":                  len(fileBytes),
	}
	requestLogBytes, _ := json.Marshal(requestLogData)
	recognitionLog := &dao.RecognitionLog{
		TaskID:       taskID,
		Provider:     dao.ProviderBaiduOCR,
		RequestBody:  string(requestLogBytes),
		ResponseBody: body.String(),
	}

	// Parse the response.
	var baiduResp BaiduOCRResponse
	if err := json.Unmarshal(body.Bytes(), &baiduResp); err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "解析响应失败", "error", err)
		return err
	}

	// API-level error check.
	if baiduResp.ErrorCode != 0 {
		errMsg := fmt.Sprintf("errorCode: %d, errorMsg: %s", baiduResp.ErrorCode, baiduResp.ErrorMsg)
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "百度 OCR API 返回错误", "error", errMsg)
		return fmt.Errorf("baidu ocr error: %s", errMsg)
	}

	// Persist the log; a failure here is logged but does not abort the task.
	err = logDao.Create(dao.DB.WithContext(ctx), recognitionLog)
	if err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "保存日志失败", "error", err)
	}

	// Merge the markdown of every parsed page.
	var markdownTexts []string
	if baiduResp.Result != nil && len(baiduResp.Result.LayoutParsingResults) > 0 {
		for _, res := range baiduResp.Result.LayoutParsingResults {
			if res.Markdown.Text != "" {
				markdownTexts = append(markdownTexts, res.Markdown.Text)
			}
		}
	}
	markdownResult := strings.Join(markdownTexts, "\n\n---\n\n")

	// Best-effort conversion to LaTeX/MathML: on failure we log and continue
	// with empty latex/mml.
	latex, mml, e := s.HandleConvert(ctx, markdownResult)
	if e != nil {
		// bugfix: previously logged the stale `err` (nil at this point)
		// instead of the actual conversion error `e`.
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "转换失败", "error", e)
	}

	// Upsert the recognition result.
	resultDao := dao.NewRecognitionResultDao()
	result, err := resultDao.GetByTaskID(dao.DB.WithContext(ctx), taskID)
	if err != nil {
		log.Error(ctx, "func", "processBaiduOCRTask", "msg", "获取任务结果失败", "error", err)
		return err
	}
	log.Info(ctx, "func", "processBaiduOCRTask", "msg", "saveLog", "end_time", time.Now(), "duration", time.Since(startTime))
	if result == nil {
		// No existing row: create a fresh result.
		err = resultDao.Create(dao.DB.WithContext(ctx), dao.RecognitionResult{
			TaskID:   taskID,
			TaskType: dao.TaskTypeFormula,
			Markdown: markdownResult,
			Latex:    latex,
			MathML:   mml,
		})
		if err != nil {
			log.Error(ctx, "func", "processBaiduOCRTask", "msg", "创建任务结果失败", "error", err)
			return err
		}
	} else {
		// Existing row: overwrite the recognized fields.
		err = resultDao.Update(dao.DB.WithContext(ctx), result.ID, map[string]interface{}{
			"markdown": markdownResult,
			"latex":    latex,
			"mathml":   mml,
		})
		if err != nil {
			log.Error(ctx, "func", "processBaiduOCRTask", "msg", "更新任务结果失败", "error", err)
			return err
		}
	}
	isSuccess = true
	return nil
}
// TestProcessMathpixTask is a debug entry point that loads a task by id and
// runs the Mathpix pipeline on it.
func (s *RecognitionService) TestProcessMathpixTask(ctx context.Context, taskID int64) error {
	task, err := dao.NewRecognitionTaskDao().GetTaskByID(dao.DB.WithContext(ctx), taskID)
	if err != nil {
		log.Error(ctx, "func", "TestProcessMathpixTask", "msg", "获取任务失败", "error", err)
		return err
	}
	if task == nil {
		log.Error(ctx, "func", "TestProcessMathpixTask", "msg", "任务不存在", "task_id", taskID)
		// bugfix: previously returned `err`, which is nil here, so a missing
		// task was silently reported as success.
		return fmt.Errorf("task not found: %d", taskID)
	}
	return s.processMathpixTask(ctx, taskID, task.FileURL)
}
// ConvertResponse is the JSON payload returned by the Python doc_converter
// service (see HandleConvert): the converted LaTeX and MathML plus an
// optional business-level error message.
type ConvertResponse struct {
	Latex  string `json:"latex"`
	MathML string `json:"mathml"`
	Error  string `json:"error,omitempty"` // non-empty means the conversion failed
}
func (s *RecognitionService) HandleConvert(ctx context.Context, markdown string) (latex string, mml string, err error) {
url := "https://cloud.texpixel.com:10443/doc_converter/v1/convert"
// 构建 multipart form
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
_ = writer.WriteField("markdown_input", markdown)
writer.Close()
// 使用正确的 Content-Type包含 boundary
headers := map[string]string{
"Content-Type": writer.FormDataContentType(),
}
resp, err := s.httpClient.RequestWithRetry(ctx, http.MethodPost, url, body, headers)
if err != nil {
return "", "", err
}
defer resp.Body.Close()
// 读取响应体
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return "", "", err
}
// 检查 HTTP 状态码
if resp.StatusCode != http.StatusOK {
return "", "", fmt.Errorf("convert failed: status %d, body: %s", resp.StatusCode, string(respBody))
}
// 解析 JSON 响应
var convertResp ConvertResponse
if err := json.Unmarshal(respBody, &convertResp); err != nil {
return "", "", fmt.Errorf("unmarshal response failed: %v, body: %s", err, string(respBody))
}
// 检查业务错误
if convertResp.Error != "" {
return "", "", fmt.Errorf("convert error: %s", convertResp.Error)
}
return convertResp.Latex, convertResp.MathML, nil
} }

View File

@@ -1,27 +1,37 @@
package service package service
import ( import (
"bytes"
"context" "context"
"encoding/json"
"errors" "errors"
"fmt"
"io"
"net/http"
"strings" "strings"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/internal/model/task"
"gitea.com/bitwsd/document_ai/internal/model/task" "gitea.com/texpixel/document_ai/internal/storage/dao"
"gitea.com/bitwsd/document_ai/internal/storage/dao" "gitea.com/texpixel/document_ai/pkg/log"
"gorm.io/gorm" "gitea.com/texpixel/document_ai/pkg/oss"
) )
type TaskService struct { type TaskService struct {
db *gorm.DB recognitionTaskDao *dao.RecognitionTaskDao
evaluateTaskDao *dao.EvaluateTaskDao
recognitionResultDao *dao.RecognitionResultDao
} }
func NewTaskService() *TaskService { func NewTaskService() *TaskService {
return &TaskService{dao.DB} return &TaskService{
recognitionTaskDao: dao.NewRecognitionTaskDao(),
evaluateTaskDao: dao.NewEvaluateTaskDao(),
recognitionResultDao: dao.NewRecognitionResultDao(),
}
} }
func (svc *TaskService) EvaluateTask(ctx context.Context, req *task.EvaluateTaskRequest) error { func (svc *TaskService) EvaluateTask(ctx context.Context, req *task.EvaluateTaskRequest) error {
taskDao := dao.NewRecognitionTaskDao() task, err := svc.recognitionTaskDao.GetByTaskNo(dao.DB.WithContext(ctx), req.TaskNo)
task, err := taskDao.GetByTaskNo(svc.db.WithContext(ctx), req.TaskNo)
if err != nil { if err != nil {
log.Error(ctx, "func", "EvaluateTask", "msg", "get task by task no failed", "error", err) log.Error(ctx, "func", "EvaluateTask", "msg", "get task by task no failed", "error", err)
return err return err
@@ -36,14 +46,13 @@ func (svc *TaskService) EvaluateTask(ctx context.Context, req *task.EvaluateTask
return errors.New("task not finished") return errors.New("task not finished")
} }
evaluateTaskDao := dao.NewEvaluateTaskDao()
evaluateTask := &dao.EvaluateTask{ evaluateTask := &dao.EvaluateTask{
TaskID: task.ID, TaskID: task.ID,
Satisfied: req.Satisfied, Satisfied: req.Satisfied,
Feedback: req.Feedback, Feedback: req.Feedback,
Comment: strings.Join(req.Suggestion, ","), Comment: strings.Join(req.Suggestion, ","),
} }
err = evaluateTaskDao.Create(svc.db.WithContext(ctx), evaluateTask) err = svc.evaluateTaskDao.Create(dao.DB.WithContext(ctx), evaluateTask)
if err != nil { if err != nil {
log.Error(ctx, "func", "EvaluateTask", "msg", "create evaluate task failed", "error", err) log.Error(ctx, "func", "EvaluateTask", "msg", "create evaluate task failed", "error", err)
return err return err
@@ -53,26 +62,143 @@ func (svc *TaskService) EvaluateTask(ctx context.Context, req *task.EvaluateTask
} }
func (svc *TaskService) GetTaskList(ctx context.Context, req *task.TaskListRequest) (*task.TaskListResponse, error) { func (svc *TaskService) GetTaskList(ctx context.Context, req *task.TaskListRequest) (*task.TaskListResponse, error) {
taskDao := dao.NewRecognitionTaskDao() tasks, total, err := svc.recognitionTaskDao.GetTaskList(dao.DB.WithContext(ctx), req.UserID, dao.TaskType(req.TaskType), req.Page, req.PageSize)
tasks, err := taskDao.GetTaskList(svc.db.WithContext(ctx), dao.TaskType(req.TaskType), req.Page, req.PageSize)
if err != nil { if err != nil {
log.Error(ctx, "func", "GetTaskList", "msg", "get task list failed", "error", err) log.Error(ctx, "func", "GetTaskList", "msg", "get task list failed", "error", err)
return nil, err return nil, err
} }
taskIDs := make([]int64, 0, len(tasks))
for _, item := range tasks {
taskIDs = append(taskIDs, item.ID)
}
recognitionResults, err := svc.recognitionResultDao.GetByTaskIDs(dao.DB.WithContext(ctx), taskIDs)
if err != nil {
log.Error(ctx, "func", "GetTaskList", "msg", "get recognition results failed", "error", err)
return nil, err
}
recognitionResultMap := make(map[int64]*dao.RecognitionResult)
for _, item := range recognitionResults {
recognitionResultMap[item.TaskID] = item
}
resp := &task.TaskListResponse{ resp := &task.TaskListResponse{
TaskList: make([]*task.TaskListDTO, 0, len(tasks)), TaskList: make([]*task.TaskListDTO, 0, len(tasks)),
HasMore: false, Total: total,
NextPage: 0,
} }
for _, item := range tasks { for _, item := range tasks {
var latex string
var markdown string
var mathML string
var mml string
recognitionResult := recognitionResultMap[item.ID]
if recognitionResult != nil {
latex = recognitionResult.Latex
markdown = recognitionResult.Markdown
mathML = recognitionResult.MathML
mml = recognitionResult.MML
}
originURL, err := oss.GetDownloadURL(ctx, item.FileURL)
if err != nil {
log.Error(ctx, "func", "GetTaskList", "msg", "get origin url failed", "error", err)
}
resp.TaskList = append(resp.TaskList, &task.TaskListDTO{ resp.TaskList = append(resp.TaskList, &task.TaskListDTO{
Latex: latex,
Markdown: markdown,
MathML: mathML,
MML: mml,
TaskID: item.TaskUUID, TaskID: item.TaskUUID,
FileName: item.FileName, FileName: item.FileName,
Status: item.Status.String(), Status: int(item.Status),
Path: item.FileURL, OriginURL: originURL,
TaskType: item.TaskType.String(), TaskType: item.TaskType.String(),
CreatedAt: item.CreatedAt.Format("2006-01-02 15:04:05"), CreatedAt: item.CreatedAt.Format("2006-01-02 15:04:05"),
}) })
} }
return resp, nil return resp, nil
} }
// ExportTask renders a completed task's markdown result to a DOCX file via
// the external doc_process service. It returns the file bytes, the MIME
// content type, and an error when the task/result is missing or unfinished.
func (svc *TaskService) ExportTask(ctx context.Context, req *task.ExportTaskRequest) ([]byte, string, error) {
	recognitionTask, err := svc.recognitionTaskDao.GetByTaskNo(dao.DB.WithContext(ctx), req.TaskNo)
	if err != nil {
		log.Error(ctx, "func", "ExportTask", "msg", "get task by task id failed", "error", err)
		return nil, "", err
	}
	if recognitionTask == nil {
		log.Error(ctx, "func", "ExportTask", "msg", "task not found")
		return nil, "", errors.New("task not found")
	}
	if recognitionTask.Status != dao.TaskStatusCompleted {
		log.Error(ctx, "func", "ExportTask", "msg", "task not finished")
		return nil, "", errors.New("task not finished")
	}

	recognitionResult, err := svc.recognitionResultDao.GetByTaskID(dao.DB.WithContext(ctx), recognitionTask.ID)
	if err != nil {
		log.Error(ctx, "func", "ExportTask", "msg", "get recognition result by task id failed", "error", err)
		return nil, "", err
	}
	if recognitionResult == nil {
		log.Error(ctx, "func", "ExportTask", "msg", "recognition result not found")
		return nil, "", errors.New("recognition result not found")
	}
	markdown := recognitionResult.Markdown
	if markdown == "" {
		log.Error(ctx, "func", "ExportTask", "msg", "markdown not found")
		return nil, "", errors.New("markdown not found")
	}

	// Export filename: the original file name with its extension stripped.
	// bugfix: the previous code lowercased the extension before TrimSuffix,
	// so mixed-case extensions (e.g. "Doc.PDF") were never removed.
	filename := recognitionTask.FileName
	if idx := strings.LastIndex(filename, "."); idx != -1 {
		filename = filename[:idx]
	}
	if filename == "" {
		filename = "texpixel"
	}

	// Build the JSON request body for the export service.
	requestBody := map[string]string{
		"markdown": markdown,
		"filename": filename,
	}
	jsonData, err := json.Marshal(requestBody)
	if err != nil {
		log.Error(ctx, "func", "ExportTask", "msg", "json marshal failed", "error", err)
		return nil, "", err
	}

	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://cloud.texpixel.com:10443/doc_process/v1/convert/file", bytes.NewReader(jsonData))
	if err != nil {
		log.Error(ctx, "func", "ExportTask", "msg", "create http request failed", "error", err)
		return nil, "", err
	}
	httpReq.Header.Set("Content-Type", "application/json")

	// NOTE(review): no client timeout is set; cancellation relies solely on
	// ctx. Consider an http.Client with an explicit Timeout.
	client := &http.Client{}
	resp, err := client.Do(httpReq)
	if err != nil {
		log.Error(ctx, "func", "ExportTask", "msg", "http request failed", "error", err)
		return nil, "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Error(ctx, "func", "ExportTask", "msg", "http request failed", "status", resp.StatusCode)
		return nil, "", fmt.Errorf("export service returned status: %d", resp.StatusCode)
	}

	fileData, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Error(ctx, "func", "ExportTask", "msg", "read response body failed", "error", err)
		return nil, "", err
	}

	// The export endpoint only produces DOCX.
	contentType := "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
	return fileData, contentType, nil
}

View File

@@ -2,14 +2,21 @@ package service
import ( import (
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"math/rand" "math/rand"
"net/http"
"net/url"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/config"
"gitea.com/bitwsd/document_ai/internal/storage/cache" model "gitea.com/texpixel/document_ai/internal/model/user"
"gitea.com/bitwsd/document_ai/internal/storage/dao" "gitea.com/texpixel/document_ai/internal/storage/cache"
"gitea.com/bitwsd/document_ai/pkg/sms" "gitea.com/texpixel/document_ai/internal/storage/dao"
"gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/texpixel/document_ai/pkg/log"
"gitea.com/texpixel/document_ai/pkg/sms"
"golang.org/x/crypto/bcrypt"
) )
type UserService struct { type UserService struct {
@@ -107,3 +114,176 @@ func (svc *UserService) GetUserInfo(ctx context.Context, uid int64) (*dao.User,
return user, nil return user, nil
} }
// RegisterByEmail creates a new account for the given email/password pair
// and returns the new user's id. It returns common.ErrEmailExists when the
// email is already registered.
func (svc *UserService) RegisterByEmail(ctx context.Context, email, password string) (uid int64, err error) {
	existing, err := svc.userDao.GetByEmail(dao.DB.WithContext(ctx), email)
	if err != nil {
		log.Error(ctx, "func", "RegisterByEmail", "msg", "get user by email error", "error", err)
		return 0, err
	}
	if existing != nil {
		log.Warn(ctx, "func", "RegisterByEmail", "msg", "email already registered", "email", email)
		return 0, common.ErrEmailExists
	}

	// Never store the plaintext password; bcrypt with the default cost.
	hashed, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		log.Error(ctx, "func", "RegisterByEmail", "msg", "hash password error", "error", err)
		return 0, err
	}

	newUser := &dao.User{
		Email:    email,
		Password: string(hashed),
	}
	if err = svc.userDao.Create(dao.DB.WithContext(ctx), newUser); err != nil {
		log.Error(ctx, "func", "RegisterByEmail", "msg", "create user error", "error", err)
		return 0, err
	}
	return newUser.ID, nil
}
// LoginByEmail authenticates an email/password pair and returns the user id.
// Returns common.ErrEmailNotFound for unknown emails and
// common.ErrPasswordMismatch when the bcrypt comparison fails.
func (svc *UserService) LoginByEmail(ctx context.Context, email, password string) (uid int64, err error) {
	account, err := svc.userDao.GetByEmail(dao.DB.WithContext(ctx), email)
	if err != nil {
		log.Error(ctx, "func", "LoginByEmail", "msg", "get user by email error", "error", err)
		return 0, err
	}
	if account == nil {
		log.Warn(ctx, "func", "LoginByEmail", "msg", "user not found", "email", email)
		return 0, common.ErrEmailNotFound
	}

	if err = bcrypt.CompareHashAndPassword([]byte(account.Password), []byte(password)); err != nil {
		log.Warn(ctx, "func", "LoginByEmail", "msg", "password mismatch", "email", email)
		return 0, common.ErrPasswordMismatch
	}
	return account.ID, nil
}
// googleTokenResponse is the subset of Google's OAuth2 token endpoint
// response that this service consumes.
type googleTokenResponse struct {
	AccessToken string `json:"access_token"` // bearer token used to fetch the user profile
	IDToken     string `json:"id_token"`
	ExpiresIn   int    `json:"expires_in"` // lifetime in seconds
	TokenType   string `json:"token_type"`
}
// googleHTTPClient returns the HTTP client used for Google OAuth calls.
// When a proxy is configured it is applied to the transport; an empty or
// unparsable proxy value falls back to a plain direct client.
func (svc *UserService) googleHTTPClient() *http.Client {
	proxy := config.GlobalConfig.Google.Proxy
	if proxy == "" {
		return &http.Client{}
	}
	parsed, parseErr := url.Parse(proxy)
	if parseErr != nil {
		// Best-effort: an invalid proxy config degrades to a direct client.
		return &http.Client{}
	}
	transport := &http.Transport{Proxy: http.ProxyURL(parsed)}
	return &http.Client{Transport: transport}
}
// ExchangeGoogleCodeAndGetUserInfo exchanges an OAuth2 authorization code for
// tokens at Google's token endpoint, then fetches the user's profile with the
// returned access token.
func (svc *UserService) ExchangeGoogleCodeAndGetUserInfo(ctx context.Context, clientID, clientSecret, code, redirectURI string) (*model.GoogleUserInfo, error) {
	tokenURL := "https://oauth2.googleapis.com/token"
	formData := url.Values{
		"client_id":     {clientID},
		"client_secret": {clientSecret},
		"code":          {code},
		"grant_type":    {"authorization_code"},
		"redirect_uri":  {redirectURI},
	}

	// NOTE(review): PostForm does not propagate ctx; switching to
	// http.NewRequestWithContext would need an extra import for the body.
	client := svc.googleHTTPClient()
	resp, err := client.PostForm(tokenURL, formData)
	if err != nil {
		log.Error(ctx, "func", "ExchangeGoogleCodeAndGetUserInfo", "msg", "exchange code failed", "error", err)
		return nil, err
	}
	defer resp.Body.Close()

	// bugfix: fail fast on a non-200 status instead of decoding an error
	// payload into an empty token struct.
	if resp.StatusCode != http.StatusOK {
		log.Error(ctx, "func", "ExchangeGoogleCodeAndGetUserInfo", "msg", "token endpoint returned non-200", "status", resp.StatusCode)
		return nil, fmt.Errorf("google token endpoint status: %d", resp.StatusCode)
	}

	var tokenResp googleTokenResponse
	if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
		log.Error(ctx, "func", "ExchangeGoogleCodeAndGetUserInfo", "msg", "decode token response failed", "error", err)
		return nil, err
	}
	if tokenResp.AccessToken == "" {
		log.Error(ctx, "func", "ExchangeGoogleCodeAndGetUserInfo", "msg", "no access token in response")
		return nil, errors.New("no access token in response")
	}

	userInfo, err := svc.getGoogleUserInfo(ctx, tokenResp.AccessToken)
	if err != nil {
		log.Error(ctx, "func", "ExchangeGoogleCodeAndGetUserInfo", "msg", "get user info failed", "error", err)
		return nil, err
	}
	return &model.GoogleUserInfo{
		ID:    userInfo.ID,
		Email: userInfo.Email,
		Name:  userInfo.Name,
	}, nil
}
// getGoogleUserInfo fetches the Google profile for the given OAuth2 access
// token from the v2 userinfo endpoint.
func (svc *UserService) getGoogleUserInfo(ctx context.Context, accessToken string) (*model.GoogleUserInfo, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", "https://www.googleapis.com/oauth2/v2/userinfo", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)

	client := svc.googleHTTPClient()
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// bugfix: previously a 401/403 body was decoded into an empty profile;
	// fail fast on any non-200 status instead.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("google userinfo endpoint status: %d", resp.StatusCode)
	}

	var userInfo model.GoogleUserInfo
	if err := json.NewDecoder(resp.Body).Decode(&userInfo); err != nil {
		return nil, err
	}
	return &userInfo, nil
}
// FindOrCreateGoogleUser resolves a Google profile to a local user id:
// match by Google ID first, then by email (linking the Google ID to the
// existing account), and finally create a brand-new account.
func (svc *UserService) FindOrCreateGoogleUser(ctx context.Context, userInfo *model.GoogleUserInfo) (uid int64, err error) {
	// 1) Account already linked to this Google ID?
	found, err := svc.userDao.GetByGoogleID(dao.DB.WithContext(ctx), userInfo.ID)
	if err != nil {
		log.Error(ctx, "func", "FindOrCreateGoogleUser", "msg", "get user by google id error", "error", err)
		return 0, err
	}
	if found != nil {
		return found.ID, nil
	}

	// 2) Account with the same email: attach the Google ID to it.
	found, err = svc.userDao.GetByEmail(dao.DB.WithContext(ctx), userInfo.Email)
	if err != nil {
		log.Error(ctx, "func", "FindOrCreateGoogleUser", "msg", "get user by email error", "error", err)
		return 0, err
	}
	if found != nil {
		found.GoogleID = userInfo.ID
		if err = svc.userDao.Update(dao.DB.WithContext(ctx), found); err != nil {
			log.Error(ctx, "func", "FindOrCreateGoogleUser", "msg", "update user google id error", "error", err)
			return 0, err
		}
		return found.ID, nil
	}

	// 3) First sighting: create a new account from the Google profile.
	created := &dao.User{
		Email:    userInfo.Email,
		GoogleID: userInfo.ID,
		Username: userInfo.Name,
	}
	if err = svc.userDao.Create(dao.DB.WithContext(ctx), created); err != nil {
		log.Error(ctx, "func", "FindOrCreateGoogleUser", "msg", "create user error", "error", err)
		return 0, err
	}
	return created.ID, nil
}

View File

@@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"time" "time"
"gitea.com/bitwsd/document_ai/config" "gitea.com/texpixel/document_ai/config"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
) )

View File

@@ -0,0 +1,170 @@
package dao
import (
	"errors"
	"time"

	"gorm.io/datatypes"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)
// AnalyticsEvent is one tracking event row in the analytics_events table.
// The Properties/DeviceInfo/MetaData columns are opaque JSON blobs supplied
// by the client; their internal schema is not enforced here.
type AnalyticsEvent struct {
	ID         int64          `gorm:"bigint;primaryKey;autoIncrement;column:id;comment:主键ID" json:"id"`
	UserID     int64          `gorm:"column:user_id;not null;index:idx_user_id;comment:用户ID" json:"user_id"`
	EventName  string         `gorm:"column:event_name;varchar(128);not null;index:idx_event_name;comment:事件名称" json:"event_name"`
	Properties datatypes.JSON `gorm:"column:properties;type:json;comment:事件属性(JSON)" json:"properties"`
	DeviceInfo datatypes.JSON `gorm:"column:device_info;type:json;comment:设备信息(JSON)" json:"device_info"`
	MetaData   datatypes.JSON `gorm:"column:meta_data;type:json;comment:元数据(JSON包含task_id等)" json:"meta_data"`
	CreatedAt  time.Time      `gorm:"column:created_at;comment:创建时间;not null;default:current_timestamp;index:idx_created_at" json:"created_at"`
}

// TableName tells gorm which table backs AnalyticsEvent.
func (e *AnalyticsEvent) TableName() string {
	return "analytics_events"
}
// AnalyticsEventDao provides persistence operations for AnalyticsEvent.
// It is stateless: the *gorm.DB handle is passed into every method.
type AnalyticsEventDao struct{}

// NewAnalyticsEventDao returns a ready-to-use AnalyticsEventDao.
func NewAnalyticsEventDao() *AnalyticsEventDao {
	return &AnalyticsEventDao{}
}
// Create inserts a single analytics event row.
func (dao *AnalyticsEventDao) Create(tx *gorm.DB, event *AnalyticsEvent) error {
	return tx.Create(event).Error
}
// BatchCreate inserts the given events in chunks of 100 rows per statement.
// An empty slice is a no-op and returns nil.
func (dao *AnalyticsEventDao) BatchCreate(tx *gorm.DB, events []*AnalyticsEvent) error {
	if len(events) == 0 {
		return nil
	}
	const chunkSize = 100
	return tx.CreateInBatches(events, chunkSize).Error
}
// GetByID loads one event by primary key. It returns (nil, nil) when no row
// exists, so callers can distinguish "not found" from a real error.
func (dao *AnalyticsEventDao) GetByID(tx *gorm.DB, id int64) (*AnalyticsEvent, error) {
	event := &AnalyticsEvent{}
	err := tx.Where("id = ?", id).First(event).Error
	if err != nil {
		// errors.Is also matches a wrapped ErrRecordNotFound, which a plain
		// == comparison would miss.
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, err
	}
	return event, nil
}
// GetUserEvents returns one page of a user's events, newest first, together
// with the total number of events recorded for that user.
func (dao *AnalyticsEventDao) GetUserEvents(tx *gorm.DB, userID int64, page, pageSize int) ([]*AnalyticsEvent, int64, error) {
	var (
		rows  []*AnalyticsEvent
		total int64
	)
	q := tx.Model(&AnalyticsEvent{}).Where("user_id = ?", userID)
	if err := q.Count(&total).Error; err != nil {
		return nil, 0, err
	}
	skip := (page - 1) * pageSize
	err := q.Offset(skip).Limit(pageSize).
		Order(clause.OrderByColumn{Column: clause.Column{Name: "created_at"}, Desc: true}).
		Find(&rows).Error
	return rows, total, err
}
// GetEventsByName returns one page of events with the given name, newest
// first, plus the total matching row count.
func (dao *AnalyticsEventDao) GetEventsByName(tx *gorm.DB, eventName string, page, pageSize int) ([]*AnalyticsEvent, int64, error) {
	// Normalize paging input so a 0/negative page cannot produce a
	// negative offset.
	if page < 1 {
		page = 1
	}
	var events []*AnalyticsEvent
	var total int64
	offset := (page - 1) * pageSize
	query := tx.Model(&AnalyticsEvent{}).Where("event_name = ?", eventName)
	if err := query.Count(&total).Error; err != nil {
		return nil, 0, err
	}
	err := query.Offset(offset).Limit(pageSize).
		Order(clause.OrderByColumn{Column: clause.Column{Name: "created_at"}, Desc: true}).
		Find(&events).Error
	return events, total, err
}
// GetUserEventsByName returns one page of a user's events filtered by
// event name, newest first, plus the total matching row count.
func (dao *AnalyticsEventDao) GetUserEventsByName(tx *gorm.DB, userID int64, eventName string, page, pageSize int) ([]*AnalyticsEvent, int64, error) {
	// Normalize paging input so a 0/negative page cannot produce a
	// negative offset.
	if page < 1 {
		page = 1
	}
	var events []*AnalyticsEvent
	var total int64
	offset := (page - 1) * pageSize
	query := tx.Model(&AnalyticsEvent{}).Where("user_id = ? AND event_name = ?", userID, eventName)
	if err := query.Count(&total).Error; err != nil {
		return nil, 0, err
	}
	err := query.Offset(offset).Limit(pageSize).
		Order(clause.OrderByColumn{Column: clause.Column{Name: "created_at"}, Desc: true}).
		Find(&events).Error
	return events, total, err
}
// GetEventsByTimeRange returns one page of events created within
// [startTime, endTime] (inclusive, SQL BETWEEN), newest first, plus the
// total matching row count.
func (dao *AnalyticsEventDao) GetEventsByTimeRange(tx *gorm.DB, startTime, endTime time.Time, page, pageSize int) ([]*AnalyticsEvent, int64, error) {
	// Normalize paging input so a 0/negative page cannot produce a
	// negative offset.
	if page < 1 {
		page = 1
	}
	var events []*AnalyticsEvent
	var total int64
	offset := (page - 1) * pageSize
	query := tx.Model(&AnalyticsEvent{}).Where("created_at BETWEEN ? AND ?", startTime, endTime)
	if err := query.Count(&total).Error; err != nil {
		return nil, 0, err
	}
	err := query.Offset(offset).Limit(pageSize).
		Order(clause.OrderByColumn{Column: clause.Column{Name: "created_at"}, Desc: true}).
		Find(&events).Error
	return events, total, err
}
// CountEventsByName returns how many events carry the given event name.
func (dao *AnalyticsEventDao) CountEventsByName(tx *gorm.DB, eventName string) (int64, error) {
	var total int64
	err := tx.Model(&AnalyticsEvent{}).
		Where("event_name = ?", eventName).
		Count(&total).Error
	return total, err
}
// CountUserEvents returns how many events were recorded for one user.
func (dao *AnalyticsEventDao) CountUserEvents(tx *gorm.DB, userID int64) (int64, error) {
	var total int64
	err := tx.Model(&AnalyticsEvent{}).
		Where("user_id = ?", userID).
		Count(&total).Error
	return total, err
}
// GetEventStats aggregates events within [startTime, endTime] grouped by
// event name. Each result map holds "event_name", "count" (total rows)
// and "unique_users" (distinct user_id count), ordered by count descending.
func (dao *AnalyticsEventDao) GetEventStats(tx *gorm.DB, startTime, endTime time.Time) ([]map[string]interface{}, error) {
	var stats []map[string]interface{}
	q := tx.Model(&AnalyticsEvent{}).
		Select("event_name, COUNT(*) as count, COUNT(DISTINCT user_id) as unique_users").
		Where("created_at BETWEEN ? AND ?", startTime, endTime).
		Group("event_name").
		Order("count DESC")
	err := q.Find(&stats).Error
	return stats, err
}
// DeleteOldEvents removes all events created before beforeTime
// (retention/cleanup job). Note: this is a hard delete.
func (dao *AnalyticsEventDao) DeleteOldEvents(tx *gorm.DB, beforeTime time.Time) error {
	return tx.Where("created_at < ?", beforeTime).Delete(&AnalyticsEvent{}).Error
}

View File

@@ -3,16 +3,19 @@ package dao
import ( import (
"fmt" "fmt"
"gitea.com/bitwsd/document_ai/config" "gitea.com/texpixel/document_ai/config"
"gorm.io/driver/mysql" "gorm.io/driver/mysql"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/logger"
) )
var DB *gorm.DB var DB *gorm.DB
func InitDB(conf config.DatabaseConfig) { func InitDB(conf config.DatabaseConfig) {
dns := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Asia%%2FShanghai", conf.Username, conf.Password, conf.Host, conf.Port, conf.DBName) dns := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Asia%%2FShanghai", conf.Username, conf.Password, conf.Host, conf.Port, conf.DBName)
db, err := gorm.Open(mysql.Open(dns), &gorm.Config{}) db, err := gorm.Open(mysql.Open(dns), &gorm.Config{
Logger: logger.Default.LogMode(logger.Silent), // 禁用 GORM 日志输出
})
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@@ -0,0 +1,53 @@
package dao
import (
"gorm.io/gorm"
)
// RecognitionLogProvider identifies the third-party recognition service
// that handled a request.
type RecognitionLogProvider string

// Known recognition providers.
const (
	ProviderMathpix     RecognitionLogProvider = "mathpix"
	ProviderSiliconflow RecognitionLogProvider = "siliconflow"
	ProviderTexpixel    RecognitionLogProvider = "texpixel"
	ProviderBaiduOCR    RecognitionLogProvider = "baidu_ocr"
)
// RecognitionLog records one third-party recognition API call — the raw
// request and response bodies — tied back to a recognition task for
// auditing and debugging.
type RecognitionLog struct {
	BaseModel
	// Fix: column types must be declared as `type:...`; a bare `bigint` /
	// `varchar(32)` is not a recognized gorm tag key and was silently ignored.
	TaskID       int64                  `gorm:"column:task_id;type:bigint;not null;default:0;index;comment:关联任务ID" json:"task_id"`
	Provider     RecognitionLogProvider `gorm:"column:provider;type:varchar(32);not null;comment:服务提供商" json:"provider"`
	RequestBody  string                 `gorm:"column:request_body;type:longtext;comment:请求体" json:"request_body"`
	ResponseBody string                 `gorm:"column:response_body;type:longtext;comment:响应体" json:"response_body"`
}

// TableName tells GORM the backing table name.
func (RecognitionLog) TableName() string {
	return "recognition_log"
}
// RecognitionLogDao is a stateless data-access object for RecognitionLog rows.
type RecognitionLogDao struct{}

// NewRecognitionLogDao constructs a RecognitionLogDao.
func NewRecognitionLogDao() *RecognitionLogDao {
	return &RecognitionLogDao{}
}
// Create inserts a single call-log row using the given transaction/session.
func (d *RecognitionLogDao) Create(tx *gorm.DB, log *RecognitionLog) error {
	return tx.Create(log).Error
}
// GetByTaskID returns all call logs recorded for one task, newest first.
func (d *RecognitionLogDao) GetByTaskID(tx *gorm.DB, taskID int64) ([]*RecognitionLog, error) {
	var rows []*RecognitionLog
	err := tx.
		Where("task_id = ?", taskID).
		Order("created_at DESC").
		Find(&rows).Error
	return rows, err
}
// GetByProvider returns up to limit of the most recent call logs for one
// provider, newest first.
func (d *RecognitionLogDao) GetByProvider(tx *gorm.DB, provider RecognitionLogProvider, limit int) ([]*RecognitionLog, error) {
	var rows []*RecognitionLog
	err := tx.
		Where("provider = ?", provider).
		Order("created_at DESC").
		Limit(limit).
		Find(&rows).Error
	return rows, err
}

View File

@@ -1,66 +1,17 @@
package dao package dao
import ( import (
"encoding/json"
"gorm.io/gorm" "gorm.io/gorm"
) )
type JSON []byte
// ContentCodec 定义内容编解码接口
type ContentCodec interface {
Encode() (JSON, error)
Decode() error
GetContent() interface{} // 更明确的方法名
}
type FormulaRecognitionContent struct {
content JSON
Latex string `json:"latex"`
AdjustLatex string `json:"adjust_latex"`
EnhanceLatex string `json:"enhance_latex"`
}
func (c *FormulaRecognitionContent) Encode() (JSON, error) {
b, err := json.Marshal(c)
if err != nil {
return nil, err
}
return b, nil
}
func (c *FormulaRecognitionContent) Decode() error {
return json.Unmarshal(c.content, c)
}
// GetPreferredContent 按优先级返回公式内容
func (c *FormulaRecognitionContent) GetContent() interface{} {
c.Decode()
if c.EnhanceLatex != "" {
return c.EnhanceLatex
} else if c.AdjustLatex != "" {
return c.AdjustLatex
} else {
return c.Latex
}
}
type RecognitionResult struct { type RecognitionResult struct {
BaseModel BaseModel
TaskID int64 `gorm:"column:task_id;bigint;not null;default:0;comment:任务ID" json:"task_id"` TaskID int64 `gorm:"column:task_id;bigint;not null;default:0;comment:任务ID" json:"task_id"`
TaskType TaskType `gorm:"column:task_type;varchar(16);not null;comment:任务类型;default:''" json:"task_type"` TaskType TaskType `gorm:"column:task_type;varchar(16);not null;comment:任务类型;default:''" json:"task_type"`
Content JSON `gorm:"column:content;type:json;not null;comment:识别内容" json:"content"` Latex string `json:"latex" gorm:"column:latex;type:text;not null;default:''"`
} Markdown string `json:"markdown" gorm:"column:markdown;type:text;not null;default:''"` // Markdown 格式
MathML string `json:"mathml" gorm:"column:mathml;type:text;not null;default:''"` // MathML 格式
// NewContentCodec 创建对应任务类型的内容编解码器 MML string `json:"mml" gorm:"column:mml;type:text;not null;default:''"` // MML 格式
func (r *RecognitionResult) NewContentCodec() ContentCodec {
switch r.TaskType {
case TaskTypeFormula:
return &FormulaRecognitionContent{content: r.Content}
default:
return nil
}
} }
type RecognitionResultDao struct { type RecognitionResultDao struct {
@@ -84,6 +35,11 @@ func (dao *RecognitionResultDao) GetByTaskID(tx *gorm.DB, taskID int64) (result
return return
} }
func (dao *RecognitionResultDao) GetByTaskIDs(tx *gorm.DB, taskIDs []int64) (results []*RecognitionResult, err error) {
err = tx.Where("task_id IN (?)", taskIDs).Find(&results).Error
return
}
func (dao *RecognitionResultDao) Update(tx *gorm.DB, id int64, updates map[string]interface{}) error { func (dao *RecognitionResultDao) Update(tx *gorm.DB, id int64, updates map[string]interface{}) error {
return tx.Model(&RecognitionResult{}).Where("id = ?", id).Updates(updates).Error return tx.Model(&RecognitionResult{}).Where("id = ?", id).Updates(updates).Error
} }

View File

@@ -69,9 +69,9 @@ func (dao *RecognitionTaskDao) GetByTaskNo(tx *gorm.DB, taskUUID string) (task *
return return
} }
func (dao *RecognitionTaskDao) GetTaskByFileURL(tx *gorm.DB, userID int64, fileHash string) (task *RecognitionTask, err error) { func (dao *RecognitionTaskDao) GetTaskByFileURL(tx *gorm.DB, fileHash string) (task *RecognitionTask, err error) {
task = &RecognitionTask{} task = &RecognitionTask{}
err = tx.Model(RecognitionTask{}).Where("user_id = ? AND file_hash = ?", userID, fileHash).First(task).Error err = tx.Model(RecognitionTask{}).Where("file_hash = ?", fileHash).Last(task).Error
return return
} }
@@ -87,8 +87,13 @@ func (dao *RecognitionTaskDao) GetTaskByID(tx *gorm.DB, id int64) (task *Recogni
return task, nil return task, nil
} }
func (dao *RecognitionTaskDao) GetTaskList(tx *gorm.DB, taskType TaskType, page int, pageSize int) (tasks []*RecognitionTask, err error) { func (dao *RecognitionTaskDao) GetTaskList(tx *gorm.DB, userID int64, taskType TaskType, page int, pageSize int) (tasks []*RecognitionTask, total int64, err error) {
offset := (page - 1) * pageSize offset := (page - 1) * pageSize
err = tx.Model(RecognitionTask{}).Where("task_type = ?", taskType).Offset(offset).Limit(pageSize).Order(clause.OrderByColumn{Column: clause.Column{Name: "id"}, Desc: true}).Find(&tasks).Error query := tx.Model(RecognitionTask{}).Where("user_id = ? AND task_type = ?", userID, taskType)
return err = query.Count(&total).Error
if err != nil {
return nil, 0, err
}
err = query.Offset(offset).Limit(pageSize).Order(clause.OrderByColumn{Column: clause.Column{Name: "id"}, Desc: true}).Find(&tasks).Error
return tasks, total, err
} }

View File

@@ -10,9 +10,11 @@ type User struct {
BaseModel BaseModel
Username string `gorm:"column:username" json:"username"` Username string `gorm:"column:username" json:"username"`
Phone string `gorm:"column:phone" json:"phone"` Phone string `gorm:"column:phone" json:"phone"`
Email string `gorm:"column:email" json:"email"`
Password string `gorm:"column:password" json:"password"` Password string `gorm:"column:password" json:"password"`
WechatOpenID string `gorm:"column:wechat_open_id" json:"wechat_open_id"` WechatOpenID string `gorm:"column:wechat_open_id" json:"wechat_open_id"`
WechatUnionID string `gorm:"column:wechat_union_id" json:"wechat_union_id"` WechatUnionID string `gorm:"column:wechat_union_id" json:"wechat_union_id"`
GoogleID string `gorm:"column:google_id" json:"google_id"`
} }
func (u *User) TableName() string { func (u *User) TableName() string {
@@ -51,3 +53,29 @@ func (dao *UserDao) GetByID(tx *gorm.DB, id int64) (*User, error) {
} }
return &user, nil return &user, nil
} }
func (dao *UserDao) GetByEmail(tx *gorm.DB, email string) (*User, error) {
var user User
if err := tx.Where("email = ?", email).First(&user).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, err
}
return &user, nil
}
func (dao *UserDao) GetByGoogleID(tx *gorm.DB, googleID string) (*User, error) {
var user User
if err := tx.Where("google_id = ?", googleID).First(&user).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, err
}
return &user, nil
}
func (dao *UserDao) Update(tx *gorm.DB, user *User) error {
return tx.Save(user).Error
}

33
main.go
View File

@@ -10,23 +10,26 @@ import (
"syscall" "syscall"
"time" "time"
"gitea.com/bitwsd/core/common/cors" "gitea.com/texpixel/document_ai/api"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/config"
"gitea.com/bitwsd/core/common/middleware" "gitea.com/texpixel/document_ai/internal/storage/cache"
"gitea.com/bitwsd/document_ai/api" "gitea.com/texpixel/document_ai/internal/storage/dao"
"gitea.com/bitwsd/document_ai/config" "gitea.com/texpixel/document_ai/pkg/common"
"gitea.com/bitwsd/document_ai/internal/storage/cache" "gitea.com/texpixel/document_ai/pkg/cors"
"gitea.com/bitwsd/document_ai/internal/storage/dao" "gitea.com/texpixel/document_ai/pkg/log"
"gitea.com/bitwsd/document_ai/pkg/common" "gitea.com/texpixel/document_ai/pkg/middleware"
"gitea.com/bitwsd/document_ai/pkg/sms" "gitea.com/texpixel/document_ai/pkg/sms"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
func main() { func main() {
// 加载配置 // 加载配置
env := "dev" env := ""
flag.StringVar(&env, "env", "dev", "environment (dev/prod)") flag.StringVar(&env, "env", "dev", "environment (dev/prod)")
flag.Parse() flag.Parse()
fmt.Println("env:", env)
configPath := fmt.Sprintf("./config/config_%s.yaml", env) configPath := fmt.Sprintf("./config/config_%s.yaml", env)
if err := config.Init(configPath); err != nil { if err := config.Init(configPath); err != nil {
panic(err) panic(err)
@@ -42,14 +45,6 @@ func main() {
cache.InitRedisClient(config.GlobalConfig.Redis) cache.InitRedisClient(config.GlobalConfig.Redis)
sms.InitSmsClient() sms.InitSmsClient()
// 初始化Redis
// cache.InitRedis(config.GlobalConfig.Redis.Addr)
// 初始化OSS客户端
// if err := oss.InitOSS(config.GlobalConfig.OSS); err != nil {
// logger.Fatal("Failed to init OSS client", logger.Fields{"error": err})
// }
// 设置gin模式 // 设置gin模式
gin.SetMode(config.GlobalConfig.Server.Mode) gin.SetMode(config.GlobalConfig.Server.Mode)
@@ -78,6 +73,6 @@ func main() {
if err := srv.Shutdown(context.Background()); err != nil { if err := srv.Shutdown(context.Background()); err != nil {
panic(err) panic(err)
} }
time.Sleep(time.Second * 3) time.Sleep(time.Second * 5)
dao.CloseDB() dao.CloseDB()
} }

View File

@@ -0,0 +1,18 @@
-- Analytics tracking-event table: one row per recorded event, with
-- schemaless JSON columns for flexible event payloads.
CREATE TABLE IF NOT EXISTS `analytics_events` (
`id` BIGINT NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`user_id` BIGINT NOT NULL COMMENT '用户ID',
`event_name` VARCHAR(128) NOT NULL COMMENT '事件名称',
`properties` JSON DEFAULT NULL COMMENT '事件属性(JSON)',
`device_info` JSON DEFAULT NULL COMMENT '设备信息(JSON)',
`meta_data` JSON DEFAULT NULL COMMENT '元数据(JSON包含task_id等)',
`created_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
PRIMARY KEY (`id`),
INDEX `idx_user_id` (`user_id`),
INDEX `idx_event_name` (`event_name`),
INDEX `idx_created_at` (`created_at`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='数据埋点事件表';
-- Composite indexes for the common "per-user per-event" and
-- "per-event time-window" query shapes.
-- NOTE(review): `idx_user_event` makes the single-column `idx_user_id`
-- redundant (leftmost-prefix rule), and `idx_event_time` likewise covers
-- `idx_event_name` — consider dropping the single-column indexes.
-- NOTE(review): MySQL has no CREATE INDEX IF NOT EXISTS, so re-running
-- this script errors once the indexes exist.
CREATE INDEX `idx_user_event` ON `analytics_events` (`user_id`, `event_name`);
CREATE INDEX `idx_event_time` ON `analytics_events` (`event_name`, `created_at`);

View File

@@ -3,31 +3,39 @@ package common
type ErrorCode int type ErrorCode int
const ( const (
CodeSuccess = 200 CodeSuccess = 200
CodeParamError = 400 CodeParamError = 400
CodeUnauthorized = 401 CodeUnauthorized = 401
CodeForbidden = 403 CodeTokenExpired = 4011
CodeNotFound = 404 CodeForbidden = 403
CodeInvalidStatus = 405 CodeNotFound = 404
CodeDBError = 500 CodeInvalidStatus = 405
CodeSystemError = 501 CodeDBError = 500
CodeTaskNotComplete = 1001 CodeSystemError = 501
CodeRecordRepeat = 1002 CodeTaskNotComplete = 1001
CodeSmsCodeError = 1003 CodeRecordRepeat = 1002
CodeSmsCodeError = 1003
CodeEmailExists = 1004
CodeEmailNotFound = 1005
CodePasswordMismatch = 1006
) )
const ( const (
CodeSuccessMsg = "success" CodeSuccessMsg = "success"
CodeParamErrorMsg = "param error" CodeParamErrorMsg = "param error"
CodeUnauthorizedMsg = "unauthorized" CodeUnauthorizedMsg = "unauthorized"
CodeForbiddenMsg = "forbidden" CodeTokenExpiredMsg = "token expired"
CodeNotFoundMsg = "not found" CodeForbiddenMsg = "forbidden"
CodeInvalidStatusMsg = "invalid status" CodeNotFoundMsg = "not found"
CodeDBErrorMsg = "database error" CodeInvalidStatusMsg = "invalid status"
CodeSystemErrorMsg = "system error" CodeDBErrorMsg = "database error"
CodeTaskNotCompleteMsg = "task not complete" CodeSystemErrorMsg = "system error"
CodeRecordRepeatMsg = "record repeat" CodeTaskNotCompleteMsg = "task not complete"
CodeSmsCodeErrorMsg = "sms code error" CodeRecordRepeatMsg = "record repeat"
CodeSmsCodeErrorMsg = "sms code error"
CodeEmailExistsMsg = "email already registered"
CodeEmailNotFoundMsg = "email not found"
CodePasswordMismatchMsg = "password mismatch"
) )
type BusinessError struct { type BusinessError struct {
@@ -47,3 +55,10 @@ func NewError(code ErrorCode, message string, err error) *BusinessError {
Err: err, Err: err,
} }
} }
// 预定义业务错误
var (
ErrEmailExists = NewError(CodeEmailExists, CodeEmailExistsMsg, nil)
ErrEmailNotFound = NewError(CodeEmailNotFound, CodeEmailNotFoundMsg, nil)
ErrPasswordMismatch = NewError(CodePasswordMismatch, CodePasswordMismatchMsg, nil)
)

View File

@@ -4,9 +4,10 @@ import (
"context" "context"
"net/http" "net/http"
"strings" "strings"
"time"
"gitea.com/bitwsd/document_ai/pkg/constant" "gitea.com/texpixel/document_ai/pkg/constant"
"gitea.com/bitwsd/document_ai/pkg/jwt" "gitea.com/texpixel/document_ai/pkg/jwt"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
@@ -45,6 +46,30 @@ func AuthMiddleware(ctx *gin.Context) {
ctx.Set(constant.ContextUserID, claims.UserId) ctx.Set(constant.ContextUserID, claims.UserId)
} }
func MustAuthMiddleware() gin.HandlerFunc {
return func(ctx *gin.Context) {
token := ctx.GetHeader("Authorization")
if token == "" {
ctx.JSON(http.StatusOK, ErrorResponse(ctx, CodeUnauthorized, CodeUnauthorizedMsg))
ctx.Abort()
return
}
token = strings.TrimPrefix(token, "Bearer ")
claims, err := jwt.ParseToken(token)
if err != nil || claims == nil {
ctx.JSON(http.StatusOK, ErrorResponse(ctx, CodeUnauthorized, CodeUnauthorizedMsg))
ctx.Abort()
return
}
if claims.ExpiresAt < time.Now().Unix() {
ctx.JSON(http.StatusOK, ErrorResponse(ctx, CodeTokenExpired, CodeTokenExpiredMsg))
ctx.Abort()
return
}
ctx.Set(constant.ContextUserID, claims.UserId)
}
}
func GetAuthMiddleware() gin.HandlerFunc { func GetAuthMiddleware() gin.HandlerFunc {
return func(ctx *gin.Context) { return func(ctx *gin.Context) {
token := ctx.GetHeader("Authorization") token := ctx.GetHeader("Authorization")

View File

@@ -3,7 +3,7 @@ package common
import ( import (
"context" "context"
"gitea.com/bitwsd/document_ai/pkg/constant" "gitea.com/texpixel/document_ai/pkg/constant"
) )
type Response struct { type Response struct {

View File

@@ -19,9 +19,9 @@ type Config struct {
func DefaultConfig() Config { func DefaultConfig() Config {
return Config{ return Config{
AllowOrigins: []string{"*"}, AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, AllowMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"},
AllowHeaders: []string{"Origin", "Content-Type", "Accept"}, AllowHeaders: []string{"Origin", "Content-Type", "Accept", "Authorization", "X-Requested-With"},
ExposeHeaders: []string{"Content-Length"}, ExposeHeaders: []string{"Content-Length", "Content-Type"},
AllowCredentials: true, AllowCredentials: true,
MaxAge: 86400, // 24 hours MaxAge: 86400, // 24 hours
} }
@@ -30,16 +30,30 @@ func DefaultConfig() Config {
func Cors(config Config) gin.HandlerFunc { func Cors(config Config) gin.HandlerFunc {
return func(c *gin.Context) { return func(c *gin.Context) {
origin := c.Request.Header.Get("Origin") origin := c.Request.Header.Get("Origin")
if origin == "" {
c.Next()
return
}
// 检查是否允许该来源 // 检查是否允许该来源
allowOrigin := "*" allowOrigin := ""
for _, o := range config.AllowOrigins { for _, o := range config.AllowOrigins {
if o == "*" {
// 通配符时,回显实际 origin兼容 credentials
allowOrigin = origin
break
}
if o == origin { if o == origin {
allowOrigin = origin allowOrigin = origin
break break
} }
} }
if allowOrigin == "" {
c.Next()
return
}
c.Header("Access-Control-Allow-Origin", allowOrigin) c.Header("Access-Control-Allow-Origin", allowOrigin)
c.Header("Access-Control-Allow-Methods", strings.Join(config.AllowMethods, ",")) c.Header("Access-Control-Allow-Methods", strings.Join(config.AllowMethods, ","))
c.Header("Access-Control-Allow-Headers", strings.Join(config.AllowHeaders, ",")) c.Header("Access-Control-Allow-Headers", strings.Join(config.AllowHeaders, ","))
@@ -58,3 +72,4 @@ func Cors(config Config) gin.HandlerFunc {
c.Next() c.Next()
} }
} }

View File

@@ -10,7 +10,7 @@ import (
"net/http" "net/http"
"time" "time"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/pkg/log"
) )
// RetryConfig 重试配置 // RetryConfig 重试配置
@@ -23,9 +23,9 @@ type RetryConfig struct {
// DefaultRetryConfig 默认重试配置 // DefaultRetryConfig 默认重试配置
var DefaultRetryConfig = RetryConfig{ var DefaultRetryConfig = RetryConfig{
MaxRetries: 2, MaxRetries: 1,
InitialInterval: 100 * time.Millisecond, InitialInterval: 100 * time.Millisecond,
MaxInterval: 5 * time.Second, MaxInterval: 30 * time.Second,
SkipTLSVerify: true, SkipTLSVerify: true,
} }

View File

@@ -18,7 +18,12 @@ type CustomClaims struct {
jwt.StandardClaims jwt.StandardClaims
} }
func CreateToken(user User) (string, error) { type TokenResult struct {
Token string `json:"token"`
ExpiresAt int64 `json:"expires_at"`
}
func CreateToken(user User) (*TokenResult, error) {
expire := time.Now().Add(time.Duration(ValidTime) * time.Second) expire := time.Now().Add(time.Duration(ValidTime) * time.Second)
claims := &CustomClaims{ claims := &CustomClaims{
User: user, User: user,
@@ -32,10 +37,13 @@ func CreateToken(user User) (string, error) {
t, err := token.SignedString(JwtKey) t, err := token.SignedString(JwtKey)
if err != nil { if err != nil {
return "", err return nil, err
} }
return "Bearer " + t, nil return &TokenResult{
Token: "Bearer " + t,
ExpiresAt: expire.Unix(),
}, nil
} }
func ParseToken(signToken string) (*CustomClaims, error) { func ParseToken(signToken string) (*CustomClaims, error) {

View File

@@ -27,3 +27,4 @@ func DefaultLogConfig() *LogConfig {
Compress: true, Compress: true,
} }
} }

View File

@@ -8,6 +8,8 @@ import (
"runtime" "runtime"
"time" "time"
"gitea.com/texpixel/document_ai/pkg/requestid"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"gopkg.in/natefinch/lumberjack.v2" "gopkg.in/natefinch/lumberjack.v2"
) )
@@ -67,8 +69,13 @@ func log(ctx context.Context, level zerolog.Level, logType LogType, kv ...interf
// 添加日志类型 // 添加日志类型
event.Str("type", string(logType)) event.Str("type", string(logType))
// 添加请求ID reqID := requestid.GetRequestID()
if reqID, exists := ctx.Value("request_id").(string); exists { if reqID == "" {
if id, exists := ctx.Value("request_id").(string); exists {
reqID = id
}
}
if reqID != "" {
event.Str("request_id", reqID) event.Str("request_id", reqID)
} }

View File

@@ -6,7 +6,7 @@ import (
"strings" "strings"
"time" "time"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/pkg/log"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )

View File

@@ -0,0 +1,23 @@
package middleware
import (
"gitea.com/texpixel/document_ai/pkg/requestid"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
// RequestID is a gin middleware that ensures every request carries an
// X-Request-ID: it reuses the incoming header when present, otherwise
// generates a fresh UUID. The ID is stored on the request header, on the
// gin context under "request_id", and in goroutine-local storage for the
// duration of the handler chain, and is echoed back on the response so
// clients can correlate their requests with server logs.
func RequestID() gin.HandlerFunc {
	return func(c *gin.Context) {
		reqID := c.Request.Header.Get("X-Request-ID")
		if reqID == "" {
			reqID = uuid.New().String()
		}
		c.Request.Header.Set("X-Request-ID", reqID)
		// Improvement: also surface the ID to the caller on the response.
		c.Writer.Header().Set("X-Request-ID", reqID)
		c.Set("request_id", reqID)
		// gls keeps the ID visible to loggers on this goroutine without
		// threading a context through every call.
		requestid.SetRequestID(reqID, func() {
			c.Next()
		})
	}
}

View File

@@ -12,8 +12,8 @@ import (
"strings" "strings"
"time" "time"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/config"
"gitea.com/bitwsd/document_ai/config" "gitea.com/texpixel/document_ai/pkg/log"
"github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/aliyun/aliyun-oss-go-sdk/oss"
) )
@@ -64,8 +64,7 @@ func GetPolicyToken() (string, error) {
} }
func GetPolicyURL(ctx context.Context, path string) (string, error) { func GetPolicyURL(ctx context.Context, path string) (string, error) {
// Create OSS client client, err := oss.New(config.GlobalConfig.Aliyun.OSS.Endpoint, config.GlobalConfig.Aliyun.OSS.AccessKeyID, config.GlobalConfig.Aliyun.OSS.AccessKeySecret, oss.UseCname(true))
client, err := oss.New(config.GlobalConfig.Aliyun.OSS.Endpoint, config.GlobalConfig.Aliyun.OSS.AccessKeyID, config.GlobalConfig.Aliyun.OSS.AccessKeySecret)
if err != nil { if err != nil {
log.Error(ctx, "func", "GetPolicyURL", "msg", "create oss client failed", "error", err) log.Error(ctx, "func", "GetPolicyURL", "msg", "create oss client failed", "error", err)
return "", err return "", err
@@ -120,14 +119,16 @@ func GetPolicyURL(ctx context.Context, path string) (string, error) {
// DownloadFile downloads a file from OSS and returns the reader, caller should close the reader // DownloadFile downloads a file from OSS and returns the reader, caller should close the reader
func DownloadFile(ctx context.Context, ossPath string) (io.ReadCloser, error) { func DownloadFile(ctx context.Context, ossPath string) (io.ReadCloser, error) {
endpoint := config.GlobalConfig.Aliyun.OSS.InnerEndpoint endpoint := config.GlobalConfig.Aliyun.OSS.InnerEndpoint
useCname := false
if config.GlobalConfig.Server.IsDebug() { if config.GlobalConfig.Server.IsDebug() {
endpoint = config.GlobalConfig.Aliyun.OSS.Endpoint endpoint = config.GlobalConfig.Aliyun.OSS.Endpoint
useCname = true
} }
log.Info(ctx, "func", "DownloadFile", "msg", "endpoint", endpoint, "ossPath", ossPath)
// Create OSS client // Create OSS client
client, err := oss.New(endpoint, client, err := oss.New(endpoint, config.GlobalConfig.Aliyun.OSS.AccessKeyID, config.GlobalConfig.Aliyun.OSS.AccessKeySecret, oss.UseCname(useCname))
config.GlobalConfig.Aliyun.OSS.AccessKeyID,
config.GlobalConfig.Aliyun.OSS.AccessKeySecret)
if err != nil { if err != nil {
log.Error(ctx, "func", "DownloadFile", "msg", "create oss client failed", "error", err) log.Error(ctx, "func", "DownloadFile", "msg", "create oss client failed", "error", err)
return nil, err return nil, err
@@ -153,7 +154,7 @@ func DownloadFile(ctx context.Context, ossPath string) (io.ReadCloser, error) {
func GetDownloadURL(ctx context.Context, ossPath string) (string, error) { func GetDownloadURL(ctx context.Context, ossPath string) (string, error) {
endpoint := config.GlobalConfig.Aliyun.OSS.Endpoint endpoint := config.GlobalConfig.Aliyun.OSS.Endpoint
client, err := oss.New(endpoint, config.GlobalConfig.Aliyun.OSS.AccessKeyID, config.GlobalConfig.Aliyun.OSS.AccessKeySecret) client, err := oss.New(endpoint, config.GlobalConfig.Aliyun.OSS.AccessKeyID, config.GlobalConfig.Aliyun.OSS.AccessKeySecret, oss.UseCname(true))
if err != nil { if err != nil {
log.Error(ctx, "func", "GetDownloadURL", "msg", "create oss client failed", "error", err) log.Error(ctx, "func", "GetDownloadURL", "msg", "create oss client failed", "error", err)
return "", err return "", err
@@ -165,11 +166,13 @@ func GetDownloadURL(ctx context.Context, ossPath string) (string, error) {
return "", err return "", err
} }
signURL, err := bucket.SignURL(ossPath, oss.HTTPGet, 60) signURL, err := bucket.SignURL(ossPath, oss.HTTPGet, 3600)
if err != nil { if err != nil {
log.Error(ctx, "func", "GetDownloadURL", "msg", "get object failed", "error", err) log.Error(ctx, "func", "GetDownloadURL", "msg", "get object failed", "error", err)
return "", err return "", err
} }
signURL = strings.Replace(signURL, "http://", "https://", 1)
return signURL, nil return signURL, nil
} }

View File

@@ -0,0 +1,27 @@
package requestid
import (
"github.com/jtolds/gls"
)
// requestIDKey is the gls symbol under which the request ID is stored.
var requestIDKey = gls.GenSym()

// glsMgr is the goroutine-local-storage manager backing
// SetRequestID / GetRequestID.
var glsMgr = gls.NewContextManager()
// SetRequestID stores requestID in goroutine-local storage for the
// duration of fn; GetRequestID returns it anywhere inside fn on the same
// goroutine (spawned goroutines do not inherit it automatically).
func SetRequestID(requestID string, fn func()) {
	glsMgr.SetValues(gls.Values{requestIDKey: requestID}, fn)
}
// GetRequestID returns the request ID stored for the current goroutine,
// or the empty string when none was set via SetRequestID.
func GetRequestID() string {
	if val, ok := glsMgr.GetValue(requestIDKey); ok {
		id, _ := val.(string)
		return id
	}
	return ""
}

View File

@@ -4,7 +4,7 @@ import (
"errors" "errors"
"sync" "sync"
"gitea.com/bitwsd/document_ai/config" "gitea.com/texpixel/document_ai/config"
openapi "github.com/alibabacloud-go/darabonba-openapi/client" openapi "github.com/alibabacloud-go/darabonba-openapi/client"
dysmsapi "github.com/alibabacloud-go/dysmsapi-20170525/v2/client" dysmsapi "github.com/alibabacloud-go/dysmsapi-20170525/v2/client"
aliutil "github.com/alibabacloud-go/tea-utils/service" aliutil "github.com/alibabacloud-go/tea-utils/service"

View File

@@ -23,6 +23,8 @@ func rmDollarSurr(text string) string {
func ToKatex(formula string) string { func ToKatex(formula string) string {
res := formula res := formula
res = strings.ReplaceAll(res, "\n", "")
// Remove mbox surrounding // Remove mbox surrounding
res = changeAll(res, `\mbox `, " ", "{", "}", "", "") res = changeAll(res, `\mbox `, " ", "{", "}", "", "")
res = changeAll(res, `\mbox`, " ", "{", "}", "", "") res = changeAll(res, `\mbox`, " ", "{", "}", "", "")

6
pkg/utils/model.go Normal file
View File

@@ -0,0 +1,6 @@
package utils
// Vision-language model identifiers.
// NOTE(review): presumably passed as model names to an inference API —
// confirm at call sites.
const (
	ModelVLDeepSeekOCR        = "deepseek-ai/DeepSeek-OCR"
	ModelVLQwen3VL32BInstruct = "Qwen/Qwen3-VL-32B-Instruct"
)

View File

@@ -3,7 +3,7 @@ package utils
import ( import (
"context" "context"
"gitea.com/bitwsd/core/common/log" "gitea.com/texpixel/document_ai/pkg/log"
) )
func SafeGo(fn func()) { func SafeGo(fn func()) {

View File

@@ -1,5 +1,5 @@
package utils package utils
const ( const (
SiliconFlowToken = "Bearer sk-akbroznlbxikkbiouzasspbbzwgxubnjjtqlujxmxsnvpmhn" SiliconFlowToken = "Bearer sk-wiggxqscvjdveqvwcdywwpipcinglkzkewkcfjnrgjqbdbmc"
) )

View File

@@ -1,18 +0,0 @@
package middleware
import (
"github.com/gin-gonic/gin"
"github.com/google/uuid"
)
func RequestID() gin.HandlerFunc {
return func(c *gin.Context) {
requestID := c.Request.Header.Get("X-Request-ID")
if requestID == "" {
requestID = uuid.New().String()
}
c.Request.Header.Set("X-Request-ID", requestID)
c.Set("request_id", requestID)
c.Next()
}
}

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,305 +0,0 @@
// This file is auto-generated, don't edit it. Thanks.
package client
import (
"io"
"github.com/alibabacloud-go/tea/tea"
credential "github.com/aliyun/credentials-go/credentials"
)
// InterceptorContext bundles the request, client configuration and response
// that an interceptor may inspect or mutate during an API call's lifecycle.
// NOTE(review): this file is auto-generated SDK code — regenerate rather
// than hand-edit.
type InterceptorContext struct {
	Request       *InterceptorContextRequest       `json:"request,omitempty" xml:"request,omitempty" require:"true" type:"Struct"`
	Configuration *InterceptorContextConfiguration `json:"configuration,omitempty" xml:"configuration,omitempty" require:"true" type:"Struct"`
	Response      *InterceptorContextResponse      `json:"response,omitempty" xml:"response,omitempty" require:"true" type:"Struct"`
}

// String returns a pretty-printed rendering of the context via tea.Prettify.
func (s InterceptorContext) String() string {
	return tea.Prettify(s)
}

// GoString implements fmt.GoStringer by delegating to String.
func (s InterceptorContext) GoString() string {
	return s.String()
}

// SetRequest sets the request section and returns the receiver for chaining.
func (s *InterceptorContext) SetRequest(v *InterceptorContextRequest) *InterceptorContext {
	s.Request = v
	return s
}

// SetConfiguration sets the configuration section and returns the receiver.
func (s *InterceptorContext) SetConfiguration(v *InterceptorContextConfiguration) *InterceptorContext {
	s.Configuration = v
	return s
}

// SetResponse sets the response section and returns the receiver.
func (s *InterceptorContext) SetResponse(v *InterceptorContextResponse) *InterceptorContext {
	s.Response = v
	return s
}
// InterceptorContextRequest describes the outgoing API request an interceptor
// can read or rewrite: wire-level data (headers, query, body/stream, host map)
// plus the OpenAPI call metadata (product, action, version, protocol, method,
// auth/signing options) used to build and sign the request.
type InterceptorContextRequest struct {
	Headers            map[string]*string    `json:"headers,omitempty" xml:"headers,omitempty"`
	Query              map[string]*string    `json:"query,omitempty" xml:"query,omitempty"`
	Body               interface{}           `json:"body,omitempty" xml:"body,omitempty"`
	Stream             io.Reader             `json:"stream,omitempty" xml:"stream,omitempty"`
	HostMap            map[string]*string    `json:"hostMap,omitempty" xml:"hostMap,omitempty"`
	Pathname           *string               `json:"pathname,omitempty" xml:"pathname,omitempty" require:"true"`
	ProductId          *string               `json:"productId,omitempty" xml:"productId,omitempty" require:"true"`
	Action             *string               `json:"action,omitempty" xml:"action,omitempty" require:"true"`
	Version            *string               `json:"version,omitempty" xml:"version,omitempty" require:"true"`
	Protocol           *string               `json:"protocol,omitempty" xml:"protocol,omitempty" require:"true"`
	Method             *string               `json:"method,omitempty" xml:"method,omitempty" require:"true"`
	AuthType           *string               `json:"authType,omitempty" xml:"authType,omitempty" require:"true"`
	BodyType           *string               `json:"bodyType,omitempty" xml:"bodyType,omitempty" require:"true"`
	ReqBodyType        *string               `json:"reqBodyType,omitempty" xml:"reqBodyType,omitempty" require:"true"`
	Style              *string               `json:"style,omitempty" xml:"style,omitempty"`
	Credential         credential.Credential `json:"credential,omitempty" xml:"credential,omitempty" require:"true"`
	SignatureVersion   *string               `json:"signatureVersion,omitempty" xml:"signatureVersion,omitempty"`
	SignatureAlgorithm *string               `json:"signatureAlgorithm,omitempty" xml:"signatureAlgorithm,omitempty"`
	UserAgent          *string               `json:"userAgent,omitempty" xml:"userAgent,omitempty" require:"true"`
}

// String returns a pretty-printed rendering of the request via tea.Prettify.
func (s InterceptorContextRequest) String() string {
	return tea.Prettify(s)
}

// GoString implements fmt.GoStringer by delegating to String.
func (s InterceptorContextRequest) GoString() string {
	return s.String()
}

// The Set* methods below are generated fluent setters: each assigns one field
// (taking the address of value arguments for pointer-typed fields) and returns
// the receiver so calls can be chained.

func (s *InterceptorContextRequest) SetHeaders(v map[string]*string) *InterceptorContextRequest {
	s.Headers = v
	return s
}

func (s *InterceptorContextRequest) SetQuery(v map[string]*string) *InterceptorContextRequest {
	s.Query = v
	return s
}

func (s *InterceptorContextRequest) SetBody(v interface{}) *InterceptorContextRequest {
	s.Body = v
	return s
}

func (s *InterceptorContextRequest) SetStream(v io.Reader) *InterceptorContextRequest {
	s.Stream = v
	return s
}

func (s *InterceptorContextRequest) SetHostMap(v map[string]*string) *InterceptorContextRequest {
	s.HostMap = v
	return s
}

func (s *InterceptorContextRequest) SetPathname(v string) *InterceptorContextRequest {
	s.Pathname = &v
	return s
}

func (s *InterceptorContextRequest) SetProductId(v string) *InterceptorContextRequest {
	s.ProductId = &v
	return s
}

func (s *InterceptorContextRequest) SetAction(v string) *InterceptorContextRequest {
	s.Action = &v
	return s
}

func (s *InterceptorContextRequest) SetVersion(v string) *InterceptorContextRequest {
	s.Version = &v
	return s
}

func (s *InterceptorContextRequest) SetProtocol(v string) *InterceptorContextRequest {
	s.Protocol = &v
	return s
}

func (s *InterceptorContextRequest) SetMethod(v string) *InterceptorContextRequest {
	s.Method = &v
	return s
}

func (s *InterceptorContextRequest) SetAuthType(v string) *InterceptorContextRequest {
	s.AuthType = &v
	return s
}

func (s *InterceptorContextRequest) SetBodyType(v string) *InterceptorContextRequest {
	s.BodyType = &v
	return s
}

func (s *InterceptorContextRequest) SetReqBodyType(v string) *InterceptorContextRequest {
	s.ReqBodyType = &v
	return s
}

func (s *InterceptorContextRequest) SetStyle(v string) *InterceptorContextRequest {
	s.Style = &v
	return s
}

func (s *InterceptorContextRequest) SetCredential(v credential.Credential) *InterceptorContextRequest {
	s.Credential = v
	return s
}

func (s *InterceptorContextRequest) SetSignatureVersion(v string) *InterceptorContextRequest {
	s.SignatureVersion = &v
	return s
}

func (s *InterceptorContextRequest) SetSignatureAlgorithm(v string) *InterceptorContextRequest {
	s.SignatureAlgorithm = &v
	return s
}

func (s *InterceptorContextRequest) SetUserAgent(v string) *InterceptorContextRequest {
	s.UserAgent = &v
	return s
}
// InterceptorContextConfiguration carries the client-side endpoint
// configuration (region, endpoint resolution rules and network options)
// visible to interceptors.
type InterceptorContextConfiguration struct {
	RegionId     *string            `json:"regionId,omitempty" xml:"regionId,omitempty" require:"true"`
	Endpoint     *string            `json:"endpoint,omitempty" xml:"endpoint,omitempty"`
	EndpointRule *string            `json:"endpointRule,omitempty" xml:"endpointRule,omitempty"`
	EndpointMap  map[string]*string `json:"endpointMap,omitempty" xml:"endpointMap,omitempty"`
	EndpointType *string            `json:"endpointType,omitempty" xml:"endpointType,omitempty"`
	Network      *string            `json:"network,omitempty" xml:"network,omitempty"`
	Suffix       *string            `json:"suffix,omitempty" xml:"suffix,omitempty"`
}

// String returns a pretty-printed rendering of the configuration.
func (s InterceptorContextConfiguration) String() string {
	return tea.Prettify(s)
}

// GoString implements fmt.GoStringer by delegating to String.
func (s InterceptorContextConfiguration) GoString() string {
	return s.String()
}

// The Set* methods below are generated fluent setters: each assigns one field
// and returns the receiver so calls can be chained.

func (s *InterceptorContextConfiguration) SetRegionId(v string) *InterceptorContextConfiguration {
	s.RegionId = &v
	return s
}

func (s *InterceptorContextConfiguration) SetEndpoint(v string) *InterceptorContextConfiguration {
	s.Endpoint = &v
	return s
}

func (s *InterceptorContextConfiguration) SetEndpointRule(v string) *InterceptorContextConfiguration {
	s.EndpointRule = &v
	return s
}

func (s *InterceptorContextConfiguration) SetEndpointMap(v map[string]*string) *InterceptorContextConfiguration {
	s.EndpointMap = v
	return s
}

func (s *InterceptorContextConfiguration) SetEndpointType(v string) *InterceptorContextConfiguration {
	s.EndpointType = &v
	return s
}

func (s *InterceptorContextConfiguration) SetNetwork(v string) *InterceptorContextConfiguration {
	s.Network = &v
	return s
}

func (s *InterceptorContextConfiguration) SetSuffix(v string) *InterceptorContextConfiguration {
	s.Suffix = &v
	return s
}
// InterceptorContextResponse exposes the API response to interceptors:
// status code, headers, the raw body stream and, once decoded, the
// deserialized body value.
type InterceptorContextResponse struct {
	StatusCode       *int               `json:"statusCode,omitempty" xml:"statusCode,omitempty"`
	Headers          map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"`
	Body             io.Reader          `json:"body,omitempty" xml:"body,omitempty"`
	DeserializedBody interface{}        `json:"deserializedBody,omitempty" xml:"deserializedBody,omitempty"`
}

// String returns a pretty-printed rendering of the response.
func (s InterceptorContextResponse) String() string {
	return tea.Prettify(s)
}

// GoString implements fmt.GoStringer by delegating to String.
func (s InterceptorContextResponse) GoString() string {
	return s.String()
}

// The Set* methods below are generated fluent setters: each assigns one field
// and returns the receiver so calls can be chained.

func (s *InterceptorContextResponse) SetStatusCode(v int) *InterceptorContextResponse {
	s.StatusCode = &v
	return s
}

func (s *InterceptorContextResponse) SetHeaders(v map[string]*string) *InterceptorContextResponse {
	s.Headers = v
	return s
}

func (s *InterceptorContextResponse) SetBody(v io.Reader) *InterceptorContextResponse {
	s.Body = v
	return s
}

func (s *InterceptorContextResponse) SetDeserializedBody(v interface{}) *InterceptorContextResponse {
	s.DeserializedBody = v
	return s
}
// AttributeMap is a generic key/value carrier passed to interceptor hooks
// for exchanging auxiliary data alongside the call context.
type AttributeMap struct {
	Attributes map[string]interface{} `json:"attributes,omitempty" xml:"attributes,omitempty" require:"true"`
	Key        map[string]*string     `json:"key,omitempty" xml:"key,omitempty" require:"true"`
}

// String returns a pretty-printed rendering of the map via tea.Prettify.
func (s AttributeMap) String() string {
	return tea.Prettify(s)
}

// GoString implements fmt.GoStringer by delegating to String.
func (s AttributeMap) GoString() string {
	return s.String()
}

// SetAttributes sets the attributes map and returns the receiver for chaining.
func (s *AttributeMap) SetAttributes(v map[string]interface{}) *AttributeMap {
	s.Attributes = v
	return s
}

// SetKey sets the key map and returns the receiver for chaining.
func (s *AttributeMap) SetKey(v map[string]*string) *AttributeMap {
	s.Key = v
	return s
}
// ClientInterface is implemented by interceptors that hook into the
// configuration, request and response phases of an API call.
type ClientInterface interface {
	ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) error
	ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) error
	ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) error
}

// Client is the generated base interceptor. Its Modify* hooks panic, so it
// serves only as a template: concrete interceptors must override every hook
// they are invoked on.
type Client struct {
}

// NewClient constructs a Client and runs its Init hook.
func NewClient() (*Client, error) {
	client := new(Client)
	err := client.Init()
	return client, err
}

// Init performs no work for the base Client and always returns nil.
func (client *Client) Init() (_err error) {
	return nil
}

// ModifyConfiguration panics unconditionally — the base Client provides no
// implementation (generated "No Support!" stub).
func (client *Client) ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
	panic("No Support!")
}

// ModifyRequest panics unconditionally — see ModifyConfiguration.
func (client *Client) ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
	panic("No Support!")
}

// ModifyResponse panics unconditionally — see ModifyConfiguration.
func (client *Client) ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
	panic("No Support!")
}

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large Load Diff

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,12 +0,0 @@
package debug
import (
"reflect"
"testing"
)
// assertEqual fails the test when a and b are not deeply equal.
func assertEqual(t *testing.T, a, b interface{}) {
	if reflect.DeepEqual(a, b) {
		return
	}
	t.Errorf("%v != %v", a, b)
}

View File

@@ -1,36 +0,0 @@
package debug
import (
"fmt"
"os"
"strings"
)
// Debug is a printf-style logging function returned by Init; it emits
// output only when its flag was enabled via the DEBUG environment variable.
type Debug func(format string, v ...interface{})

// hookGetEnv reads the DEBUG environment variable. It is a package
// variable so tests can stub out the environment lookup.
var hookGetEnv = func() string {
	return os.Getenv("DEBUG")
}

// hookPrint writes one line to standard output. It is a package
// variable so tests can capture what would be printed.
var hookPrint = func(input string) {
	fmt.Println(input)
}
// Init returns a Debug function for the given flag. The returned
// function prints (via hookPrint) only when the comma-separated value
// read from hookGetEnv contains flag as one of its entries; otherwise
// it is a no-op.
func Init(flag string) Debug {
	var enabled bool
	for _, entry := range strings.Split(hookGetEnv(), ",") {
		if entry == flag {
			enabled = true
			break
		}
	}
	return func(format string, v ...interface{}) {
		if !enabled {
			return
		}
		hookPrint(fmt.Sprintf(format, v...))
	}
}

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large Load Diff

View File

@@ -1,41 +0,0 @@
// This file is auto-generated, don't edit it. Thanks.
/**
* Get endpoint
* @return string
*/
package service
import (
"fmt"
"strings"
"github.com/alibabacloud-go/tea/tea"
)
// GetEndpointRules builds an Aliyun endpoint host from the template
// "<product><suffix><network>.<region_id>.aliyuncs.com" (regional) or
// "<product><suffix><network>.aliyuncs.com" (central).
//
// Placeholder rules: <product> is lower-cased; <network> becomes
// "-<network>" unless empty or "public"; <suffix> becomes "-<suffix>"
// unless empty. A "regional" endpointType with an empty regionId is an
// error.
func GetEndpointRules(product, regionId, endpointType, network, suffix *string) (_result *string, _err error) {
	endpoint := "<product><suffix><network>.aliyuncs.com"
	if tea.StringValue(endpointType) == "regional" {
		region := tea.StringValue(regionId)
		if region == "" {
			return tea.String(""), fmt.Errorf("RegionId is empty, please set a valid RegionId")
		}
		endpoint = strings.Replace("<product><suffix><network>.<region_id>.aliyuncs.com",
			"<region_id>", region, 1)
	}

	endpoint = strings.Replace(endpoint, "<product>",
		strings.ToLower(tea.StringValue(product)), 1)

	// "public" (or empty) maps to the default endpoint with no network part.
	networkPart := ""
	if nw := tea.StringValue(network); nw != "" && nw != "public" {
		networkPart = "-" + nw
	}
	endpoint = strings.Replace(endpoint, "<network>", networkPart, 1)

	suffixPart := ""
	if sfx := tea.StringValue(suffix); sfx != "" {
		suffixPart = "-" + sfx
	}
	endpoint = strings.Replace(endpoint, "<suffix>", suffixPart, 1)

	return tea.String(endpoint), nil
}

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,635 +0,0 @@
// This file is auto-generated, don't edit it. Thanks.
/**
* This is for OpenApi Util
*/
package service
import (
"bytes"
"crypto"
"crypto/hmac"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"hash"
"io"
"net/http"
"net/textproto"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
util "github.com/alibabacloud-go/tea-utils/service"
"github.com/alibabacloud-go/tea/tea"
"github.com/tjfoc/gmsm/sm3"
)
const (
PEM_BEGIN = "-----BEGIN RSA PRIVATE KEY-----\n"
PEM_END = "\n-----END RSA PRIVATE KEY-----"
)
// Sorter keeps header/query keys and their values in two parallel
// slices so both can be reordered together while sorting by key; it is
// used to build canonical strings for request signing.
type Sorter struct {
	Keys []string
	Vals []string
}

// newSorter flattens m into a Sorter of parallel key/value slices.
// Map iteration order is random; call Sort to canonicalize.
func newSorter(m map[string]string) *Sorter {
	s := &Sorter{
		Keys: make([]string, 0, len(m)),
		Vals: make([]string, 0, len(m)),
	}
	for key, val := range m {
		s.Keys = append(s.Keys, key)
		s.Vals = append(s.Vals, val)
	}
	return s
}

// Sort orders the pairs by ascending key.
func (hs *Sorter) Sort() {
	sort.Sort(hs)
}

// Len reports the number of key/value pairs.
func (hs *Sorter) Len() int {
	return len(hs.Vals)
}

// Less compares keys bytewise in ascending order (string < is the
// same ordering as bytes.Compare on their raw bytes).
func (hs *Sorter) Less(i, j int) bool {
	return hs.Keys[i] < hs.Keys[j]
}

// Swap exchanges both the keys and the values at i and j so the pairs
// stay aligned.
func (hs *Sorter) Swap(i, j int) {
	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
}
/**
 * Convert copies every non-io.Reader field of body into content by
 * marshalling the collected fields to JSON and unmarshalling them
 * again. Field names come from the `json` struct tag (the ",omitempty"
 * suffix is stripped). body must be a pointer to a struct; marshal and
 * decode errors are deliberately ignored (best-effort copy).
 * @param body source Model
 * @param content target Model
 * @return void
 */
func Convert(body interface{}, content interface{}) {
	fields := make(map[string]interface{})
	v := reflect.ValueOf(body).Elem()
	structType := v.Type()
	for i := 0; i < structType.NumField(); i++ {
		tag, _ := structType.Field(i).Tag.Lookup("json")
		name := strings.Split(tag, ",omitempty")[0]
		value := v.Field(i).Interface()
		if _, isReader := value.(io.Reader); isReader {
			// Streaming bodies cannot be round-tripped through JSON.
			continue
		}
		fields[name] = value
	}
	raw, _ := json.Marshal(fields)
	json.Unmarshal(raw, content)
}
/**
 * Get the string to be signed according to request (ROA/OSS style:
 * method, accept, content-md5, content-type, date, x-acs-* headers and
 * the canonicalized resource — see getStringToSign).
 * @param request which contains signed messages
 * @return the signed string
 */
func GetStringToSign(request *tea.Request) (_result *string) {
	return tea.String(getStringToSign(request))
}
// getStringToSign builds the canonicalized resource for ROA signing:
// the request path, then "?" plus the query parameters sorted by key
// ("k=v", or bare "k" when the value is empty) joined with "&". The
// header portion is delegated to getSignedStr.
func getStringToSign(request *tea.Request) string {
	keys := make([]string, 0, len(request.Query))
	for key := range request.Query {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	var buf strings.Builder
	for _, key := range keys {
		buf.WriteString("&")
		buf.WriteString(key)
		if value := tea.StringValue(request.Query[key]); value != "" {
			buf.WriteString("=")
			buf.WriteString(value)
		}
	}
	resource := tea.StringValue(request.Pathname)
	// Mirror the original flow exactly: test the raw accumulator for
	// emptiness first, then strip the leading '&' run.
	if raw := buf.String(); raw != "" {
		resource = resource + "?" + strings.TrimLeft(raw, "&")
	}
	return getSignedStr(request, resource)
}
// getSignedStr assembles the OSS/ROA string-to-sign:
//
//	METHOD \n accept \n content-md5 \n content-type \n date \n
//	<sorted "x-acs-*" headers as "key:value\n"> <canonicalizedResource>
//
// Only headers whose lower-cased name starts with "x-acs-" take part in
// the canonicalized header section; they are sorted bytewise by key.
func getSignedStr(req *tea.Request, canonicalizedResource string) string {
	// Collect the x-acs-* headers under lower-cased names.
	temp := make(map[string]string)
	for k, v := range req.Headers {
		if strings.HasPrefix(strings.ToLower(k), "x-acs-") {
			temp[strings.ToLower(k)] = tea.StringValue(v)
		}
	}
	hs := newSorter(temp)
	// Sort the temp by the ascending order
	hs.Sort()
	// Get the canonicalizedOSSHeaders
	canonicalizedOSSHeaders := ""
	for i := range hs.Keys {
		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
	}
	// Give other parameters values
	// when sign URL, date is expires
	date := tea.StringValue(req.Headers["date"])
	accept := tea.StringValue(req.Headers["accept"])
	contentType := tea.StringValue(req.Headers["content-type"])
	contentMd5 := tea.StringValue(req.Headers["content-md5"])
	signStr := tea.StringValue(req.Method) + "\n" + accept + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
	return signStr
}
/**
 * Get signature according to stringToSign, secret:
 * base64(HMAC-SHA1(stringToSign, secret)).
 * @param stringToSign the signed string (from GetStringToSign)
 * @param secret accesskey secret
 * @return the base64-encoded signature
 */
func GetROASignature(stringToSign *string, secret *string) (_result *string) {
	// sha1.New already has the func() hash.Hash shape; the wrapping
	// closure is kept from the generated code.
	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(tea.StringValue(secret)))
	// Writes to a hash never fail, so the error is safely discarded.
	io.WriteString(h, tea.StringValue(stringToSign))
	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
	return tea.String(signedStr)
}
// GetEndpoint resolves the effective endpoint:
//   - endpointType "internal": inserts "-internal" after the first
//     dot-separated label of endpoint;
//   - server true with endpointType "accelerate": returns the global
//     OSS acceleration endpoint;
//   - otherwise endpoint is returned unchanged.
func GetEndpoint(endpoint *string, server *bool, endpointType *string) *string {
	kind := tea.StringValue(endpointType)
	if kind == "internal" {
		labels := strings.Split(tea.StringValue(endpoint), ".")
		labels[0] += "-internal"
		endpoint = tea.String(strings.Join(labels, "."))
	}
	if kind == "accelerate" && tea.BoolValue(server) {
		return tea.String("oss-accelerate.aliyuncs.com")
	}
	return endpoint
}
// HexEncode returns the lowercase hexadecimal representation of raw.
func HexEncode(raw []byte) *string {
	return tea.String(hex.EncodeToString(raw))
}
// Hash digests raw with the hash family implied by signatureAlgorithm:
// SHA-256 for "ACS3-HMAC-SHA256"/"ACS3-RSA-SHA256", SM3 for
// "ACS3-HMAC-SM3". Any other algorithm yields nil.
func Hash(raw []byte, signatureAlgorithm *string) []byte {
	switch tea.StringValue(signatureAlgorithm) {
	case "ACS3-HMAC-SHA256", "ACS3-RSA-SHA256":
		sum := sha256.Sum256(raw)
		return sum[:]
	case "ACS3-HMAC-SM3":
		h := sm3.New()
		h.Write(raw)
		return h.Sum(nil)
	default:
		return nil
	}
}
// GetEncodePath percent-encodes each "/"-separated segment of path,
// then applies the RFC 3986 fix-ups the gateway expects
// (space→%20, *→%2A, %7E→~).
func GetEncodePath(path *string) *string {
	segments := strings.Split(tea.StringValue(path), "/")
	for i, segment := range segments {
		segments[i] = url.QueryEscape(segment)
	}
	encoded := strings.Join(segments, "/")
	for _, fix := range [][2]string{{"+", "%20"}, {"*", "%2A"}, {"%7E", "~"}} {
		encoded = strings.Replace(encoded, fix[0], fix[1], -1)
	}
	return tea.String(encoded)
}
// GetEncodeParam percent-encodes param as a single query component and
// applies the RFC 3986 fix-ups (space→%20, *→%2A, %7E→~).
func GetEncodeParam(param *string) *string {
	encoded := url.QueryEscape(tea.StringValue(param))
	for _, fix := range [][2]string{{"+", "%20"}, {"*", "%2A"}, {"%7E", "~"}} {
		encoded = strings.Replace(encoded, fix[0], fix[1], -1)
	}
	return tea.String(encoded)
}
// GetAuthorization builds the ACS3 (V3) Authorization header value:
// it canonicalizes the URI, query string and signable headers, hashes
// the canonical request with the algorithm named by signatureAlgorithm,
// signs the result with secret (HMAC or RSA — see SignatureMethod) and
// formats "<algo> Credential=<ak>,SignedHeaders=<h1;h2>,Signature=<hex>".
// payload is the already-computed hex digest of the request body.
func GetAuthorization(request *tea.Request, signatureAlgorithm, payload, acesskey, secret *string) *string {
	canonicalURI := tea.StringValue(request.Pathname)
	if canonicalURI == "" {
		canonicalURI = "/"
	}
	// RFC 3986 fix-ups expected by the gateway.
	canonicalURI = strings.Replace(canonicalURI, "+", "%20", -1)
	canonicalURI = strings.Replace(canonicalURI, "*", "%2A", -1)
	canonicalURI = strings.Replace(canonicalURI, "%7E", "~", -1)
	method := tea.StringValue(request.Method)
	canonicalQueryString := getCanonicalQueryString(request.Query)
	canonicalheaders, signedHeaders := getCanonicalHeaders(request.Headers)
	canonicalRequest := method + "\n" + canonicalURI + "\n" + canonicalQueryString + "\n" + canonicalheaders + "\n" +
		strings.Join(signedHeaders, ";") + "\n" + tea.StringValue(payload)
	signType := tea.StringValue(signatureAlgorithm)
	StringToSign := signType + "\n" + tea.StringValue(HexEncode(Hash([]byte(canonicalRequest), signatureAlgorithm)))
	signature := tea.StringValue(HexEncode(SignatureMethod(tea.StringValue(secret), StringToSign, signType)))
	auth := signType + " Credential=" + tea.StringValue(acesskey) + ",SignedHeaders=" +
		strings.Join(signedHeaders, ";") + ",Signature=" + signature
	return tea.String(auth)
}
// SignatureMethod signs source with secret using the named algorithm:
// HMAC-SHA256, HMAC-SM3, or RSA-SHA256 (for which secret is a PEM
// private key — see rsaSign). Unknown algorithms yield nil.
func SignatureMethod(secret, source, signatureAlgorithm string) []byte {
	switch signatureAlgorithm {
	case "ACS3-HMAC-SHA256":
		mac := hmac.New(sha256.New, []byte(secret))
		mac.Write([]byte(source))
		return mac.Sum(nil)
	case "ACS3-HMAC-SM3":
		mac := hmac.New(sm3.New, []byte(secret))
		mac.Write([]byte(source))
		return mac.Sum(nil)
	case "ACS3-RSA-SHA256":
		return rsaSign(source, secret)
	default:
		return nil
	}
}
// rsaSign produces a PKCS#1 v1.5 RSA signature over the SHA-256 digest
// of content, using the PEM-encoded private key in secret. Parse or
// signing failures yield nil (errors are swallowed by design upstream).
func rsaSign(content, secret string) []byte {
	digest := sha256.Sum256([]byte(content))
	key, err := parsePrivateKey(secret)
	if err != nil {
		return nil
	}
	signature, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		return nil
	}
	return signature
}
// parsePrivateKey decodes a PEM-wrapped (see formatPrivateKey) PKCS#8
// private key and returns it as an *rsa.PrivateKey.
// It returns an error when the PEM block or key cannot be parsed, or
// when the key is not an RSA key. (The previous version returned
// nil, nil for non-RSA keys, which let callers dereference a nil key.)
func parsePrivateKey(privateKey string) (*rsa.PrivateKey, error) {
	privateKey = formatPrivateKey(privateKey)
	block, _ := pem.Decode([]byte(privateKey))
	if block == nil {
		return nil, errors.New("PrivateKey is invalid")
	}
	parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	rsaKey, ok := parsed.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("PrivateKey is not an RSA key")
	}
	return rsaKey, nil
}
// formatPrivateKey wraps a bare base64 key body with the RSA PEM header
// and footer when they are missing, so pem.Decode can parse it.
func formatPrivateKey(privateKey string) string {
	out := privateKey
	if !strings.HasPrefix(out, PEM_BEGIN) {
		out = PEM_BEGIN + out
	}
	if !strings.HasSuffix(out, PEM_END) {
		out = out + PEM_END
	}
	return out
}
// getCanonicalHeaders selects the signable headers (x-acs-*, host,
// content-type), lower-cases their names and trims their values, and
// returns (1) the canonical header block — one "key:v1,v2\n" line per
// key, keys sorted ascending, multi-values sorted and comma-joined —
// and (2) the sorted list of signed header names.
func getCanonicalHeaders(headers map[string]*string) (string, []string) {
	tmp := make(map[string]string)
	tmpHeader := http.Header{}
	for k, v := range headers {
		if strings.HasPrefix(strings.ToLower(k), "x-acs-") || strings.ToLower(k) == "host" ||
			strings.ToLower(k) == "content-type" {
			tmp[strings.ToLower(k)] = strings.TrimSpace(tea.StringValue(v))
			tmpHeader.Add(strings.ToLower(k), strings.TrimSpace(tea.StringValue(v)))
		}
	}
	hs := newSorter(tmp)
	// Sort the temp by the ascending order
	hs.Sort()
	canonicalheaders := ""
	for _, key := range hs.Keys {
		// http.Header stores values under the canonical MIME key form.
		vals := tmpHeader[textproto.CanonicalMIMEHeaderKey(key)]
		sort.Strings(vals)
		canonicalheaders += key + ":" + strings.Join(vals, ",") + "\n"
	}
	return canonicalheaders, hs.Keys
}
// getCanonicalQueryString renders query as "k=v" pairs sorted by key
// and joined with "&". Values are percent-encoded with the RFC 3986
// fix-ups (space→%20, *→%2A, %7E→~); keys with empty values are kept
// as "k=". Returns "" for an unset map.
func getCanonicalQueryString(query map[string]*string) string {
	canonicalQueryString := ""
	if tea.BoolValue(util.IsUnset(query)) {
		return canonicalQueryString
	}
	tmp := make(map[string]string)
	for k, v := range query {
		tmp[k] = tea.StringValue(v)
	}
	hs := newSorter(tmp)
	// Sort the temp by the ascending order
	hs.Sort()
	for i := range hs.Keys {
		if hs.Vals[i] != "" {
			canonicalQueryString += "&" + hs.Keys[i] + "=" + url.QueryEscape(hs.Vals[i])
		} else {
			canonicalQueryString += "&" + hs.Keys[i] + "="
		}
	}
	// Fix up QueryEscape's deviations from RFC 3986.
	canonicalQueryString = strings.Replace(canonicalQueryString, "+", "%20", -1)
	canonicalQueryString = strings.Replace(canonicalQueryString, "*", "%2A", -1)
	canonicalQueryString = strings.Replace(canonicalQueryString, "%7E", "~", -1)
	if canonicalQueryString != "" {
		canonicalQueryString = strings.TrimLeft(canonicalQueryString, "&")
	}
	return canonicalQueryString
}
/**
 * Parse filter into a form string: the map is JSON round-tripped (with
 * json.Number so large integers keep their precision), nested maps and
 * slices are flattened into dotted / 1-based indexed keys via
 * flatRepeatedList, and the flat map is form-encoded.
 * @param filter object
 * @return the form-encoded string
 */
func ToForm(filter map[string]interface{}) (_result *string) {
	tmp := make(map[string]interface{})
	byt, _ := json.Marshal(filter)
	d := json.NewDecoder(bytes.NewReader(byt))
	d.UseNumber() // avoid float64 mangling of large integers
	_ = d.Decode(&tmp)
	result := make(map[string]*string)
	for key, value := range tmp {
		filterValue := reflect.ValueOf(value)
		flatRepeatedList(filterValue, result, key)
	}
	m := util.AnyifyMapValue(result)
	return util.ToFormString(m)
}
// flatRepeatedList flattens dataValue into result under the key prefix:
// slices expand to 1-based "prefix.N" entries, maps recurse with dotted
// keys, and scalars are stringified with %v.
// Kind is now compared against reflect.Slice / reflect.Map constants
// instead of the stringly-typed Kind().String() == "slice" form.
func flatRepeatedList(dataValue reflect.Value, result map[string]*string, prefix string) {
	if !dataValue.IsValid() {
		return
	}
	switch dataValue.Kind() {
	case reflect.Slice:
		handleRepeatedParams(dataValue, result, prefix)
	case reflect.Map:
		handleMap(dataValue, result, prefix)
	default:
		result[prefix] = tea.String(fmt.Sprintf("%v", dataValue.Interface()))
	}
}
// handleRepeatedParams expands a slice value into result using 1-based
// dotted keys ("prefix.1", "prefix.2", ...). Map elements recurse via
// handleMap; all other elements are stringified with %v. Invalid or nil
// slices contribute nothing.
// Kind is compared against reflect.Map directly instead of the
// stringly-typed Kind().String() == "map" form.
func handleRepeatedParams(repeatedFieldValue reflect.Value, result map[string]*string, prefix string) {
	if !repeatedFieldValue.IsValid() || repeatedFieldValue.IsNil() {
		return
	}
	for i := 0; i < repeatedFieldValue.Len(); i++ {
		key := prefix + "." + strconv.Itoa(i+1)
		element := reflect.ValueOf(repeatedFieldValue.Index(i).Interface())
		if element.Kind() == reflect.Map {
			handleMap(element, result, key)
		} else {
			result[key] = tea.String(fmt.Sprintf("%v", element.Interface()))
		}
	}
}
// handleMap flattens a map value into result, prefixing nested keys
// with "prefix.". The map is JSON round-tripped (json.Number keeps
// integer precision) and each entry recurses through flatRepeatedList.
// NOTE(review): valueField.String() on a non-string kind returns a
// "<type Value>" placeholder, never "", so the emptiness half of the
// guard is effectively always true for maps — presumably historical;
// confirm before relying on it.
func handleMap(valueField reflect.Value, result map[string]*string, prefix string) {
	if valueField.IsValid() && valueField.String() != "" {
		valueFieldType := valueField.Type()
		if valueFieldType.Kind().String() == "map" {
			var byt []byte
			byt, _ = json.Marshal(valueField.Interface())
			cache := make(map[string]interface{})
			d := json.NewDecoder(bytes.NewReader(byt))
			d.UseNumber() // keep numeric precision through the round-trip
			_ = d.Decode(&cache)
			for key, value := range cache {
				pre := ""
				if prefix != "" {
					pre = prefix + "." + key
				} else {
					pre = key
				}
				fieldValue := reflect.ValueOf(value)
				flatRepeatedList(fieldValue, result, pre)
			}
		}
	}
}
/**
 * Get the current time formatted as "2006-01-02T15:04:05Z" (ISO-8601,
 * UTC wall clock; the trailing Z is a literal in this layout).
 * @return the timestamp string
 */
func GetTimestamp() (_result *string) {
	const layout = "2006-01-02T15:04:05Z"
	return tea.String(time.Now().UTC().Format(layout))
}
/**
 * Parse filter into a map[string]*string of query parameters. Nested
 * maps and slices are flattened the same way as ToForm (dotted keys,
 * 1-based list indices); numbers keep their precision via json.Number.
 * @param filter query param
 * @return the flattened query map
 */
func Query(filter interface{}) (_result map[string]*string) {
	tmp := make(map[string]interface{})
	byt, _ := json.Marshal(filter)
	d := json.NewDecoder(bytes.NewReader(byt))
	d.UseNumber() // avoid float64 mangling of large integers
	_ = d.Decode(&tmp)
	result := make(map[string]*string)
	for key, value := range tmp {
		filterValue := reflect.ValueOf(value)
		flatRepeatedList(filterValue, result, key)
	}
	return result
}
/**
 * Get signature according to signedParams, method and secret (RPC
 * style): the params are form-encoded and normalized into a
 * string-to-sign (see buildRpcStringToSign), then signed with
 * base64(HMAC-SHA1(stringToSign, secret+"&")).
 * @param signedParams params which need to be signed
 * @param method http method e.g. GET
 * @param secret AccessKeySecret
 * @return the signature
 */
func GetRPCSignature(signedParams map[string]*string, method *string, secret *string) (_result *string) {
	stringToSign := buildRpcStringToSign(signedParams, tea.StringValue(method))
	signature := sign(stringToSign, tea.StringValue(secret), "&")
	return tea.String(signature)
}
/**
 * Parse array into a string with specified style.
 * @param array the array
 * @param prefix the key prefix used by the "repeatList" style
 * @param style one of "repeatList", "simple", "spaceDelimited",
 *        "pipeDelimited", "json"; any other style yields "".
 * @return the serialized string ("" when array is unset)
 */
func ArrayToStringWithSpecifiedStyle(array interface{}, prefix *string, style *string) (_result *string) {
	if tea.BoolValue(util.IsUnset(array)) {
		return tea.String("")
	}
	switch sty := tea.StringValue(style); sty {
	case "repeatList":
		return flatRepeatList(map[string]interface{}{
			tea.StringValue(prefix): array,
		})
	case "simple", "spaceDelimited", "pipeDelimited":
		return flatArray(array, sty)
	case "json":
		return util.ToJSONString(array)
	default:
		return tea.String("")
	}
}
// ParseToMap round-trips in through JSON into a generic map, using
// json.Number so numeric precision is preserved. Returns nil when in
// is unset or cannot be decoded into a map.
func ParseToMap(in interface{}) map[string]interface{} {
	if tea.BoolValue(util.IsUnset(in)) {
		return nil
	}
	raw, _ := json.Marshal(in)
	decoder := json.NewDecoder(bytes.NewReader(raw))
	decoder.UseNumber()
	out := make(map[string]interface{})
	if err := decoder.Decode(&out); err != nil {
		return nil
	}
	return out
}
// flatRepeatList flattens filter the same way as ToForm/Query (dotted
// keys, 1-based list indices, json.Number round-trip), then renders the
// entries sorted by key as "k=v" pairs.
// NOTE(review): pairs are joined with a double "&&" separator — unusual
// for form-style data but kept as-is; confirm against the consumer
// before changing.
func flatRepeatList(filter map[string]interface{}) (_result *string) {
	tmp := make(map[string]interface{})
	byt, _ := json.Marshal(filter)
	d := json.NewDecoder(bytes.NewReader(byt))
	d.UseNumber() // keep numeric precision through the round-trip
	_ = d.Decode(&tmp)
	result := make(map[string]*string)
	for key, value := range tmp {
		filterValue := reflect.ValueOf(value)
		flatRepeatedList(filterValue, result, key)
	}
	res := make(map[string]string)
	for k, v := range result {
		res[k] = tea.StringValue(v)
	}
	hs := newSorter(res)
	hs.Sort()
	// Get the canonicalizedOSSHeaders
	t := ""
	for i := range hs.Keys {
		if i == len(hs.Keys)-1 {
			t += hs.Keys[i] + "=" + hs.Vals[i]
		} else {
			t += hs.Keys[i] + "=" + hs.Vals[i] + "&&"
		}
	}
	return tea.String(t)
}
// flatArray renders array elements as strings — string elements pass
// through, everything else is JSON-encoded — and joins them with the
// separator implied by sty: "simple" → ",", "spaceDelimited" → " ",
// "pipeDelimited" → "|". Any other style yields "". Pointer and
// interface elements are dereferenced (up to two levels) first.
func flatArray(array interface{}, sty string) *string {
	v := reflect.ValueOf(array)
	parts := make([]string, 0, v.Len())
	for i := 0; i < v.Len(); i++ {
		elem := v.Index(i)
		if elem.Kind() == reflect.Ptr || elem.Kind() == reflect.Interface {
			elem = elem.Elem()
		}
		if elem.Kind() == reflect.Ptr {
			elem = elem.Elem()
		}
		if elem.Kind() == reflect.String {
			parts = append(parts, elem.String())
		} else {
			encoded, _ := json.Marshal(elem.Interface())
			parts = append(parts, string(encoded))
		}
	}
	switch sty {
	case "simple":
		return tea.String(strings.Join(parts, ","))
	case "spaceDelimited":
		return tea.String(strings.Join(parts, " "))
	case "pipeDelimited":
		return tea.String(strings.Join(parts, "|"))
	default:
		return tea.String("")
	}
}
// buildRpcStringToSign produces the RPC-style string-to-sign:
// form-encode the params (url.Values.Encode sorts by key), normalize
// "+"/"*"/"%7E" to "%20"/"%2A"/"~" per RFC 3986, escape the whole
// string once more, and prepend "METHOD&%2F&" ("%2F" is the encoded
// root path "/").
func buildRpcStringToSign(signedParam map[string]*string, method string) (stringToSign string) {
	signParams := make(map[string]string)
	for key, value := range signedParam {
		signParams[key] = tea.StringValue(value)
	}
	stringToSign = getUrlFormedMap(signParams)
	stringToSign = strings.Replace(stringToSign, "+", "%20", -1)
	stringToSign = strings.Replace(stringToSign, "*", "%2A", -1)
	stringToSign = strings.Replace(stringToSign, "%7E", "~", -1)
	// The already-encoded query is escaped a second time by design.
	stringToSign = url.QueryEscape(stringToSign)
	stringToSign = method + "&%2F&" + stringToSign
	return
}
// getUrlFormedMap encodes source as an
// application/x-www-form-urlencoded string: "k=v" pairs joined by "&",
// sorted by key (url.Values.Encode sorts), values percent-encoded.
func getUrlFormedMap(source map[string]string) (urlEncoded string) {
	values := url.Values{}
	for key, value := range source {
		values.Add(key, value)
	}
	return values.Encode()
}
// sign computes the RPC signature: HMAC-SHA1 of stringToSign keyed with
// accessKeySecret+secretSuffix, base64-encoded.
func sign(stringToSign, accessKeySecret, secretSuffix string) string {
	mac := shaHmac1(stringToSign, accessKeySecret+secretSuffix)
	return base64.StdEncoding.EncodeToString(mac)
}
// shaHmac1 returns the raw HMAC-SHA1 of source keyed with secret.
// The local was renamed from "hmac" to "mac" so it no longer shadows
// the crypto/hmac package inside the function body.
func shaHmac1(source, secret string) []byte {
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(source))
	return mac.Sum(nil)
}

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,468 +0,0 @@
package service
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"github.com/alibabacloud-go/tea/tea"
)
// defaultUserAgent identifies this SDK in outgoing requests, e.g.
// "AlibabaCloud (linux; amd64) Golang/1.21.0 Core/0.01 TeaDSL/1".
// strings.TrimPrefix replaces the previous strings.Trim(v, "go"):
// Trim strips ANY leading/trailing 'g'/'o' characters, which can mangle
// version strings, while TrimPrefix removes exactly the "go" prefix.
var defaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s TeaDSL/1", runtime.GOOS, runtime.GOARCH, strings.TrimPrefix(runtime.Version(), "go"), "0.01")
// RuntimeOptions carries per-call transport and retry tuning for the
// generated SDK clients. All fields are optional pointers; nil means
// "use the client default".
type RuntimeOptions struct {
	Autoretry      *bool   `json:"autoretry" xml:"autoretry"`
	IgnoreSSL      *bool   `json:"ignoreSSL" xml:"ignoreSSL"`
	MaxAttempts    *int    `json:"maxAttempts" xml:"maxAttempts"`
	BackoffPolicy  *string `json:"backoffPolicy" xml:"backoffPolicy"`
	BackoffPeriod  *int    `json:"backoffPeriod" xml:"backoffPeriod"`
	ReadTimeout    *int    `json:"readTimeout" xml:"readTimeout"`
	ConnectTimeout *int    `json:"connectTimeout" xml:"connectTimeout"`
	LocalAddr      *string `json:"localAddr" xml:"localAddr"`
	HttpProxy      *string `json:"httpProxy" xml:"httpProxy"`
	HttpsProxy     *string `json:"httpsProxy" xml:"httpsProxy"`
	NoProxy        *string `json:"noProxy" xml:"noProxy"`
	MaxIdleConns   *int    `json:"maxIdleConns" xml:"maxIdleConns"`
	Socks5Proxy    *string `json:"socks5Proxy" xml:"socks5Proxy"`
	Socks5NetWork  *string `json:"socks5NetWork" xml:"socks5NetWork"`
	KeepAlive      *bool   `json:"keepAlive" xml:"keepAlive"`
}

// String pretty-prints the options via tea.Prettify.
func (s RuntimeOptions) String() string {
	return tea.Prettify(s)
}

// GoString implements fmt.GoStringer by reusing String.
func (s RuntimeOptions) GoString() string {
	return s.String()
}

// The Set* methods below are generated fluent setters: each stores the
// address of its argument and returns the receiver for chaining.

func (s *RuntimeOptions) SetAutoretry(v bool) *RuntimeOptions {
	s.Autoretry = &v
	return s
}

func (s *RuntimeOptions) SetIgnoreSSL(v bool) *RuntimeOptions {
	s.IgnoreSSL = &v
	return s
}

func (s *RuntimeOptions) SetMaxAttempts(v int) *RuntimeOptions {
	s.MaxAttempts = &v
	return s
}

func (s *RuntimeOptions) SetBackoffPolicy(v string) *RuntimeOptions {
	s.BackoffPolicy = &v
	return s
}

func (s *RuntimeOptions) SetBackoffPeriod(v int) *RuntimeOptions {
	s.BackoffPeriod = &v
	return s
}

func (s *RuntimeOptions) SetReadTimeout(v int) *RuntimeOptions {
	s.ReadTimeout = &v
	return s
}

func (s *RuntimeOptions) SetConnectTimeout(v int) *RuntimeOptions {
	s.ConnectTimeout = &v
	return s
}

func (s *RuntimeOptions) SetHttpProxy(v string) *RuntimeOptions {
	s.HttpProxy = &v
	return s
}

func (s *RuntimeOptions) SetHttpsProxy(v string) *RuntimeOptions {
	s.HttpsProxy = &v
	return s
}

func (s *RuntimeOptions) SetNoProxy(v string) *RuntimeOptions {
	s.NoProxy = &v
	return s
}

func (s *RuntimeOptions) SetMaxIdleConns(v int) *RuntimeOptions {
	s.MaxIdleConns = &v
	return s
}

func (s *RuntimeOptions) SetLocalAddr(v string) *RuntimeOptions {
	s.LocalAddr = &v
	return s
}

func (s *RuntimeOptions) SetSocks5Proxy(v string) *RuntimeOptions {
	s.Socks5Proxy = &v
	return s
}

func (s *RuntimeOptions) SetSocks5NetWork(v string) *RuntimeOptions {
	s.Socks5NetWork = &v
	return s
}

func (s *RuntimeOptions) SetKeepAlive(v bool) *RuntimeOptions {
	s.KeepAlive = &v
	return s
}
// ReadAsString drains body into a string, closing it when it implements
// io.ReadCloser. On read error a pointer to the empty string is
// returned together with the error (and the body is left unclosed).
func ReadAsString(body io.Reader) (*string, error) {
	byt, err := ioutil.ReadAll(body)
	if err != nil {
		return tea.String(""), err
	}
	r, ok := body.(io.ReadCloser)
	if ok {
		r.Close()
	}
	return tea.String(string(byt)), nil
}
// StringifyMapValue converts each non-nil value of a to a *string:
// strings pass through unchanged, everything else is JSON-encoded
// (marshal errors produce an empty string, matching the original's
// ignored error). Nil values are dropped from the result.
// Uses the binding form of the type switch instead of switching on
// value.(type) and re-asserting inside each case.
func StringifyMapValue(a map[string]interface{}) map[string]*string {
	res := make(map[string]*string)
	for key, value := range a {
		if value == nil {
			continue
		}
		var s string
		switch v := value.(type) {
		case string:
			s = v
		default:
			byt, _ := json.Marshal(v)
			s = string(byt)
		}
		// s is redeclared per iteration, so each entry gets its own
		// backing string.
		res[key] = &s
	}
	return res
}
// AnyifyMapValue widens a map[string]*string into map[string]interface{},
// dereferencing each value; a nil *string becomes "" (the same result
// tea.StringValue produces for nil).
func AnyifyMapValue(a map[string]*string) map[string]interface{} {
	res := make(map[string]interface{}, len(a))
	for key, value := range a {
		if value != nil {
			res[key] = *value
		} else {
			res[key] = ""
		}
	}
	return res
}
// ReadAsBytes drains body into a byte slice, closing it when it
// implements io.ReadCloser. The body is left unclosed on read error.
func ReadAsBytes(body io.Reader) ([]byte, error) {
	byt, err := ioutil.ReadAll(body)
	if err != nil {
		return nil, err
	}
	r, ok := body.(io.ReadCloser)
	if ok {
		r.Close()
	}
	return byt, nil
}
// DefaultString returns reaStr unless it is nil, in which case
// defaultStr is returned. A pointer to an empty string is NOT nil and
// is returned as-is.
func DefaultString(reaStr, defaultStr *string) *string {
	if reaStr != nil {
		return reaStr
	}
	return defaultStr
}
// ToJSONString renders a as a JSON string with pass-through fast paths:
// *string and string are returned as-is (NOT re-encoded as JSON),
// []byte and io.Reader contents are returned verbatim, and everything
// else is json.Marshal'ed. Returns nil when reading or marshalling
// fails.
func ToJSONString(a interface{}) *string {
	switch v := a.(type) {
	case *string:
		return v
	case string:
		return tea.String(v)
	case []byte:
		return tea.String(string(v))
	case io.Reader:
		byt, err := ioutil.ReadAll(v)
		if err != nil {
			return nil
		}
		return tea.String(string(byt))
	}
	byt, err := json.Marshal(a)
	if err != nil {
		return nil
	}
	return tea.String(string(byt))
}
// DefaultNumber returns reaNum unless it is nil, in which case
// defaultNum is returned. A pointer to zero is NOT nil and is returned
// as-is.
func DefaultNumber(reaNum, defaultNum *int) *int {
	if reaNum != nil {
		return reaNum
	}
	return defaultNum
}
// ReadAsJSON decodes body as JSON into an untyped value, using
// json.Number so numeric precision is preserved. An empty payload
// yields a nil result and nil error.
// NOTE(review): the empty-payload early return skips the Close call
// below, so an empty ReadCloser body is never closed — confirm whether
// callers rely on that.
func ReadAsJSON(body io.Reader) (result interface{}, err error) {
	byt, err := ioutil.ReadAll(body)
	if err != nil {
		return
	}
	if string(byt) == "" {
		return
	}
	r, ok := body.(io.ReadCloser)
	if ok {
		r.Close()
	}
	d := json.NewDecoder(bytes.NewReader(byt))
	d.UseNumber()
	err = d.Decode(&result)
	return
}
// GetNonce returns a fresh request nonce. getUUID is defined elsewhere
// in this package; presumably it yields a random UUID-derived string —
// confirm there.
func GetNonce() *string {
	return tea.String(getUUID())
}
// Empty reports whether val is nil or points to the empty string.
func Empty(val *string) *bool {
	return tea.Bool(val == nil || tea.StringValue(val) == "")
}
// ValidateModel runs tea's struct validation on a; nil input is
// considered valid.
func ValidateModel(a interface{}) error {
	if a == nil {
		return nil
	}
	return tea.Validate(a)
}
// EqualString reports whether the two pointers dereference to the same
// string; nil is treated as "".
func EqualString(val1, val2 *string) *bool {
	return tea.Bool(tea.StringValue(val1) == tea.StringValue(val2))
}
// EqualNumber reports whether the two pointers dereference to the same
// int; nil is treated as 0.
func EqualNumber(val1, val2 *int) *bool {
	return tea.Bool(tea.IntValue(val1) == tea.IntValue(val2))
}
// IsUnset reports whether val is "unset": nil, a nil pointer/slice/map,
// or (for other kinds) equal to its type's zero value.
//
// The previous version compared reflect.Value structs with ==
// (`valZero == v`), which compares the Values' internal data pointers
// rather than the underlying values, so zero values were almost never
// recognized. DeepEqual on the boxed values compares what was intended.
func IsUnset(val interface{}) *bool {
	if val == nil {
		return tea.Bool(true)
	}
	v := reflect.ValueOf(val)
	switch v.Kind() {
	case reflect.Ptr, reflect.Slice, reflect.Map:
		return tea.Bool(v.IsNil())
	}
	zero := reflect.Zero(v.Type()).Interface()
	return tea.Bool(reflect.DeepEqual(val, zero))
}
// ToBytes converts a string pointer to its byte representation; a nil
// pointer yields an empty (zero-length) slice.
func ToBytes(a *string) []byte {
	return []byte(tea.StringValue(a))
}
// AssertAsMap copies any map value into a map[string]interface{} (keys are
// rendered via reflect's String()). Panics when a is not a map, matching
// the other AssertAs* helpers.
//
// Changed from comparing Kind().String() against the literal "map" to the
// idiomatic reflect.Map comparison, and pre-sized the result map.
func AssertAsMap(a interface{}) map[string]interface{} {
	r := reflect.ValueOf(a)
	if r.Kind() != reflect.Map {
		panic(fmt.Sprintf("%v is not a map[string]interface{}", a))
	}
	res := make(map[string]interface{}, r.Len())
	for _, key := range r.MapKeys() {
		res[key.String()] = r.MapIndex(key).Interface()
	}
	return res
}
// AssertAsNumber returns a as an *int, accepting either int or *int
// (a nil *int yields a pointer to 0). Panics for any other type.
func AssertAsNumber(a interface{}) *int {
	switch v := a.(type) {
	case int:
		return tea.Int(v)
	case *int:
		return tea.Int(tea.IntValue(v))
	}
	panic(fmt.Sprintf("%v is not a int", a))
}
// AssertAsBoolean returns a as a *bool, accepting either bool or *bool
// (a nil *bool yields a pointer to false). Panics for any other type.
func AssertAsBoolean(a interface{}) *bool {
	switch v := a.(type) {
	case bool:
		return tea.Bool(v)
	case *bool:
		return tea.Bool(tea.BoolValue(v))
	}
	panic(fmt.Sprintf("%v is not a bool", a))
}
// AssertAsString returns a as a *string, accepting either string or
// *string (a nil *string yields a pointer to ""). Panics for any other type.
func AssertAsString(a interface{}) *string {
	switch v := a.(type) {
	case string:
		return tea.String(v)
	case *string:
		return tea.String(tea.StringValue(v))
	}
	panic(fmt.Sprintf("%v is not a string", a))
}
// AssertAsBytes returns a as a []byte, panicking when it holds any other type.
func AssertAsBytes(a interface{}) []byte {
	if byt, ok := a.([]byte); ok {
		return byt
	}
	panic(fmt.Sprintf("%v is not []byte", a))
}
func AssertAsReadable(a interface{}) io.Reader {
res, ok := a.(io.Reader)
if !ok {
panic(fmt.Sprintf("%v is not reader", a))
}
return res
}
// AssertAsArray copies any slice or array value into a []interface{}.
// Panics when a is neither, matching the other AssertAs* helpers.
//
// Changed from comparing Kind().String() against "array"/"slice" literals
// to idiomatic reflect.Kind comparisons, and pre-sized the result slice.
func AssertAsArray(a interface{}) []interface{} {
	r := reflect.ValueOf(a)
	if k := r.Kind(); k != reflect.Array && k != reflect.Slice {
		panic(fmt.Sprintf("%v is not a [x]interface{}", a))
	}
	aLen := r.Len()
	res := make([]interface{}, 0, aLen)
	for i := 0; i < aLen; i++ {
		res = append(res, r.Index(i).Interface())
	}
	return res
}
// ParseJSON best-effort decodes *a, trying progressively simpler shapes in
// a fixed order: JSON object -> JSON array -> integer -> bool -> float.
// Returns nil when nothing matches. Note the order matters: e.g. "1"
// parses as an int before ParseBool would accept it as true.
func ParseJSON(a *string) interface{} {
	mapTmp := make(map[string]interface{})
	d := json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a))))
	// UseNumber keeps numeric values as json.Number instead of float64.
	d.UseNumber()
	err := d.Decode(&mapTmp)
	if err == nil {
		return mapTmp
	}
	sliceTmp := make([]interface{}, 0)
	d = json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a))))
	d.UseNumber()
	err = d.Decode(&sliceTmp)
	if err == nil {
		return sliceTmp
	}
	if num, err := strconv.Atoi(tea.StringValue(a)); err == nil {
		return num
	}
	if ok, err := strconv.ParseBool(tea.StringValue(a)); err == nil {
		return ok
	}
	if floa64tVal, err := strconv.ParseFloat(tea.StringValue(a), 64); err == nil {
		return floa64tVal
	}
	return nil
}
// ToString converts a byte slice into a string pointer.
func ToString(a []byte) *string {
	return tea.String(string(a))
}

// ToMap converts an arbitrary value into a map via tea.ToMap; nil input
// yields nil rather than an empty map.
func ToMap(in interface{}) map[string]interface{} {
	if in == nil {
		return nil
	}
	res := tea.ToMap(in)
	return res
}
// ToFormString URL-encodes the map as application/x-www-form-urlencoded
// (values rendered with %v). A nil map yields an empty string.
func ToFormString(a map[string]interface{}) *string {
	if a == nil {
		return tea.String("")
	}
	form := url.Values{}
	for key, value := range a {
		form.Add(key, fmt.Sprintf("%v", value))
	}
	return tea.String(form.Encode())
}
// GetDateUTCString returns the current UTC time in HTTP date format
// (RFC 7231, e.g. "Mon, 02 Jan 2006 15:04:05 GMT").
func GetDateUTCString() *string {
	return tea.String(time.Now().UTC().Format(http.TimeFormat))
}

// GetUserAgent appends the caller-supplied agent to the package default;
// empty/nil input returns the default alone.
func GetUserAgent(userAgent *string) *string {
	if userAgent != nil && tea.StringValue(userAgent) != "" {
		return tea.String(defaultUserAgent + " " + tea.StringValue(userAgent))
	}
	return tea.String(defaultUserAgent)
}

// Is2xx reports whether the status code is in [200, 300).
func Is2xx(code *int) *bool {
	tmp := tea.IntValue(code)
	return tea.Bool(tmp >= 200 && tmp < 300)
}

// Is3xx reports whether the status code is in [300, 400).
func Is3xx(code *int) *bool {
	tmp := tea.IntValue(code)
	return tea.Bool(tmp >= 300 && tmp < 400)
}

// Is4xx reports whether the status code is in [400, 500).
func Is4xx(code *int) *bool {
	tmp := tea.IntValue(code)
	return tea.Bool(tmp >= 400 && tmp < 500)
}

// Is5xx reports whether the status code is in [500, 600).
func Is5xx(code *int) *bool {
	tmp := tea.IntValue(code)
	return tea.Bool(tmp >= 500 && tmp < 600)
}

// Sleep blocks for the given number of milliseconds. Always returns nil;
// the error result exists only to satisfy the generated-SDK interface.
func Sleep(millisecond *int) error {
	ms := tea.IntValue(millisecond)
	time.Sleep(time.Duration(ms) * time.Millisecond)
	return nil
}
// ToArray converts in to a slice of maps by round-tripping through JSON
// (numbers preserved as json.Number). Unset input or any decode failure
// yields nil.
func ToArray(in interface{}) []map[string]interface{} {
	if tea.BoolValue(IsUnset(in)) {
		return nil
	}
	byt, _ := json.Marshal(in)
	dec := json.NewDecoder(bytes.NewReader(byt))
	dec.UseNumber()
	out := make([]map[string]interface{}, 0)
	if err := dec.Decode(&out); err != nil {
		return nil
	}
	return out
}

View File

@@ -1,52 +0,0 @@
package service
import (
"crypto/md5"
"crypto/rand"
"encoding/hex"
"hash"
rand2 "math/rand"
)
// UUID is a 16-byte identifier in the RFC 4122 layout.
type UUID [16]byte

// numBytes is the digit-only alphabet used by randStringBytes.
const numBytes = "1234567890"
// getUUID generates a new UUID and returns it hex-encoded (32 chars,
// no dashes).
func getUUID() (uuidHex string) {
	u := newUUID()
	return hex.EncodeToString(u[:])
}
// randStringBytes returns an n-character string of pseudo-random digits
// drawn from numBytes (math/rand, not cryptographically strong).
func randStringBytes(n int) string {
	buf := make([]byte, n)
	for i := 0; i < len(buf); i++ {
		buf[i] = numBytes[rand2.Intn(len(numBytes))]
	}
	return string(buf)
}
// newUUID builds a UUID by MD5-hashing a cryptographically random
// namespace together with 16 pseudo-random digits, then forcing the
// version/variant bits into the RFC 4122 positions.
func newUUID() UUID {
	ns := UUID{}
	safeRandom(ns[:])
	u := newFromHash(md5.New(), ns, randStringBytes(16))
	// Force the version nibble (high 4 bits of byte 6) to 2.
	// NOTE(review): MD5-based UUIDs are version 3 per RFC 4122; verify
	// version 2 is intentional here.
	u[6] = (u[6] & 0x0f) | (byte(2) << 4)
	// Force the variant bits (top 2 bits of byte 8) to 10xxxxxx.
	u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
	return u
}
// newFromHash derives a UUID from hashing the namespace bytes followed by
// name; the first 16 bytes of the digest become the UUID.
func newFromHash(h hash.Hash, ns UUID, name string) UUID {
	h.Write(ns[:])
	h.Write([]byte(name))
	var u UUID
	copy(u[:], h.Sum(nil))
	return u
}
// safeRandom fills dest with cryptographically secure random bytes,
// panicking if the system entropy source fails.
func safeRandom(dest []byte) {
	_, err := rand.Read(dest)
	if err != nil {
		panic(err)
	}
}

View File

@@ -1,105 +0,0 @@
package service
import (
"bytes"
"encoding/xml"
"fmt"
"reflect"
"strings"
"github.com/alibabacloud-go/tea/tea"
v2 "github.com/clbanning/mxj/v2"
)
// ToXML serializes the map into an XML fragment string (no XML header,
// no character escaping — see mapToXML).
func ToXML(obj map[string]interface{}) *string {
	return tea.String(mapToXML(obj))
}
// ParseXml decodes the XML in val. With a nil result it falls back to a
// generic mxj map decode (returning nil on error); otherwise it unmarshals
// into result's matching field and wraps it under the document's root
// element name. Note the asymmetry: the nil-result path returns nil on
// error while the typed path returns an empty map.
func ParseXml(val *string, result interface{}) map[string]interface{} {
	resp := make(map[string]interface{})
	// Root element name, used as the key of the returned map.
	// NOTE(review): computed even on the result == nil path, where it is
	// unused.
	start := getStartElement([]byte(tea.StringValue(val)))
	if result == nil {
		vm, err := v2.NewMapXml([]byte(tea.StringValue(val)))
		if err != nil {
			return nil
		}
		return vm
	}
	out, err := xmlUnmarshal([]byte(tea.StringValue(val)), result)
	if err != nil {
		return resp
	}
	resp[start] = out
	return resp
}
// mapToXML recursively renders the map as an XML fragment: each key becomes
// an element, slices repeat the element, nested maps recurse, and scalars
// are formatted with %v. Values rendering as "<nil>" are skipped entirely.
// No escaping is performed, and element order follows Go's (random) map
// iteration order.
func mapToXML(val map[string]interface{}) string {
	var b strings.Builder
	for key, value := range val {
		open, close := "<"+key+">", "</"+key+">"
		switch v := value.(type) {
		case []interface{}:
			for _, item := range v {
				if m, ok := item.(map[string]interface{}); ok {
					b.WriteString(open)
					b.WriteString(mapToXML(m))
					b.WriteString(close)
				} else if s := fmt.Sprintf("%v", item); s != `<nil>` {
					b.WriteString(open)
					b.WriteString(s)
					b.WriteString(close)
				}
			}
		case map[string]interface{}:
			b.WriteString(open)
			b.WriteString(mapToXML(v))
			b.WriteString(close)
		default:
			if s := fmt.Sprintf("%v", value); s != `<nil>` {
				b.WriteString(open)
				b.WriteString(s)
				b.WriteString(close)
			}
		}
	}
	return b.String()
}
// getStartElement returns the local name of the document's first start
// element, or "" when none is found before the token stream errors/ends.
func getStartElement(body []byte) string {
	dec := xml.NewDecoder(bytes.NewReader(body))
	for {
		tok, err := dec.Token()
		if err != nil {
			// io.EOF or malformed XML: no start element available.
			return ""
		}
		if start, ok := tok.(xml.StartElement); ok {
			return start.Name.Local
		}
	}
}
// xmlUnmarshal scans result's struct fields for an `xml` tag matching the
// document's root element name, then unmarshals the body into a freshly
// allocated value of that field's type and returns it. Returns (nil, nil)
// when no field matches. result must be a pointer to a struct; anything
// else makes reflect.ValueOf(result).Elem() panic.
func xmlUnmarshal(body []byte, result interface{}) (interface{}, error) {
	start := getStartElement(body)
	dataValue := reflect.ValueOf(result).Elem()
	dataType := dataValue.Type()
	for i := 0; i < dataType.NumField(); i++ {
		field := dataType.Field(i)
		name, containsNameTag := field.Tag.Lookup("xml")
		// Tag options like ",omitempty" are stripped before comparing.
		name = strings.Replace(name, ",omitempty", "", -1)
		if containsNameTag {
			if name == start {
				realType := dataValue.Field(i).Type()
				realValue := reflect.New(realType).Interface()
				err := xml.Unmarshal(body, realValue)
				if err != nil {
					return nil, err
				}
				return realValue, nil
			}
		}
	}
	return nil, nil
}

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,333 +0,0 @@
package tea
import (
"encoding/json"
"io"
"math"
"reflect"
"strconv"
"strings"
"unsafe"
jsoniter "github.com/json-iterator/go"
"github.com/modern-go/reflect2"
)
// Platform-dependent integer bounds used by the fuzzy decoders below.
const maxUint = ^uint(0)
const maxInt = int(maxUint >> 1)
const minInt = -maxInt - 1

// jsonParser is the package-wide jsoniter instance configured with the
// fuzzy (lenient, cross-type) decoders registered in init.
var jsonParser jsoniter.API

func init() {
	jsonParser = jsoniter.Config{
		EscapeHTML:             true,
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
		CaseSensitive:          true,
	}.Froze()
	jsonParser.RegisterExtension(newBetterFuzzyExtension())
}
// newBetterFuzzyExtension registers lenient decoders for every scalar kind:
// strings/bools/floats get dedicated fuzzy decoders, and each integer kind
// shares nullableFuzzyIntegerDecoder parameterized with a callback that
// range-checks float input against the kind's bounds before narrowing.
// NOTE(review): the int64/uint64 float comparisons against
// float64(math.MaxInt64)/MaxUint64 are subject to float64 precision near
// the bounds — confirm acceptable for this SDK's payloads.
func newBetterFuzzyExtension() jsoniter.DecoderExtension {
	return jsoniter.DecoderExtension{
		reflect2.DefaultTypeOfKind(reflect.String):  &nullableFuzzyStringDecoder{},
		reflect2.DefaultTypeOfKind(reflect.Bool):    &fuzzyBoolDecoder{},
		reflect2.DefaultTypeOfKind(reflect.Float32): &nullableFuzzyFloat32Decoder{},
		reflect2.DefaultTypeOfKind(reflect.Float64): &nullableFuzzyFloat64Decoder{},
		reflect2.DefaultTypeOfKind(reflect.Int): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(maxInt) || val < float64(minInt) {
					iter.ReportError("fuzzy decode int", "exceed range")
					return
				}
				*((*int)(ptr)) = int(val)
			} else {
				*((*int)(ptr)) = iter.ReadInt()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Uint): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(maxUint) || val < 0 {
					iter.ReportError("fuzzy decode uint", "exceed range")
					return
				}
				*((*uint)(ptr)) = uint(val)
			} else {
				*((*uint)(ptr)) = iter.ReadUint()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Int8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxInt8) || val < float64(math.MinInt8) {
					iter.ReportError("fuzzy decode int8", "exceed range")
					return
				}
				*((*int8)(ptr)) = int8(val)
			} else {
				*((*int8)(ptr)) = iter.ReadInt8()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Uint8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxUint8) || val < 0 {
					iter.ReportError("fuzzy decode uint8", "exceed range")
					return
				}
				*((*uint8)(ptr)) = uint8(val)
			} else {
				*((*uint8)(ptr)) = iter.ReadUint8()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Int16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxInt16) || val < float64(math.MinInt16) {
					iter.ReportError("fuzzy decode int16", "exceed range")
					return
				}
				*((*int16)(ptr)) = int16(val)
			} else {
				*((*int16)(ptr)) = iter.ReadInt16()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Uint16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxUint16) || val < 0 {
					iter.ReportError("fuzzy decode uint16", "exceed range")
					return
				}
				*((*uint16)(ptr)) = uint16(val)
			} else {
				*((*uint16)(ptr)) = iter.ReadUint16()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Int32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxInt32) || val < float64(math.MinInt32) {
					iter.ReportError("fuzzy decode int32", "exceed range")
					return
				}
				*((*int32)(ptr)) = int32(val)
			} else {
				*((*int32)(ptr)) = iter.ReadInt32()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Uint32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxUint32) || val < 0 {
					iter.ReportError("fuzzy decode uint32", "exceed range")
					return
				}
				*((*uint32)(ptr)) = uint32(val)
			} else {
				*((*uint32)(ptr)) = iter.ReadUint32()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Int64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxInt64) || val < float64(math.MinInt64) {
					iter.ReportError("fuzzy decode int64", "exceed range")
					return
				}
				*((*int64)(ptr)) = int64(val)
			} else {
				*((*int64)(ptr)) = iter.ReadInt64()
			}
		}},
		reflect2.DefaultTypeOfKind(reflect.Uint64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
			if isFloat {
				val := iter.ReadFloat64()
				if val > float64(math.MaxUint64) || val < 0 {
					iter.ReportError("fuzzy decode uint64", "exceed range")
					return
				}
				*((*uint64)(ptr)) = uint64(val)
			} else {
				*((*uint64)(ptr)) = iter.ReadUint64()
			}
		}},
	}
}
// nullableFuzzyStringDecoder decodes JSON numbers, bools and null into a
// Go string field in addition to plain strings (null becomes "").
type nullableFuzzyStringDecoder struct {
}

func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	valueType := iter.WhatIsNext()
	switch valueType {
	case jsoniter.NumberValue:
		// Keep the number's literal text rather than a re-formatted float.
		var number json.Number
		iter.ReadVal(&number)
		*((*string)(ptr)) = string(number)
	case jsoniter.StringValue:
		*((*string)(ptr)) = iter.ReadString()
	case jsoniter.BoolValue:
		*((*string)(ptr)) = strconv.FormatBool(iter.ReadBool())
	case jsoniter.NilValue:
		iter.ReadNil()
		*((*string)(ptr)) = ""
	default:
		// Arrays/objects are not coerced into strings.
		iter.ReportError("fuzzyStringDecoder", "not number or string or bool")
	}
}
// fuzzyBoolDecoder decodes numbers (0 = false, anything else = true),
// the strings "true"/"false"/"" (case-insensitive) and null into a bool.
type fuzzyBoolDecoder struct {
}

func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	valueType := iter.WhatIsNext()
	switch valueType {
	case jsoniter.BoolValue:
		*((*bool)(ptr)) = iter.ReadBool()
	case jsoniter.NumberValue:
		var number json.Number
		iter.ReadVal(&number)
		num, err := number.Int64()
		if err != nil {
			// NOTE(review): execution continues after ReportError, so a
			// non-integer number still writes false (num is 0) below.
			iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed")
		}
		if num == 0 {
			*((*bool)(ptr)) = false
		} else {
			*((*bool)(ptr)) = true
		}
	case jsoniter.StringValue:
		strValue := strings.ToLower(iter.ReadString())
		if strValue == "true" {
			*((*bool)(ptr)) = true
		} else if strValue == "false" || strValue == "" {
			*((*bool)(ptr)) = false
		} else {
			iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue)
		}
	case jsoniter.NilValue:
		iter.ReadNil()
		*((*bool)(ptr)) = false
	default:
		iter.ReportError("fuzzyBoolDecoder", "not number or string or nil")
	}
}
// nullableFuzzyIntegerDecoder normalizes the incoming token (number,
// numeric string, bool, null) to a numeric literal string, then delegates
// the actual narrowing/range-check to the per-kind fun callback via a
// borrowed iterator over that literal.
type nullableFuzzyIntegerDecoder struct {
	fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator)
}

func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	valueType := iter.WhatIsNext()
	var str string
	switch valueType {
	case jsoniter.NumberValue:
		var number json.Number
		iter.ReadVal(&number)
		str = string(number)
	case jsoniter.StringValue:
		str = iter.ReadString()
		// support empty string
		if str == "" {
			str = "0"
		}
	case jsoniter.BoolValue:
		if iter.ReadBool() {
			str = "1"
		} else {
			str = "0"
		}
	case jsoniter.NilValue:
		iter.ReadNil()
		str = "0"
	default:
		// NOTE(review): execution falls through with str == "" after this
		// ReportError, so the borrowed iterator below parses empty input.
		iter.ReportError("fuzzyIntegerDecoder", "not number or string")
	}
	newIter := iter.Pool().BorrowIterator([]byte(str))
	defer iter.Pool().ReturnIterator(newIter)
	// A '.' in the literal selects the callback's float path.
	isFloat := strings.IndexByte(str, '.') != -1
	decoder.fun(isFloat, ptr, newIter)
	if newIter.Error != nil && newIter.Error != io.EOF {
		iter.Error = newIter.Error
	}
}
// nullableFuzzyFloat32Decoder decodes numbers, numeric strings ("" = 0),
// bools (true = 1) and null into a float32.
type nullableFuzzyFloat32Decoder struct {
}

func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	valueType := iter.WhatIsNext()
	var str string
	switch valueType {
	case jsoniter.NumberValue:
		*((*float32)(ptr)) = iter.ReadFloat32()
	case jsoniter.StringValue:
		str = iter.ReadString()
		// support empty string
		if str == "" {
			*((*float32)(ptr)) = 0
			return
		}
		// Re-parse the string through a borrowed iterator so string and
		// number input share jsoniter's float parsing.
		newIter := iter.Pool().BorrowIterator([]byte(str))
		defer iter.Pool().ReturnIterator(newIter)
		*((*float32)(ptr)) = newIter.ReadFloat32()
		if newIter.Error != nil && newIter.Error != io.EOF {
			iter.Error = newIter.Error
		}
	case jsoniter.BoolValue:
		// support bool to float32
		if iter.ReadBool() {
			*((*float32)(ptr)) = 1
		} else {
			*((*float32)(ptr)) = 0
		}
	case jsoniter.NilValue:
		iter.ReadNil()
		*((*float32)(ptr)) = 0
	default:
		iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string")
	}
}
// nullableFuzzyFloat64Decoder decodes numbers, numeric strings ("" = 0),
// bools (true = 1) and null into a float64. Mirrors the float32 variant.
type nullableFuzzyFloat64Decoder struct {
}

func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	valueType := iter.WhatIsNext()
	var str string
	switch valueType {
	case jsoniter.NumberValue:
		*((*float64)(ptr)) = iter.ReadFloat64()
	case jsoniter.StringValue:
		str = iter.ReadString()
		// support empty string
		if str == "" {
			*((*float64)(ptr)) = 0
			return
		}
		// Re-parse the string through a borrowed iterator so string and
		// number input share jsoniter's float parsing.
		newIter := iter.Pool().BorrowIterator([]byte(str))
		defer iter.Pool().ReturnIterator(newIter)
		*((*float64)(ptr)) = newIter.ReadFloat64()
		if newIter.Error != nil && newIter.Error != io.EOF {
			iter.Error = newIter.Error
		}
	case jsoniter.BoolValue:
		// support bool to float64
		if iter.ReadBool() {
			*((*float64)(ptr)) = 1
		} else {
			*((*float64)(ptr)) = 0
		}
	case jsoniter.NilValue:
		// null decodes to 0.
		iter.ReadNil()
		*((*float64)(ptr)) = 0
	default:
		iter.ReportError("nullableFuzzyFloat64Decoder", "not number or string")
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,491 +0,0 @@
package tea
// Scalar pointer helpers: Xxx(v) returns a pointer to a copy of v, and
// XxxValue(p) dereferences p, mapping nil to the type's zero value.
func String(a string) *string {
	return &a
}
func StringValue(a *string) string {
	if a == nil {
		return ""
	}
	return *a
}
func Int(a int) *int {
	return &a
}
func IntValue(a *int) int {
	if a == nil {
		return 0
	}
	return *a
}
func Int8(a int8) *int8 {
	return &a
}
func Int8Value(a *int8) int8 {
	if a == nil {
		return 0
	}
	return *a
}
func Int16(a int16) *int16 {
	return &a
}
func Int16Value(a *int16) int16 {
	if a == nil {
		return 0
	}
	return *a
}
func Int32(a int32) *int32 {
	return &a
}
func Int32Value(a *int32) int32 {
	if a == nil {
		return 0
	}
	return *a
}
func Int64(a int64) *int64 {
	return &a
}
func Int64Value(a *int64) int64 {
	if a == nil {
		return 0
	}
	return *a
}
func Bool(a bool) *bool {
	return &a
}
func BoolValue(a *bool) bool {
	if a == nil {
		return false
	}
	return *a
}
func Uint(a uint) *uint {
	return &a
}
func UintValue(a *uint) uint {
	if a == nil {
		return 0
	}
	return *a
}
func Uint8(a uint8) *uint8 {
	return &a
}
func Uint8Value(a *uint8) uint8 {
	if a == nil {
		return 0
	}
	return *a
}
func Uint16(a uint16) *uint16 {
	return &a
}
func Uint16Value(a *uint16) uint16 {
	if a == nil {
		return 0
	}
	return *a
}
func Uint32(a uint32) *uint32 {
	return &a
}
func Uint32Value(a *uint32) uint32 {
	if a == nil {
		return 0
	}
	return *a
}
func Uint64(a uint64) *uint64 {
	return &a
}
func Uint64Value(a *uint64) uint64 {
	if a == nil {
		return 0
	}
	return *a
}
func Float32(a float32) *float32 {
	return &a
}
func Float32Value(a *float32) float32 {
	if a == nil {
		return 0
	}
	return *a
}
func Float64(a float64) *float64 {
	return &a
}
func Float64Value(a *float64) float64 {
	if a == nil {
		return 0
	}
	return *a
}
func IntSlice(a []int) []*int {
if a == nil {
return nil
}
res := make([]*int, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func IntValueSlice(a []*int) []int {
if a == nil {
return nil
}
res := make([]int, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Int8Slice(a []int8) []*int8 {
if a == nil {
return nil
}
res := make([]*int8, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Int8ValueSlice(a []*int8) []int8 {
if a == nil {
return nil
}
res := make([]int8, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Int16Slice(a []int16) []*int16 {
if a == nil {
return nil
}
res := make([]*int16, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Int16ValueSlice(a []*int16) []int16 {
if a == nil {
return nil
}
res := make([]int16, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Int32Slice(a []int32) []*int32 {
if a == nil {
return nil
}
res := make([]*int32, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Int32ValueSlice(a []*int32) []int32 {
if a == nil {
return nil
}
res := make([]int32, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Int64Slice(a []int64) []*int64 {
if a == nil {
return nil
}
res := make([]*int64, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Int64ValueSlice(a []*int64) []int64 {
if a == nil {
return nil
}
res := make([]int64, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func UintSlice(a []uint) []*uint {
if a == nil {
return nil
}
res := make([]*uint, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func UintValueSlice(a []*uint) []uint {
if a == nil {
return nil
}
res := make([]uint, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Uint8Slice(a []uint8) []*uint8 {
if a == nil {
return nil
}
res := make([]*uint8, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Uint8ValueSlice(a []*uint8) []uint8 {
if a == nil {
return nil
}
res := make([]uint8, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Uint16Slice(a []uint16) []*uint16 {
if a == nil {
return nil
}
res := make([]*uint16, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Uint16ValueSlice(a []*uint16) []uint16 {
if a == nil {
return nil
}
res := make([]uint16, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Uint32Slice(a []uint32) []*uint32 {
if a == nil {
return nil
}
res := make([]*uint32, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Uint32ValueSlice(a []*uint32) []uint32 {
if a == nil {
return nil
}
res := make([]uint32, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
func Uint64Slice(a []uint64) []*uint64 {
if a == nil {
return nil
}
res := make([]*uint64, len(a))
for i := 0; i < len(a); i++ {
res[i] = &a[i]
}
return res
}
func Uint64ValueSlice(a []*uint64) []uint64 {
if a == nil {
return nil
}
res := make([]uint64, len(a))
for i := 0; i < len(a); i++ {
if a[i] != nil {
res[i] = *a[i]
}
}
return res
}
// Float32Slice converts a float32 slice into a slice of pointers to its
// elements. A nil input yields a nil result.
func Float32Slice(a []float32) []*float32 {
	if a == nil {
		return nil
	}
	out := make([]*float32, len(a))
	for i := range a {
		out[i] = &a[i]
	}
	return out
}

// Float32ValueSlice dereferences a slice of *float32 into plain values.
// Nil entries become zero; a nil input yields a nil result.
func Float32ValueSlice(a []*float32) []float32 {
	if a == nil {
		return nil
	}
	out := make([]float32, len(a))
	for i, p := range a {
		if p != nil {
			out[i] = *p
		}
	}
	return out
}

// Float64Slice converts a float64 slice into a slice of pointers to its
// elements. A nil input yields a nil result.
func Float64Slice(a []float64) []*float64 {
	if a == nil {
		return nil
	}
	out := make([]*float64, len(a))
	for i := range a {
		out[i] = &a[i]
	}
	return out
}

// Float64ValueSlice dereferences a slice of *float64 into plain values.
// Nil entries become zero; a nil input yields a nil result.
func Float64ValueSlice(a []*float64) []float64 {
	if a == nil {
		return nil
	}
	out := make([]float64, len(a))
	for i, p := range a {
		if p != nil {
			out[i] = *p
		}
	}
	return out
}
// StringSlice converts a string slice into a slice of pointers to its
// elements. A nil input yields a nil result.
func StringSlice(a []string) []*string {
	if a == nil {
		return nil
	}
	out := make([]*string, len(a))
	for i := range a {
		out[i] = &a[i]
	}
	return out
}

// StringSliceValue dereferences a slice of *string into plain values.
// Nil entries become ""; a nil input yields a nil result.
func StringSliceValue(a []*string) []string {
	if a == nil {
		return nil
	}
	out := make([]string, len(a))
	for i, p := range a {
		if p != nil {
			out[i] = *p
		}
	}
	return out
}

// BoolSlice converts a bool slice into a slice of pointers to its elements.
// A nil input yields a nil result.
func BoolSlice(a []bool) []*bool {
	if a == nil {
		return nil
	}
	out := make([]*bool, len(a))
	for i := range a {
		out[i] = &a[i]
	}
	return out
}

// BoolSliceValue dereferences a slice of *bool into plain values.
// Nil entries become false; a nil input yields a nil result.
func BoolSliceValue(a []*bool) []bool {
	if a == nil {
		return nil
	}
	out := make([]bool, len(a))
	for i, p := range a {
		if p != nil {
			out[i] = *p
		}
	}
	return out
}

View File

@@ -1,64 +0,0 @@
package utils
import (
"reflect"
"strings"
"testing"
)
func isNil(object interface{}) bool {
if object == nil {
return true
}
value := reflect.ValueOf(object)
kind := value.Kind()
isNilableKind := containsKind(
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice},
kind)
if isNilableKind && value.IsNil() {
return true
}
return false
}
func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
for i := 0; i < len(kinds); i++ {
if kind == kinds[i] {
return true
}
}
return false
}
func AssertEqual(t *testing.T, a, b interface{}) {
if !reflect.DeepEqual(a, b) {
t.Errorf("%v != %v", a, b)
}
}
func AssertNil(t *testing.T, object interface{}) {
if !isNil(object) {
t.Errorf("%v is not nil", object)
}
}
func AssertNotNil(t *testing.T, object interface{}) {
if isNil(object) {
t.Errorf("%v is nil", object)
}
}
func AssertContains(t *testing.T, contains string, msgAndArgs ...string) {
for _, value := range msgAndArgs {
if ok := strings.Contains(contains, value); !ok {
t.Errorf("%s does not contain %s", contains, value)
}
}
}

View File

@@ -1,109 +0,0 @@
package utils
import (
"io"
"log"
"strings"
"time"
)
// Logger wraps the standard library logger with a placeholder-based message
// template, an on/off switch, and a record of the last formatted message.
type Logger struct {
	*log.Logger
	formatTemplate string
	isOpen         bool
	lastLogMsg     string
}

// defaultLoggerTemplate is used when NewLogger receives an empty template.
var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}`

// loggerParam lists every placeholder the template engine recognizes.
var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"}

// logChannel is the package-wide channel name stamped into each message.
var logChannel string

// InitLogMsg resets every known placeholder in fieldMap to the empty string.
func InitLogMsg(fieldMap map[string]string) {
	for _, key := range loggerParam {
		fieldMap[key] = ""
	}
}

// SetFormatTemplate replaces the logger's message template.
func (logger *Logger) SetFormatTemplate(template string) {
	logger.formatTemplate = template
}

// GetFormatTemplate returns the logger's current message template.
func (logger *Logger) GetFormatTemplate() string {
	return logger.formatTemplate
}

// NewLogger builds an open Logger writing to out. An empty level defaults to
// "info", an empty channel to "AlibabaCloud", and an empty template to
// defaultLoggerTemplate. Note it mutates the package-level logChannel.
func NewLogger(level string, channel string, out io.Writer, template string) *Logger {
	if level == "" {
		level = "info"
	}
	logChannel = "AlibabaCloud"
	if channel != "" {
		logChannel = channel
	}
	if template == "" {
		template = defaultLoggerTemplate
	}
	inner := log.New(out, "["+strings.ToUpper(level)+"]", log.Lshortfile)
	return &Logger{
		Logger:         inner,
		formatTemplate: template,
		isOpen:         true,
	}
}

// OpenLogger enables output.
func (logger *Logger) OpenLogger() {
	logger.isOpen = true
}

// CloseLogger disables output (messages are still formatted and recorded).
func (logger *Logger) CloseLogger() {
	logger.isOpen = false
}

// SetIsopen sets the output switch directly.
func (logger *Logger) SetIsopen(isopen bool) {
	logger.isOpen = isopen
}

// GetIsopen reports whether output is enabled.
func (logger *Logger) GetIsopen() bool {
	return logger.isOpen
}

// SetLastLogMsg overrides the recorded last message.
func (logger *Logger) SetLastLogMsg(lastLogMsg string) {
	logger.lastLogMsg = lastLogMsg
}

// GetLastLogMsg returns the most recently formatted message.
func (logger *Logger) GetLastLogMsg() string {
	return logger.lastLogMsg
}

// SetLogChannel sets the package-wide channel name.
func SetLogChannel(channel string) {
	logChannel = channel
}

// PrintLog fills the time/ts/channel (and error, when err != nil) fields of
// fieldMap, expands the template, records the result, and emits it when the
// logger is open. The fieldMap mutation happens even when logger is nil.
func (logger *Logger) PrintLog(fieldMap map[string]string, err error) {
	if err != nil {
		fieldMap["{error}"] = err.Error()
	}
	fieldMap["{time}"] = time.Now().Format("2006-01-02 15:04:05")
	fieldMap["{ts}"] = getTimeInFormatISO8601()
	fieldMap["{channel}"] = logChannel
	if logger == nil {
		return
	}
	msg := logger.formatTemplate
	for placeholder, value := range fieldMap {
		msg = strings.Replace(msg, placeholder, value, -1)
	}
	logger.lastLogMsg = msg
	if logger.isOpen {
		logger.Output(2, msg)
	}
}

// getTimeInFormatISO8601 returns the current GMT time in ISO-8601 form.
func getTimeInFormatISO8601() string {
	gmt := time.FixedZone("GMT", 0)
	return time.Now().In(gmt).Format("2006-01-02T15:04:05Z")
}

View File

@@ -1,60 +0,0 @@
package utils
// ProgressEventType defines transfer progress event type
type ProgressEventType int

const (
	// TransferStartedEvent transfer started, set TotalBytes
	TransferStartedEvent ProgressEventType = 1 + iota
	// TransferDataEvent transfer data, set ConsumedBytes and TotalBytes
	TransferDataEvent
	// TransferCompletedEvent transfer completed
	TransferCompletedEvent
	// TransferFailedEvent transfer encounters an error
	TransferFailedEvent
)

// ProgressEvent defines progress event
type ProgressEvent struct {
	ConsumedBytes int64 // bytes transferred so far
	TotalBytes    int64 // total bytes expected
	RwBytes       int64 // bytes read/written for this event
	EventType     ProgressEventType
}

// ProgressListener listens progress change
type ProgressListener interface {
	ProgressChanged(event *ProgressEvent)
}

// -------------------- Private --------------------

// NewProgressEvent builds a ProgressEvent from its field values.
func NewProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
	return &ProgressEvent{
		ConsumedBytes: consumed,
		TotalBytes:    total,
		RwBytes:       rwBytes,
		EventType:     eventType,
	}
}

// PublishProgress forwards event to listener; no-op when either is nil.
func PublishProgress(listener ProgressListener, event *ProgressEvent) {
	if listener == nil || event == nil {
		return
	}
	listener.ProgressChanged(event)
}

// GetProgressListener converts obj to a ProgressListener, returning nil when
// obj is nil or does not implement the interface.
func GetProgressListener(obj interface{}) ProgressListener {
	if obj == nil {
		return nil
	}
	if listener, ok := obj.(ProgressListener); ok {
		return listener
	}
	return nil
}

// ReaderTracker tracks how many bytes a reader has completed.
type ReaderTracker struct {
	CompletedBytes int64
}

View File

@@ -1,14 +0,0 @@
Copyright (c) 2015 aliyun.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,339 +0,0 @@
package oss
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"io"
"net/http"
"sort"
"strconv"
"strings"
"time"
)
// headerSorter defines the key-value structure for storing the sorted data in signHeader.
// Keys[i] pairs with Vals[i]; Sort (via sort.Interface) orders both slices by key.
type headerSorter struct {
	Keys []string
	Vals []string
}
// getAdditionalHeaderKeys returns the configured additional signing headers
// that are actually present on req, as a sorted lowercase list plus a
// lookup set keyed by the same names.
func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
	present := make(map[string]string)
	for k := range req.Header {
		present[strings.ToLower(k)] = ""
	}
	keysMap := make(map[string]string)
	for _, h := range conn.config.AdditionalHeaders {
		lower := strings.ToLower(h)
		if _, ok := present[lower]; ok {
			keysMap[lower] = ""
		}
	}
	var keysList []string
	for k := range keysMap {
		keysList = append(keysList, k)
	}
	sort.Strings(keysList)
	return keysList, keysMap
}

// getAdditionalHeaderKeysV4 is the V4-signature variant of
// getAdditionalHeaderKeys: Content-MD5 and Content-Type are excluded because
// V4 always signs them separately.
func (conn Conn) getAdditionalHeaderKeysV4(req *http.Request) ([]string, map[string]string) {
	present := make(map[string]string)
	for k := range req.Header {
		present[strings.ToLower(k)] = ""
	}
	keysMap := make(map[string]string)
	for _, h := range conn.config.AdditionalHeaders {
		if _, ok := present[strings.ToLower(h)]; !ok {
			continue
		}
		if strings.EqualFold(h, HTTPHeaderContentMD5) || strings.EqualFold(h, HTTPHeaderContentType) {
			continue
		}
		keysMap[strings.ToLower(h)] = ""
	}
	var keysList []string
	for k := range keysMap {
		keysList = append(keysList, k)
	}
	sort.Strings(keysList)
	return keysList, keysMap
}
// signHeader signs the header and sets it as the authorization header.
// The Authorization format depends on conn.config.AuthVersion:
//   - AuthV4: "OSS4-HMAC-SHA256 Credential=ak/day/region/product/aliyun_v4_request[,AdditionalHeaders=...],Signature=..."
//   - AuthV2: "OSS2 AccessKeyId:ak[,AdditionalHeaders:...],Signature:..."
//   - otherwise (V1): "OSS ak:signature"
func (conn Conn) signHeader(req *http.Request, canonicalizedResource string, credentials Credentials) {
	akIf := credentials
	authorizationStr := ""
	if conn.config.AuthVersion == AuthV4 {
		// Resolve the credential-scope day (yyyymmdd) from X-Oss-Date,
		// falling back to the standard Date header.
		strDay := ""
		strDate := req.Header.Get(HttpHeaderOssDate)
		if strDate == "" {
			strDate = req.Header.Get(HTTPHeaderDate)
			t, _ := time.Parse(http.TimeFormat, strDate)
			strDay = t.Format("20060102")
		} else {
			t, _ := time.Parse(timeFormatV4, strDate)
			strDay = t.Format("20060102")
		}
		signHeaderProduct := conn.config.GetSignProduct()
		signHeaderRegion := conn.config.GetSignRegion()
		// Additional headers present on the request are listed in the
		// Authorization value so the server signs the same set.
		additionalList, _ := conn.getAdditionalHeaderKeysV4(req)
		if len(additionalList) > 0 {
			authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,AdditionalHeaders=%v,Signature=%v"
			additionnalHeadersStr := strings.Join(additionalList, ";")
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, additionnalHeadersStr, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret(), nil))
		} else {
			authorizationFmt := "OSS4-HMAC-SHA256 Credential=%v/%v/%v/" + signHeaderProduct + "/aliyun_v4_request,Signature=%v"
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), strDay, signHeaderRegion, conn.getSignedStrV4(req, canonicalizedResource, akIf.GetAccessKeySecret(), nil))
		}
	} else if conn.config.AuthVersion == AuthV2 {
		additionalList, _ := conn.getAdditionalHeaderKeys(req)
		if len(additionalList) > 0 {
			authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v"
			additionnalHeadersStr := strings.Join(additionalList, ";")
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
		} else {
			authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v"
			authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()))
		}
	} else {
		// Get the final authorization string
		authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
	}
	// Give the parameter "Authorization" value
	req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
}
// getSignedStr computes the base64 request signature for V1 (HMAC-SHA1) or
// V2 (HMAC-SHA256) signing, over the canonicalized x-oss-* headers, the
// Date/Content-Type/Content-MD5 headers, and the canonicalized resource.
func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
	// Find out the "x-oss-"'s address in header of the request
	ossHeadersMap := make(map[string]string)
	additionalList, additionalMap := conn.getAdditionalHeaderKeys(req)
	for k, v := range req.Header {
		if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
			ossHeadersMap[strings.ToLower(k)] = v[0]
		} else if conn.config.AuthVersion == AuthV2 {
			// V2 additionally signs the configured extra headers.
			if _, ok := additionalMap[strings.ToLower(k)]; ok {
				ossHeadersMap[strings.ToLower(k)] = v[0]
			}
		}
	}
	hs := newHeaderSorter(ossHeadersMap)
	// Sort the ossHeadersMap by the ascending order
	hs.Sort()
	// Get the canonicalizedOSSHeaders
	canonicalizedOSSHeaders := ""
	for i := range hs.Keys {
		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
	}
	// Give other parameters values
	// when sign URL, date is expires
	date := req.Header.Get(HTTPHeaderDate)
	contentType := req.Header.Get(HTTPHeaderContentType)
	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
	// default is v1 signature
	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
	// v2 signature: same layout plus the additional-headers list, HMAC-SHA256
	if conn.config.AuthVersion == AuthV2 {
		signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
		h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
	}
	if conn.config.LogLevel >= Debug {
		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
	}
	io.WriteString(h, signStr)
	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
	return signedStr
}
// getSignedStrV4 computes the V4 (OSS4-HMAC-SHA256) signature for req as a
// lowercase hex string. When signingTime is nil, the signing date is taken
// from the Date / X-Oss-Date request headers (X-Oss-Date wins).
func (conn Conn) getSignedStrV4(req *http.Request, canonicalizedResource string, keySecret string, signingTime *time.Time) string {
	// Find out the "x-oss-"'s address in header of the request
	// (V4 also always signs Content-MD5 and Content-Type).
	ossHeadersMap := make(map[string]string)
	additionalList, additionalMap := conn.getAdditionalHeaderKeysV4(req)
	for k, v := range req.Header {
		lowKey := strings.ToLower(k)
		if strings.EqualFold(lowKey, HTTPHeaderContentMD5) ||
			strings.EqualFold(lowKey, HTTPHeaderContentType) ||
			strings.HasPrefix(lowKey, "x-oss-") {
			ossHeadersMap[lowKey] = strings.Trim(v[0], " ")
		} else {
			if _, ok := additionalMap[lowKey]; ok {
				ossHeadersMap[lowKey] = strings.Trim(v[0], " ")
			}
		}
	}
	// get day,eg 20210914
	//signingTime
	signDate := ""
	strDay := ""
	if signingTime != nil {
		signDate = signingTime.Format(timeFormatV4)
		strDay = signingTime.Format(shortTimeFormatV4)
	} else {
		var t time.Time
		// Required parameters
		if date := req.Header.Get(HTTPHeaderDate); date != "" {
			signDate = date
			t, _ = time.Parse(http.TimeFormat, date)
		}
		if ossDate := req.Header.Get(HttpHeaderOssDate); ossDate != "" {
			signDate = ossDate
			t, _ = time.Parse(timeFormatV4, ossDate)
		}
		strDay = t.Format("20060102")
	}
	hs := newHeaderSorter(ossHeadersMap)
	// Sort the ossHeadersMap by the ascending order
	hs.Sort()
	// Get the canonicalizedOSSHeaders
	canonicalizedOSSHeaders := ""
	for i := range hs.Keys {
		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
	}
	signStr := ""
	// v4 signature: body hash defaults to the unsigned-payload sentinel.
	hashedPayload := DefaultContentSha256
	if val := req.Header.Get(HttpHeaderOssContentSha256); val != "" {
		hashedPayload = val
	}
	// subResource: split the query part off the canonicalized resource.
	resource := canonicalizedResource
	subResource := ""
	subPos := strings.LastIndex(canonicalizedResource, "?")
	if subPos != -1 {
		subResource = canonicalizedResource[subPos+1:]
		resource = canonicalizedResource[0:subPos]
	}
	// get canonical request
	canonicalReuqest := req.Method + "\n" + resource + "\n" + subResource + "\n" + canonicalizedOSSHeaders + "\n" + strings.Join(additionalList, ";") + "\n" + hashedPayload
	rh := sha256.New()
	io.WriteString(rh, canonicalReuqest)
	hashedRequest := hex.EncodeToString(rh.Sum(nil))
	if conn.config.LogLevel >= Debug {
		conn.config.WriteLog(Debug, "[Req:%p]CanonicalRequest:%s\n", req, EscapeLFString(canonicalReuqest))
	}
	// Product & Region
	signedStrV4Product := conn.config.GetSignProduct()
	signedStrV4Region := conn.config.GetSignRegion()
	signStr = "OSS4-HMAC-SHA256" + "\n" + signDate + "\n" + strDay + "/" + signedStrV4Region + "/" + signedStrV4Product + "/aliyun_v4_request" + "\n" + hashedRequest
	if conn.config.LogLevel >= Debug {
		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, EscapeLFString(signStr))
	}
	// Derive the signing key: an HMAC-SHA256 chain over day, region, product
	// and the literal "aliyun_v4_request", seeded with "aliyun_v4" + secret.
	h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte("aliyun_v4"+keySecret))
	io.WriteString(h1, strDay)
	h1Key := h1.Sum(nil)
	h2 := hmac.New(func() hash.Hash { return sha256.New() }, h1Key)
	io.WriteString(h2, signedStrV4Region)
	h2Key := h2.Sum(nil)
	h3 := hmac.New(func() hash.Hash { return sha256.New() }, h2Key)
	io.WriteString(h3, signedStrV4Product)
	h3Key := h3.Sum(nil)
	h4 := hmac.New(func() hash.Hash { return sha256.New() }, h3Key)
	io.WriteString(h4, "aliyun_v4_request")
	h4Key := h4.Sum(nil)
	// Final signature = hex(HMAC-SHA256(derivedKey, signStr)).
	h := hmac.New(func() hash.Hash { return sha256.New() }, h4Key)
	io.WriteString(h, signStr)
	return fmt.Sprintf("%x", h.Sum(nil))
}
// getRtmpSignedStr computes the base64 HMAC-SHA1 signature for an RTMP
// publish URL over "expires\nkey:value...\n/bucket/channel". Returns "" when
// no access key id is present in params.
// NOTE(review): playlistName is currently unused in this function.
func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
	if params[HTTPParamAccessKeyID] == nil {
		return ""
	}
	canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName)
	// Everything except the auth-related parameters is signed, sorted by key.
	canonParamsKeys := []string{}
	for key := range params {
		if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken {
			canonParamsKeys = append(canonParamsKeys, key)
		}
	}
	sort.Strings(canonParamsKeys)
	canonParamsStr := ""
	for _, key := range canonParamsKeys {
		canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string))
	}
	expireStr := strconv.FormatInt(expiration, 10)
	signStr := expireStr + "\n" + canonParamsStr + canonResource
	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
	io.WriteString(h, signStr)
	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
	return signedStr
}
// newHeaderSorter builds a headerSorter from the given header map, pairing
// Keys[i] with Vals[i].
func newHeaderSorter(m map[string]string) *headerSorter {
	hs := &headerSorter{
		Keys: make([]string, 0, len(m)),
		Vals: make([]string, 0, len(m)),
	}
	for k, v := range m {
		hs.Keys = append(hs.Keys, k)
		hs.Vals = append(hs.Vals, v)
	}
	return hs
}

// Sort orders the headers by ascending key name.
func (hs *headerSorter) Sort() {
	sort.Sort(hs)
}

// Len implements sort.Interface.
func (hs *headerSorter) Len() int {
	return len(hs.Vals)
}

// Less implements sort.Interface, ordering headers lexicographically by key.
// Direct string comparison is byte-wise and equivalent to the previous
// bytes.Compare form, without allocating two []byte per comparison.
func (hs *headerSorter) Less(i, j int) bool {
	return hs.Keys[i] < hs.Keys[j]
}

// Swap implements sort.Interface, keeping keys and values paired.
func (hs *headerSorter) Swap(i, j int) {
	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,301 +0,0 @@
package oss
import (
"bytes"
"fmt"
"log"
"net"
"os"
"time"
)
// Define the level of the output log
const (
	LogOff = iota
	Error
	Warn
	Info
	Debug
)

// LogTag Tag for each level of log (indexed by level-1, see Config.WriteLog)
var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}

// HTTPTimeout defines HTTP timeout.
type HTTPTimeout struct {
	ConnectTimeout   time.Duration
	ReadWriteTimeout time.Duration
	HeaderTimeout    time.Duration
	LongTimeout      time.Duration
	IdleConnTimeout  time.Duration
}

// HTTPMaxConns defines max idle connections and max idle connections per host
type HTTPMaxConns struct {
	MaxIdleConns        int
	MaxIdleConnsPerHost int
	MaxConnsPerHost     int
}

// Credentials is interface for get AccessKeyID,AccessKeySecret,SecurityToken
type Credentials interface {
	GetAccessKeyID() string
	GetAccessKeySecret() string
	GetSecurityToken() string
}

// CredentialsProvider is interface for get Credential Info
type CredentialsProvider interface {
	GetCredentials() Credentials
}

// CredentialsProviderE extends CredentialsProvider with an error-returning
// variant of GetCredentials.
type CredentialsProviderE interface {
	CredentialsProvider
	GetCredentialsE() (Credentials, error)
}
// defaultCredentials adapts the static AccessKeyID/AccessKeySecret/
// SecurityToken fields of a *Config to the Credentials interface.
type defaultCredentials struct {
	config *Config
}

// GetAccessKeyID returns the access key id from the config.
func (defCre *defaultCredentials) GetAccessKeyID() string {
	return defCre.config.AccessKeyID
}

// GetAccessKeySecret returns the access key secret from the config.
func (defCre *defaultCredentials) GetAccessKeySecret() string {
	return defCre.config.AccessKeySecret
}

// GetSecurityToken returns the STS token from the config (may be empty).
func (defCre *defaultCredentials) GetSecurityToken() string {
	return defCre.config.SecurityToken
}

// defaultCredentialsProvider is the CredentialsProvider backed by a *Config.
type defaultCredentialsProvider struct {
	config *Config
}

// GetCredentials returns credentials reading the provider's config.
func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
	return &defaultCredentials{config: defBuild.config}
}
// envCredentials is a static credential set sourced from environment
// variables.
type envCredentials struct {
	AccessKeyId     string
	AccessKeySecret string
	SecurityToken   string
}

// EnvironmentVariableCredentialsProvider supplies credentials from the
// OSS_ACCESS_KEY_ID / OSS_ACCESS_KEY_SECRET / OSS_SESSION_TOKEN environment
// variables, or from a pre-built credential set when cred is non-nil.
type EnvironmentVariableCredentialsProvider struct {
	cred Credentials
}

// GetAccessKeyID returns the access key id.
func (credentials *envCredentials) GetAccessKeyID() string {
	return credentials.AccessKeyId
}

// GetAccessKeySecret returns the access key secret.
func (credentials *envCredentials) GetAccessKeySecret() string {
	return credentials.AccessKeySecret
}

// GetSecurityToken returns the STS security token (may be empty).
func (credentials *envCredentials) GetSecurityToken() string {
	return credentials.SecurityToken
}

// GetCredentials returns the cached credential set; when none was cached
// (zero-value provider) it re-reads the environment on every call.
func (defBuild *EnvironmentVariableCredentialsProvider) GetCredentials() Credentials {
	var accessID, accessKey, token string
	if defBuild.cred == nil {
		accessID = os.Getenv("OSS_ACCESS_KEY_ID")
		accessKey = os.Getenv("OSS_ACCESS_KEY_SECRET")
		token = os.Getenv("OSS_SESSION_TOKEN")
	} else {
		accessID = defBuild.cred.GetAccessKeyID()
		accessKey = defBuild.cred.GetAccessKeySecret()
		token = defBuild.cred.GetSecurityToken()
	}
	return &envCredentials{
		AccessKeyId:     accessID,
		AccessKeySecret: accessKey,
		SecurityToken:   token,
	}
}

// NewEnvironmentVariableCredentialsProvider builds a provider from the
// environment, failing when OSS_ACCESS_KEY_ID or OSS_ACCESS_KEY_SECRET is
// unset. Error strings are lowercase without punctuation per Go convention
// (the previous trailing "!" is flagged by staticcheck ST1005).
func NewEnvironmentVariableCredentialsProvider() (EnvironmentVariableCredentialsProvider, error) {
	var provider EnvironmentVariableCredentialsProvider
	accessID := os.Getenv("OSS_ACCESS_KEY_ID")
	if accessID == "" {
		return provider, fmt.Errorf("access key id is empty")
	}
	accessKey := os.Getenv("OSS_ACCESS_KEY_SECRET")
	if accessKey == "" {
		return provider, fmt.Errorf("access key secret is empty")
	}
	token := os.Getenv("OSS_SESSION_TOKEN")
	envCredential := &envCredentials{
		AccessKeyId:     accessID,
		AccessKeySecret: accessKey,
		SecurityToken:   token,
	}
	return EnvironmentVariableCredentialsProvider{
		cred: envCredential,
	}, nil
}
// Config defines oss configuration
type Config struct {
	Endpoint            string              // OSS endpoint
	AccessKeyID         string              // AccessId
	AccessKeySecret     string              // AccessKey
	RetryTimes          uint                // Retry count by default it's 5.
	UserAgent           string              // SDK name/version/system information
	IsDebug             bool                // Enable debug mode. Default is false.
	Timeout             uint                // Timeout in seconds. By default it's 60.
	SecurityToken       string              // STS Token
	IsCname             bool                // If cname is in the endpoint.
	IsPathStyle         bool                // If Path Style is in the endpoint.
	HTTPTimeout         HTTPTimeout         // HTTP timeout
	HTTPMaxConns        HTTPMaxConns        // Http max connections
	IsUseProxy          bool                // Flag of using proxy.
	ProxyHost           string              // Flag of using proxy host.
	IsAuthProxy         bool                // Flag of needing authentication.
	ProxyUser           string              // Proxy user
	ProxyPassword       string              // Proxy password
	IsEnableMD5         bool                // Flag of enabling MD5 for upload.
	MD5Threshold        int64               // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
	IsEnableCRC         bool                // Flag of enabling CRC for upload.
	LogLevel            int                 // Log level
	Logger              *log.Logger         // For write log
	UploadLimitSpeed    int                 // Upload limit speed:KB/s, 0 is unlimited
	UploadLimiter       *OssLimiter         // Bandwidth limit reader for upload
	DownloadLimitSpeed  int                 // Download limit speed:KB/s, 0 is unlimited
	DownloadLimiter     *OssLimiter         // Bandwidth limit reader for download
	CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken
	LocalAddr           net.Addr            // local client host info
	UserSetUa           bool                // UserAgent is set by user or not
	AuthVersion         AuthVersionType     // v1 or v2, v4 signature,default is v1
	AdditionalHeaders   []string            // special http headers needed to be sign
	RedirectEnabled     bool                // only effective from go1.7 onward, enable http redirect or not
	InsecureSkipVerify  bool                // for https, Whether to skip verifying the server certificate file
	Region              string              // such as cn-hangzhou
	CloudBoxId          string              // when set, switches signing product/region (see GetSignProduct/GetSignRegion)
	Product             string              // oss or oss-cloudbox, default is oss
	VerifyObjectStrict  bool                // a flag of verifying object name strictly. Default is enable.
}
// LimitUploadSpeed caps upload bandwidth in KB/s; 0 removes the cap
// (the default). Negative values are rejected with an error.
func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
	switch {
	case uploadSpeed < 0:
		return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
	case uploadSpeed == 0:
		// Drop any existing limiter.
		config.UploadLimitSpeed = 0
		config.UploadLimiter = nil
		return nil
	}
	limiter, err := GetOssLimiter(uploadSpeed)
	config.UploadLimiter = limiter
	if err == nil {
		config.UploadLimitSpeed = uploadSpeed
	}
	return err
}
// LimitDownloadSpeed caps download bandwidth in KB/s; 0 removes the cap
// (the default). Negative values are rejected with an error.
func (config *Config) LimitDownloadSpeed(downloadSpeed int) error {
	switch {
	case downloadSpeed < 0:
		return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0")
	case downloadSpeed == 0:
		// Drop any existing limiter.
		config.DownloadLimitSpeed = 0
		config.DownloadLimiter = nil
		return nil
	}
	limiter, err := GetOssLimiter(downloadSpeed)
	config.DownloadLimiter = limiter
	if err == nil {
		config.DownloadLimitSpeed = downloadSpeed
	}
	return err
}
// WriteLog emits a formatted message at the given level through the
// configured Logger, prefixed with its level tag. Messages are dropped when
// the level is above config.LogLevel or no Logger is set.
func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
	if config.LogLevel < LogLevel || config.Logger == nil {
		return
	}
	var buf bytes.Buffer
	buf.WriteString(LogTag[LogLevel-1])
	fmt.Fprintf(&buf, format, a...)
	config.Logger.Printf("%s", buf.String())
}
// GetCredentials returns credentials from the configured provider.
func (config *Config) GetCredentials() Credentials {
	return config.CredentialsProvider.GetCredentials()
}

// GetSignProduct returns the product name used in the V4 credential scope:
// "oss-cloudbox" when a cloud box id is configured, otherwise "oss".
func (config *Config) GetSignProduct() string {
	if config.CloudBoxId == "" {
		return "oss"
	}
	return "oss-cloudbox"
}

// GetSignRegion returns the region used in the V4 credential scope; a
// configured cloud box id takes precedence over Region.
func (config *Config) GetSignRegion() string {
	if config.CloudBoxId == "" {
		return config.Region
	}
	return config.CloudBoxId
}
// getDefaultOssConfig gets the default configuration.
func getDefaultOssConfig() *Config {
	config := &Config{
		Endpoint:        "",
		AccessKeyID:     "",
		AccessKeySecret: "",
		RetryTimes:      5,
		IsDebug:         false,
		UserAgent:       userAgent(),
		Timeout:         60, // seconds
		SecurityToken:   "",
		IsCname:         false,
		IsPathStyle:     false,
		HTTPTimeout: HTTPTimeout{
			ConnectTimeout:   time.Second * 30,  // 30s
			ReadWriteTimeout: time.Second * 60,  // 60s
			HeaderTimeout:    time.Second * 60,  // 60s
			LongTimeout:      time.Second * 300, // 300s
			IdleConnTimeout:  time.Second * 50,  // 50s
		},
		HTTPMaxConns: HTTPMaxConns{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 100,
		},
		IsUseProxy:         false,
		ProxyHost:          "",
		IsAuthProxy:        false,
		ProxyUser:          "",
		ProxyPassword:      "",
		MD5Threshold:       16 * 1024 * 1024, // 16MB
		IsEnableMD5:        false,
		IsEnableCRC:        true,
		LogLevel:           LogOff,
		Logger:             log.New(os.Stdout, "", log.LstdFlags),
		AuthVersion:        AuthV1,
		RedirectEnabled:    true,
		InsecureSkipVerify: false,
		Product:            "oss",
		VerifyObjectStrict: true,
	}
	// The default provider serves the static credentials stored on this
	// same config value.
	config.CredentialsProvider = &defaultCredentialsProvider{config: config}
	return config
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,273 +0,0 @@
package oss
import "os"
// ACLType bucket/object ACL
type ACLType string

const (
	// ACLPrivate definition : private read and write
	ACLPrivate ACLType = "private"
	// ACLPublicRead definition : public read and private write
	ACLPublicRead ACLType = "public-read"
	// ACLPublicReadWrite definition : public read and public write
	ACLPublicReadWrite ACLType = "public-read-write"
	// ACLDefault Object. It's only applicable for object.
	ACLDefault ACLType = "default"
)

// VersioningStatus bucket versioning status
type VersioningStatus string

const (
	// VersionEnabled Versioning Status definition: Enabled
	VersionEnabled VersioningStatus = "Enabled"
	// VersionSuspended Versioning Status definition: Suspended
	VersionSuspended VersioningStatus = "Suspended"
)

// MetadataDirectiveType specifying whether use the metadata of source object when copying object.
type MetadataDirectiveType string

const (
	// MetaCopy the target object's metadata is copied from the source one
	MetaCopy MetadataDirectiveType = "COPY"
	// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
	MetaReplace MetadataDirectiveType = "REPLACE"
)

// TaggingDirectiveType specifying whether use the tagging of source object when copying object.
type TaggingDirectiveType string

const (
	// TaggingCopy the target object's tagging is copied from the source one
	TaggingCopy TaggingDirectiveType = "COPY"
	// TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one)
	TaggingReplace TaggingDirectiveType = "REPLACE"
)

// AlgorithmType specifying the server side encryption algorithm name
type AlgorithmType string

const (
	KMSAlgorithm AlgorithmType = "KMS"
	AESAlgorithm AlgorithmType = "AES256"
	SM4Algorithm AlgorithmType = "SM4"
)

// StorageClassType bucket storage type
type StorageClassType string

const (
	// StorageStandard standard
	StorageStandard StorageClassType = "Standard"
	// StorageIA infrequent access
	StorageIA StorageClassType = "IA"
	// StorageArchive archive
	StorageArchive StorageClassType = "Archive"
	// StorageColdArchive cold archive
	StorageColdArchive StorageClassType = "ColdArchive"
	// StorageDeepColdArchive deep cold archive
	StorageDeepColdArchive StorageClassType = "DeepColdArchive"
)

// DataRedundancyType bucket data redundancy type
type DataRedundancyType string

const (
	// RedundancyLRS Local redundancy, default value
	RedundancyLRS DataRedundancyType = "LRS"
	// RedundancyZRS Same city redundancy
	RedundancyZRS DataRedundancyType = "ZRS"
)

// ObjecthashFuncType hash function used for object integrity checking
type ObjecthashFuncType string

const (
	HashFuncSha1   ObjecthashFuncType = "SHA-1"
	HashFuncSha256 ObjecthashFuncType = "SHA-256"
)

// PayerType the type of request payer
type PayerType string

const (
	// Requester the requester who sends the request pays
	Requester PayerType = "Requester"
	// BucketOwner the bucket owner pays for the request
	// (the original comment was a copy-paste of Requester's)
	BucketOwner PayerType = "BucketOwner"
)

// RestoreMode the restore mode for coldArchive object
type RestoreMode string

const (
	// RestoreExpedited object will be restored in 1 hour
	RestoreExpedited RestoreMode = "Expedited"
	// RestoreStandard object will be restored in 2-5 hours
	RestoreStandard RestoreMode = "Standard"
	// RestoreBulk object will be restored in 5-10 hours
	RestoreBulk RestoreMode = "Bulk"
)

// HTTPMethod HTTP request method
type HTTPMethod string

const (
	// HTTPGet HTTP GET
	HTTPGet HTTPMethod = "GET"
	// HTTPPut HTTP PUT
	HTTPPut HTTPMethod = "PUT"
	// HTTPHead HTTP HEAD
	HTTPHead HTTPMethod = "HEAD"
	// HTTPPost HTTP POST
	HTTPPost HTTPMethod = "POST"
	// HTTPDelete HTTP DELETE
	HTTPDelete HTTPMethod = "DELETE"
)
// HTTP headers
const (
HTTPHeaderAcceptEncoding string = "Accept-Encoding"
HTTPHeaderAuthorization = "Authorization"
HTTPHeaderCacheControl = "Cache-Control"
HTTPHeaderContentDisposition = "Content-Disposition"
HTTPHeaderContentEncoding = "Content-Encoding"
HTTPHeaderContentLength = "Content-Length"
HTTPHeaderContentMD5 = "Content-MD5"
HTTPHeaderContentType = "Content-Type"
HTTPHeaderContentLanguage = "Content-Language"
HTTPHeaderDate = "Date"
HTTPHeaderEtag = "ETag"
HTTPHeaderExpires = "Expires"
HTTPHeaderHost = "Host"
HTTPHeaderLastModified = "Last-Modified"
HTTPHeaderRange = "Range"
HTTPHeaderLocation = "Location"
HTTPHeaderOrigin = "Origin"
HTTPHeaderServer = "Server"
HTTPHeaderUserAgent = "User-Agent"
HTTPHeaderIfModifiedSince = "If-Modified-Since"
HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since"
HTTPHeaderIfMatch = "If-Match"
HTTPHeaderIfNoneMatch = "If-None-Match"
HTTPHeaderACReqMethod = "Access-Control-Request-Method"
HTTPHeaderACReqHeaders = "Access-Control-Request-Headers"
HTTPHeaderOssACL = "X-Oss-Acl"
HTTPHeaderOssMetaPrefix = "X-Oss-Meta-"
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl"
HTTPHeaderOssSecurityToken = "X-Oss-Security-Token"
HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption"
HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id"
HTTPHeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption"
HTTPHeaderSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm"
HTTPHeaderSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key"
HTTPHeaderSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5"
HTTPHeaderOssCopySource = "X-Oss-Copy-Source"
HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range"
HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match"
HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match"
HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since"
HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive"
HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position"
HTTPHeaderOssRequestID = "X-Oss-Request-Id"
HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma"
HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target"
HTTPHeaderOssStorageClass = "X-Oss-Storage-Class"
HTTPHeaderOssCallback = "X-Oss-Callback"
HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var"
HTTPHeaderOssRequester = "X-Oss-Request-Payer"
HTTPHeaderOssTagging = "X-Oss-Tagging"
HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive"
HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit"
HTTPHeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite"
HTTPHeaderOssRangeBehavior = "X-Oss-Range-Behavior"
HTTPHeaderOssTaskID = "X-Oss-Task-Id"
HTTPHeaderOssHashCtx = "X-Oss-Hash-Ctx"
HTTPHeaderOssMd5Ctx = "X-Oss-Md5-Ctx"
HTTPHeaderAllowSameActionOverLap = "X-Oss-Allow-Same-Action-Overlap"
HttpHeaderOssDate = "X-Oss-Date"
HttpHeaderOssContentSha256 = "X-Oss-Content-Sha256"
HttpHeaderOssNotification = "X-Oss-Notification"
HTTPHeaderOssEc = "X-Oss-Ec"
HTTPHeaderOssErr = "X-Oss-Err"
)
// HTTP Param
//
// Query-string parameter names used when building and signing OSS request URLs.
const (
// V1 signed-URL parameters.
HTTPParamExpires = "Expires"
HTTPParamAccessKeyID = "OSSAccessKeyId"
HTTPParamSignature = "Signature"
HTTPParamSecurityToken = "security-token"
// RTMP playlist name parameter (see SignRtmpURL).
HTTPParamPlaylistName = "playlistName"
// V2/V4 signed-URL parameters (lower-case, x-oss- prefixed).
HTTPParamSignatureVersion = "x-oss-signature-version"
HTTPParamExpiresV2 = "x-oss-expires"
HTTPParamAccessKeyIDV2 = "x-oss-access-key-id"
HTTPParamSignatureV2 = "x-oss-signature"
HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers"
// V4-specific parameters.
HTTPParamCredential = "x-oss-credential"
HTTPParamDate = "x-oss-date"
HTTPParamOssSecurityToken = "x-oss-security-token"
)
// Other constants
//
// Upload/download limits, temp/checkpoint file naming, and SDK metadata.
const (
MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
MinPartSize = 100 * 1024 // Min part size, 100KB
FilePermMode = os.FileMode(0664) // Default file permission
TempFilePrefix = "oss-go-temp-" // Temp file prefix
TempFileSuffix = ".temp" // Temp file suffix
CheckpointFileSuffix = ".cp" // Checkpoint file suffix
NullVersion = "null" // NOTE(review): presumably the version id of objects in unversioned buckets — confirm against API docs
DefaultContentSha256 = "UNSIGNED-PAYLOAD" // for v4 signature
Version = "v3.0.2" // Go SDK version
)
// FrameType
//
// Frame-type identifiers. NOTE(review): usage is not visible in this view;
// presumably these tag frames of the SelectObject streaming response —
// confirm against the select-object implementation.
const (
DataFrameType = 8388609
ContinuousFrameType = 8388612
EndFrameType = 8388613
MetaEndFrameCSVType = 8388614
MetaEndFrameJSONType = 8388615
)
// AuthVersion the version of auth
//
// AuthVersionType selects which request-signing algorithm the client uses.
type AuthVersionType string
const (
// AuthV1 v1
AuthV1 AuthVersionType = "v1"
// AuthV2 v2
AuthV2 AuthVersionType = "v2"
// AuthV4 v4
AuthV4 AuthVersionType = "v4"
)

View File

@@ -1,123 +0,0 @@
package oss
import (
"hash"
"hash/crc64"
)
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint64
tab *crc64.Table
}
// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
// using the polynomial represented by the Table.
func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
// Size returns the number of bytes sum will return.
func (d *digest) Size() int { return crc64.Size }
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int { return 1 }
// Reset resets the hash to its initial state.
func (d *digest) Reset() { d.crc = 0 }
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (d *digest) Write(p []byte) (n int, err error) {
d.crc = crc64.Update(d.crc, d.tab, p)
return len(p), nil
}
// Sum64 returns CRC64 value.
func (d *digest) Sum64() uint64 { return d.crc }
// Sum returns hash value.
func (d *digest) Sum(in []byte) []byte {
s := d.Sum64()
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// gf2Dim is the dimension of the GF(2) vectors, i.e. the CRC width in bits.
const gf2Dim int = 64

// gf2MatrixTimes multiplies the GF(2) matrix mat by the vector vec:
// each set bit i of vec XORs row mat[i] into the result.
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
	var product uint64
	row := 0
	for bits := vec; bits != 0; bits >>= 1 {
		if bits&1 == 1 {
			product ^= mat[row]
		}
		row++
	}
	return product
}

// gf2MatrixSquare stores the square of the GF(2) matrix mat into square.
func gf2MatrixSquare(square []uint64, mat []uint64) {
	for i := 0; i < gf2Dim; i++ {
		square[i] = gf2MatrixTimes(mat, mat[i])
	}
}
// CRC64Combine combines CRC64 values: given crc1 over a first block and
// crc2 over a second block of len2 bytes that immediately follows it,
// it returns the CRC64 of the concatenation. This is the zlib
// crc32_combine construction ported to 64 bits: appending len2 zero
// bytes to the data behind crc1 is a linear operator over GF(2), built
// by repeated squaring of the single-zero-bit operator, then applied
// for each set bit of len2.
func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
var even [gf2Dim]uint64 // Even-power-of-two zeros operator
var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator
// Degenerate case
if len2 == 0 {
return crc1
}
// Put operator for one zero bit in odd
odd[0] = crc64.ECMA // CRC64 polynomial
var row uint64 = 1
for n := 1; n < gf2Dim; n++ {
odd[n] = row
row <<= 1
}
// Put operator for two zero bits in even
gf2MatrixSquare(even[:], odd[:])
// Put operator for four zero bits in odd
gf2MatrixSquare(odd[:], even[:])
// Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even
for {
// Apply zeros operator for this bit of len2
gf2MatrixSquare(even[:], odd[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(even[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
// Another iteration of the loop with odd and even swapped
gf2MatrixSquare(odd[:], even[:])
if len2&1 != 0 {
crc1 = gf2MatrixTimes(odd[:], crc1)
}
len2 >>= 1
// If no more bits set, then done
if len2 == 0 {
break
}
}
// Return combined CRC
crc1 ^= crc2
return crc1
}

View File

@@ -1,567 +0,0 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"hash"
"hash/crc64"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
)
// DownloadFile downloads an object to a local file with multipart download.
//
// objectKey  the object key.
// filePath   the local file to download objectKey into.
// partSize   the part size in bytes.
// options    object's constraints; check out GetObject for the reference.
//
// error  nil when the call succeeds, otherwise an error object.
func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
	if partSize < 1 {
		return errors.New("oss: part size smaller than 1")
	}
	uRange, err := GetRangeConfig(options)
	if err != nil {
		return err
	}
	cpConf := getCpConfig(options)
	routines := getRoutines(options)
	strVersionId := ""
	if versionId, _ := FindOption(options, "versionId", nil); versionId != nil {
		strVersionId = versionId.(string)
	}
	// Prefer the checkpoint-based path when checkpointing is enabled and
	// a checkpoint file location can be resolved.
	if cpConf != nil && cpConf.IsEnable {
		if cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath); cpFilePath != "" {
			return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
		}
	}
	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
}
// getDownloadCpFilePath resolves the checkpoint file path for a download.
// When no explicit FilePath is configured but a DirPath is, it derives a
// checkpoint file name from the source object and destination file and
// caches it back into cpConf.FilePath.
func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
	if cpConf.FilePath != "" || cpConf.DirPath == "" {
		return cpConf.FilePath
	}
	absPath, _ := filepath.Abs(destFile)
	src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
	cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + getCpFileName(src, absPath, versionId)
	return cpConf.FilePath
}
// downloadWorkerArg is download worker's parameters
type downloadWorkerArg struct {
bucket *Bucket // bucket the object is downloaded from
key string // object key to download
filePath string // local (temp) file the parts are written into
options []Option // request options forwarded to GetObject
hook downloadPartHook // per-part hook; tests swap it to inject failures
enableCRC bool // whether each part's CRC64 is computed while copying
}
// downloadPartHook is hook for test
type downloadPartHook func(part downloadPart) error
// downloadPartHooker is the active hook invoked before each part download.
var downloadPartHooker downloadPartHook = defaultDownloadPartHook
// defaultDownloadPartHook is the production no-op hook.
func defaultDownloadPartHook(part downloadPart) error {
return nil
}
// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject.
type defaultDownloadProgressListener struct {
}
// ProgressChanged no-ops: per-part GetObject progress is suppressed so that
// only the aggregate download progress is reported to the caller's listener.
func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
}
// downloadWorker pulls parts from jobs, fetches each one with a ranged
// GetObject, and writes it into arg.filePath at the part's offset.
// Finished parts are sent to results, the first error to failed, and a
// closed die channel aborts the worker.
//
// Fixes over the previous revision:
//   - opts was created with make(..., len(arg.options)+3) and then
//     appended to, leaving len(arg.options)+3 nil Options at the front;
//     it is now allocated with zero length and the right capacity.
//   - rd was closed via defer inside the loop (twice), so response
//     bodies accumulated until the worker exited; each body is now
//     closed at the end of its own iteration and on every error path.
//   - the deprecated os.SEEK_SET is replaced by io.SeekStart.
func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
	for part := range jobs {
		if err := arg.hook(part); err != nil {
			failed <- err
			break
		}
		// Resolve options. Append orderly, can not be reversed!
		r := Range(part.Start, part.End)
		p := Progress(&defaultDownloadProgressListener{})
		var respHeader http.Header
		opts := make([]Option, 0, len(arg.options)+3)
		opts = append(opts, arg.options...)
		opts = append(opts, r, p, GetResponseHeader(&respHeader))
		rd, err := arg.bucket.GetObject(arg.key, opts...)
		if err != nil {
			failed <- err
			break
		}
		// When CRC checking is on, tee the body through a CRC64 hasher.
		var crcCalc hash.Hash64
		body := io.Reader(rd)
		if arg.enableCRC {
			crcCalc = crc64.New(CrcTable())
			contentLen := part.End - part.Start + 1
			body = TeeReader(rd, crcCalc, contentLen, nil, nil)
		}
		select {
		case <-die:
			rd.Close()
			return
		default:
		}
		fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
		if err != nil {
			rd.Close()
			failed <- err
			break
		}
		// Seek to the part's slot in the (sparse) destination file.
		_, err = fd.Seek(part.Start-part.Offset, io.SeekStart)
		if err != nil {
			fd.Close()
			rd.Close()
			failed <- err
			break
		}
		startT := time.Now().UnixNano() / 1000 / 1000 / 1000
		_, err = io.Copy(fd, body)
		endT := time.Now().UnixNano() / 1000 / 1000 / 1000
		rd.Close()
		if err != nil {
			arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error())
			fd.Close()
			failed <- err
			break
		}
		if arg.enableCRC {
			part.CRC64 = crcCalc.Sum64()
		}
		fd.Close()
		results <- part
	}
}
// downloadScheduler queues every part onto jobs and closes the channel
// once all parts have been handed out.
func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
	for i := range parts {
		jobs <- parts[i]
	}
	close(jobs)
}

// downloadPart describes one ranged slice of the object being downloaded.
type downloadPart struct {
	Index  int    // Part number, starting from 0
	Start  int64  // Start index
	End    int64  // End index
	Offset int64  // Offset
	CRC64  uint64 // CRC check value of part
}
// getDownloadParts splits the requested byte range of the object into
// fixed-size download parts; Offset records the range start so workers
// can map object positions to file positions.
func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart {
	start, end := AdjustRange(uRange, objectSize)
	parts := []downloadPart{}
	index := 0
	for offset := start; offset < end; offset += partSize {
		parts = append(parts, downloadPart{
			Index:  index,
			Start:  offset,
			End:    GetPartEnd(offset, end, partSize),
			Offset: start,
			CRC64:  0,
		})
		index++
	}
	return parts
}
// getObjectBytes sums the number of bytes covered by the given parts.
func getObjectBytes(parts []downloadPart) int64 {
	var total int64
	for i := range parts {
		total += parts[i].End - parts[i].Start + 1
	}
	return total
}
// combineCRCInParts calculates the total CRC of continuous parts by
// folding each part's CRC64 into the running value with CRC64Combine,
// weighted by that part's byte length.
func combineCRCInParts(dps []downloadPart) uint64 {
	// len() of a nil slice is 0, so the former explicit nil check was
	// redundant (staticcheck S1009).
	if len(dps) == 0 {
		return 0
	}
	crc := dps[0].CRC64
	for i := 1; i < len(dps); i++ {
		crc = CRC64Combine(crc, dps[i].CRC64, uint64(dps[i].End-dps[i].Start+1))
	}
	return crc
}
// downloadFile downloads file concurrently without checkpoint.
//
// Parts are written into filePath+TempFileSuffix by `routines` workers and
// the temp file is renamed into place on success. When CRC checking is
// enabled (whole-object download only), the combined per-part CRCs are
// compared against the object's x-oss-hash-crc64ecma header.
func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := GetProgressListener(options)
// If the file does not exist, create one. If exists, the download will overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
// Get the object detailed meta for object whole size
// must delete header:range to get whole object size
skipOptions := DeleteOption(options, HTTPHeaderRange)
meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
if err != nil {
return err
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return err
}
// CRC verification applies only to whole-object downloads of objects
// that carry a server-side CRC64 header.
enableCRC := false
expectedCRC := (uint64)(0)
if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
enableCRC = true
expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
}
}
// Get the parts of the file
parts := getDownloadParts(objectSize, partSize, uRange)
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
failed := make(chan error)
die := make(chan bool) // closed to tell workers to abort on first failure
var completedBytes int64
totalBytes := getObjectBytes(parts)
event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
publishProgress(listener, event)
// Start the download workers
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Download parts concurrently
go downloadScheduler(jobs, parts)
// Waiting for parts download finished
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
downBytes := (part.End - part.Start + 1)
completedBytes += downBytes
parts[part.Index].CRC64 = part.CRC64
event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
publishProgress(listener, event)
case err := <-failed:
// First failure wins: signal workers to stop and report it.
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
return err
}
// Redundant with the loop condition; kept as-is.
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
publishProgress(listener, event)
if enableCRC {
actualCRC := combineCRCInParts(parts)
err = CheckDownloadCRC(actualCRC, expectedCRC)
if err != nil {
return err
}
}
return os.Rename(tempFilePath, filePath)
}
// ----- Concurrent download with chcekpoint -----
// downloadCpMagic identifies a valid download checkpoint file.
const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
// downloadCheckpoint is the JSON-serialized state of a resumable download;
// its MD5 field guards the file against corruption (see isValid/dump).
type downloadCheckpoint struct {
Magic string // Magic
MD5 string // Checkpoint content MD5
FilePath string // Local file
Object string // Key
ObjStat objectStat // Object status
Parts []downloadPart // All download parts
PartStat []bool // Parts' download status
Start int64 // Start point of the file
End int64 // End point of the file
enableCRC bool // Whether has CRC check
CRC uint64 // CRC check value
}
// objectStat snapshots the remote object's identity so a checkpoint can
// detect whether the object changed between runs.
type objectStat struct {
Size int64 // Object size
LastModified string // Last modified time
Etag string // Etag
}
// isValid flags of checkpoint data is valid. It returns true when the data is valid and the checkpoint is valid and the object is not updated.
//
// Validity requires all of: the magic marker matches and the stored MD5
// equals the MD5 of the checkpoint JSON with its MD5 field blanked; the
// object's size, last-modified time and etag are unchanged; and, when a
// range is requested, it still matches the checkpointed Start/End.
func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
// Compare the CP's Magic and the MD5
cpb := cp
cpb.MD5 = ""
js, _ := json.Marshal(cpb)
sum := md5.Sum(js)
b64 := base64.StdEncoding.EncodeToString(sum[:])
if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
return false, nil
}
objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
if err != nil {
return false, err
}
// Compare the object size, last modified time and etag
if cp.ObjStat.Size != objectSize ||
cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
return false, nil
}
// Check the download range
if uRange != nil {
start, end := AdjustRange(uRange, objectSize)
if start != cp.Start || end != cp.End {
return false, nil
}
}
return true, nil
}
// load reads checkpoint state from filePath into cp.
func (cp *downloadCheckpoint) load(filePath string) error {
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}
	return json.Unmarshal(contents, cp)
}
// dump serializes the checkpoint to filePath, stamping the payload with
// an MD5 of the checkpoint content computed with the MD5 field blanked
// (the same scheme isValid verifies on load).
func (cp *downloadCheckpoint) dump(filePath string) error {
	snapshot := *cp
	// Calculate the content MD5 over the JSON with MD5 cleared.
	snapshot.MD5 = ""
	js, err := json.Marshal(snapshot)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	snapshot.MD5 = base64.StdEncoding.EncodeToString(sum[:])
	// Serialize again, now including the MD5 stamp.
	if js, err = json.Marshal(snapshot); err != nil {
		return err
	}
	return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts returns the parts whose download has not completed yet.
func (cp downloadCheckpoint) todoParts() []downloadPart {
	remaining := []downloadPart{}
	for i, done := range cp.PartStat {
		if !done {
			remaining = append(remaining, cp.Parts[i])
		}
	}
	return remaining
}
// getCompletedBytes sums the sizes of all parts already downloaded.
func (cp downloadCheckpoint) getCompletedBytes() int64 {
	var done int64
	for i, part := range cp.Parts {
		if cp.PartStat[i] {
			done += part.End - part.Start + 1
		}
	}
	return done
}
// prepare initializes the checkpoint for a fresh download: it records the
// object's size, last-modified time and etag, decides whether CRC checking
// applies (whole-object downloads with a server CRC64 header only), and
// builds the part list with an all-false completion bitmap.
//
// meta must be the whole-object response headers (fetched without Range).
func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
	// CP
	cp.Magic = downloadCpMagic
	cp.FilePath = filePath
	cp.Object = objectKey
	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return err
	}
	cp.ObjStat.Size = objectSize
	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
			cp.enableCRC = true
			cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
		}
	}
	// Parts. make() already zero-initializes every element to false, so the
	// explicit reset loop in the previous revision was redundant.
	cp.Parts = getDownloadParts(objectSize, partSize, uRange)
	cp.PartStat = make([]bool, len(cp.Parts))
	return nil
}
// complete finishes a checkpointed download: the temp file is moved into
// its final location, then the checkpoint file is removed.
func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
	if err := os.Rename(downFilepath, cp.FilePath); err != nil {
		return err
	}
	return os.Remove(cpFilePath)
}
// downloadFileWithCp downloads files with checkpoint.
//
// It resumes from cpFilePath when the stored checkpoint still matches the
// remote object (see downloadCheckpoint.isValid); otherwise it re-prepares
// from scratch. Progress is persisted to the checkpoint file after every
// completed part so an interrupted run can resume.
func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error {
tempFilePath := filePath + TempFileSuffix
listener := GetProgressListener(options)
// Load checkpoint data.
dcp := downloadCheckpoint{}
err := dcp.load(cpFilePath)
if err != nil {
// Unreadable/corrupt checkpoint: discard it and start over.
os.Remove(cpFilePath)
}
// Get the object detailed meta for object whole size
// must delete header:range to get whole object size
skipOptions := DeleteOption(options, HTTPHeaderRange)
meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...)
if err != nil {
return err
}
// Load error or data invalid. Re-initialize the download.
valid, err := dcp.isValid(meta, uRange)
if err != nil || !valid {
if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
return err
}
os.Remove(cpFilePath)
}
// Create the file if not exists. Otherwise the parts download will overwrite it.
fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
if err != nil {
return err
}
fd.Close()
// Unfinished parts
parts := dcp.todoParts()
jobs := make(chan downloadPart, len(parts))
results := make(chan downloadPart, len(parts))
failed := make(chan error)
die := make(chan bool) // closed to tell workers to abort on first failure
completedBytes := dcp.getCompletedBytes()
event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
// Start the download workers routine
arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
for w := 1; w <= routines; w++ {
go downloadWorker(w, arg, jobs, results, failed, die)
}
// Concurrently downloads parts
go downloadScheduler(jobs, parts)
// Wait for the parts download finished
completed := 0
for completed < len(parts) {
select {
case part := <-results:
completed++
dcp.PartStat[part.Index] = true
dcp.Parts[part.Index].CRC64 = part.CRC64
// Persist progress after each part; best-effort (error ignored).
dcp.dump(cpFilePath)
downBytes := (part.End - part.Start + 1)
completedBytes += downBytes
event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
publishProgress(listener, event)
case err := <-failed:
close(die)
event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
return err
}
// Redundant with the loop condition; kept as-is.
if completed >= len(parts) {
break
}
}
event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
publishProgress(listener, event)
if dcp.enableCRC {
actualCRC := combineCRCInParts(dcp.Parts)
err = CheckDownloadCRC(actualCRC, dcp.CRC)
if err != nil {
return err
}
}
return dcp.complete(cpFilePath, tempFilePath)
}

View File

@@ -1,136 +0,0 @@
package oss
import (
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
)
// ServiceError contains fields of the error response from Oss Service REST API.
type ServiceError struct {
	XMLName    xml.Name `xml:"Error"`
	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
	Message    string   `xml:"Message"`   // The detail error message from OSS
	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
	Endpoint   string   `xml:"Endpoint"`
	Ec         string   `xml:"EC"`
	RawMessage string // The raw messages from OSS
	StatusCode int    // HTTP status code
}

// Error implements interface error
func (e ServiceError) Error() string {
	var b strings.Builder
	fmt.Fprintf(&b, "oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
	if e.Endpoint != "" {
		fmt.Fprintf(&b, ", Endpoint=%s", e.Endpoint)
	}
	if e.Ec != "" {
		fmt.Fprintf(&b, ", Ec=%s", e.Ec)
	}
	return b.String()
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // The expected HTTP status codes returned from OSS
	got     int   // The actual HTTP status code from OSS
}

// Error implements interface error
func (e UnexpectedStatusCodeError) Error() string {
	describe := func(code int) string {
		return fmt.Sprintf("%d %s", code, http.StatusText(code))
	}
	expected := make([]string, 0, len(e.allowed))
	for _, code := range e.allowed {
		expected = append(expected, describe(code))
	}
	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
		describe(e.got), strings.Join(expected, " or "))
}

// Got is the actual status code returned by oss.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}

// CheckRespCode returns UnexpectedStatusCodeError if the given response code is not
// one of the allowed status codes; otherwise nil.
func CheckRespCode(respCode int, allowed []int) error {
	for _, code := range allowed {
		if code == respCode {
			return nil
		}
	}
	return UnexpectedStatusCodeError{allowed, respCode}
}
// CheckCallbackResp return error if the given response code is not 200
//
// It inspects the callback response body: when the body parses as an OSS
// error XML, that ServiceError is returned; when a body is present but
// unparseable, a generic "unknown response body" error (including the
// request id and, if present, the EC header) is returned; an empty body
// yields nil.
func CheckCallbackResp(resp *Response) error {
var err error
contentLengthStr := resp.Headers.Get("Content-Length")
contentLength, _ := strconv.Atoi(contentLengthStr)
var bodyBytes []byte
if contentLength > 0 {
bodyBytes, _ = ioutil.ReadAll(resp.Body)
}
if len(bodyBytes) > 0 {
srvErr, errIn := serviceErrFromXML(bodyBytes, resp.StatusCode,
resp.Headers.Get(HTTPHeaderOssRequestID))
if errIn != nil {
// Body did not parse as an OSS error document.
if len(resp.Headers.Get(HTTPHeaderOssEc)) > 0 {
err = fmt.Errorf("unknown response body, status code = %d, RequestId = %s, ec = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID), resp.Headers.Get(HTTPHeaderOssEc))
} else {
err = fmt.Errorf("unknown response body, status code= %d, RequestId = %s", resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
}
} else {
err = srvErr
}
}
return err
}
// tryConvertServiceError attempts to parse data as an OSS error XML; on
// success the parsed ServiceError replaces def, otherwise def is returned
// unchanged.
func tryConvertServiceError(data []byte, resp *Response, def error) error {
	if len(data) == 0 {
		return def
	}
	srvErr, errIn := serviceErrFromXML(data, resp.StatusCode, resp.Headers.Get(HTTPHeaderOssRequestID))
	if errIn != nil {
		return def
	}
	return srvErr
}
// CRCCheckError is returned when crc check is inconsistent between client and server
type CRCCheckError struct {
	clientCRC uint64 // Calculated CRC64 in client
	serverCRC uint64 // Calculated CRC64 in server
	operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
	requestID string // The request id of this operation
}

// Error implements interface error
func (e CRCCheckError) Error() string {
	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
		e.operation, e.clientCRC, e.serverCRC, e.requestID)
}

// CheckDownloadCRC verifies that the client-computed CRC of the downloaded
// data matches the server-reported value.
func CheckDownloadCRC(clientCRC, serverCRC uint64) error {
	if clientCRC != serverCRC {
		return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
	}
	return nil
}
// CheckCRC validates a response's CRC64: it passes when the server sent no
// CRC header or when client and server values agree; otherwise it returns
// a CRCCheckError tagged with the operation and request id.
func CheckCRC(resp *Response, operation string) error {
	serverHeader := resp.Headers.Get(HTTPHeaderOssCRC64)
	if serverHeader == "" || resp.ClientCRC == resp.ServerCRC {
		return nil
	}
	return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
}

View File

@@ -1,29 +0,0 @@
//go:build !go1.7
// +build !go1.7
// "golang.org/x/time/rate" is depended on golang context package go1.7 onward
// this file is only for build,not supports limit upload speed
package oss
import (
"fmt"
"io"
)
const (
	// perTokenBandwidthSize is the byte count one limiter token stands for (1 KB).
	perTokenBandwidthSize int = 1024
)

// OssLimiter is a build stub; real rate limiting needs go1.7+.
type OssLimiter struct {
}

// LimitSpeedReader is a build stub; real rate limiting needs go1.7+.
type LimitSpeedReader struct {
	io.ReadCloser
	reader     io.Reader
	ossLimiter *OssLimiter
}

// GetOssLimiter always fails below go1.7, where golang.org/x/time/rate
// (which depends on the context package) is unavailable.
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
	return nil, fmt.Errorf("rate.Limiter is not supported below version go1.7")
}

View File

@@ -1,91 +0,0 @@
//go:build go1.7
// +build go1.7
package oss
import (
"fmt"
"io"
"math"
"time"
"golang.org/x/time/rate"
)
const (
// perTokenBandwidthSize is the number of bytes one limiter token represents (1 KB).
perTokenBandwidthSize int = 1024
)
// OssLimiter wrapper rate.Limiter
//
// It throttles upload bandwidth; one token corresponds to
// perTokenBandwidthSize bytes.
type OssLimiter struct {
limiter *rate.Limiter
}
// GetOssLimiter create OssLimiter
// uploadSpeed KB/s
//
// The limiter is created with rate and burst both equal to uploadSpeed
// (tokens per second, where a token is perTokenBandwidthSize bytes).
func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
// first consume the initial full token,the limiter will behave more accurately
limiter.AllowN(time.Now(), uploadSpeed)
return &OssLimiter{
limiter: limiter,
}, nil
}
// LimitSpeedReader for limit bandwidth upload
type LimitSpeedReader struct {
io.ReadCloser
reader io.Reader // underlying data source
ossLimiter *OssLimiter // token bucket throttling the reads
}
// Read fills p from the underlying reader in chunks of at most
// burst*perTokenBandwidthSize bytes, reserving one limiter token per
// perTokenBandwidthSize bytes actually read and sleeping for the delay
// the reservation demands. Returns the total bytes read and the first
// error from the underlying reader.
func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
n = 0
err = nil
start := 0
burst := r.ossLimiter.limiter.Burst()
var end int
var tmpN int
var tc int
for start < len(p) {
// Never ask the limiter for more than its burst worth of bytes.
if start+burst*perTokenBandwidthSize < len(p) {
end = start + burst*perTokenBandwidthSize
} else {
end = len(p)
}
tmpN, err = r.reader.Read(p[start:end])
if tmpN > 0 {
n += tmpN
start = n
}
if err != nil {
return
}
// Tokens consumed by this chunk, rounded up to whole tokens.
tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize)))
now := time.Now()
re := r.ossLimiter.limiter.ReserveN(now, tc)
if !re.OK() {
err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
start, end, burst, perTokenBandwidthSize)
return
}
// Honor the reservation: wait until the tokens would be available.
timeDelay := re.Delay()
time.Sleep(timeDelay)
}
return
}
// Close closes the wrapped reader when it implements io.ReadCloser;
// otherwise it is a no-op.
func (r *LimitSpeedReader) Close() error {
	if rc, ok := r.reader.(io.ReadCloser); ok {
		return rc.Close()
	}
	return nil
}

View File

@@ -1,257 +0,0 @@
package oss
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"strconv"
"time"
)
// CreateLiveChannel create a live-channel
//
// channelName  the name of the channel
// config       configuration of the channel
//
// CreateLiveChannelResult  the result of create live-channel
// error  nil if success, otherwise error
func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) {
	var result CreateLiveChannelResult
	bs, err := xml.Marshal(config)
	if err != nil {
		return result, err
	}
	body := bytes.NewBuffer(bs)
	params := map[string]interface{}{
		"live": nil,
	}
	resp, err := bucket.do("PUT", channelName, params, nil, body, nil)
	if err != nil {
		return result, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &result)
	return result, err
}
// PutLiveChannelStatus Set the status of the live-channel: enabled/disabled
//
// channelName  the name of the channel
// status       enabled/disabled
//
// error  nil if success, otherwise error
func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
	params := map[string]interface{}{
		"live":   nil,
		"status": status,
	}
	resp, err := bucket.do("PUT", channelName, params, nil, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime
//
// channelName   the name of the channel
// playlistName  the name of the playlist, must end with ".m3u8"
// startTime     the start time of the playlist
// endTime       the end time of the playlist
//
// error  nil if success, otherwise error
func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error {
	params := map[string]interface{}{
		"vod":       nil,
		"startTime": strconv.FormatInt(startTime.Unix(), 10),
		"endTime":   strconv.FormatInt(endTime.Unix(), 10),
	}
	key := fmt.Sprintf("%s/%s", channelName, playlistName)
	resp, err := bucket.do("POST", key, params, nil, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return CheckRespCode(resp.StatusCode, []int{http.StatusOK})
}
// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime
//
// channelName  the name of the channel
// startTime    the start time of the playlist
// endTime      the end time of the playlist
//
// io.ReadCloser  reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil.
// error  nil if success, otherwise error
func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
	params := map[string]interface{}{
		"vod":       nil,
		"startTime": strconv.FormatInt(startTime.Unix(), 10),
		"endTime":   strconv.FormatInt(endTime.Unix(), 10),
	}
	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	// The caller owns resp.Body and must close it.
	return resp.Body, nil
}
// GetLiveChannelStat Get the state of the live-channel
//
// channelName  the name of the channel
//
// LiveChannelStat  the state of the live-channel
// error  nil if success, otherwise error
func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
	var stat LiveChannelStat
	params := map[string]interface{}{
		"live": nil,
		"comp": "stat",
	}
	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return stat, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &stat)
	return stat, err
}
// GetLiveChannelInfo Get the configuration info of the live-channel
//
// channelName  the name of the channel
//
// LiveChannelConfiguration  the configuration info of the live-channel
// error  nil if success, otherwise error
func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
	var info LiveChannelConfiguration
	params := map[string]interface{}{
		"live": nil,
	}
	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return info, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &info)
	return info, err
}
// GetLiveChannelHistory Get push records of live-channel
//
// channelName  the name of the channel
//
// LiveChannelHistory  push records
// error  nil if success, otherwise error
func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
	var history LiveChannelHistory
	params := map[string]interface{}{
		"live": nil,
		"comp": "history",
	}
	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
	if err != nil {
		return history, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &history)
	return history, err
}
// ListLiveChannel list the live-channels
//
// options  Prefix: filter by the name start with the value of "Prefix"
//          MaxKeys: the maximum count returned
//          Marker: cursor from which starting list
//
// ListLiveChannelResult  live-channel list
// error  nil if success, otherwise error
func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
	var result ListLiveChannelResult
	params, err := GetRawParams(options)
	if err != nil {
		return result, err
	}
	params["live"] = nil
	resp, err := bucket.doInner("GET", "", params, nil, nil, nil)
	if err != nil {
		return result, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &result)
	return result, err
}
// DeleteLiveChannel Delete the live-channel. When a client trying to stream the live-channel, the operation will fail. it will only delete the live-channel itself and the object generated by the live-channel will not be deleted.
//
// channelName  the name of the channel
//
// error  nil if success, otherwise error
func (bucket Bucket) DeleteLiveChannel(channelName string) error {
	// Validate the argument up front, before doing any other work.
	if channelName == "" {
		return fmt.Errorf("invalid argument: channel name is empty")
	}
	params := map[string]interface{}{
		"live": nil,
	}
	resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// SignRtmpURL Generate a RTMP push-stream signature URL for the trusted user to push the RTMP stream to the live-channel.
//
// channelName   the name of the channel
// playlistName  the name of the playlist, must end with ".m3u8"
// expires       expiration (in seconds)
//
// string  signed rtmp push stream url
// error  nil if success, otherwise error
func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
	if expires <= 0 {
		return "", fmt.Errorf("invalid argument: %d, expires must greater than 0", expires)
	}
	// Absolute expiration as a Unix timestamp.
	deadline := time.Now().Unix() + expires
	return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, deadline), nil
}

View File

@@ -1,594 +0,0 @@
package oss
import (
"mime"
"path"
"strings"
)
// extToMimeType maps a lower-case file extension (including the leading dot)
// to its MIME type. It is the fallback table used by TypeByExtension when the
// platform's mime database has no entry for the extension.
// Fix: the ".pot" key previously contained a stray trailing comma (".pot,"),
// so PowerPoint template files never matched.
var extToMimeType = map[string]string{
	".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
	".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
	".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
	".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
	".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
	".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
	".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
	".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
	".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
	".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
	".apk": "application/vnd.android.package-archive",
	".hqx": "application/mac-binhex40",
	".cpt": "application/mac-compactpro",
	".doc": "application/msword",
	".ogg": "application/ogg",
	".pdf": "application/pdf",
	".rtf": "text/rtf",
	".mif": "application/vnd.mif",
	".xls": "application/vnd.ms-excel",
	".ppt": "application/vnd.ms-powerpoint",
	".odc": "application/vnd.oasis.opendocument.chart",
	".odb": "application/vnd.oasis.opendocument.database",
	".odf": "application/vnd.oasis.opendocument.formula",
	".odg": "application/vnd.oasis.opendocument.graphics",
	".otg": "application/vnd.oasis.opendocument.graphics-template",
	".odi": "application/vnd.oasis.opendocument.image",
	".odp": "application/vnd.oasis.opendocument.presentation",
	".otp": "application/vnd.oasis.opendocument.presentation-template",
	".ods": "application/vnd.oasis.opendocument.spreadsheet",
	".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
	".odt": "application/vnd.oasis.opendocument.text",
	".odm": "application/vnd.oasis.opendocument.text-master",
	".ott": "application/vnd.oasis.opendocument.text-template",
	".oth": "application/vnd.oasis.opendocument.text-web",
	".sxw": "application/vnd.sun.xml.writer",
	".stw": "application/vnd.sun.xml.writer.template",
	".sxc": "application/vnd.sun.xml.calc",
	".stc": "application/vnd.sun.xml.calc.template",
	".sxd": "application/vnd.sun.xml.draw",
	".std": "application/vnd.sun.xml.draw.template",
	".sxi": "application/vnd.sun.xml.impress",
	".sti": "application/vnd.sun.xml.impress.template",
	".sxg": "application/vnd.sun.xml.writer.global",
	".sxm": "application/vnd.sun.xml.math",
	".sis": "application/vnd.symbian.install",
	".wbxml": "application/vnd.wap.wbxml",
	".wmlc": "application/vnd.wap.wmlc",
	".wmlsc": "application/vnd.wap.wmlscriptc",
	".bcpio": "application/x-bcpio",
	".torrent": "application/x-bittorrent",
	".bz2": "application/x-bzip2",
	".vcd": "application/x-cdlink",
	".pgn": "application/x-chess-pgn",
	".cpio": "application/x-cpio",
	".csh": "application/x-csh",
	".dvi": "application/x-dvi",
	".spl": "application/x-futuresplash",
	".gtar": "application/x-gtar",
	".hdf": "application/x-hdf",
	".jar": "application/x-java-archive",
	".jnlp": "application/x-java-jnlp-file",
	".js": "application/x-javascript",
	".ksp": "application/x-kspread",
	".chrt": "application/x-kchart",
	".kil": "application/x-killustrator",
	".latex": "application/x-latex",
	".rpm": "application/x-rpm",
	".sh": "application/x-sh",
	".shar": "application/x-shar",
	".swf": "application/x-shockwave-flash",
	".sit": "application/x-stuffit",
	".sv4cpio": "application/x-sv4cpio",
	".sv4crc": "application/x-sv4crc",
	".tar": "application/x-tar",
	".tcl": "application/x-tcl",
	".tex": "application/x-tex",
	".man": "application/x-troff-man",
	".me": "application/x-troff-me",
	".ms": "application/x-troff-ms",
	".ustar": "application/x-ustar",
	".src": "application/x-wais-source",
	".zip": "application/zip",
	".m3u": "audio/x-mpegurl",
	".ra": "audio/x-pn-realaudio",
	".wav": "audio/x-wav",
	".wma": "audio/x-ms-wma",
	".wax": "audio/x-ms-wax",
	".pdb": "chemical/x-pdb",
	".xyz": "chemical/x-xyz",
	".bmp": "image/bmp",
	".gif": "image/gif",
	".ief": "image/ief",
	".png": "image/png",
	".wbmp": "image/vnd.wap.wbmp",
	".ras": "image/x-cmu-raster",
	".pnm": "image/x-portable-anymap",
	".pbm": "image/x-portable-bitmap",
	".pgm": "image/x-portable-graymap",
	".ppm": "image/x-portable-pixmap",
	".rgb": "image/x-rgb",
	".xbm": "image/x-xbitmap",
	".xpm": "image/x-xpixmap",
	".xwd": "image/x-xwindowdump",
	".css": "text/css",
	".rtx": "text/richtext",
	".tsv": "text/tab-separated-values",
	".jad": "text/vnd.sun.j2me.app-descriptor",
	".wml": "text/vnd.wap.wml",
	".wmls": "text/vnd.wap.wmlscript",
	".etx": "text/x-setext",
	".mxu": "video/vnd.mpegurl",
	".flv": "video/x-flv",
	".wm": "video/x-ms-wm",
	".wmv": "video/x-ms-wmv",
	".wmx": "video/x-ms-wmx",
	".wvx": "video/x-ms-wvx",
	".avi": "video/x-msvideo",
	".movie": "video/x-sgi-movie",
	".ice": "x-conference/x-cooltalk",
	".3gp": "video/3gpp",
	".ai": "application/postscript",
	".aif": "audio/x-aiff",
	".aifc": "audio/x-aiff",
	".aiff": "audio/x-aiff",
	".asc": "text/plain",
	".atom": "application/atom+xml",
	".au": "audio/basic",
	".bin": "application/octet-stream",
	".cdf": "application/x-netcdf",
	".cgm": "image/cgm",
	".class": "application/octet-stream",
	".dcr": "application/x-director",
	".dif": "video/x-dv",
	".dir": "application/x-director",
	".djv": "image/vnd.djvu",
	".djvu": "image/vnd.djvu",
	".dll": "application/octet-stream",
	".dmg": "application/octet-stream",
	".dms": "application/octet-stream",
	".dtd": "application/xml-dtd",
	".dv": "video/x-dv",
	".dxr": "application/x-director",
	".eps": "application/postscript",
	".exe": "application/octet-stream",
	".ez": "application/andrew-inset",
	".gram": "application/srgs",
	".grxml": "application/srgs+xml",
	".gz": "application/x-gzip",
	".htm": "text/html",
	".html": "text/html",
	".ico": "image/x-icon",
	".ics": "text/calendar",
	".ifb": "text/calendar",
	".iges": "model/iges",
	".igs": "model/iges",
	".jp2": "image/jp2",
	".jpe": "image/jpeg",
	".jpeg": "image/jpeg",
	".jpg": "image/jpeg",
	".kar": "audio/midi",
	".lha": "application/octet-stream",
	".lzh": "application/octet-stream",
	".m4a": "audio/mp4a-latm",
	".m4p": "audio/mp4a-latm",
	".m4u": "video/vnd.mpegurl",
	".m4v": "video/x-m4v",
	".mac": "image/x-macpaint",
	".mathml": "application/mathml+xml",
	".mesh": "model/mesh",
	".mid": "audio/midi",
	".midi": "audio/midi",
	".mov": "video/quicktime",
	".mp2": "audio/mpeg",
	".mp3": "audio/mpeg",
	".mp4": "video/mp4",
	".mpe": "video/mpeg",
	".mpeg": "video/mpeg",
	".mpg": "video/mpeg",
	".mpga": "audio/mpeg",
	".msh": "model/mesh",
	".nc": "application/x-netcdf",
	".oda": "application/oda",
	".ogv": "video/ogv",
	".pct": "image/pict",
	".pic": "image/pict",
	".pict": "image/pict",
	".pnt": "image/x-macpaint",
	".pntg": "image/x-macpaint",
	".ps": "application/postscript",
	".qt": "video/quicktime",
	".qti": "image/x-quicktime",
	".qtif": "image/x-quicktime",
	".ram": "audio/x-pn-realaudio",
	".rdf": "application/rdf+xml",
	".rm": "application/vnd.rn-realmedia",
	".roff": "application/x-troff",
	".sgm": "text/sgml",
	".sgml": "text/sgml",
	".silo": "model/mesh",
	".skd": "application/x-koan",
	".skm": "application/x-koan",
	".skp": "application/x-koan",
	".skt": "application/x-koan",
	".smi": "application/smil",
	".smil": "application/smil",
	".snd": "audio/basic",
	".so": "application/octet-stream",
	".svg": "image/svg+xml",
	".t": "application/x-troff",
	".texi": "application/x-texinfo",
	".texinfo": "application/x-texinfo",
	".tif": "image/tiff",
	".tiff": "image/tiff",
	".tr": "application/x-troff",
	".txt": "text/plain",
	".vrml": "model/vrml",
	".vxml": "application/voicexml+xml",
	".webm": "video/webm",
	".wrl": "model/vrml",
	".xht": "application/xhtml+xml",
	".xhtml": "application/xhtml+xml",
	".xml": "application/xml",
	".xsl": "application/xml",
	".xslt": "application/xslt+xml",
	".xul": "application/vnd.mozilla.xul+xml",
	".webp": "image/webp",
	".323": "text/h323",
	".aab": "application/x-authoware-bin",
	".aam": "application/x-authoware-map",
	".aas": "application/x-authoware-seg",
	".acx": "application/internet-property-stream",
	".als": "audio/X-Alpha5",
	".amc": "application/x-mpeg",
	".ani": "application/octet-stream",
	".asd": "application/astound",
	".asf": "video/x-ms-asf",
	".asn": "application/astound",
	".asp": "application/x-asap",
	".asr": "video/x-ms-asf",
	".asx": "video/x-ms-asf",
	".avb": "application/octet-stream",
	".awb": "audio/amr-wb",
	".axs": "application/olescript",
	".bas": "text/plain",
	".bin ": "application/octet-stream", // NOTE: key has a trailing space; kept for backward compatibility
	".bld": "application/bld",
	".bld2": "application/bld2",
	".bpk": "application/octet-stream",
	".c": "text/plain",
	".cal": "image/x-cals",
	".cat": "application/vnd.ms-pkiseccat",
	".ccn": "application/x-cnc",
	".cco": "application/x-cocoa",
	".cer": "application/x-x509-ca-cert",
	".cgi": "magnus-internal/cgi",
	".chat": "application/x-chat",
	".clp": "application/x-msclip",
	".cmx": "image/x-cmx",
	".co": "application/x-cult3d-object",
	".cod": "image/cis-cod",
	".conf": "text/plain",
	".cpp": "text/plain",
	".crd": "application/x-mscardfile",
	".crl": "application/pkix-crl",
	".crt": "application/x-x509-ca-cert",
	".csm": "chemical/x-csml",
	".csml": "chemical/x-csml",
	".cur": "application/octet-stream",
	".dcm": "x-lml/x-evm",
	".dcx": "image/x-dcx",
	".der": "application/x-x509-ca-cert",
	".dhtml": "text/html",
	".dot": "application/msword",
	".dwf": "drawing/x-dwf",
	".dwg": "application/x-autocad",
	".dxf": "application/x-autocad",
	".ebk": "application/x-expandedbook",
	".emb": "chemical/x-embl-dl-nucleotide",
	".embl": "chemical/x-embl-dl-nucleotide",
	".epub": "application/epub+zip",
	".eri": "image/x-eri",
	".es": "audio/echospeech",
	".esl": "audio/echospeech",
	".etc": "application/x-earthtime",
	".evm": "x-lml/x-evm",
	".evy": "application/envoy",
	".fh4": "image/x-freehand",
	".fh5": "image/x-freehand",
	".fhc": "image/x-freehand",
	".fif": "application/fractals",
	".flr": "x-world/x-vrml",
	".fm": "application/x-maker",
	".fpx": "image/x-fpx",
	".fvi": "video/isivideo",
	".gau": "chemical/x-gaussian-input",
	".gca": "application/x-gca-compressed",
	".gdb": "x-lml/x-gdb",
	".gps": "application/x-gps",
	".h": "text/plain",
	".hdm": "text/x-hdml",
	".hdml": "text/x-hdml",
	".hlp": "application/winhlp",
	".hta": "application/hta",
	".htc": "text/x-component",
	".hts": "text/html",
	".htt": "text/webviewhtml",
	".ifm": "image/gif",
	".ifs": "image/ifs",
	".iii": "application/x-iphone",
	".imy": "audio/melody",
	".ins": "application/x-internet-signup",
	".ips": "application/x-ipscript",
	".ipx": "application/x-ipix",
	".isp": "application/x-internet-signup",
	".it": "audio/x-mod",
	".itz": "audio/x-mod",
	".ivr": "i-world/i-vrml",
	".j2k": "image/j2k",
	".jam": "application/x-jam",
	".java": "text/plain",
	".jfif": "image/pipeg",
	".jpz": "image/jpeg",
	".jwc": "application/jwc",
	".kjx": "application/x-kjx",
	".lak": "x-lml/x-lak",
	".lcc": "application/fastman",
	".lcl": "application/x-digitalloca",
	".lcr": "application/x-digitalloca",
	".lgh": "application/lgh",
	".lml": "x-lml/x-lml",
	".lmlpack": "x-lml/x-lmlpack",
	".log": "text/plain",
	".lsf": "video/x-la-asf",
	".lsx": "video/x-la-asf",
	".m13": "application/x-msmediaview",
	".m14": "application/x-msmediaview",
	".m15": "audio/x-mod",
	".m3url": "audio/x-mpegurl",
	".m4b": "audio/mp4a-latm",
	".ma1": "audio/ma1",
	".ma2": "audio/ma2",
	".ma3": "audio/ma3",
	".ma5": "audio/ma5",
	".map": "magnus-internal/imagemap",
	".mbd": "application/mbedlet",
	".mct": "application/x-mascot",
	".mdb": "application/x-msaccess",
	".mdz": "audio/x-mod",
	".mel": "text/x-vmel",
	".mht": "message/rfc822",
	".mhtml": "message/rfc822",
	".mi": "application/x-mif",
	".mil": "image/x-cals",
	".mio": "audio/x-mio",
	".mmf": "application/x-skt-lbs",
	".mng": "video/x-mng",
	".mny": "application/x-msmoney",
	".moc": "application/x-mocha",
	".mocha": "application/x-mocha",
	".mod": "audio/x-mod",
	".mof": "application/x-yumekara",
	".mol": "chemical/x-mdl-molfile",
	".mop": "chemical/x-mopac-input",
	".mpa": "video/mpeg",
	".mpc": "application/vnd.mpohun.certificate",
	".mpg4": "video/mp4",
	".mpn": "application/vnd.mophun.application",
	".mpp": "application/vnd.ms-project",
	".mps": "application/x-mapserver",
	".mpv2": "video/mpeg",
	".mrl": "text/x-mrml",
	".mrm": "application/x-mrm",
	".msg": "application/vnd.ms-outlook",
	".mts": "application/metastream",
	".mtx": "application/metastream",
	".mtz": "application/metastream",
	".mvb": "application/x-msmediaview",
	".mzv": "application/metastream",
	".nar": "application/zip",
	".nbmp": "image/nbmp",
	".ndb": "x-lml/x-ndb",
	".ndwn": "application/ndwn",
	".nif": "application/x-nif",
	".nmz": "application/x-scream",
	".nokia-op-logo": "image/vnd.nok-oplogo-color",
	".npx": "application/x-netfpx",
	".nsnd": "audio/nsnd",
	".nva": "application/x-neva1",
	".nws": "message/rfc822",
	".oom": "application/x-AtlasMate-Plugin",
	".p10": "application/pkcs10",
	".p12": "application/x-pkcs12",
	".p7b": "application/x-pkcs7-certificates",
	".p7c": "application/x-pkcs7-mime",
	".p7m": "application/x-pkcs7-mime",
	".p7r": "application/x-pkcs7-certreqresp",
	".p7s": "application/x-pkcs7-signature",
	".pac": "audio/x-pac",
	".pae": "audio/x-epac",
	".pan": "application/x-pan",
	".pcx": "image/x-pcx",
	".pda": "image/x-pda",
	".pfr": "application/font-tdpfr",
	".pfx": "application/x-pkcs12",
	".pko": "application/ynd.ms-pkipko",
	".pm": "application/x-perl",
	".pma": "application/x-perfmon",
	".pmc": "application/x-perfmon",
	".pmd": "application/x-pmd",
	".pml": "application/x-perfmon",
	".pmr": "application/x-perfmon",
	".pmw": "application/x-perfmon",
	".pnz": "image/png",
	".pot": "application/vnd.ms-powerpoint", // was ".pot," (stray comma) — fixed
	".pps": "application/vnd.ms-powerpoint",
	".pqf": "application/x-cprplayer",
	".pqi": "application/cprplayer",
	".prc": "application/x-prc",
	".prf": "application/pics-rules",
	".prop": "text/plain",
	".proxy": "application/x-ns-proxy-autoconfig",
	".ptlk": "application/listenup",
	".pub": "application/x-mspublisher",
	".pvx": "video/x-pv-pvx",
	".qcp": "audio/vnd.qcelp",
	".r3t": "text/vnd.rn-realtext3d",
	".rar": "application/octet-stream",
	".rc": "text/plain",
	".rf": "image/vnd.rn-realflash",
	".rlf": "application/x-richlink",
	".rmf": "audio/x-rmf",
	".rmi": "audio/mid",
	".rmm": "audio/x-pn-realaudio",
	".rmvb": "audio/x-pn-realaudio",
	".rnx": "application/vnd.rn-realplayer",
	".rp": "image/vnd.rn-realpix",
	".rt": "text/vnd.rn-realtext",
	".rte": "x-lml/x-gps",
	".rtg": "application/metastream",
	".rv": "video/vnd.rn-realvideo",
	".rwc": "application/x-rogerwilco",
	".s3m": "audio/x-mod",
	".s3z": "audio/x-mod",
	".sca": "application/x-supercard",
	".scd": "application/x-msschedule",
	".sct": "text/scriptlet",
	".sdf": "application/e-score",
	".sea": "application/x-stuffit",
	".setpay": "application/set-payment-initiation",
	".setreg": "application/set-registration-initiation",
	".shtml": "text/html",
	".shtm": "text/html",
	".shw": "application/presentations",
	".si6": "image/si6",
	".si7": "image/vnd.stiwap.sis",
	".si9": "image/vnd.lgtwap.sis",
	".slc": "application/x-salsa",
	".smd": "audio/x-smd",
	".smp": "application/studiom",
	".smz": "audio/x-smd",
	".spc": "application/x-pkcs7-certificates",
	".spr": "application/x-sprite",
	".sprite": "application/x-sprite",
	".sdp": "application/sdp",
	".spt": "application/x-spt",
	".sst": "application/vnd.ms-pkicertstore",
	".stk": "application/hyperstudio",
	".stl": "application/vnd.ms-pkistl",
	".stm": "text/html",
	".svf": "image/vnd",
	".svh": "image/svh",
	".svr": "x-world/x-svr",
	".swfl": "application/x-shockwave-flash",
	".tad": "application/octet-stream",
	".talk": "text/x-speech",
	".taz": "application/x-tar",
	".tbp": "application/x-timbuktu",
	".tbt": "application/x-timbuktu",
	".tgz": "application/x-compressed",
	".thm": "application/vnd.eri.thm",
	".tki": "application/x-tkined",
	".tkined": "application/x-tkined",
	".toc": "application/toc",
	".toy": "image/toy",
	".trk": "x-lml/x-gps",
	".trm": "application/x-msterminal",
	".tsi": "audio/tsplayer",
	".tsp": "application/dsptype",
	".ttf": "application/octet-stream",
	".ttz": "application/t-time",
	".uls": "text/iuls",
	".ult": "audio/x-mod",
	".uu": "application/x-uuencode",
	".uue": "application/x-uuencode",
	".vcf": "text/x-vcard",
	".vdo": "video/vdo",
	".vib": "audio/vib",
	".viv": "video/vivo",
	".vivo": "video/vivo",
	".vmd": "application/vocaltec-media-desc",
	".vmf": "application/vocaltec-media-file",
	".vmi": "application/x-dreamcast-vms-info",
	".vms": "application/x-dreamcast-vms",
	".vox": "audio/voxware",
	".vqe": "audio/x-twinvq-plugin",
	".vqf": "audio/x-twinvq",
	".vql": "audio/x-twinvq",
	".vre": "x-world/x-vream",
	".vrt": "x-world/x-vrt",
	".vrw": "x-world/x-vream",
	".vts": "workbook/formulaone",
	".wcm": "application/vnd.ms-works",
	".wdb": "application/vnd.ms-works",
	".web": "application/vnd.xara",
	".wi": "image/wavelet",
	".wis": "application/x-InstallShield",
	".wks": "application/vnd.ms-works",
	".wmd": "application/x-ms-wmd",
	".wmf": "application/x-msmetafile",
	".wmlscript": "text/vnd.wap.wmlscript",
	".wmz": "application/x-ms-wmz",
	".wpng": "image/x-up-wpng",
	".wps": "application/vnd.ms-works",
	".wpt": "x-lml/x-gps",
	".wri": "application/x-mswrite",
	".wrz": "x-world/x-vrml",
	".ws": "text/vnd.wap.wmlscript",
	".wsc": "application/vnd.wap.wmlscriptc",
	".wv": "video/wavelet",
	".wxl": "application/x-wxl",
	".x-gzip": "application/x-gzip",
	".xaf": "x-world/x-vrml",
	".xar": "application/vnd.xara",
	".xdm": "application/x-xdma",
	".xdma": "application/x-xdma",
	".xdw": "application/vnd.fujixerox.docuworks",
	".xhtm": "application/xhtml+xml",
	".xla": "application/vnd.ms-excel",
	".xlc": "application/vnd.ms-excel",
	".xll": "application/x-excel",
	".xlm": "application/vnd.ms-excel",
	".xlt": "application/vnd.ms-excel",
	".xlw": "application/vnd.ms-excel",
	".xm": "audio/x-mod",
	".xmz": "audio/x-mod",
	".xof": "x-world/x-vrml",
	".xpi": "application/x-xpinstall",
	".xsit": "text/xml",
	".yz1": "application/x-yz1",
	".z": "application/x-compress",
	".zac": "application/x-zaurus-zac",
	".json": "application/json",
}
// TypeByExtension returns the MIME type associated with the file extension ext.
// gets the file's MIME type for HTTP header Content-Type
// It first consults the platform mime database and falls back to the
// package-local table; any "charset=" parameter on a text/* answer is removed.
func TypeByExtension(filePath string) string {
	ext := path.Ext(filePath)
	typ := mime.TypeByExtension(ext)
	if typ == "" {
		return extToMimeType[strings.ToLower(ext)]
	}
	if strings.HasPrefix(typ, "text/") && strings.Contains(typ, "charset=") {
		return removeCharsetInMimeType(typ)
	}
	return typ
}
// removeCharsetInMimeType strips any "charset=..." parameter from a MIME type
// string, e.g. "text/html; charset=utf-8" -> "text/html". Remaining segments
// are trimmed and re-joined with "; ".
// Fixes two edge cases of the previous version: a leading "; " when the first
// segment was the charset one, and double spaces from re-joining untrimmed
// segments.
func removeCharsetInMimeType(typ string) (str string) {
	var builder strings.Builder
	for _, seg := range strings.Split(typ, ";") {
		seg = strings.TrimSpace(seg)
		// Drop empty segments and the charset parameter itself.
		if seg == "" || strings.Contains(seg, "charset=") {
			continue
		}
		if builder.Len() > 0 {
			builder.WriteString("; ")
		}
		builder.WriteString(seg)
	}
	return builder.String()
}

View File

@@ -1,69 +0,0 @@
package oss
import (
"hash"
"io"
"net/http"
)
// Response defines HTTP response from OSS
type Response struct {
	StatusCode int           // HTTP status code returned by OSS
	Headers    http.Header   // response headers
	Body       io.ReadCloser // response body stream; the caller must close it
	ClientCRC  uint64        // CRC64 computed on the client side
	ServerCRC  uint64        // CRC64 reported by the server
}
// Read implements io.Reader by delegating to the underlying Body.
func (r *Response) Read(p []byte) (n int, err error) {
	return r.Body.Read(p)
}
// Close closes the http response body.
func (r *Response) Close() error {
	return r.Body.Close()
}
// PutObjectRequest is the request of DoPutObject
type PutObjectRequest struct {
	ObjectKey string    // key of the object to upload
	Reader    io.Reader // source of the object data
}
// GetObjectRequest is the request of DoGetObject
type GetObjectRequest struct {
	ObjectKey string // key of the object to download
}
// GetObjectResult is the result of DoGetObject
type GetObjectResult struct {
	Response  *Response   // raw HTTP response
	ClientCRC hash.Hash64 // running CRC64 of the bytes read on the client side
	ServerCRC uint64      // CRC64 reported by the server
}
// AppendObjectRequest is the request of DoAppendObject
type AppendObjectRequest struct {
	ObjectKey string    // key of the appendable object
	Reader    io.Reader // source of the data to append
	Position  int64     // byte offset at which the append starts
}
// AppendObjectResult is the result of DoAppendObject
type AppendObjectResult struct {
	NextPosition int64  // offset to use for the next append
	CRC          uint64 // CRC64 of the appended data
}
// UploadPartRequest is the request of DoUploadPart
type UploadPartRequest struct {
	InitResult *InitiateMultipartUploadResult // result of the initiating call this part belongs to
	Reader     io.Reader                      // source of the part data
	PartSize   int64                          // size of this part in bytes
	PartNumber int                            // part number (1-based)
}
// UploadPartResult is the result of DoUploadPart
type UploadPartResult struct {
	Part UploadPart // the uploaded part (number + etag)
}

View File

@@ -1,474 +0,0 @@
package oss
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
)
// CopyFile is multipart copy object
//
// srcBucketName    source bucket name
// srcObjectKey     source object name
// destObjectKey    target object name in the form of bucketname.objectkey
// partSize         the part size in byte.
// options          object's contraints. Check out function InitiateMultipartUpload.
//
// error    it's nil if the operation succeeds, otherwise it's an error object.
//
func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
	if partSize < MinPartSize || partSize > MaxPartSize {
		return errors.New("oss: part size invalid range (1024KB, 5GB]")
	}

	destBucketName := bucket.BucketName
	routines := getRoutines(options)

	var strVersionId string
	if versionId, _ := FindOption(options, "versionId", nil); versionId != nil {
		strVersionId = versionId.(string)
	}

	// Use the checkpoint (resumable) path when one is configured and enabled.
	if cpConf := getCpConfig(options); cpConf != nil && cpConf.IsEnable {
		if cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId); cpFilePath != "" {
			return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
		}
	}

	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
		partSize, options, routines)
}
// getCopyCpFilePath returns the checkpoint file path for a copy task. When
// only a checkpoint directory is configured, a file name is derived from the
// source/destination addresses (and version id) and stored back into cpConf.
func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string {
	if cpConf.FilePath == "" && cpConf.DirPath != "" {
		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + getCpFileName(src, dest, versionId)
	}
	return cpConf.FilePath
}
// ----- Concurrently copy without checkpoint ---------

// copyWorkerArg defines the copy worker arguments
type copyWorkerArg struct {
	bucket        *Bucket                       // destination bucket the parts are copied into
	imur          InitiateMultipartUploadResult // the multipart upload the parts belong to
	srcBucketName string                        // source bucket name
	srcObjectKey  string                        // source object key
	options       []Option                      // per-part transfer options
	hook          copyPartHook                  // test hook invoked before each part copy
}
// copyPartHook is the hook for testing purpose
type copyPartHook func(part copyPart) error

// copyPartHooker is the active hook; tests may replace it to inject failures.
var copyPartHooker copyPartHook = defaultCopyPartHook

// defaultCopyPartHook is the no-op default hook.
func defaultCopyPartHook(part copyPart) error {
	return nil
}
// copyWorker copies worker
// It consumes parts from jobs, performs UploadPartCopy for each and publishes
// successful parts on results; the first error is sent on failed and the
// worker stops. A closed die channel (closed by the consumer after a failure)
// makes the worker exit without publishing further results.
func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
	for chunk := range jobs {
		if err := arg.hook(chunk); err != nil {
			failed <- err
			break
		}
		// Part ranges are inclusive, hence the +1.
		chunkSize := chunk.End - chunk.Start + 1
		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
			chunk.Start, chunkSize, chunk.Number, arg.options...)
		if err != nil {
			failed <- err
			break
		}
		// Check for cancellation before publishing so the worker does not
		// block on results after the consumer has already returned.
		select {
		case <-die:
			return
		default:
		}
		results <- part
	}
}
// copyScheduler feeds every part into the jobs channel and then closes it,
// signalling the workers that no more work is coming.
func copyScheduler(jobs chan copyPart, parts []copyPart) {
	defer close(jobs)
	for i := range parts {
		jobs <- parts[i]
	}
}
// copyPart structure
type copyPart struct {
	Number int   // Part number (from 1 to 10,000)
	Start  int64 // The start index in the source file.
	End    int64 // The end index in the source file (inclusive)
}
// getCopyParts calculates copy parts
// It splits an object of objectSize bytes into consecutive ranges of at most
// partSize bytes; part numbers start at 1.
func getCopyParts(objectSize, partSize int64) []copyPart {
	parts := []copyPart{}
	number := 1
	for offset := int64(0); offset < objectSize; offset += partSize {
		parts = append(parts, copyPart{
			Number: number,
			Start:  offset,
			End:    GetPartEnd(offset, objectSize, partSize),
		})
		number++
	}
	return parts
}
// getSrcObjectBytes gets the source file size
// by summing the (inclusive) byte ranges of all parts.
func getSrcObjectBytes(parts []copyPart) int64 {
	var total int64
	for i := range parts {
		total += parts[i].End - parts[i].Start + 1
	}
	return total
}
// copyFile is a concurrently copy without checkpoint
// It reads the source object's size, splits the copy into parts, copies the
// parts with `routines` concurrent workers and completes the multipart upload
// on the destination bucket.
// Fix: errors returned by Client.Bucket were previously discarded (the second
// call shadowed the first's error and neither was checked); they are now
// propagated to the caller.
func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
	partSize int64, options []Option, routines int) error {
	descBucket, err := bucket.Client.Bucket(destBucketName)
	if err != nil {
		return err
	}
	srcBucket, err := bucket.Client.Bucket(srcBucketName)
	if err != nil {
		return err
	}
	listener := GetProgressListener(options)

	// choice valid options
	headerOptions := ChoiceHeadObjectOption(options)
	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)
	abortOptions := ChoiceAbortPartOption(options)

	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
	if err != nil {
		return err
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
	if err != nil {
		return err
	}

	// Get copy parts
	parts := getCopyParts(objectSize, partSize)

	// Initialize the multipart upload
	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
	if err != nil {
		return err
	}

	jobs := make(chan copyPart, len(parts))
	results := make(chan UploadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	var completedBytes int64
	totalBytes := getSrcObjectBytes(parts)
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
	publishProgress(listener, event)

	// Start to copy workers
	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
	for w := 1; w <= routines; w++ {
		go copyWorker(w, arg, jobs, results, failed, die)
	}

	// Start the scheduler
	go copyScheduler(jobs, parts)

	// Wait for the parts finished.
	completed := 0
	ups := make([]UploadPart, len(parts))
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			ups[part.PartNumber-1] = part
			copyBytes := parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1
			completedBytes += copyBytes
			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
			publishProgress(listener, event)
		case err := <-failed:
			// A worker failed: stop the others, abort the upload and report.
			close(die)
			descBucket.AbortMultipartUpload(imur, abortOptions...)
			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
			publishProgress(listener, event)
			return err
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
	publishProgress(listener, event)

	// Complete the multipart upload
	_, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...)
	if err != nil {
		bucket.AbortMultipartUpload(imur, abortOptions...)
		return err
	}
	return nil
}
// ----- Concurrently copy with checkpoint -----

// copyCpMagic identifies a copy checkpoint file; written by prepare and
// verified by isValid.
const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
// copyCheckpoint is the JSON-serialized state of a resumable copy; it is
// persisted after every finished part so an interrupted copy can resume.
type copyCheckpoint struct {
	Magic          string       // Magic
	MD5            string       // CP content MD5
	SrcBucketName  string       // Source bucket
	SrcObjectKey   string       // Source object
	DestBucketName string       // Target bucket
	DestObjectKey  string       // Target object
	CopyID         string       // Copy ID
	ObjStat        objectStat   // Object stat
	Parts          []copyPart   // Copy parts
	CopyParts      []UploadPart // The uploaded parts
	PartStat       []bool       // The part status
}
// isValid checks if the data is valid which means CP is valid and object is not updated.
// It verifies the checkpoint's magic number and MD5 self-checksum, then
// confirms the source object (size, last-modified, etag) has not changed.
func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
	// Compare CP's magic number and the MD5.
	cpb := cp
	cpb.MD5 = ""
	js, _ := json.Marshal(cpb)
	sum := md5.Sum(js)
	b64 := base64.StdEncoding.EncodeToString(sum[:])

	// Fix: compare against copyCpMagic, not downloadCpMagic. prepare() stamps
	// copy checkpoints with copyCpMagic, so the old comparison rejected every
	// checkpoint and resumable copy never actually resumed.
	if cp.Magic != copyCpMagic || b64 != cp.MD5 {
		return false, nil
	}

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return false, err
	}

	// Compare the object size and last modified time and etag.
	if cp.ObjStat.Size != objectSize ||
		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
		return false, nil
	}

	return true, nil
}
// load loads from the checkpoint file
// restoring the checkpoint state serialized at filePath.
func (cp *copyCheckpoint) load(filePath string) error {
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return err
	}
	return json.Unmarshal(contents, cp)
}
// update updates the parts status
// by recording the finished part and marking its slot as done.
func (cp *copyCheckpoint) update(part UploadPart) {
	cp.CopyParts[part.PartNumber-1] = part
	cp.PartStat[part.PartNumber-1] = true
}
// dump dumps the CP to the file
// It embeds an MD5 self-checksum (computed over the snapshot with an empty
// MD5 field) so load/isValid can detect corruption.
func (cp *copyCheckpoint) dump(filePath string) error {
	snapshot := *cp

	// Calculate MD5 over the snapshot with the MD5 field blanked.
	snapshot.MD5 = ""
	js, err := json.Marshal(snapshot)
	if err != nil {
		return err
	}
	sum := md5.Sum(js)
	snapshot.MD5 = base64.StdEncoding.EncodeToString(sum[:])

	// Serialize again, now with the checksum embedded, and persist.
	js, err = json.Marshal(snapshot)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filePath, js, FilePermMode)
}
// todoParts returns unfinished parts
func (cp copyCheckpoint) todoParts() []copyPart {
	todo := []copyPart{}
	for i := range cp.PartStat {
		if !cp.PartStat[i] {
			todo = append(todo, cp.Parts[i])
		}
	}
	return todo
}
// getCompletedBytes returns finished bytes count
// by summing the inclusive ranges of all parts already marked done.
func (cp copyCheckpoint) getCompletedBytes() int64 {
	var done int64
	for i := range cp.Parts {
		if cp.PartStat[i] {
			done += cp.Parts[i].End - cp.Parts[i].Start + 1
		}
	}
	return done
}
// prepare initializes the multipart upload
// It fills the checkpoint with the source/destination identities, the source
// object's stat (size, last-modified, etag), the part plan, and the upload ID
// of a freshly initiated multipart upload on the destination.
func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
	partSize int64, options []Option) error {
	// CP
	cp.Magic = copyCpMagic
	cp.SrcBucketName = srcBucket.BucketName
	cp.SrcObjectKey = srcObjectKey
	cp.DestBucketName = destBucket.BucketName
	cp.DestObjectKey = destObjectKey

	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
	if err != nil {
		return err
	}
	cp.ObjStat.Size = objectSize
	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)

	// Parts. make already zeroes the slices, so the previous explicit
	// false-initialization loop was redundant and has been removed.
	cp.Parts = getCopyParts(objectSize, partSize)
	cp.PartStat = make([]bool, len(cp.Parts))
	cp.CopyParts = make([]UploadPart, len(cp.Parts))

	// Init copy
	imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
	if err != nil {
		return err
	}
	cp.CopyID = imur.UploadID

	return nil
}
// complete finishes the multipart copy recorded in the checkpoint and, on
// success, removes the checkpoint file (best effort).
func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
	imur := InitiateMultipartUploadResult{
		Bucket:   cp.DestBucketName,
		Key:      cp.DestObjectKey,
		UploadID: cp.CopyID,
	}
	if _, err := bucket.CompleteMultipartUpload(imur, parts, options...); err != nil {
		return err
	}
	// The checkpoint file is no longer needed once the copy has completed.
	os.Remove(cpFilePath)
	return nil
}
// copyFileWithCp is concurrently copy with checkpoint
// It resumes from the checkpoint file when it is still valid, otherwise it
// re-initializes the copy; the outstanding parts are then copied concurrently
// and progress is persisted after every finished part.
// Fix: errors returned by Client.Bucket were previously discarded (the second
// call shadowed the first's error and neither was checked); they are now
// propagated to the caller.
func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
	partSize int64, options []Option, cpFilePath string, routines int) error {
	descBucket, err := bucket.Client.Bucket(destBucketName)
	if err != nil {
		return err
	}
	srcBucket, err := bucket.Client.Bucket(srcBucketName)
	if err != nil {
		return err
	}
	listener := GetProgressListener(options)

	// Load CP data; an unreadable checkpoint file is simply discarded.
	ccp := copyCheckpoint{}
	err = ccp.load(cpFilePath)
	if err != nil {
		os.Remove(cpFilePath)
	}

	// choice valid options
	headerOptions := ChoiceHeadObjectOption(options)
	partOptions := ChoiceTransferPartOption(options)
	completeOptions := ChoiceCompletePartOption(options)

	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...)
	if err != nil {
		return err
	}

	// Load error or the CP data is invalid---reinitialize
	valid, err := ccp.isValid(meta)
	if err != nil || !valid {
		if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
			return err
		}
		os.Remove(cpFilePath)
	}

	// Unfinished parts
	parts := ccp.todoParts()
	imur := InitiateMultipartUploadResult{
		Bucket:   destBucketName,
		Key:      destObjectKey,
		UploadID: ccp.CopyID}

	jobs := make(chan copyPart, len(parts))
	results := make(chan UploadPart, len(parts))
	failed := make(chan error)
	die := make(chan bool)

	completedBytes := ccp.getCompletedBytes()
	event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
	publishProgress(listener, event)

	// Start the worker coroutines
	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker}
	for w := 1; w <= routines; w++ {
		go copyWorker(w, arg, jobs, results, failed, die)
	}

	// Start the scheduler
	go copyScheduler(jobs, parts)

	// Wait for the parts completed.
	completed := 0
	for completed < len(parts) {
		select {
		case part := <-results:
			completed++
			ccp.update(part)
			// Persist progress after every part so an interruption resumes here.
			ccp.dump(cpFilePath)
			copyBytes := parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1
			completedBytes += copyBytes
			event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
			publishProgress(listener, event)
		case err := <-failed:
			close(die)
			event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
			publishProgress(listener, event)
			return err
		}
	}

	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
	publishProgress(listener, event)

	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions)
}

View File

@@ -1,320 +0,0 @@
package oss
import (
"bytes"
"encoding/xml"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strconv"
)
// InitiateMultipartUpload initializes a multipart upload.
//
// objectKey  object name.
// options    upload constraints such as CacheControl, ContentDisposition,
//            ContentEncoding, Expires, ServerSideEncryption and Meta; see
//            https://www.alibabacloud.com/help/en/object-storage-service/latest/initiatemultipartupload
//
// InitiateMultipartUploadResult is consumed by later calls such as
// UploadPartFromFile and UploadPartCopy. error is nil on success.
func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
	var result InitiateMultipartUploadResult

	withType := AddContentType(options, objectKey)
	params, _ := GetRawParams(options)
	// Flag-style params carry no value; normalize empties to nil.
	ConvertEmptyValueToNil(params, []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"})
	params["uploads"] = nil

	resp, err := bucket.do("POST", objectKey, params, withType, nil, nil)
	if err != nil {
		return result, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &result)
	return result, err
}
// UploadPart uploads one part of a multipart upload.
//
// After initializing a multipart upload, the upload ID and object key are used
// to upload the individual parts. Each part has a part number (1 to 10,000)
// that fixes its position in the final object; re-uploading with the same part
// number and upload ID overwrites that part. Except for the last one, the
// minimal part size is 100KB; the last part has no size limit.
//
// imur        the return value of InitiateMultipartUpload.
// reader      io.Reader supplying the part's data.
// partSize    the part size.
// partNumber  the part number (1 to 10,000); out-of-range values yield InvalidArgument.
//
// UploadPart holds PartNumber and ETag; valid only when error is nil.
func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
	partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	req := &UploadPartRequest{
		InitResult: &imur,
		Reader:     reader,
		PartSize:   partSize,
		PartNumber: partNumber,
	}
	res, err := bucket.DoUploadPart(req, options)
	return res.Part, err
}
// UploadPartFromFile uploads a part read from a local file.
//
// imur           the return value of a successful InitiateMultipartUpload.
// filePath       the local file path to upload.
// startPosition  the start position in the local file.
// partSize       the part size.
// partNumber     the part number (from 1 to 10,000).
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var part = UploadPart{}
	fd, err := os.Open(filePath)
	if err != nil {
		return part, err
	}
	defer fd.Close()

	// Fix: the original ignored the Seek error, which would silently upload
	// the wrong byte range. Also use io.SeekStart over deprecated os.SEEK_SET.
	if _, err = fd.Seek(startPosition, io.SeekStart); err != nil {
		return part, err
	}

	request := &UploadPartRequest{
		InitResult: &imur,
		Reader:     fd,
		PartSize:   partSize,
		PartNumber: partNumber,
	}
	result, err := bucket.DoUploadPart(request, options)
	return result.Part, err
}
// DoUploadPart performs the actual part upload.
//
// request  the part upload request.
//
// UploadPartResult the result of uploading the part.
// error it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
	listener := GetProgressListener(options)
	opts := append(options, ContentLength(request.PartSize))

	params := map[string]interface{}{
		"partNumber": strconv.Itoa(request.PartNumber),
		"uploadId":   request.InitResult.UploadID,
	}
	// Cap the read at PartSize so an oversized reader cannot overrun the part.
	body := &io.LimitedReader{R: request.Reader, N: request.PartSize}
	resp, err := bucket.do("PUT", request.InitResult.Key, params, opts, body, listener)
	if err != nil {
		return &UploadPartResult{}, err
	}
	defer resp.Body.Close()

	result := &UploadPartResult{UploadPart{
		ETag:       resp.Headers.Get(HTTPHeaderEtag),
		PartNumber: request.PartNumber,
	}}
	if bucket.GetConfig().IsEnableCRC {
		if err = CheckCRC(resp, "DoUploadPart"); err != nil {
			return result, err
		}
	}
	return result, nil
}
// UploadPartCopy uploads a part by copying from an existing object.
//
// imur           the return value of InitiateMultipartUpload.
// srcBucketName  source bucket name.
// srcObjectKey   source object name.
// startPosition  the part's start index in the source file.
// partSize       the part size.
// partNumber     the part number, 1 to 10,000; out of range yields InvalidArgument.
// options        source-object constraints for the copy (CopySourceIfNoneMatch,
//                CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince); see
//                https://www.alibabacloud.com/help/en/object-storage-service/latest/uploadpartcopy
//
// UploadPart the return value consists of PartNumber and ETag.
// error it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
	var part UploadPart

	// The copy-source header form depends on whether a source version was requested.
	const versionIdKey = "versionId"
	escapedKey := url.QueryEscape(srcObjectKey)
	var opts []Option
	if versionId, _ := FindOption(options, versionIdKey, nil); versionId == nil {
		opts = []Option{CopySource(srcBucketName, escapedKey),
			CopySourceRange(startPosition, partSize)}
	} else {
		opts = []Option{CopySourceVersion(srcBucketName, escapedKey, versionId.(string)),
			CopySourceRange(startPosition, partSize)}
		options = DeleteOption(options, versionIdKey)
	}
	opts = append(opts, options...)

	params := map[string]interface{}{
		"partNumber": strconv.Itoa(partNumber),
		"uploadId":   imur.UploadID,
	}
	resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil)
	if err != nil {
		return part, err
	}
	defer resp.Body.Close()

	var out UploadPartCopyResult
	if err = xmlUnmarshal(resp.Body, &out); err != nil {
		return part, err
	}
	part.ETag = out.ETag
	part.PartNumber = partNumber
	return part, nil
}
// CompleteMultipartUpload completes the multipart upload.
//
// imur   the return value of InitiateMultipartUpload.
// parts  the array of return values of UploadPart/UploadPartFromFile/UploadPartCopy.
//
// CompleteMultipartUploadResult the return value when the call succeeds. Only valid when the error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
	parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
	var out CompleteMultipartUploadResult
	// The part list must be in ascending part-number order before marshaling.
	sort.Sort(UploadParts(parts))
	cxml := completeMultipartUploadXML{}
	cxml.Part = parts
	bs, err := xml.Marshal(cxml)
	if err != nil {
		return out, err
	}
	buffer := new(bytes.Buffer)
	buffer.Write(bs)
	params := map[string]interface{}{}
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return out, err
	}
	// A non-200 status with a body may carry service-error XML; a 200 body is
	// either the CompleteMultipartUploadResult XML or, when a callback option
	// was supplied, the callback response.
	err = CheckRespCode(resp.StatusCode, []int{http.StatusOK})
	if len(body) > 0 {
		if err != nil {
			err = tryConvertServiceError(body, resp, err)
		} else {
			callback, _ := FindOption(options, HTTPHeaderOssCallback, nil)
			if callback == nil {
				err = xml.Unmarshal(body, &out)
			} else {
				// With a callback, hand the raw response body back to the
				// caller via the responseBody option, if one was provided.
				rb, _ := FindOption(options, responseBody, nil)
				if rb != nil {
					if rbody, ok := rb.(*[]byte); ok {
						*rbody = body
					}
				}
			}
		}
	}
	return out, err
}
// AbortMultipartUpload aborts the multipart upload.
//
// imur  the return value of InitiateMultipartUpload.
//
// error it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
	params := map[string]interface{}{
		"uploadId": imur.UploadID,
	}
	resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// A successful abort answers 204 No Content.
	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
}
// ListUploadedParts lists the uploaded parts of a multipart upload.
//
// imur  the return value of InitiateMultipartUpload.
//
// ListUploadedPartsResult the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
	var out ListUploadedPartsResult
	options = append(options, EncodingType("url"))

	// Fix: the original created an empty map that was immediately discarded by
	// the `params, err := GetRawParams(...)` redeclaration — dead store removed.
	params, err := GetRawParams(options)
	if err != nil {
		return out, err
	}
	params["uploadId"] = imur.UploadID
	resp, err := bucket.do("GET", imur.Key, params, options, nil, nil)
	if err != nil {
		return out, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &out)
	if err != nil {
		return out, err
	}
	// Keys come back url-encoded (EncodingType above); decode before returning.
	err = decodeListUploadedPartsResult(&out)
	return out, err
}
// ListMultipartUploads lists all ongoing multipart upload tasks.
//
// options  listObject's filters. Prefix specifies the returned object's prefix;
//          KeyMarker specifies the returned object's start point in lexicographic
//          order; MaxKeys specifies the max entries to return; Delimiter is the
//          character for grouping object keys.
//
// ListMultipartUploadResult the return value if it succeeds, only valid when error is nil.
// error it's nil if the operation succeeds, otherwise it's an error object.
func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
	var result ListMultipartUploadResult

	options = append(options, EncodingType("url"))
	params, err := GetRawParams(options)
	if err != nil {
		return result, err
	}
	params["uploads"] = nil

	resp, err := bucket.doInner("GET", "", params, options, nil, nil)
	if err != nil {
		return result, err
	}
	defer resp.Body.Close()

	if err = xmlUnmarshal(resp.Body, &result); err != nil {
		return result, err
	}
	// Keys come back url-encoded (EncodingType above); decode before returning.
	err = decodeListMultipartUploadResult(&result)
	return result, err
}

Some files were not shown because too many files have changed in this diff Show More