
Merge pull request '[gitea] week 2024-40 cherry pick (gitea/main -> forgejo)' (#5416) from earl-warren/wcp/2024-40 into forgejo

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/5416
Reviewed-by: Shiny Nematoda <snematoda@noreply.codeberg.org>
Commit aec55ac1b6 by Earl Warren, 2024-10-01 07:52:05 +00:00
16 changed files with 392 additions and 78 deletions


@@ -147,6 +147,12 @@ func runServ(c *cli.Context) error {
 		return nil
 	}
+	defer func() {
+		if err := recover(); err != nil {
+			_ = fail(ctx, "Internal Server Error", "Panic: %v\n%s", err, log.Stack(2))
+		}
+	}()
+
 	keys := strings.Split(c.Args().First(), "-")
 	if len(keys) != 2 || keys[0] != "key" {
 		return fail(ctx, "Key ID format error", "Invalid key argument: %s", c.Args().First())
@@ -193,10 +199,7 @@ func runServ(c *cli.Context) error {
 	}
 	verb := words[0]
-	repoPath := words[1]
-	if repoPath[0] == '/' {
-		repoPath = repoPath[1:]
-	}
+	repoPath := strings.TrimPrefix(words[1], "/")
 
 	var lfsVerb string
 	if verb == lfsAuthenticateVerb {
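
The new deferred recover turns a panic anywhere in runServ into a logged SSH error reply instead of a silent crash. A minimal standalone sketch of the pattern, with the real fail helper and log.Stack replaced by stand-ins:

package main

import (
	"errors"
	"fmt"
	"runtime/debug"
)

// fail stands in for the real helper that reports an error to the SSH
// client and logs the details on the server side.
func fail(userMsg, logMsg string, args ...any) error {
	fmt.Printf(logMsg+"\n", args...)
	return errors.New(userMsg)
}

func run() (err error) {
	// a deferred recover converts a panic below into a reported error
	// instead of killing the process mid-protocol
	defer func() {
		if r := recover(); r != nil {
			err = fail("Internal Server Error", "Panic: %v\n%s", r, debug.Stack())
		}
	}()
	panic("boom") // simulate an unexpected failure
}

func main() {
	fmt.Println("run returned:", run())
}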


@@ -18,8 +18,32 @@ func FullSteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
 		return fullStepsOfEmptySteps(task)
 	}
 
-	firstStep := task.Steps[0]
+	// firstStep is the first step that has run or is running, not including preStep.
+	// For example,
+	// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): firstStep is step1.
+	// 2. preStep(Success) -> step1(Skipped) -> step2(Success) -> postStep(Success): firstStep is step2.
+	// 3. preStep(Success) -> step1(Running) -> step2(Waiting) -> postStep(Waiting): firstStep is step1.
+	// 4. preStep(Success) -> step1(Skipped) -> step2(Skipped) -> postStep(Skipped): firstStep is nil.
+	// 5. preStep(Success) -> step1(Cancelled) -> step2(Cancelled) -> postStep(Cancelled): firstStep is nil.
+	var firstStep *actions_model.ActionTaskStep
+	// lastHasRunStep is the last step that has run.
+	// For example,
+	// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): lastHasRunStep is step1.
+	// 2. preStep(Success) -> step1(Success) -> step2(Success) -> step3(Success) -> postStep(Success): lastHasRunStep is step3.
+	// 3. preStep(Success) -> step1(Success) -> step2(Failure) -> step3 -> postStep(Waiting): lastHasRunStep is step2.
+	// So its Stopped is the Started of postStep when there are no more steps to run.
+	var lastHasRunStep *actions_model.ActionTaskStep
+
 	var logIndex int64
+	for _, step := range task.Steps {
+		if firstStep == nil && (step.Status.HasRun() || step.Status.IsRunning()) {
+			firstStep = step
+		}
+		if step.Status.HasRun() {
+			lastHasRunStep = step
+		}
+		logIndex += step.LogLength
+	}
 
 	preStep := &actions_model.ActionTaskStep{
 		Name: preStepName,
@@ -28,32 +52,17 @@ func FullSteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
 		Status: actions_model.StatusRunning,
 	}
-	if firstStep.Status.HasRun() || firstStep.Status.IsRunning() {
+	// No step has run or is running, so preStep is equal to the task
+	if firstStep == nil {
+		preStep.Stopped = task.Stopped
+		preStep.Status = task.Status
+	} else {
 		preStep.LogLength = firstStep.LogIndex
 		preStep.Stopped = firstStep.Started
 		preStep.Status = actions_model.StatusSuccess
-	} else if task.Status.IsDone() {
-		preStep.Stopped = task.Stopped
-		preStep.Status = actions_model.StatusFailure
-		if task.Status.IsSkipped() {
-			preStep.Status = actions_model.StatusSkipped
-		}
 	}
 	logIndex += preStep.LogLength
-
-	// lastHasRunStep is the last step that has run.
-	// For example,
-	// 1. preStep(Success) -> step1(Success) -> step2(Running) -> step3(Waiting) -> postStep(Waiting): lastHasRunStep is step1.
-	// 2. preStep(Success) -> step1(Success) -> step2(Success) -> step3(Success) -> postStep(Success): lastHasRunStep is step3.
-	// 3. preStep(Success) -> step1(Success) -> step2(Failure) -> step3 -> postStep(Waiting): lastHasRunStep is step2.
-	// So its Stopped is the Started of postStep when there are no more steps to run.
-	var lastHasRunStep *actions_model.ActionTaskStep
-	for _, step := range task.Steps {
-		if step.Status.HasRun() {
-			lastHasRunStep = step
-		}
-		logIndex += step.LogLength
-	}
 	if lastHasRunStep == nil {
 		lastHasRunStep = preStep
 	}


@@ -137,6 +137,25 @@ func TestFullSteps(t *testing.T) {
 				{Name: postStepName, Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
 			},
 		},
+		{
+			name: "first step is skipped",
+			task: &actions_model.ActionTask{
+				Steps: []*actions_model.ActionTaskStep{
+					{Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
+					{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
+				},
+				Status:    actions_model.StatusSuccess,
+				Started:   10000,
+				Stopped:   10100,
+				LogLength: 100,
+			},
+			want: []*actions_model.ActionTaskStep{
+				{Name: preStepName, Status: actions_model.StatusSuccess, LogIndex: 0, LogLength: 10, Started: 10000, Stopped: 10010},
+				{Status: actions_model.StatusSkipped, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
+				{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
+				{Name: postStepName, Status: actions_model.StatusSuccess, LogIndex: 90, LogLength: 10, Started: 10090, Stopped: 10100},
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {


@@ -288,6 +288,8 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
 		searchRequest.AddFacet("languages", bleve.NewFacetRequest("Language", 10))
 	}
 
+	searchRequest.SortBy([]string{"-_score", "UpdatedAt"})
+
 	result, err := b.inner.Indexer.SearchInContext(ctx, searchRequest)
 	if err != nil {
 		return 0, nil, nil, err

@@ -318,7 +318,8 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
 			NumOfFragments(0). // return all highlighting content on fragments
 			HighlighterType("fvh"),
 		).
-		Sort("repo_id", true).
+		Sort("_score", false).
+		Sort("updated_at", true).
 		From(start).Size(pageSize).
 		Do(ctx)
 	if err != nil {
@@ -349,7 +350,8 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
 			NumOfFragments(0). // return all highlighting content on fragments
 			HighlighterType("fvh"),
 		).
-		Sort("repo_id", true).
+		Sort("_score", false).
+		Sort("updated_at", true).
 		From(start).Size(pageSize).
 		Do(ctx)
 	if err != nil {

release-notes/5416.md (new file)

@@ -0,0 +1,3 @@
+feat: [commit](https://codeberg.org/forgejo/forgejo/commit/8178d6eaba64d05799fd3b62fa889bd13bee07c7) Code search results when using the bleve indexer are sorted by relevance.
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/b496317b5a2aea970bc94ccf6fcde35cd417ec20) After migrating a repository that contains merged pull requests, the branch is missing and cannot be deleted.
+fix: [commit](https://codeberg.org/forgejo/forgejo/commit/a226064711899da07d6b1455a68ef758f2f3e7e0) Forgejo Actions artifact v4 upload above 8MB.


@@ -123,6 +123,54 @@ func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chun
 	return chunksMap, nil
 }
 
+func listChunksByRunIDV4(st storage.ObjectStorage, runID, artifactID int64, blist *BlockList) ([]*chunkFileItem, error) {
+	storageDir := fmt.Sprintf("tmpv4%d", runID)
+	var chunks []*chunkFileItem
+	chunkMap := map[string]*chunkFileItem{}
+	dummy := &chunkFileItem{}
+	for _, name := range blist.Latest {
+		chunkMap[name] = dummy
+	}
+	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+		baseName := filepath.Base(fpath)
+		if !strings.HasPrefix(baseName, "block-") {
+			return nil
+		}
+		// chunks read from storage only contain the storage dir and basename,
+		// regardless of the subdirectory setting in the storage config
+		item := chunkFileItem{Path: storageDir + "/" + baseName, ArtifactID: artifactID}
+		var size int64
+		var b64chunkName string
+		if _, err := fmt.Sscanf(baseName, "block-%d-%d-%s", &item.RunID, &size, &b64chunkName); err != nil {
+			return fmt.Errorf("parse content range error: %v", err)
+		}
+		rchunkName, err := base64.URLEncoding.DecodeString(b64chunkName)
+		if err != nil {
+			return fmt.Errorf("failed to parse chunkName: %v", err)
+		}
+		chunkName := string(rchunkName)
+		item.End = item.Start + size - 1
+		if _, ok := chunkMap[chunkName]; ok {
+			chunkMap[chunkName] = &item
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	for i, name := range blist.Latest {
+		chunk, ok := chunkMap[name]
+		if !ok || chunk.Path == "" {
+			return nil, fmt.Errorf("missing Chunk (%d/%d): %s", i, len(blist.Latest), name)
+		}
+		chunks = append(chunks, chunk)
+		if i > 0 {
+			chunk.Start = chunkMap[blist.Latest[i-1]].End + 1
+			chunk.End += chunk.Start
+		}
+	}
+	return chunks, nil
+}
+
 func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error {
 	// read all db artifacts by name
 	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
@@ -230,7 +278,7 @@ func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st st
 		rawChecksum := hash.Sum(nil)
 		actualChecksum := hex.EncodeToString(rawChecksum)
 		if !strings.HasSuffix(checksum, actualChecksum) {
-			return fmt.Errorf("update artifact error checksum is invalid")
+			return fmt.Errorf("update artifact error checksum is invalid %v vs %v", checksum, actualChecksum)
 		}
 	}
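
The v4 blocks land in storage under names built from the run ID, the chunk size, and the base64url-encoded client-supplied block ID; encoding the block ID is what keeps a hostile value such as /../myfile from escaping the tmpv4 directory. A small round-trip sketch of that naming scheme:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	runID, size, blockid := int64(792), int64(1024), "/../myfile"

	// encode: an attacker-controlled blockid becomes a safe path segment
	name := fmt.Sprintf("block-%d-%d-%s", runID, size,
		base64.URLEncoding.EncodeToString([]byte(blockid)))
	fmt.Println(name) // block-792-1024-Ly4uL215ZmlsZQ==

	// decode: Sscanf splits the fixed fields, base64 restores the block ID
	var gotRun, gotSize int64
	var b64 string
	if _, err := fmt.Sscanf(name, "block-%d-%d-%s", &gotRun, &gotSize, &b64); err != nil {
		panic(err)
	}
	raw, err := base64.URLEncoding.DecodeString(b64)
	if err != nil {
		panic(err)
	}
	fmt.Println(gotRun, gotSize, string(raw)) // 792 1024 /../myfile
}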


@@ -24,8 +24,15 @@ package actions
 // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
 // 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
 // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
-// 1.4. Unknown xml payload to Blobstorage (unauthenticated request), ignored for now
+// 1.4. BlockList xml payload to Blobstorage (unauthenticated request)
+//      Files of about 800MB are uploaded in parallel and/or out of order; this file is needed to ensure the correct order
 // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
+// Request
+// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+// <BlockList>
+//   <Latest>blockId1</Latest>
+//   <Latest>blockId2</Latest>
+// </BlockList>
 // 1.5. FinalizeArtifact
 // Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
 // Request
@@ -82,6 +89,7 @@ import (
 	"crypto/hmac"
 	"crypto/sha256"
 	"encoding/base64"
+	"encoding/xml"
 	"fmt"
 	"io"
 	"net/http"
@@ -153,31 +161,34 @@ func ArtifactsV4Routes(prefix string) *web.Route {
 	return m
 }
 
-func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID int64) []byte {
+func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID, artifactID int64) []byte {
 	mac := hmac.New(sha256.New, setting.GetGeneralTokenSigningSecret())
 	mac.Write([]byte(endp))
 	mac.Write([]byte(expires))
 	mac.Write([]byte(artifactName))
 	mac.Write([]byte(fmt.Sprint(taskID)))
+	mac.Write([]byte(fmt.Sprint(artifactID)))
 	return mac.Sum(nil)
 }
 
-func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID int64) string {
+func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID, artifactID int64) string {
 	expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST")
 	uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(r.prefix, "/") +
-		"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID)
+		"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID, artifactID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID) + "&artifactID=" + fmt.Sprint(artifactID)
 	return uploadURL
 }
 
 func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (*actions.ActionTask, string, bool) {
 	rawTaskID := ctx.Req.URL.Query().Get("taskID")
+	rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
 	sig := ctx.Req.URL.Query().Get("sig")
 	expires := ctx.Req.URL.Query().Get("expires")
 	artifactName := ctx.Req.URL.Query().Get("artifactName")
 	dsig, _ := base64.URLEncoding.DecodeString(sig)
 	taskID, _ := strconv.ParseInt(rawTaskID, 10, 64)
+	artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
 
-	expecedsig := r.buildSignature(endp, expires, artifactName, taskID)
+	expecedsig := r.buildSignature(endp, expires, artifactName, taskID, artifactID)
 	if !hmac.Equal(dsig, expecedsig) {
 		log.Error("Error unauthorized")
 		ctx.Error(http.StatusUnauthorized, "Error unauthorized")
@@ -272,6 +283,8 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
 		return
 	}
 	artifact.ContentEncoding = ArtifactV4ContentEncoding
+	artifact.FileSize = 0
+	artifact.FileCompressedSize = 0
 	if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
 		log.Error("Error UpdateArtifactByID: %v", err)
 		ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
@@ -280,7 +293,7 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
 	respData := CreateArtifactResponse{
 		Ok:              true,
-		SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID),
+		SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID),
 	}
 	r.sendProtbufBody(ctx, &respData)
 }
@@ -306,38 +319,77 @@ func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
 	comp := ctx.Req.URL.Query().Get("comp")
 	switch comp {
 	case "block", "appendBlock":
-		// get artifact by name
-		artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
-		if err != nil {
-			log.Error("Error artifact not found: %v", err)
-			ctx.Error(http.StatusNotFound, "Error artifact not found")
-			return
-		}
-
-		if comp == "block" {
-			artifact.FileSize = 0
-			artifact.FileCompressedSize = 0
-		}
-
-		_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
-		if err != nil {
-			log.Error("Error runner api getting task: task is not running")
-			ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
-			return
-		}
-		artifact.FileCompressedSize += ctx.Req.ContentLength
-		artifact.FileSize += ctx.Req.ContentLength
-		if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
-			log.Error("Error UpdateArtifactByID: %v", err)
-			ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
-			return
-		}
-		ctx.JSON(http.StatusCreated, "appended")
+		blockid := ctx.Req.URL.Query().Get("blockid")
+		if blockid == "" {
+			// get artifact by name
+			artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
+			if err != nil {
+				log.Error("Error artifact not found: %v", err)
+				ctx.Error(http.StatusNotFound, "Error artifact not found")
+				return
+			}
+
+			_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
+			if err != nil {
+				log.Error("Error runner api getting task: task is not running")
+				ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+				return
+			}
+			artifact.FileCompressedSize += ctx.Req.ContentLength
+			artifact.FileSize += ctx.Req.ContentLength
+			if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
+				log.Error("Error UpdateArtifactByID: %v", err)
+				ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+				return
+			}
+		} else {
+			_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, base64.URLEncoding.EncodeToString([]byte(blockid))), ctx.Req.Body, -1)
+			if err != nil {
+				log.Error("Error runner api getting task: task is not running")
+				ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+				return
+			}
+		}
+		ctx.JSON(http.StatusCreated, "appended")
 	case "blocklist":
+		rawArtifactID := ctx.Req.URL.Query().Get("artifactID")
+		artifactID, _ := strconv.ParseInt(rawArtifactID, 10, 64)
+		_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/%d-%d-blocklist", task.Job.RunID, task.Job.RunID, artifactID), ctx.Req.Body, -1)
+		if err != nil {
+			log.Error("Error runner api getting task: task is not running")
+			ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+			return
+		}
 		ctx.JSON(http.StatusCreated, "created")
 	}
 }
 
+type BlockList struct {
+	Latest []string `xml:"Latest"`
+}
+
+type Latest struct {
+	Value string `xml:",chardata"`
+}
+
+func (r *artifactV4Routes) readBlockList(runID, artifactID int64) (*BlockList, error) {
+	blockListName := fmt.Sprintf("tmpv4%d/%d-%d-blocklist", runID, runID, artifactID)
+	s, err := r.fs.Open(blockListName)
+	if err != nil {
+		return nil, err
+	}
+
+	xdec := xml.NewDecoder(s)
+	blockList := &BlockList{}
+	err = xdec.Decode(blockList)
+
+	delerr := r.fs.Delete(blockListName)
+	if delerr != nil {
+		log.Warn("Failed to delete blockList %s: %v", blockListName, delerr)
+	}
+	return blockList, err
+}
+
 func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
 	var req FinalizeArtifactRequest
@@ -356,18 +408,34 @@ func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
 		ctx.Error(http.StatusNotFound, "Error artifact not found")
 		return
 	}
 
-	chunkMap, err := listChunksByRunID(r.fs, runID)
-	if err != nil {
-		log.Error("Error merge chunks: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error merge chunks")
-		return
-	}
-	chunks, ok := chunkMap[artifact.ID]
-	if !ok {
-		log.Error("Error merge chunks")
-		ctx.Error(http.StatusInternalServerError, "Error merge chunks")
-		return
+	var chunks []*chunkFileItem
+	blockList, err := r.readBlockList(runID, artifact.ID)
+	if err != nil {
+		log.Warn("Failed to read BlockList, fallback to old behavior: %v", err)
+		chunkMap, err := listChunksByRunID(r.fs, runID)
+		if err != nil {
+			log.Error("Error merge chunks: %v", err)
+			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			return
+		}
+		var ok bool // chunks is declared in the outer scope, so assign rather than redeclare
+		chunks, ok = chunkMap[artifact.ID]
+		if !ok {
+			log.Error("Error merge chunks")
+			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			return
+		}
+	} else {
+		chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList)
+		if err != nil {
+			log.Error("Error merge chunks: %v", err)
+			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			return
+		}
+		artifact.FileSize = chunks[len(chunks)-1].End + 1
+		artifact.FileCompressedSize = chunks[len(chunks)-1].End + 1
 	}
+
 	checksum := ""
 	if req.Hash != nil {
 		checksum = req.Hash.Value
@@ -468,7 +536,7 @@ func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
 		}
 	}
 	if respData.SignedUrl == "" {
-		respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID)
+		respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, ctx.ActionTask.ID, artifact.ID)
 	}
 	r.sendProtbufBody(ctx, &respData)
 }
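
All upload and download URLs handed to the runner are self-authenticating: the server HMACs the endpoint name, expiry, artifact name, task ID, and now the artifact ID with its signing secret, then recomputes and compares the MAC on every request. A reduced sketch of the scheme, with a stand-in secret instead of setting.GetGeneralTokenSigningSecret:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// secret stands in for the server's token signing secret.
func secret() []byte { return []byte("server-signing-secret") }

func sign(endp, expires, artifactName string, taskID, artifactID int64) []byte {
	mac := hmac.New(sha256.New, secret())
	mac.Write([]byte(endp))
	mac.Write([]byte(expires))
	mac.Write([]byte(artifactName))
	mac.Write([]byte(fmt.Sprint(taskID)))
	mac.Write([]byte(fmt.Sprint(artifactID))) // binds the URL to one artifact
	return mac.Sum(nil)
}

func main() {
	sig := sign("UploadArtifact", "2024-01-23 21:48:37", "test", 75, 42)
	fmt.Println("sig:", base64.URLEncoding.EncodeToString(sig))

	// verification recomputes the MAC from the query parameters and
	// compares in constant time; any tampered parameter changes the MAC
	ok := hmac.Equal(sig, sign("UploadArtifact", "2024-01-23 21:48:37", "test", 75, 42))
	tampered := hmac.Equal(sig, sign("UploadArtifact", "2024-01-23 21:48:37", "test", 75, 43))
	fmt.Println(ok, tampered) // true false
}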


@@ -476,6 +476,7 @@ func issues(ctx *context.Context, milestoneID, projectID int64, isPullOption opt
 	ctx.Data["PosterID"] = posterID
 	ctx.Data["IsFuzzy"] = isFuzzy
 	ctx.Data["Keyword"] = keyword
+	ctx.Data["IsShowClosed"] = isShowClosed
 	switch {
 	case isShowClosed.Value():
 		ctx.Data["State"] = "closed"


@@ -430,13 +430,12 @@ func DeleteBranch(ctx context.Context, doer *user_model.User, repo *repo_model.R
 	}
 
 	rawBranch, err := git_model.GetBranch(ctx, repo.ID, branchName)
-	if err != nil {
+	if err != nil && !git_model.IsErrBranchNotExist(err) {
 		return fmt.Errorf("GetBranch: %v", err)
 	}
 
-	if rawBranch.IsDeleted {
-		return nil
-	}
+	// the database branch record does not exist or it's a deleted branch
+	notExist := git_model.IsErrBranchNotExist(err) || rawBranch.IsDeleted
 
 	commit, err := gitRepo.GetBranchCommit(branchName)
 	if err != nil {
@@ -444,8 +443,10 @@ func DeleteBranch(ctx context.Context, doer *user_model.User, repo *repo_model.R
 	}
 
 	if err := db.WithTx(ctx, func(ctx context.Context) error {
-		if err := git_model.AddDeletedBranch(ctx, repo.ID, branchName, doer.ID); err != nil {
-			return err
+		if !notExist {
+			if err := git_model.AddDeletedBranch(ctx, repo.ID, branchName, doer.ID); err != nil {
+				return err
+			}
 		}
 
 		return gitRepo.DeleteBranch(branchName, git.DeleteBranchOptions{


@@ -12,6 +12,7 @@ import (
 	"net/url"
 	"strconv"
 	"strings"
+	"unicode/utf8"
 
 	webhook_model "code.gitea.io/gitea/models/webhook"
 	"code.gitea.io/gitea/modules/git"
@@ -179,8 +180,14 @@ func (d discordConvertor) Push(p *api.PushPayload) (DiscordPayload, error) {
 	var text string
 	// for each commit, generate attachment text
 	for i, commit := range p.Commits {
-		text += fmt.Sprintf("[%s](%s) %s - %s", commit.ID[:7], commit.URL,
-			strings.TrimRight(commit.Message, "\r\n"), commit.Author.Name)
+		// limit the commit message display to just the summary, otherwise it would be hard to read
+		message := strings.TrimRight(strings.SplitN(commit.Message, "\n", 2)[0], "\r")
+		// a limit of 50 is set because GitHub does the same
+		if utf8.RuneCountInString(message) > 50 {
+			message = fmt.Sprintf("%.47s...", message)
+		}
+		text += fmt.Sprintf("[%s](%s) %s - %s", commit.ID[:7], commit.URL, message, commit.Author.Name)
 		// add linebreak to each commit but the last
 		if i < len(p.Commits)-1 {
 			text += "\n"
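
A detail that makes this truncation safe for the test strings below: for %s, fmt measures precision in runes rather than bytes, so %.47s never cuts a multi-byte character such as 你 or ⚠ in half, and utf8.RuneCountInString counts the same way. A quick demonstration:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	message := "This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好 and more text"
	fmt.Println(len(message), utf8.RuneCountInString(message)) // byte count vs rune count

	// fmt's precision for %s counts runes, so multi-byte characters
	// stay intact when the message is shortened to 47 runes plus "..."
	if utf8.RuneCountInString(message) > 50 {
		message = fmt.Sprintf("%.47s...", message)
	}
	fmt.Println(message)
}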


@@ -80,6 +80,20 @@ func TestDiscordPayload(t *testing.T) {
 		assert.Equal(t, p.Sender.AvatarURL, pl.Embeds[0].Author.IconURL)
 	})
 
+	t.Run("PushWithLongCommitMessage", func(t *testing.T) {
+		p := pushTestMultilineCommitMessagePayload()
+		pl, err := dc.Push(p)
+		require.NoError(t, err)
+
+		assert.Len(t, pl.Embeds, 1)
+		assert.Equal(t, "[test/repo:test] 2 new commits", pl.Embeds[0].Title)
+		assert.Equal(t, "[2020558](http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778) This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好... - user1\n[2020558](http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778) This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好... - user1", pl.Embeds[0].Description)
+		assert.Equal(t, p.Sender.UserName, pl.Embeds[0].Author.Name)
+		assert.Equal(t, setting.AppURL+p.Sender.UserName, pl.Embeds[0].Author.URL)
+		assert.Equal(t, p.Sender.AvatarURL, pl.Embeds[0].Author.IconURL)
+	})
+
 	t.Run("Issue", func(t *testing.T) {
 		p := issueTestPayload()


@@ -64,9 +64,17 @@ func forkTestPayload() *api.ForkPayload {
 }
 
 func pushTestPayload() *api.PushPayload {
+	return pushTestPayloadWithCommitMessage("commit message")
+}
+
+func pushTestMultilineCommitMessagePayload() *api.PushPayload {
+	return pushTestPayloadWithCommitMessage("This is a commit summary ⚠️⚠️⚠️⚠️ containing 你好 ⚠️⚠️️\n\nThis is the message body.")
+}
+
+func pushTestPayloadWithCommitMessage(message string) *api.PushPayload {
 	commit := &api.PayloadCommit{
 		ID:      "2020558fe2e34debb818a514715839cabd25e778",
-		Message: "commit message",
+		Message: message,
 		URL:     "http://localhost:3000/test/repo/commit/2020558fe2e34debb818a514715839cabd25e778",
 		Author: &api.PayloadUser{
 			Name: "user1",


@@ -1,9 +1,9 @@
 <div class="ui secondary filter menu">
 	{{if not .Repository.IsArchived}}
 		<!-- Action Button -->
-		{{if .IsShowClosed}}
+		{{if and .IsShowClosed.Has .IsShowClosed.Value}}
 			<button class="ui primary basic button issue-action" data-action="open" data-url="{{$.RepoLink}}/issues/status">{{ctx.Locale.Tr "repo.issues.action_open"}}</button>
-		{{else}}
+		{{else if and .IsShowClosed.Has (not .IsShowClosed.Value)}}
 			<button class="ui red basic button issue-action" data-action="close" data-url="{{$.RepoLink}}/issues/status">{{ctx.Locale.Tr "repo.issues.action_close"}}</button>
 		{{end}}
 		{{if $.IsRepoAdmin}}


@@ -7,12 +7,14 @@ import (
 	"bytes"
 	"crypto/sha256"
 	"encoding/hex"
+	"encoding/xml"
 	"io"
 	"net/http"
 	"strings"
 	"testing"
 	"time"
 
+	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/routers/api/actions"
 	actions_service "code.gitea.io/gitea/services/actions"
 	"code.gitea.io/gitea/tests"
@@ -175,6 +177,134 @@ func TestActionsArtifactV4UploadSingleFileWithRetentionDays(t *testing.T) {
 	assert.True(t, finalizeResp.Ok)
 }
 
+func TestActionsArtifactV4UploadSingleFileWithPotentialHarmfulBlockID(t *testing.T) {
+	defer tests.PrepareTestEnv(t)()
+
+	token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
+	require.NoError(t, err)
+
+	// acquire artifact upload url
+	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
+		Version:                 4,
+		Name:                    "artifactWithPotentialHarmfulBlockID",
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).AddTokenAuth(token)
+	resp := MakeRequest(t, req, http.StatusOK)
+	var uploadResp actions.CreateArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &uploadResp)
+	assert.True(t, uploadResp.Ok)
+	assert.Contains(t, uploadResp.SignedUploadUrl, "/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact")
+
+	// get upload urls
+	idx := strings.Index(uploadResp.SignedUploadUrl, "/twirp/")
+	url := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=%2f..%2fmyfile"
+	blockListURL := uploadResp.SignedUploadUrl[idx:] + "&comp=blocklist"
+
+	// upload artifact chunk
+	body := strings.Repeat("A", 1024)
+	req = NewRequestWithBody(t, "PUT", url, strings.NewReader(body))
+	MakeRequest(t, req, http.StatusCreated)
+
+	// verify that the exploit didn't work
+	_, err = storage.Actions.Stat("myfile")
+	require.Error(t, err)
+
+	// upload artifact blockList
+	blockList := &actions.BlockList{
+		Latest: []string{
+			"/../myfile",
+		},
+	}
+	rawBlockList, err := xml.Marshal(blockList)
+	require.NoError(t, err)
+	req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
+	MakeRequest(t, req, http.StatusCreated)
+
+	t.Logf("Create artifact confirm")
+
+	sha := sha256.Sum256([]byte(body))
+
+	// confirm artifact upload
+	req = NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact", toProtoJSON(&actions.FinalizeArtifactRequest{
+		Name:                    "artifactWithPotentialHarmfulBlockID",
+		Size:                    1024,
+		Hash:                    wrapperspb.String("sha256:" + hex.EncodeToString(sha[:])),
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).
+		AddTokenAuth(token)
+	resp = MakeRequest(t, req, http.StatusOK)
+	var finalizeResp actions.FinalizeArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &finalizeResp)
+	assert.True(t, finalizeResp.Ok)
+}
+
+func TestActionsArtifactV4UploadSingleFileWithChunksOutOfOrder(t *testing.T) {
+	defer tests.PrepareTestEnv(t)()
+
+	token, err := actions_service.CreateAuthorizationToken(48, 792, 193)
+	require.NoError(t, err)
+
+	// acquire artifact upload url
+	req := NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact", toProtoJSON(&actions.CreateArtifactRequest{
+		Version:                 4,
+		Name:                    "artifactWithChunksOutOfOrder",
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).AddTokenAuth(token)
+	resp := MakeRequest(t, req, http.StatusOK)
+	var uploadResp actions.CreateArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &uploadResp)
+	assert.True(t, uploadResp.Ok)
+	assert.Contains(t, uploadResp.SignedUploadUrl, "/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact")
+
+	// get upload urls
+	idx := strings.Index(uploadResp.SignedUploadUrl, "/twirp/")
+	block1URL := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=block1"
+	block2URL := uploadResp.SignedUploadUrl[idx:] + "&comp=block&blockid=block2"
+	blockListURL := uploadResp.SignedUploadUrl[idx:] + "&comp=blocklist"
+
+	// upload artifact chunks
+	bodyb := strings.Repeat("B", 1024)
+	req = NewRequestWithBody(t, "PUT", block2URL, strings.NewReader(bodyb))
+	MakeRequest(t, req, http.StatusCreated)
+
+	bodya := strings.Repeat("A", 1024)
+	req = NewRequestWithBody(t, "PUT", block1URL, strings.NewReader(bodya))
+	MakeRequest(t, req, http.StatusCreated)
+
+	// upload artifact blockList
+	blockList := &actions.BlockList{
+		Latest: []string{
+			"block1",
+			"block2",
+		},
+	}
+	rawBlockList, err := xml.Marshal(blockList)
+	require.NoError(t, err)
+	req = NewRequestWithBody(t, "PUT", blockListURL, bytes.NewReader(rawBlockList))
+	MakeRequest(t, req, http.StatusCreated)
+
+	t.Logf("Create artifact confirm")
+
+	sha := sha256.Sum256([]byte(bodya + bodyb))
+
+	// confirm artifact upload
+	req = NewRequestWithBody(t, "POST", "/twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact", toProtoJSON(&actions.FinalizeArtifactRequest{
+		Name:                    "artifactWithChunksOutOfOrder",
+		Size:                    2048,
+		Hash:                    wrapperspb.String("sha256:" + hex.EncodeToString(sha[:])),
+		WorkflowRunBackendId:    "792",
+		WorkflowJobRunBackendId: "193",
+	})).
+		AddTokenAuth(token)
+	resp = MakeRequest(t, req, http.StatusOK)
+	var finalizeResp actions.FinalizeArtifactResponse
+	protojson.Unmarshal(resp.Body.Bytes(), &finalizeResp)
+	assert.True(t, finalizeResp.Ok)
+}
+
 func TestActionsArtifactV4DownloadSingle(t *testing.T) {
 	defer tests.PrepareTestEnv(t)()
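
The blockList payload these tests marshal is the same XML the server's readBlockList decodes into the new BlockList struct. A round-trip sketch of that payload:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// BlockList mirrors the struct added in this PR.
type BlockList struct {
	Latest []string `xml:"Latest"`
}

func main() {
	payload := `<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<BlockList>
  <Latest>block1</Latest>
  <Latest>block2</Latest>
</BlockList>`

	var bl BlockList
	if err := xml.NewDecoder(strings.NewReader(payload)).Decode(&bl); err != nil {
		panic(err)
	}
	fmt.Println(bl.Latest) // [block1 block2], the order in which blocks are merged

	out, _ := xml.Marshal(&BlockList{Latest: []string{"block1", "block2"}})
	fmt.Println(string(out)) // <BlockList><Latest>block1</Latest><Latest>block2</Latest></BlockList>
}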


@@ -78,7 +78,6 @@ const sfc = {
 	searchURL() {
 		return `${this.subUrl}/repo/search?sort=updated&order=desc&uid=${this.uid}&team_id=${this.teamId}&q=${this.searchQuery
 		}&page=${this.page}&limit=${this.searchLimit}&mode=${this.repoTypes[this.reposFilter].searchMode
-		}${this.reposFilter !== 'all' ? '&exclusive=1' : ''
 		}${this.archivedFilter === 'archived' ? '&archived=true' : ''}${this.archivedFilter === 'unarchived' ? '&archived=false' : ''
 		}${this.privateFilter === 'private' ? '&is_private=true' : ''}${this.privateFilter === 'public' ? '&is_private=false' : ''
 		}`;