// Copyright 2018 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package ui

import (
	"context"

	activities_model "code.gitea.io/gitea/models/activities"
	"code.gitea.io/gitea/models/db"
	issues_model "code.gitea.io/gitea/models/issues"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/notification/base"
	"code.gitea.io/gitea/modules/queue"
)

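// notificationService is the UI notifier: every event it handles is reduced
// to an issueNotificationOpts value and pushed onto a worker pool queue,
// whose handler persists the corresponding issue notification.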
type (
	notificationService struct {
		base.NullNotifier
		issueQueue *queue.WorkerPoolQueue[issueNotificationOpts]
	}

	issueNotificationOpts struct {
		IssueID              int64
		CommentID            int64
		NotificationAuthorID int64
		ReceiverID           int64 // 0 -- ALL Watcher
	}
)

var _ base.Notifier = &notificationService{}

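// The notifier is expected to be registered with the notification dispatcher
// during startup. Illustrative sketch of such a call site (an assumption, not
// code from this file):
//
//	notification.RegisterNotifier(ui.NewNotifier())
//
// where notification.RegisterNotifier is assumed to be the registration hook
// in code.gitea.io/gitea/modules/notification.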
// NewNotifier creates a new notificationService notifier.
func NewNotifier() base.Notifier {
	ns := &notificationService{}
	ns.issueQueue = queue.CreateSimpleQueue("notification-service", handler)
	return ns
}

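// handler is the queue handler: it creates or updates an issue notification
// for each queued item, logging failures without re-queueing them.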
func handler(items ...issueNotificationOpts) []issueNotificationOpts {
	for _, opts := range items {
		if err := activities_model.CreateOrUpdateIssueNotifications(opts.IssueID, opts.CommentID, opts.NotificationAuthorID, opts.ReceiverID); err != nil {
			log.Error("Was unable to create issue notification: %v", err)
		}
	}
	return nil
}

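// Run starts the notification queue under the graceful manager so that it
// shuts down cleanly with the rest of the process.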
func (ns *notificationService) Run() {
	go graceful.GetManager().RunWithShutdownFns(ns.issueQueue.Run)
}

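// NotifyCreateIssueComment queues one notification for all watchers of the
// issue (ReceiverID 0) and one targeted notification per mentioned user.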
func (ns *notificationService) NotifyCreateIssueComment(ctx context.Context, doer *user_model.User, repo *repo_model.Repository,
	issue *issues_model.Issue, comment *issues_model.Comment, mentions []*user_model.User,
) {
	opts := issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: doer.ID,
	}
	if comment != nil {
		opts.CommentID = comment.ID
	}
	_ = ns.issueQueue.Push(opts)
	for _, mention := range mentions {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           mention.ID,
		}
		if comment != nil {
			opts.CommentID = comment.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}

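// NotifyNewIssue queues a notification for the new issue's watchers plus a
// targeted notification for each mentioned user, attributed to the poster.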
func (ns *notificationService) NotifyNewIssue(ctx context.Context, issue *issues_model.Issue, mentions []*user_model.User) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: issue.Poster.ID,
	})
	for _, mention := range mentions {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: issue.Poster.ID,
			ReceiverID:           mention.ID,
		})
	}
}

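// NotifyIssueChangeStatus queues a notification for the issue's watchers when
// it is closed or reopened, referencing the status-change comment.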
func (ns *notificationService) NotifyIssueChangeStatus(ctx context.Context, doer *user_model.User, commitID string, issue *issues_model.Issue, actionComment *issues_model.Comment, isClosed bool) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: doer.ID,
		CommentID:            actionComment.ID,
	})
}

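// NotifyIssueChangeTitle queues a notification only when a pull request title
// change removes the work-in-progress prefix.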
func (ns *notificationService) NotifyIssueChangeTitle(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, oldTitle string) {
	if err := issue.LoadPullRequest(ctx); err != nil {
		log.Error("issue.LoadPullRequest: %v", err)
		return
	}
	if issue.IsPull && issues_model.HasWorkInProgressPrefix(oldTitle) && !issue.PullRequest.IsWorkInProgress() {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
		})
	}
}

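// NotifyMergePullRequest queues a notification for the pull request's watchers
// when it is merged.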
func (ns *notificationService) NotifyMergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              pr.Issue.ID,
		NotificationAuthorID: doer.ID,
	})
}

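// NotifyAutoMergePullRequest handles an automatic merge the same way as a
// manual merge.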
func (ns *notificationService) NotifyAutoMergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
	ns.NotifyMergePullRequest(ctx, doer, pr)
}

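// NotifyNewPullRequest queues a targeted notification for each repository
// watcher and issue participant (excluding the poster) and for each mentioned
// user.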
func (ns *notificationService) NotifyNewPullRequest(ctx context.Context, pr *issues_model.PullRequest, mentions []*user_model.User) {
	if err := pr.LoadIssue(ctx); err != nil {
		log.Error("Unable to load issue: %d for pr: %d: Error: %v", pr.IssueID, pr.ID, err)
		return
	}
	toNotify := make(container.Set[int64], 32)
	repoWatchers, err := repo_model.GetRepoWatchersIDs(ctx, pr.Issue.RepoID)
	if err != nil {
		log.Error("GetRepoWatchersIDs: %v", err)
		return
	}
	for _, id := range repoWatchers {
		toNotify.Add(id)
	}
	issueParticipants, err := issues_model.GetParticipantsIDsByIssueID(ctx, pr.IssueID)
	if err != nil {
		log.Error("GetParticipantsIDsByIssueID: %v", err)
		return
	}
	for _, id := range issueParticipants {
		toNotify.Add(id)
	}
	delete(toNotify, pr.Issue.PosterID)
	for _, mention := range mentions {
		toNotify.Add(mention.ID)
	}
	for receiverID := range toNotify {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: pr.Issue.PosterID,
			ReceiverID:           receiverID,
		})
	}
}

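// NotifyPullRequestReview queues a notification for the pull request's
// watchers and a targeted notification per mentioned user, attributed to the
// reviewer.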
func (ns *notificationService) NotifyPullRequestReview(ctx context.Context, pr *issues_model.PullRequest, r *issues_model.Review, c *issues_model.Comment, mentions []*user_model.User) {
	opts := issueNotificationOpts{
		IssueID:              pr.Issue.ID,
		NotificationAuthorID: r.Reviewer.ID,
	}
	if c != nil {
		opts.CommentID = c.ID
	}
	_ = ns.issueQueue.Push(opts)
	for _, mention := range mentions {
		opts := issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: r.Reviewer.ID,
			ReceiverID:           mention.ID,
		}
		if c != nil {
			opts.CommentID = c.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}

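// NotifyPullRequestCodeComment queues a targeted notification for each user
// mentioned in a code review comment.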
func (ns *notificationService) NotifyPullRequestCodeComment(ctx context.Context, pr *issues_model.PullRequest, c *issues_model.Comment, mentions []*user_model.User) {
	for _, mention := range mentions {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: c.Poster.ID,
			CommentID:            c.ID,
			ReceiverID:           mention.ID,
		})
	}
}

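// NotifyPullRequestPushCommits queues a notification for the pull request's
// watchers when new commits are pushed, referencing the push comment.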
func (ns *notificationService) NotifyPullRequestPushCommits(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest, comment *issues_model.Comment) {
	opts := issueNotificationOpts{
		IssueID:              pr.IssueID,
		NotificationAuthorID: doer.ID,
		CommentID:            comment.ID,
	}
	_ = ns.issueQueue.Push(opts)
}

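// NotifyPullReviewDismiss queues a notification for the review's issue when a
// review is dismissed, referencing the dismissal comment.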
func (ns *notificationService) NotifyPullReviewDismiss(ctx context.Context, doer *user_model.User, review *issues_model.Review, comment *issues_model.Comment) {
	opts := issueNotificationOpts{
		IssueID:              review.IssueID,
		NotificationAuthorID: doer.ID,
		CommentID:            comment.ID,
	}
	_ = ns.issueQueue.Push(opts)
}

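// NotifyIssueChangeAssignee queues a targeted notification for the new
// assignee, unless the assignee was removed or assigned themselves.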
func (ns *notificationService) NotifyIssueChangeAssignee(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, assignee *user_model.User, removed bool, comment *issues_model.Comment) {
	if !removed && doer.ID != assignee.ID {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           assignee.ID,
		}

		if comment != nil {
			opts.CommentID = comment.ID
		}

		_ = ns.issueQueue.Push(opts)
	}
}

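// NotifyPullReviewRequest queues a targeted notification for the requested
// reviewer; removing a review request does not notify.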
func (ns *notificationService) NotifyPullReviewRequest(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, reviewer *user_model.User, isRequest bool, comment *issues_model.Comment) {
	if isRequest {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           reviewer.ID,
		}

		if comment != nil {
			opts.CommentID = comment.ID
		}

		_ = ns.issueQueue.Push(opts)
	}
}

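// NotifyRepoPendingTransfer records a repository-transfer notification inside
// a database transaction; unlike the other events here, it bypasses the issue
// queue.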
func (ns *notificationService) NotifyRepoPendingTransfer(ctx context.Context, doer, newOwner *user_model.User, repo *repo_model.Repository) {
	err := db.WithTx(ctx, func(ctx context.Context) error {
		return activities_model.CreateRepoTransferNotification(ctx, doer, newOwner, repo)
	})
	if err != nil {
		log.Error("CreateRepoTransferNotification: %v", err)
	}
}