Add a new table issue_index to store the max issue index so that issues can be deleted without duplicated indexes (#15599)
* Add a new table issue_index to store the max issue index so that issues can be deleted without duplicated indexes
* Fix pull index
* Add tests for concurrently creating issues
* Fix lint
* Fix tests
* Fix postgres test
* Add test for migration v180
* Rename wrong test file name

Co-authored-by: 6543 <6543@obermui.de>
Co-authored-by: Lauris BH <lauris@nix.lv>
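For orientation before the diff: each repository now keeps a monotonically increasing counter in the new issue_index table, so an index is handed out exactly once and is never reused, even after the issue that held it is deleted. The snippet below is a minimal, self-contained sketch of that allocation pattern; it uses an in-memory map guarded by a mutex as a stand-in for the database upsert the commit actually performs, and the indexAllocator name is illustrative only, not part of the change.

package main

import (
	"fmt"
	"sync"
)

// indexAllocator is a hypothetical in-memory stand-in for the issue_index
// table: one monotonically increasing counter per group (repository).
type indexAllocator struct {
	mu       sync.Mutex
	maxIndex map[int64]int64
}

// next mirrors the role of GetNextResourceIndex: bump the per-group counter
// and return the new value. Because the counter never decreases, deleting an
// issue can never cause its index to be handed out again.
func (a *indexAllocator) next(groupID int64) int64 {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.maxIndex[groupID]++
	return a.maxIndex[groupID]
}

func main() {
	alloc := &indexAllocator{maxIndex: map[int64]int64{}}
	repoID := int64(1)

	fmt.Println(alloc.next(repoID)) // 1
	fmt.Println(alloc.next(repoID)) // 2
	// even if the issue with index 2 is deleted, the next issue still gets 3
	fmt.Println(alloc.next(repoID)) // 3
}

In the real change the counter lives in the database and the increment is a single atomic upsert per backend, so concurrent issue creation across processes stays safe; see models/index.go below.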
parent a005265718
commit 0393a57511

14 changed files with 354 additions and 82 deletions
@@ -152,7 +152,7 @@
 -
   id: 13
   repo_id: 50
-  index: 0
+  index: 1
   poster_id: 2
   name: issue in active repo
   content: we'll be testing github issue 13171 with this.
@@ -164,7 +164,7 @@
 -
   id: 14
   repo_id: 51
-  index: 0
+  index: 1
   poster_id: 2
   name: issue in archived repo
   content: we'll be testing github issue 13171 with this.

models/fixtures/issue_index.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+-
+  group_id: 1
+  max_index: 5
+-
+  group_id: 2
+  max_index: 2
+-
+  group_id: 3
+  max_index: 2
+-
+  group_id: 10
+  max_index: 1
+-
+  group_id: 48
+  max_index: 1
+-
+  group_id: 42
+  max_index: 1
+-
+  group_id: 50
+  max_index: 1
+-
+  group_id: 51
+  max_index: 1

models/index.go (new file, 113 lines)
@@ -0,0 +1,113 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package models
+
+import (
+	"errors"
+	"fmt"
+
+	"code.gitea.io/gitea/modules/setting"
+)
+
+// ResourceIndex represents a resource index which could be used as issue/release and others
+// We can create different tables i.e. issue_index, release_index and etc.
+type ResourceIndex struct {
+	GroupID  int64 `xorm:"unique"`
+	MaxIndex int64 `xorm:"index"`
+}
+
+// IssueIndex represents the issue index table
+type IssueIndex ResourceIndex
+
+// upsertResourceIndex will not return until it acquires the lock or receives an error.
+func upsertResourceIndex(e Engine, tableName string, groupID int64) (err error) {
+	// An atomic UPSERT operation (INSERT/UPDATE) is the only operation
+	// that ensures that the key is actually locked.
+	switch {
+	case setting.Database.UseSQLite3 || setting.Database.UsePostgreSQL:
+		_, err = e.Exec(fmt.Sprintf("INSERT INTO %s (group_id, max_index) "+
+			"VALUES (?,1) ON CONFLICT (group_id) DO UPDATE SET max_index = %s.max_index+1",
+			tableName, tableName), groupID)
+	case setting.Database.UseMySQL:
+		_, err = e.Exec(fmt.Sprintf("INSERT INTO %s (group_id, max_index) "+
+			"VALUES (?,1) ON DUPLICATE KEY UPDATE max_index = max_index+1", tableName),
+			groupID)
+	case setting.Database.UseMSSQL:
+		// https://weblogs.sqlteam.com/dang/2009/01/31/upsert-race-condition-with-merge/
+		_, err = e.Exec(fmt.Sprintf("MERGE %s WITH (HOLDLOCK) as target "+
+			"USING (SELECT ? AS group_id) AS src "+
+			"ON src.group_id = target.group_id "+
+			"WHEN MATCHED THEN UPDATE SET target.max_index = target.max_index+1 "+
+			"WHEN NOT MATCHED THEN INSERT (group_id, max_index) "+
+			"VALUES (src.group_id, 1);", tableName),
+			groupID)
+	default:
+		return fmt.Errorf("database type not supported")
+	}
+	return
+}
+
+var (
+	// ErrResouceOutdated represents an error when the requested resource is outdated
+	ErrResouceOutdated = errors.New("resource outdated")
+	// ErrGetResourceIndexFailed represents an error when getting a resource index still fails after all retries
+	ErrGetResourceIndexFailed = errors.New("get resource index failed")
+)
+
+const (
+	maxDupIndexAttempts = 3
+)
+
+// GetNextResourceIndex retries up to 3 times to generate a resource index
+func GetNextResourceIndex(tableName string, groupID int64) (int64, error) {
+	for i := 0; i < maxDupIndexAttempts; i++ {
+		idx, err := getNextResourceIndex(tableName, groupID)
+		if err == ErrResouceOutdated {
+			continue
+		}
+		if err != nil {
+			return 0, err
+		}
+		return idx, nil
+	}
+	return 0, ErrGetResourceIndexFailed
+}
+
+// deleteResouceIndex deletes a resource index
+func deleteResouceIndex(e Engine, tableName string, groupID int64) error {
+	_, err := e.Exec(fmt.Sprintf("DELETE FROM %s WHERE group_id=?", tableName), groupID)
+	return err
+}
+
+// getNextResourceIndex returns the next index
+func getNextResourceIndex(tableName string, groupID int64) (int64, error) {
+	sess := x.NewSession()
+	defer sess.Close()
+	if err := sess.Begin(); err != nil {
+		return 0, err
+	}
+	var preIdx int64
+	_, err := sess.SQL(fmt.Sprintf("SELECT max_index FROM %s WHERE group_id = ?", tableName), groupID).Get(&preIdx)
+	if err != nil {
+		return 0, err
+	}
+
+	if err := upsertResourceIndex(sess, tableName, groupID); err != nil {
+		return 0, err
+	}
+
+	var curIdx int64
+	has, err := sess.SQL(fmt.Sprintf("SELECT max_index FROM %s WHERE group_id = ? AND max_index=?", tableName), groupID, preIdx+1).Get(&curIdx)
+	if err != nil {
+		return 0, err
+	}
+	if !has {
+		return 0, ErrResouceOutdated
+	}
+	if err := sess.Commit(); err != nil {
+		return 0, err
+	}
+	return curIdx, nil
+}
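
Note on the pattern above: getNextResourceIndex first reads the current max_index, performs the atomic upsert, and then re-reads the row expecting exactly preIdx+1. If a concurrent transaction incremented the counter in between, that second read finds nothing, ErrResouceOutdated is returned, and GetNextResourceIndex retries (up to maxDupIndexAttempts) before giving up with ErrGetResourceIndexFailed.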
							
								
								
									
models/index_test.go (new file, 27 lines)
@@ -0,0 +1,27 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package models
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestResourceIndex(t *testing.T) {
+	assert.NoError(t, PrepareTestDatabase())
+
+	var wg sync.WaitGroup
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func(i int) {
+			testInsertIssue(t, fmt.Sprintf("issue %d", i+1), "my issue", 0)
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+}

@@ -80,7 +80,6 @@ var (
 const (
 	issueTasksRegexpStr     = `(^\s*[-*]\s\[[\sxX]\]\s.)|(\n\s*[-*]\s\[[\sxX]\]\s.)`
 	issueTasksDoneRegexpStr = `(^\s*[-*]\s\[[xX]\]\s.)|(\n\s*[-*]\s\[[xX]\]\s.)`
-	issueMaxDupIndexAttempts = 3
 )
 
 func init() {
@@ -896,21 +895,17 @@ func newIssue(e *xorm.Session, doer *User, opts NewIssueOptions) (err error) {
 		}
 	}
 
-	// Milestone validation should happen before insert actual object.
-	if _, err := e.SetExpr("`index`", "coalesce(MAX(`index`),0)+1").
-		Where("repo_id=?", opts.Issue.RepoID).
-		Insert(opts.Issue); err != nil {
-		return ErrNewIssueInsert{err}
+	if opts.Issue.Index <= 0 {
+		return fmt.Errorf("no issue index provided")
+	}
+	if opts.Issue.ID > 0 {
+		return fmt.Errorf("issue exist")
 	}
 
-	inserted, err := getIssueByID(e, opts.Issue.ID)
-	if err != nil {
+	if _, err := e.Insert(opts.Issue); err != nil {
 		return err
 	}
 
-	// Patch Index with the value calculated by the database
-	opts.Issue.Index = inserted.Index
-
 	if opts.Issue.MilestoneID > 0 {
 		if _, err = e.Exec("UPDATE `milestone` SET num_issues=num_issues+1 WHERE id=?", opts.Issue.MilestoneID); err != nil {
 			return err
@@ -987,24 +982,13 @@ func newIssue(e *xorm.Session, doer *User, opts NewIssueOptions) (err error) {
 
 // NewIssue creates new issue with labels for repository.
 func NewIssue(repo *Repository, issue *Issue, labelIDs []int64, uuids []string) (err error) {
-	// Retry several times in case INSERT fails due to duplicate key for (repo_id, index); see #7887
-	i := 0
-	for {
-		if err = newIssueAttempt(repo, issue, labelIDs, uuids); err == nil {
-			return nil
-		}
-		if !IsErrNewIssueInsert(err) {
-			return err
-		}
-		if i++; i == issueMaxDupIndexAttempts {
-			break
-		}
-		log.Error("NewIssue: error attempting to insert the new issue; will retry. Original error: %v", err)
-	}
-	return fmt.Errorf("NewIssue: too many errors attempting to insert the new issue. Last error was: %v", err)
+	idx, err := GetNextResourceIndex("issue_index", repo.ID)
+	if err != nil {
+		return fmt.Errorf("generate issue index failed: %v", err)
 	}
 
-func newIssueAttempt(repo *Repository, issue *Issue, labelIDs []int64, uuids []string) (err error) {
+	issue.Index = idx
+
 	sess := x.NewSession()
 	defer sess.Close()
 	if err = sess.Begin(); err != nil {

@@ -345,7 +345,9 @@ func TestGetRepoIDsForIssuesOptions(t *testing.T) {
 	}
 }
 
-func testInsertIssue(t *testing.T, title, content string) {
+func testInsertIssue(t *testing.T, title, content string, expectIndex int64) *Issue {
+	var newIssue Issue
+	t.Run(title, func(t *testing.T) {
 		repo := AssertExistsAndLoadBean(t, &Repository{ID: 1}).(*Repository)
 		user := AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
 
@@ -358,24 +360,30 @@ func testInsertIssue(t *testing.T, title, content string) {
 		err := NewIssue(repo, &issue, nil, nil)
 		assert.NoError(t, err)
 
-	var newIssue Issue
 		has, err := x.ID(issue.ID).Get(&newIssue)
 		assert.NoError(t, err)
 		assert.True(t, has)
 		assert.EqualValues(t, issue.Title, newIssue.Title)
 		assert.EqualValues(t, issue.Content, newIssue.Content)
-	// there are 5 issues and max index is 5 on repository 1, so this one should 6
-	assert.EqualValues(t, 6, newIssue.Index)
-
-	_, err = x.ID(issue.ID).Delete(new(Issue))
-	assert.NoError(t, err)
+		if expectIndex > 0 {
+			assert.EqualValues(t, expectIndex, newIssue.Index)
+		}
+	})
+	return &newIssue
 }
 
 func TestIssue_InsertIssue(t *testing.T) {
 	assert.NoError(t, PrepareTestDatabase())
 
-	testInsertIssue(t, "my issue1", "special issue's comments?")
-	testInsertIssue(t, `my issue2, this is my son's love \n \r \ `, "special issue's '' comments?")
+	// there are 5 issues and max index is 5 on repository 1, so this one should be 6
+	issue := testInsertIssue(t, "my issue1", "special issue's comments?", 6)
+	_, err := x.ID(issue.ID).Delete(new(Issue))
+	assert.NoError(t, err)
+
+	issue = testInsertIssue(t, `my issue2, this is my son's love \n \r \ `, "special issue's '' comments?", 7)
+	_, err = x.ID(issue.ID).Delete(new(Issue))
+	assert.NoError(t, err)
+
 }
 
 func TestIssue_ResolveMentions(t *testing.T) {

@@ -125,12 +125,27 @@ func TestXRef_ResolveCrossReferences(t *testing.T) {
 func testCreateIssue(t *testing.T, repo, doer int64, title, content string, ispull bool) *Issue {
 	r := AssertExistsAndLoadBean(t, &Repository{ID: repo}).(*Repository)
 	d := AssertExistsAndLoadBean(t, &User{ID: doer}).(*User)
-	i := &Issue{RepoID: r.ID, PosterID: d.ID, Poster: d, Title: title, Content: content, IsPull: ispull}
+
+	idx, err := GetNextResourceIndex("issue_index", r.ID)
+	assert.NoError(t, err)
+	i := &Issue{
+		RepoID:   r.ID,
+		PosterID: d.ID,
+		Poster:   d,
+		Title:    title,
+		Content:  content,
+		IsPull:   ispull,
+		Index:    idx,
+	}
 
 	sess := x.NewSession()
 	defer sess.Close()
+
 	assert.NoError(t, sess.Begin())
-	_, err := sess.SetExpr("`index`", "coalesce(MAX(`index`),0)+1").Where("repo_id=?", repo).Insert(i)
+	err = newIssue(sess, d, NewIssueOptions{
+		Repo:  r,
+		Issue: i,
+	})
 	assert.NoError(t, err)
 	i, err = getIssueByID(sess, i.ID)
 	assert.NoError(t, err)

@@ -313,6 +313,8 @@ var migrations = []Migration{
 	NewMigration("Delete credentials from past migrations", deleteMigrationCredentials),
 	// v181 -> v182
 	NewMigration("Always save primary email on email address table", addPrimaryEmail2EmailAddress),
+	// v182 -> v183
+	NewMigration("Add issue resource index table", addIssueResourceIndexTable),
 }
 
 // GetCurrentDBVersion returns the current db version
							
								
								
									
models/migrations/v182.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package migrations
+
+import (
+	"xorm.io/xorm"
+)
+
+func addIssueResourceIndexTable(x *xorm.Engine) error {
+	type ResourceIndex struct {
+		GroupID  int64 `xorm:"index unique(s)"`
+		MaxIndex int64 `xorm:"index unique(s)"`
+	}
+
+	sess := x.NewSession()
+	defer sess.Close()
+
+	if err := sess.Begin(); err != nil {
+		return err
+	}
+
+	if err := sess.Table("issue_index").Sync2(new(ResourceIndex)); err != nil {
+		return err
+	}
+
+	// Remove data we're going to rebuild
+	if _, err := sess.Table("issue_index").Where("1=1").Delete(&ResourceIndex{}); err != nil {
+		return err
+	}
+
+	// Create current data for all repositories with issues and PRs
+	if _, err := sess.Exec("INSERT INTO issue_index (group_id, max_index) " +
+		"SELECT max_data.repo_id, max_data.max_index " +
+		"FROM ( SELECT issue.repo_id AS repo_id, max(issue.`index`) AS max_index " +
+		"FROM issue GROUP BY issue.repo_id) AS max_data"); err != nil {
+		return err
+	}
+
+	return sess.Commit()
+}
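
The backfill above seeds issue_index from the data that already exists: for every repository that has issues or pull requests it inserts one row whose max_index is that repository's current MAX(issue.`index`), so index allocation continues from the highest value already in use.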
							
								
								
									
models/migrations/v182_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package migrations
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_addIssueResourceIndexTable(t *testing.T) {
+	// Create the models used in the migration
+	type Issue struct {
+		ID     int64 `xorm:"pk autoincr"`
+		RepoID int64 `xorm:"UNIQUE(s)"`
+		Index  int64 `xorm:"UNIQUE(s)"`
+	}
+
+	// Prepare and load the testing database
+	x, deferable := prepareTestEnv(t, 0, new(Issue))
+	if x == nil || t.Failed() {
+		defer deferable()
+		return
+	}
+	defer deferable()
+
+	// Run the migration
+	if err := addIssueResourceIndexTable(x); err != nil {
+		assert.NoError(t, err)
+		return
+	}
+
+	type ResourceIndex struct {
+		GroupID  int64 `xorm:"index unique(s)"`
+		MaxIndex int64 `xorm:"index unique(s)"`
+	}
+
+	var start = 0
+	const batchSize = 1000
+	for {
+		var indexes = make([]ResourceIndex, 0, batchSize)
+		err := x.Table("issue_index").Limit(batchSize, start).Find(&indexes)
+		assert.NoError(t, err)
+
+		for _, idx := range indexes {
+			var maxIndex int
+			has, err := x.SQL("SELECT max(`index`) FROM issue WHERE repo_id = ?", idx.GroupID).Get(&maxIndex)
+			assert.NoError(t, err)
+			assert.True(t, has)
+			assert.EqualValues(t, maxIndex, idx.MaxIndex)
+		}
+		if len(indexes) < batchSize {
+			break
+		}
+		start += len(indexes)
+	}
+}

@@ -134,6 +134,7 @@ func init() {
 		new(ProjectIssue),
 		new(Session),
 		new(RepoTransfer),
+		new(IssueIndex),
 	)
 
 	gonicNames := []string{"SSL", "UID"}
@@ -171,6 +172,10 @@ func GetNewEngine() (*xorm.Engine, error) {
 	return engine, nil
 }
 
+func syncTables() error {
+	return x.StoreEngine("InnoDB").Sync2(tables...)
+}
+
 // NewTestEngine sets a new test xorm.Engine
 func NewTestEngine() (err error) {
 	x, err = GetNewEngine()
@@ -181,7 +186,7 @@ func NewTestEngine() (err error) {
 	x.SetMapper(names.GonicMapper{})
 	x.SetLogger(NewXORMLogger(!setting.IsProd()))
 	x.ShowSQL(!setting.IsProd())
-	return x.StoreEngine("InnoDB").Sync2(tables...)
+	return syncTables()
 }
 
 // SetEngine sets the xorm.Engine
@@ -222,7 +227,7 @@ func NewEngine(ctx context.Context, migrateFunc func(*xorm.Engine) error) (err e
 		return fmt.Errorf("migrate: %v", err)
 	}
 
-	if err = x.StoreEngine("InnoDB").Sync2(tables...); err != nil {
+	if err = syncTables(); err != nil {
 		return fmt.Errorf("sync database struct error: %v", err)
 	}
 

@@ -427,34 +427,23 @@ func (pr *PullRequest) SetMerged() (bool, error) {
 }
 
 // NewPullRequest creates new pull request with labels for repository.
-func NewPullRequest(repo *Repository, pull *Issue, labelIDs []int64, uuids []string, pr *PullRequest) (err error) {
-	// Retry several times in case INSERT fails due to duplicate key for (repo_id, index); see #7887
-	i := 0
-	for {
-		if err = newPullRequestAttempt(repo, pull, labelIDs, uuids, pr); err == nil {
-			return nil
-		}
-		if !IsErrNewIssueInsert(err) {
-			return err
-		}
-		if i++; i == issueMaxDupIndexAttempts {
-			break
-		}
-		log.Error("NewPullRequest: error attempting to insert the new issue; will retry. Original error: %v", err)
-	}
-	return fmt.Errorf("NewPullRequest: too many errors attempting to insert the new issue. Last error was: %v", err)
+func NewPullRequest(repo *Repository, issue *Issue, labelIDs []int64, uuids []string, pr *PullRequest) (err error) {
+	idx, err := GetNextResourceIndex("issue_index", repo.ID)
+	if err != nil {
+		return fmt.Errorf("generate issue index failed: %v", err)
 	}
 
-func newPullRequestAttempt(repo *Repository, pull *Issue, labelIDs []int64, uuids []string, pr *PullRequest) (err error) {
+	issue.Index = idx
+
 	sess := x.NewSession()
 	defer sess.Close()
 	if err = sess.Begin(); err != nil {
 		return err
 	}
 
-	if err = newIssue(sess, pull.Poster, NewIssueOptions{
+	if err = newIssue(sess, issue.Poster, NewIssueOptions{
 		Repo:        repo,
-		Issue:       pull,
+		Issue:       issue,
 		LabelIDs:    labelIDs,
 		Attachments: uuids,
 		IsPull:      true,
@@ -465,10 +454,9 @@ func newPullRequestAttempt(repo *Repository, pull *Issue, labelIDs []int64, uuid
 		return fmt.Errorf("newIssue: %v", err)
 	}
 
-	pr.Index = pull.Index
+	pr.Index = issue.Index
 	pr.BaseRepo = repo
-
-	pr.IssueID = pull.ID
+	pr.IssueID = issue.ID
 	if _, err = sess.Insert(pr); err != nil {
 		return fmt.Errorf("insert pull repo: %v", err)
 	}

@@ -1510,6 +1510,11 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
 		return err
 	}
 
+	// Delete issue index
+	if err := deleteResouceIndex(sess, "issue_index", repoID); err != nil {
+		return err
+	}
+
 	if repo.IsFork {
 		if _, err := sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil {
 			return fmt.Errorf("decrease fork count: %v", err)

@@ -103,7 +103,7 @@ func CreateTestEngine(fixturesDir string) error {
 		return err
 	}
 	x.SetMapper(names.GonicMapper{})
-	if err = x.StoreEngine("InnoDB").Sync2(tables...); err != nil {
+	if err = syncTables(); err != nil {
 		return err
 	}
 	switch os.Getenv("GITEA_UNIT_TESTS_VERBOSE") {