# unit-test-common.yml — reusable unit-test workflow (invoked via workflow_call).
# (Web-page scraping artifacts — UI text and a dump of display line numbers —
# were removed from the top of this file; the workflow definition follows.)
name: Unit Test Common
# Reusable workflow: a shared unit-test job that platform-specific
# workflows invoke via `workflow_call` with the inputs below.
on:
  workflow_call:
    inputs:
      name:
        description: a descriptive job name
        required: true
        type: string
      runs-on:
        description: the runner to use for the job
        required: true
        type: string
      timeout:
        description: the timeout for the job
        # Fix: this input previously declared `required: true` together with
        # `default: 20`. GitHub only applies an input default when the caller
        # omits the input, and a required input can never be omitted, so the
        # default was dead configuration. Making the input optional activates
        # the default and remains backward-compatible with every caller.
        required: false
        default: 20
        type: number
      should_skip:
        description: whether to skip the job
        required: false
        default: false
        type: boolean
      xcode:
        description: the Xcode version to use for the job
        required: true
        type: string
      test-destination-os:
        description: the test destination OS to use for the job
        required: true
        type: string
      platform:
        description: the platform to test on
        required: true
        type: string
      device:
        description: the device to test on
        required: false
        default: ""
        type: string
      scheme:
        description: the scheme to test
        required: true
        type: string
      run_on_cirrus_labs:
        description: "Whether to run the tests on Cirrus Labs"
        required: false
        default: false
        type: boolean
jobs:
  unit-tests:
    name: Unit ${{inputs.name}}
    # When run_on_cirrus_labs is set, build a Cirrus Labs runner spec (image
    # plus a run-scoped concurrency group) from the requested runner image;
    # otherwise use the provided runner label directly.
    runs-on: ${{ inputs.run_on_cirrus_labs && fromJSON(format('["ghcr.io/cirruslabs/macos-runner:{0},runner_concurrency_group={1}", "runner_group_id:10"]', inputs.runs-on, github.run_id)) || inputs.runs-on }}
    timeout-minutes: ${{inputs.timeout}}
    if: ${{!inputs.should_skip}}
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
      # Select the requested Xcode version before any build tooling runs.
      - run: ./scripts/ci-select-xcode.sh "$XCODE_VERSION"
        env:
          XCODE_VERSION: ${{inputs.xcode}}
      - name: Setup Ruby
        uses: ruby/setup-ruby@09a7688d3b55cf0e976497ff046b70949eeaccfd # v1.288.0
        with:
          bundler-cache: true
      # Slather is used later to gather code-coverage information.
      - name: Install Slather
        run: gem install slather
      - name: Ensure required runtime is loaded
        if: ${{ inputs.platform == 'iOS' || inputs.platform == 'tvOS' || inputs.platform == 'visionOS' }}
        # Ideally we will not need this, but CI sometimes is failing to load some runtimes, this will ensure they are loaded
        timeout-minutes: 5 # 5 minutes timeout
        env:
          OS_VERSION: ${{ inputs.test-destination-os }}
          PLATFORM: ${{ inputs.platform }}
        run: ./scripts/ci-ensure-runtime-loaded.sh --os-version "$OS_VERSION" --platform "$PLATFORM"
      # Boot created simulators to ensure they're ready before tests run
      # Based on CircleCI forum comment, booting is especially important for Xcode 26: https://discuss.circleci.com/t/xcode-26-rc/54066/18
      - name: Boot simulator
        if: ${{ inputs.platform == 'iOS' || inputs.platform == 'visionOS' }}
        env:
          XCODE_VERSION: ${{ inputs.xcode }}
          DEVICE_NAME: ${{ inputs.device }}
          OS_VERSION: ${{ inputs.test-destination-os }}
          PLATFORM: ${{ inputs.platform }}
        run: ./scripts/ci-boot-simulator.sh --xcode "$XCODE_VERSION" --device "$DEVICE_NAME" --os-version "$OS_VERSION" --platform "$PLATFORM"
      # We split building and running tests in two steps so we know how long running the tests takes.
      - name: Build Tests
        id: build_tests
        env:
          PLATFORM: ${{ inputs.platform }}
          OS_VERSION: ${{ inputs.test-destination-os }}
          REF_NAME: ${{ github.ref_name }}
          DEVICE_NAME: ${{ inputs.device }}
          SCHEME: ${{ inputs.scheme }}
        run: |
          ./scripts/sentry-xcodebuild.sh \
            --platform "$PLATFORM" \
            --os "$OS_VERSION" \
            --ref "$REF_NAME" \
            --command build-for-testing \
            --device "$DEVICE_NAME" \
            --configuration TestCI \
            --scheme "$SCHEME"
      # Run Flaky Tests TestPlan which has a retry mechanism on failure.
      # We intentionally run these before the other test plan to fail early.
      # Use a separate result bundle name to avoid conflicts with the regular test run.
      # xcodebuild fails if a result bundle already exists at the target path.
      - name: Run Flaky Tests
        id: run_flaky_tests
        # Only the Sentry Scheme has the Flaky TestPlan.
        if: ${{ inputs.scheme == 'Sentry' }}
        env:
          PLATFORM: ${{ inputs.platform }}
          OS_VERSION: ${{ inputs.test-destination-os }}
          REF_NAME: ${{ github.ref_name }}
          DEVICE_NAME: ${{ inputs.device }}
          SCHEME: ${{ inputs.scheme }}
        run: |
          ./scripts/sentry-xcodebuild.sh \
            --platform "$PLATFORM" \
            --os "$OS_VERSION" \
            --ref "$REF_NAME" \
            --command test-without-building \
            --device "$DEVICE_NAME" \
            --configuration TestCI \
            --scheme "$SCHEME" \
            --test-plan Sentry_Flaky \
            --result-bundle flaky-results.xcresult
      - name: Run tests
        id: run_tests
        # We call a script with the platform so the destination
        # passed to xcodebuild doesn't end up in the job name,
        # because GitHub Actions don't provide an easy way of
        # manipulating string in expressions.
        env:
          PLATFORM: ${{ inputs.platform }}
          OS_VERSION: ${{ inputs.test-destination-os }}
          REF_NAME: ${{ github.ref_name }}
          DEVICE_NAME: ${{ inputs.device }}
          SCHEME: ${{ inputs.scheme }}
        run: |
          ./scripts/sentry-xcodebuild.sh \
            --platform "$PLATFORM" \
            --os "$OS_VERSION" \
            --ref "$REF_NAME" \
            --command test-without-building \
            --device "$DEVICE_NAME" \
            --configuration TestCI \
            --scheme "$SCHEME" \
            --result-bundle results.xcresult
- name: Publish Test Report
id: publish_test_report
uses: mikepenz/action-junit-report@5e05ac00ad0604dfb7e313ae412aa3284f4906d6 # v6.3.0
if: always()
with:
report_paths: "build/reports/junit.xml"
fail_on_failure: true
fail_on_parse_error: true
detailed_summary: true
# When a test crashes, xcbeautify may not report the failure in the junit.xml,
# so the test step fails but Publish Test Report finds no failures. In that case,
# extract failure info from the raw xcodebuild log.
- name: Analyze raw test output for crashes
if: ${{ always() && steps.publish_test_report.outcome == 'success' && (steps.run_tests.outcome == 'failure' || steps.run_flaky_tests.outcome == 'failure') }}
run: |
echo "::warning::Tests failed but no failures were found in the test report. This usually indicates a test crashed without proper reporting to xcbeautify."
echo ""
if [ -f raw-test-output.log ]; then
if grep -q "Failing tests:" raw-test-output.log; then
echo "=== Failing tests from raw output ==="
sed -n '/Failing tests:/,/^$/p' raw-test-output.log
fi
else
echo "raw-test-output.log not found."
fi
- name: Archiving DerivedData Logs
uses: actions/upload-artifact@v6
if: steps.build_tests.outcome == 'failure'
with:
name: derived-data-${{inputs.platform}}-xcode-${{inputs.xcode}}-os-${{inputs.test-destination-os}}
path: |
/Users/runner/Library/Developer/Xcode/DerivedData/**/Logs/**
- name: Archiving Raw Logs
uses: actions/upload-artifact@v6
if: ${{ failure() || cancelled() }}
with:
name: raw-output-${{inputs.platform}}-xcode-${{inputs.xcode}}-os-${{inputs.test-destination-os}}
path: |
raw-build-output.log
raw-build-for-testing-output.log
raw-test-output.log
- name: Archiving Crash Logs
uses: actions/upload-artifact@v6
if: ${{ failure() || cancelled() }}
with:
name: crash-logs-${{inputs.platform}}-xcode-${{inputs.xcode}}-os-${{inputs.test-destination-os}}
path: |
~/Library/Logs/DiagnosticReports/**
- name: Archiving Test Results
uses: actions/upload-artifact@v6
if: ${{ failure() || cancelled() }}
with:
name: result-bundle-${{inputs.platform}}-xcode-${{inputs.xcode}}-os-${{inputs.test-destination-os}}
path: |
results.xcresult
flaky-results.xcresult
- name: Gather code coverage information via slather
run: slather coverage --configuration TestCI --scheme Sentry
# We can upload all coverage reports, because codecov merges them.
# See https://docs.codecov.io/docs/merging-reports
# Checkout .codecov.yml to see the config of Codecov
# We don't upload codecov for release branches, as we don't want a failing coverage check to block a release.
# We don't upload codecov for scheduled runs as CodeCov only accepts a limited amount of uploads per commit.
- name: Push code coverage to codecov
id: codecov_1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # pin@v5.5.2
if: ${{ contains(inputs.platform, 'iOS') && !contains(github.ref, 'release') && github.event.schedule == '' }}
with:
# Although public repos should not have to specify a token there seems to be a bug with the Codecov GH action, which can
# be solved by specifying the token, see https://github.com/codecov/codecov-action/issues/557#issuecomment-1224970469
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
verbose: true
# Sometimes codecov uploads etc can fail. Retry one time to rule out e.g. intermittent network failures.
- name: Push code coverage to codecov
id: codecov_2
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # pin@v5.5.2
if: ${{ steps.codecov_1.outcome == 'failure' && contains(inputs.platform, 'iOS') && !contains(github.ref, 'release') && github.event.schedule == '' }}
with:
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: true
verbose: true
- name: Codecov test analytics
if: ${{ !cancelled() && !contains(github.ref, 'release') && github.event.schedule == '' }}
uses: codecov/test-results-action@0fa95f0e1eeaafde2c782583b36b28ad0d8c77d3 # pin@v1.2.1
with:
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
name: sentry-cocoa-unit-tests
flags: unittests-${{ inputs.platform }}-${{ inputs.xcode }}-${{ inputs.test-destination-os }}, unittests
- name: Run CI Diagnostics
if: failure()
run: ./scripts/ci-diagnostics.sh
- name: Store screenshot
uses: ./.github/actions/capture-screenshot
if: failure()
with:
suffix: unit-tests-${{ inputs.platform }}-xcode-${{ inputs.xcode }}-os-${{ inputs.test-destination-os }}