Commit 17a797a

Rearrange documentation for predicting function
1 parent 2e995a9 commit 17a797a

11 files changed: +151 additions, -151 deletions

README.md

Lines changed: 11 additions & 12 deletions
@@ -79,18 +79,6 @@ Ties should have either equivalent rank or score.
 [[[24.68943500312503, 8.179213704945203]], [[22.826045021875203, 8.179213704945203]], [[24.68943500312503, 8.179213704945203]], [[27.795084971874736, 8.263160757613477]]]
 ```
 
-
-## Choosing Models
-
-The default model is `PlackettLuce`. You can import alternate models from `openskill.models` like so:
-
-```python
->>> from openskill.models import BradleyTerryFull
->>> a1 = b1 = c1 = d1 = Rating()
->>> rate([[a1], [b1], [c1], [d1]], rank=[4, 1, 3, 2], model=BradleyTerryFull)
-[[[17.09430584957905, 7.5012190693964005]], [[32.90569415042095, 7.5012190693964005]], [[22.36476861652635, 7.5012190693964005]], [[27.63523138347365, 7.5012190693964005]]]
-```
-
 ## Predicting Winners
 
 You can compare two or more teams to get the probabilities of each team winning.
@@ -106,6 +94,17 @@ You can compare two or more teams to get the probabilities of each team winning.
 1.0
 ```
 
+## Choosing Models
+
+The default model is `PlackettLuce`. You can import alternate models from `openskill.models` like so:
+
+```python
+>>> from openskill.models import BradleyTerryFull
+>>> a1 = b1 = c1 = d1 = Rating()
+>>> rate([[a1], [b1], [c1], [d1]], rank=[4, 1, 3, 2], model=BradleyTerryFull)
+[[[17.09430584957905, 7.5012190693964005]], [[32.90569415042095, 7.5012190693964005]], [[22.36476861652635, 7.5012190693964005]], [[27.63523138347365, 7.5012190693964005]]]
+```
+
 ### Available Models
 - `BradleyTerryFull`: Full Pairing for Bradley-Terry
 - `BradleyTerryPart`: Partial Pairing for Bradley-Terry
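
For orientation, the `predict_win` helper documented by the relocated "Predicting Winners" section (its implementation appears in the `openskill/rate.py` hunk further down) can be exercised roughly like this. A minimal sketch, assuming `predict_win` is importable from the package root as the README implies, and using hypothetical ratings for the second team:

```python
>>> from openskill import Rating, predict_win
>>> team1 = [Rating()]                # library-default rating
>>> team2 = [Rating(mu=30, sigma=7)]  # hypothetical stronger opponent
>>> predictions = predict_win(teams=[team1, team2])
>>> sum(predictions)                  # per-team win probabilities sum to 1
1.0
```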

benchmark/benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -158,7 +158,7 @@ def predict_os_match(match: dict):
 
 def win_probability(team1, team2):
     delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
-    sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))
+    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
     size = len(team1) + len(team2)
     denom = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)
     ts = trueskill.global_env()
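
The hunk cuts off after `ts = trueskill.global_env()`, but the visible lines set up the standard TrueSkill win-probability formula, so the function presumably finishes by evaluating the normal CDF. A hedged reconstruction of the whole function under that assumption, not the benchmark's verbatim code:

```python
import itertools
import math

import trueskill


def win_probability(team1, team2):
    # Difference between the teams' summed skill means.
    delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)
    # Total variance contributed by every player on both teams.
    sum_sigma = sum(r.sigma**2 for r in itertools.chain(team1, team2))
    size = len(team1) + len(team2)
    denom = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)
    ts = trueskill.global_env()
    # Assumed final line; the diff view truncates before the return.
    return ts.cdf(delta_mu / denom)
```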

docs/manual.rst

Lines changed: 13 additions & 13 deletions
@@ -86,19 +86,6 @@ Ties should have either equivalent rank or score.
     >>> result
     [[[24.68943500312503, 8.179213704945203]], [[22.826045021875203, 8.179213704945203]], [[24.68943500312503, 8.179213704945203]], [[27.795084971874736, 8.263160757613477]]]
 
-Choosing Models
----------------
-
-The default model is ``PlackettLuce``. You can import alternate models
-from ``openskill.models`` like so:
-
-.. code:: python
-
-    >>> from openskill.models import BradleyTerryFull
-    >>> a1 = b1 = c1 = d1 = Rating()
-    >>> rate([[a1], [b1], [c1], [d1]], rank=[4, 1, 3, 2], model=BradleyTerryFull)
-    [[[17.09430584957905, 7.5012190693964005]], [[32.90569415042095, 7.5012190693964005]], [[22.36476861652635, 7.5012190693964005]], [[27.63523138347365, 7.5012190693964005]]]
-
 Predicting Winners
 ------------------
 
@@ -115,6 +102,19 @@ You can compare two or more teams to get the probabilities of each team winning.
     >>> sum(predictions)
     1.0
 
+Choosing Models
+---------------
+
+The default model is ``PlackettLuce``. You can import alternate models
+from ``openskill.models`` like so:
+
+.. code:: python
+
+    >>> from openskill.models import BradleyTerryFull
+    >>> a1 = b1 = c1 = d1 = Rating()
+    >>> rate([[a1], [b1], [c1], [d1]], rank=[4, 1, 3, 2], model=BradleyTerryFull)
+    [[[17.09430584957905, 7.5012190693964005]], [[32.90569415042095, 7.5012190693964005]], [[22.36476861652635, 7.5012190693964005]], [[27.63523138347365, 7.5012190693964005]]]
+
 Available Models
 ~~~~~~~~~~~~~~~~
 

openskill/models/bradley_terry_full.py

Lines changed: 2 additions & 2 deletions
@@ -46,9 +46,9 @@ def calculate(self):
             for j, j_players in enumerate(i_team):
                 mu = j_players.mu
                 sigma = j_players.sigma
-                mu += (sigma ** 2 / i_sigma_squared) * omega
+                mu += (sigma**2 / i_sigma_squared) * omega
                 sigma *= math.sqrt(
-                    max(1 - (sigma ** 2 / i_sigma_squared) * delta, self.EPSILON),
+                    max(1 - (sigma**2 / i_sigma_squared) * delta, self.EPSILON),
                 )
                 intermediate_result_per_team.append([mu, sigma])
             result.append(intermediate_result_per_team)
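
Although this commit only tightens the exponent spacing, the two touched lines are the per-player update that every model file in this diff shares. In the code's own symbols (a sketch: Ω and Δ are the team-level `omega` and `delta`, σᵢ² is the team's summed variance `i_sigma_squared`, and ε is `self.EPSILON`):

```latex
\mu_j \leftarrow \mu_j + \frac{\sigma_j^2}{\sigma_i^2}\,\Omega_i
\qquad
\sigma_j \leftarrow \sigma_j \sqrt{\max\left(1 - \frac{\sigma_j^2}{\sigma_i^2}\,\Delta_i,\ \epsilon\right)}
```

The `max(..., self.EPSILON)` clamp keeps the factor under the square root positive, so a large Δ can never drive a player's σ to zero or below.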

openskill/models/bradley_terry_part.py

Lines changed: 2 additions & 2 deletions
@@ -46,9 +46,9 @@ def od_reduce(od, q_team_ratings):
             for j, j_players in enumerate(i_team):
                 mu = j_players.mu
                 sigma = j_players.sigma
-                mu += (sigma ** 2 / i_sigma_squared) * i_omega
+                mu += (sigma**2 / i_sigma_squared) * i_omega
                 sigma *= math.sqrt(
-                    max(1 - (sigma ** 2 / i_sigma_squared) * i_delta, self.EPSILON),
+                    max(1 - (sigma**2 / i_sigma_squared) * i_delta, self.EPSILON),
                 )
                 intermediate_result_per_team.append([mu, sigma])
             return intermediate_result_per_team

openskill/models/plackett_luce.py

Lines changed: 3 additions & 3 deletions
@@ -38,7 +38,7 @@ def calculate(self):
                 omega -= i_mu_over_ce_over_sum_q / self.a[q]
 
             omega *= i_sigma_squared / self.c
-            delta *= i_sigma_squared / self.c ** 2
+            delta *= i_sigma_squared / self.c**2
 
             gamma = self.gamma(self.c, len(self.team_ratings), *i_team_ratings)
             delta *= gamma
@@ -47,9 +47,9 @@ def calculate(self):
             for j, j_players in enumerate(i_team):
                 mu = j_players.mu
                 sigma = j_players.sigma
-                mu += (sigma ** 2 / i_sigma_squared) * omega
+                mu += (sigma**2 / i_sigma_squared) * omega
                 sigma *= math.sqrt(
-                    max(1 - (sigma ** 2 / i_sigma_squared) * delta, self.EPSILON),
+                    max(1 - (sigma**2 / i_sigma_squared) * delta, self.EPSILON),
                 )
                 intermediate_result_per_team.append([mu, sigma])
             result.append(intermediate_result_per_team)
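
The first Plackett-Luce hunk touches the team-level normalization rather than the per-player update. Reading the scaling directly off the surrounding lines (with c the model's normalizing constant and γ the damping factor returned by `self.gamma`):

```latex
\Omega_i \leftarrow \frac{\sigma_i^2}{c}\,\Omega_i,
\qquad
\Delta_i \leftarrow \gamma_i \cdot \frac{\sigma_i^2}{c^2}\,\Delta_i
```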

openskill/models/thurstone_mosteller_full.py

Lines changed: 2 additions & 2 deletions
@@ -63,9 +63,9 @@ def calculate(self):
             for j, j_players in enumerate(i_team):
                 mu = j_players.mu
                 sigma = j_players.sigma
-                mu += (sigma ** 2 / i_sigma_squared) * omega
+                mu += (sigma**2 / i_sigma_squared) * omega
                 sigma *= math.sqrt(
-                    max(1 - (sigma ** 2 / i_sigma_squared) * delta, self.EPSILON),
+                    max(1 - (sigma**2 / i_sigma_squared) * delta, self.EPSILON),
                 )
                 intermediate_result_per_team.append([mu, sigma])
             result.append(intermediate_result_per_team)

openskill/models/thurstone_mosteller_part.py

Lines changed: 2 additions & 2 deletions
@@ -62,9 +62,9 @@ def od_reduce(od, q_team_ratings):
             for j, j_players in enumerate(i_team):
                 mu = j_players.mu
                 sigma = j_players.sigma
-                mu += (sigma ** 2) / i_sigma_squared * i_omega
+                mu += (sigma**2) / i_sigma_squared * i_omega
                 sigma *= math.sqrt(
-                    max(1 - (sigma ** 2) / i_sigma_squared * i_delta, self.EPSILON)
+                    max(1 - (sigma**2) / i_sigma_squared * i_delta, self.EPSILON)
                 )
                 intermediate_result_per_team.append([mu, sigma])
             return intermediate_result_per_team

openskill/rate.py

Lines changed: 2 additions & 2 deletions
@@ -66,7 +66,7 @@ def team_rating(game: List[List[Rating]], **options) -> List[List[Union[int, flo
     for index, team in enumerate(game):
         team_result = []
         mu_i = reduce(lambda x, y: x + y, map(lambda p: p.mu, team))
-        sigma_squared = reduce(lambda x, y: x + y, map(lambda p: p.sigma ** 2, team))
+        sigma_squared = reduce(lambda x, y: x + y, map(lambda p: p.sigma**2, team))
         team_result.extend([mu_i, sigma_squared, team, rank[index]])
         result.append(team_result)
     return result
@@ -136,7 +136,7 @@ def predict_win(teams: List[List[Rating]], **options) -> List[Union[int, float]]
             pairwise_probabilities.append(
                 phi_major(
                     (mu_a - mu_b)
-                    / math.sqrt(n * beta(**options) ** 2 + sigma_a ** 2 + sigma_b ** 2)
+                    / math.sqrt(n * beta(**options) ** 2 + sigma_a**2 + sigma_b**2)
                 )
             )
 
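
Written out, the pairwise term that the reformatted `predict_win` line computes is (with Φ the standard normal CDF wrapped by `phi_major`, and assuming `n` and `beta(**options)` are the team count and the β parameter from the surrounding function):

```latex
P(a \succ b) = \Phi\left(\frac{\mu_a - \mu_b}{\sqrt{n\,\beta^2 + \sigma_a^2 + \sigma_b^2}}\right)
```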
