struct Normalized{V <: SemiMetric} <: SemiMetric
    dist::V
    max_dist::Float64
end
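# `Normalized{V}` wraps a distance so that its value lies in [0, 1]: 0 means
# equal, 1 means completely different, and any value strictly above `max_dist`
# is reported as 1.0. A minimal usage sketch (hypothetical REPL calls; values
# follow from the definitions below and are not executed in this file):
#
#   d = normalize(Hamming(); max_dist = 0.5)
#   d("abc", "abd")   # 1 mismatch / 3 characters ≈ 0.33
#   d("abc", "xyz")   # 3/3 = 1.0 > max_dist, reported as 1.0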
function (dist::Normalized{<:Hamming})(s1, s2)
    ((s1 === missing) | (s2 === missing)) && return missing
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    len2 == 0 && return 1.0
    out = dist.dist(s1, s2) / len2
    out > dist.max_dist ? 1.0 : out
end
function (dist::Normalized{<:Union{Levenshtein{Nothing}, DamerauLevenshtein{Nothing}}})(s1, s2)
    ((s1 === missing) | (s2 === missing)) && return missing
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    len2 == 0 && return 1.0
    # rebuild the distance with an integer bound so the algorithm can stop early:
    # any raw distance above ceil(len2 * max_dist) normalizes above max_dist anyway
    if dist.dist isa Levenshtein
        d = Levenshtein(ceil(Int, len2 * dist.max_dist))(s1, s2)
    else
        d = DamerauLevenshtein(ceil(Int, len2 * dist.max_dist))(s1, s2)
    end
    out = d / len2
    out > dist.max_dist ? 1.0 : out
end
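# Illustrative call (assumed values, shown only to make the clamping concrete):
#
#   normalize(Levenshtein(); max_dist = 0.2)("hello", "help!")
#   # raw distance 2, and 2/5 = 0.4 > 0.2, so the result is 1.0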
function (dist::Normalized{<:QGramDistance})(s1, s2)
    ((s1 === missing) | (s2 === missing)) && return missing
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    # strings shorter than q have no q-grams: return 0.0 if s1 == s2, 1.0 otherwise
    len1 <= dist.dist.q - 1 && return convert(Float64, s1 != s2)
    if dist.dist isa QGram
        out = dist.dist(s1, s2) / (len1 + len2 - 2 * dist.dist.q + 2)
    else
        out = dist.dist(s1, s2)
    end
    out > dist.max_dist ? 1.0 : out
end
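# Why the divisor `len1 + len2 - 2q + 2`: a string of length n has n - q + 1
# q-grams, so the QGram distance (non-shared q-grams counted on both sides) is
# at most (len1 - q + 1) + (len2 - q + 1). Worked example (assumed, not run):
#
#   normalize(QGram(2))("abc", "xyz")
#   # bigrams {ab, bc} vs {xy, yz} share nothing: distance 4, divisor 4, result 1.0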
function (dist::Normalized)(s1, s2)
    out = dist.dist(s1, s2)
    out > dist.max_dist ? 1.0 : out
end
normalize(dist::SemiMetric; max_dist = 1.0) = Normalized{typeof(dist)}(dist, max_dist)
# Jaro and JaroWinkler already return values in [0, 1]
normalize(dist::Union{Jaro, JaroWinkler}; max_dist = 1.0) = dist
normalize(dist::Partial; max_dist = 1.0) = Partial(normalize(dist.dist; max_dist = max_dist))
normalize(dist::TokenSort; max_dist = 1.0) = TokenSort(normalize(dist.dist; max_dist = max_dist))
normalize(dist::TokenSet; max_dist = 1.0) = TokenSet(normalize(dist.dist; max_dist = max_dist))
normalize(dist::Normalized; max_dist = 1.0) = Normalized{typeof(dist.dist)}(dist.dist, max_dist)
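# Hedged usage sketch for `normalize` (values follow from the methods above):
#
#   normalize(Levenshtein())("ab", "de")                       # 2/2 = 1.0
#   normalize(Levenshtein(); max_dist = 0.4)("abcd", "abcx")   # 1/4 = 0.25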
"""
|
2020-02-08 18:00:44 +01:00
|
|
|
TokenMax(dist)
|
2019-08-18 01:45:31 +02:00
|
|
|
|
2019-12-13 16:33:06 +01:00
|
|
|
Creates the `TokenMax{dist}` distance
|
|
|
|
|
2020-07-20 17:46:42 +02:00
|
|
|
`TokenMax{dist}` normalizes the distance `dist` and returns the minimum of the distance,
|
2019-12-13 16:33:06 +01:00
|
|
|
its [`Partial`](@ref) modifier, its [`TokenSort`](@ref) modifier, and its
|
|
|
|
[`TokenSet`](@ref) modifier, with penalty terms depending on string lengths.
|
|
|
|
|
|
|
|
### Examples
|
|
|
|
```julia-repl
|
|
|
|
julia> s1 = "New York Mets vs Atlanta"
|
|
|
|
julia> s2 = "Atlanta Braves vs New York Mets"
|
2020-02-09 19:41:47 +01:00
|
|
|
julia> evaluate(TokenMax(RatcliffObershelp()), s1, s2)
|
|
|
|
0.05
|
2019-12-13 16:33:06 +01:00
|
|
|
```
|
2019-08-18 01:45:31 +02:00
|
|
|
"""
struct TokenMax{S <: SemiMetric} <: SemiMetric
    dist::S
    TokenMax{S}(dist::S) where {S <: SemiMetric} = new(dist)
end
TokenMax(dist::SemiMetric) = TokenMax{typeof(normalize(dist))}(normalize(dist))

function normalize(dist::TokenMax; max_dist = 1.0)
    dist = normalize(dist.dist; max_dist = max_dist)
    TokenMax{typeof(dist)}(dist)
end
function (dist::TokenMax)(s1::AbstractString, s2::AbstractString)
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    _dist = deepcopy(dist.dist)
    max_dist = _dist.max_dist
    score = _dist(s1, s2)
    unbase_scale = 0.95
    # if one string is much shorter than the other, use partial
    if len2 >= 1.5 * len1
        partial_scale = len2 > 8 * len1 ? 0.6 : 0.9
        # loosen the inner cutoff so that, after the 1 - scale * (1 - x)
        # rescaling below, it still corresponds to max_dist
        _dist = Normalized(_dist.dist, 1 - (1 - max_dist) / partial_scale)
        score_partial = 1 - partial_scale * (1 - Partial(_dist)(s1, s2))
        _dist = Normalized(_dist.dist, 1 - (1 - max_dist) / (unbase_scale * partial_scale))
        score_sort = 1 - unbase_scale * partial_scale * (1 - TokenSort(Partial(_dist))(s1, s2))
        # tighten max_dist with the best score so far so later passes can stop early
        max_dist = min(max_dist, score_sort)
        _dist = Normalized(_dist.dist, 1 - (1 - max_dist) / (unbase_scale * partial_scale))
        score_set = 1 - unbase_scale * partial_scale * (1 - TokenSet(Partial(_dist))(s1, s2))
        out = min(score, score_partial, score_sort, score_set)
    else
        _dist = Normalized(_dist.dist, 1 - (1 - max_dist) / unbase_scale)
        score_sort = 1 - unbase_scale * (1 - TokenSort(_dist)(s1, s2))
        max_dist = min(max_dist, score_sort)
        _dist = Normalized(_dist.dist, 1 - (1 - max_dist) / unbase_scale)
        score_set = 1 - unbase_scale * (1 - TokenSet(_dist)(s1, s2))
        out = min(score, score_sort, score_set)
    end
    out > max_dist ? 1.0 : out
end
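# Minimal usage sketch, matching the docstring example above (call style only;
# the 0.05 value comes from that docstring):
#
#   TokenMax(RatcliffObershelp())("New York Mets vs Atlanta",
#                                 "Atlanta Braves vs New York Mets")  # ≈ 0.05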