StringDistances.jl/src/normalize.jl

258 lines
8.6 KiB
Julia
Raw Permalink Normal View History

# Wrapper type returned by `normalize(dist)`: marks the wrapped distance as
# producing a Float64 in [0, 1] (or `missing`) and as accepting a third
# positional argument `max_dist`.
struct Normalize{S <: SemiMetric} <: SemiMetric
    dist::S
end
2020-02-09 19:42:29 +01:00
"""
normalize(dist::SemiMetric)
2020-02-09 19:42:29 +01:00
Normalize a metric, so that `evaluate` always return a Float64 between 0 and 1 (or a `missing` if one element is missing)
"""
2020-02-13 15:44:27 +01:00
# also a normalized distance always accept a third argument, max_dist.
2019-08-20 19:21:31 +02:00
2020-02-13 15:44:27 +01:00
normalize(dist::SemiMetric) = Normalize{typeof(dist)}(dist)
2020-02-12 15:41:46 +01:00
# Normalized edit distance: raw edit distance divided by the length of the
# longer string. Returns `missing` if either argument is missing, and 1.0
# whenever the normalized value exceeds `max_dist`.
function (dist::Normalize{<: Union{Levenshtein, DamerauLevenshtein}})(s1, s2, max_dist = 1.0)
    ((s1 === missing) | (s2 === missing)) && return missing
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    # After reorder, len1 <= len2, so len2 == 0 means BOTH strings are empty:
    # they are equal and the normalized distance is 0.0. (Previously this
    # returned 1.0, i.e. "maximally different", contradicting the QGramDistance
    # method below, which returns 0.0 for equal strings.) The guard also
    # avoids the 0/0 division below.
    len2 == 0 && return 0.0
    # Forward an absolute cutoff so the edit distance can stop early.
    d = dist.dist(s1, s2, ceil(Int, len2 * max_dist))
    out = d / len2
    out > max_dist ? 1.0 : out
end
2018-05-16 00:39:50 +02:00
2020-02-12 15:41:46 +01:00
function (dist::Normalize{<: QGramDistance})(s1, s2, max_dist = 1.0)
    # Propagate missing values.
    (s1 === missing || s2 === missing) && return missing
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    # When the shorter string has fewer than q characters no q-gram exists,
    # so fall back to exact (in)equality: 0.0 if equal, 1.0 otherwise.
    if len1 <= dist.dist.q - 1
        return convert(Float64, s1 != s2)
    end
    if dist.dist isa QGram
        # QGram counts differing q-grams; divide by the total number of
        # q-grams in both strings to map the result into [0, 1].
        return dist.dist(s1, s2) / (len1 + len2 - 2 * dist.dist.q + 2)
    else
        # The remaining q-gram distances are already normalized.
        return dist.dist(s1, s2)
    end
end
2019-08-17 18:26:24 +02:00
2020-02-13 15:44:27 +01:00
# Fallback: the wrapped distance is assumed to already return a value in
# [0, 1]; `max_dist` is accepted for interface uniformity but not used.
(dist::Normalize)(s1, s2, max_dist = 1.0) = dist.dist(s1, s2)
2019-08-18 01:45:31 +02:00
"""
2020-02-08 18:00:44 +01:00
Winkler(dist; p::Real = 0.1, threshold::Real = 0.7, maxlength::Integer = 4)
2018-05-17 17:38:55 +02:00
2020-02-26 01:40:14 +01:00
Creates the `Winkler{dist, p, threshold, maxlength}` distance.
2019-12-13 16:33:06 +01:00
2020-02-09 19:41:47 +01:00
`Winkler{dist, p, threshold, length)` modifies the string distance `dist` to decrease the
distance between two strings, when their original distance is below some `threshold`.
The boost is equal to `min(l, maxlength) * p * dist` where `l` denotes the
length of their common prefix and `dist` denotes the original distance
2019-08-18 01:45:31 +02:00
"""
struct Winkler{S <: SemiMetric} <: SemiMetric
2019-12-13 16:33:06 +01:00
dist::S
p::Float64 # scaling factor. Default to 0.1
threshold::Float64 # boost threshold. Default to 0.7
maxlength::Integer # max length of common prefix. Default to 4
Winkler{S}(dist::S, p, threshold, maxlength) where {S <: SemiMetric} = new(dist, p, threshold, maxlength)
2019-12-13 16:33:06 +01:00
end
function Winkler(dist::SemiMetric; p = 0.1, threshold = 0.7, maxlength = 4)
2019-12-13 16:33:06 +01:00
p * maxlength <= 1 || throw("scaling factor times maxlength of common prefix must be lower than one")
Winkler{typeof(normalize(dist))}(normalize(dist), 0.1, 0.7, 4)
2019-08-19 19:54:38 +02:00
end
2020-02-13 15:44:27 +01:00
# Winkler wraps an already-normalized distance and the prefix boost only
# decreases the score (by at most p * maxlength * score <= score), so the
# result stays in [0, 1]: normalizing is a no-op.
normalize(dist::Winkler) = dist
2020-02-12 15:41:46 +01:00
function (dist::Winkler)(s1, s2, max_dist = 1.0)
    # max_dist cannot be forwarded to the wrapped distance: the prefix boost
    # below may pull a score back under the cutoff after the fact.
    out = dist.dist(s1, s2)
    # Boost only when the base distance is already below 1 - threshold.
    if out > 1 - dist.threshold
        return out
    end
    prefixlen = common_prefix(s1, s2)[1]
    return out - min(prefixlen, dist.maxlength) * dist.p * out
end
2019-12-13 00:55:41 +01:00
2019-08-18 01:45:31 +02:00
"""
2020-02-08 18:00:44 +01:00
Partial(dist)
2019-08-18 01:45:31 +02:00
2020-02-26 01:40:14 +01:00
Creates the `Partial{dist}` distance.
2019-12-13 16:33:06 +01:00
`Partial{dist}` modifies the string distance `dist` to return the
2020-02-09 19:41:47 +01:00
minimum distance between the shorter string and substrings of the longer string
2019-12-13 16:33:06 +01:00
### Examples
```julia-repl
julia> s1 = "New York Mets vs Atlanta Braves"
julia> s2 = "Atlanta Braves vs New York Mets"
2020-02-09 19:41:47 +01:00
julia> evaluate(Partial(RatcliffObershelp()), s1, s2)
0.5483870967741935
2019-12-13 16:33:06 +01:00
```
2019-08-18 01:45:31 +02:00
"""
struct Partial{S <: SemiMetric} <: SemiMetric
2019-12-13 16:33:06 +01:00
dist::S
Partial{S}(dist::S) where {S <: SemiMetric} = new(dist)
2018-05-17 17:38:55 +02:00
end
Partial(dist::SemiMetric) = Partial{typeof(normalize(dist))}(normalize(dist))
2020-02-13 15:44:27 +01:00
normalize(dist::Partial) = dist
2018-05-17 17:38:55 +02:00
2020-02-12 15:41:46 +01:00
function (dist::Partial)(s1, s2, max_dist = 1.0)
    # After reorder, s1 is the shorter string.
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    # Equal lengths: nothing to slide, defer to the wrapped distance.
    len1 == len2 && return dist.dist(s1, s2, max_dist)
    len1 == 0 && return 1.0
    best = 1.0
    # Slide a window of length len1 across s2 and keep the smallest distance,
    # tightening max_dist as better candidates are found.
    for window in qgrams(s2, len1)
        candidate = dist.dist(s1, window, max_dist)
        best = min(best, candidate)
        max_dist = min(best, max_dist)
    end
    return best
end
2020-02-13 15:44:27 +01:00
# Specialization for RatcliffObershelp: instead of scoring every length-len1
# substring of s2 (as the generic method does), only score the substrings
# aligned with the matching blocks of the two strings.
function (dist::Partial{Normalize{RatcliffObershelp}})(s1, s2, max_dist = 1.0)
    # After reorder, s1 is the shorter string.
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    len1 == len2 && return dist.dist(s1, s2)
    out = 1.0
    for r in matching_blocks(s1, s2)
        # r appears to be (start in s1, start in s2, block length) — presumed
        # from the indexing below; confirm against matching_blocks' definition.
        # Make sure the substring of s2 has length len1
        s2_start = r[2] - r[1] + 1
        s2_end = s2_start + len1 - 1
        # Clamp the window back inside s2 while preserving its length len1.
        if s2_start <= 0
            s2_end += 1 - s2_start
            s2_start += 1 - s2_start
        elseif s2_end > len2
            s2_start += len2 - s2_end
            s2_end += len2 - s2_end
        end
        # NOTE(review): _slice seems to take an exclusive 0-based start, hence
        # `s2_start - 1` — confirm against _slice's definition.
        curr = dist.dist(s1, _slice(s2, s2_start - 1, s2_end))
        out = min(out, curr)
    end
    return out
end
2019-08-18 01:45:31 +02:00
"""
2020-02-08 18:00:44 +01:00
TokenSort(dist)
2019-08-18 01:45:31 +02:00
2020-02-26 01:40:14 +01:00
Creates the `TokenSort{dist}` distance.
2019-12-13 16:33:06 +01:00
`TokenSort{dist}` modifies the string distance `dist` to adjust for differences
in word orders by reording words alphabetically.
### Examples
```julia-repl
julia> s1 = "New York Mets vs Atlanta Braves"
julia> s1 = "New York Mets vs Atlanta Braves"
julia> s2 = "Atlanta Braves vs New York Mets"
2020-02-09 19:41:47 +01:00
julia> evaluate(TokenSort(RatcliffObershelp()), s1, s2)
0.0
2019-12-13 16:33:06 +01:00
```
2019-08-18 01:45:31 +02:00
"""
struct TokenSort{S <: SemiMetric} <: SemiMetric
2020-02-08 17:49:53 +01:00
dist::S
TokenSort{S}(dist::S) where {S <: SemiMetric} = new(dist)
2018-05-17 17:38:55 +02:00
end
TokenSort(dist::SemiMetric) = TokenSort{typeof(normalize(dist))}(normalize(dist))
2020-02-13 15:44:27 +01:00
normalize(dist::TokenSort) = dist
2018-05-17 17:38:55 +02:00
2019-12-13 00:55:41 +01:00
# http://chairnerd.seatgeek.com/fuzzywuzzy-fuzzy-string-matching-in-python/
2020-02-12 15:41:46 +01:00
function (dist::TokenSort)(s1::AbstractString, s2::AbstractString, max_dist = 1.0)
    # Sort each string's words alphabetically before comparing, so that
    # word order does not affect the distance.
    sorted1 = join(sort!(split(s1)), " ")
    sorted2 = join(sort!(split(s2)), " ")
    return dist.dist(sorted1, sorted2, max_dist)
end
2019-12-13 00:55:41 +01:00
2019-08-18 01:45:31 +02:00
"""
2020-02-08 18:00:44 +01:00
TokenSet(dist)
2019-08-18 01:45:31 +02:00
2020-02-26 01:40:14 +01:00
Creates the `TokenSet{dist}` distance.
2019-12-13 16:33:06 +01:00
`TokenSet{dist}` modifies the string distance `dist` to adjust for differences
2019-12-18 16:17:08 +01:00
in word orders and word numbers by comparing the intersection of two strings with each string.
2019-12-13 16:33:06 +01:00
### Examples
```julia-repl
julia> s1 = "New York Mets vs Atlanta"
julia> s2 = "Atlanta Braves vs New York Mets"
2020-02-09 19:41:47 +01:00
julia> evaluate(TokenSet(RatcliffObershelp()), s1, s2)
0.0
2019-12-13 16:33:06 +01:00
```
2019-08-18 01:45:31 +02:00
"""
struct TokenSet{S <: SemiMetric} <: SemiMetric
2020-02-08 17:49:53 +01:00
dist::S
TokenSet{S}(dist::S) where {S <: SemiMetric} = new(dist)
2018-05-17 17:38:55 +02:00
end
TokenSet(dist::SemiMetric) = TokenSet{typeof(normalize(dist))}(normalize(dist))
2020-02-13 15:44:27 +01:00
normalize(dist::TokenSet) = dist
2018-05-17 17:38:55 +02:00
2019-12-13 00:55:41 +01:00
# http://chairnerd.seatgeek.com/fuzzywuzzy-fuzzy-string-matching-in-python/
2020-02-12 15:41:46 +01:00
function (dist::TokenSet)(s1::AbstractString, s2::AbstractString, max_dist = 1.0)
    # Deduplicated, alphabetically sorted word lists of each string.
    words1 = unique!(sort!(split(s1)))
    words2 = unique!(sort!(split(s2)))
    shared = intersect(words1, words2)
    joined0 = join(shared, " ")
    joined1 = join(words1, " ")
    joined2 = join(words2, " ")
    # No word in common: just compare the two sorted strings.
    isempty(joined0) && return dist.dist(joined1, joined2, max_dist)
    # Compare the intersection against each string, tightening max_dist as
    # smaller scores are found, and return the smallest of the three.
    score_01 = dist.dist(joined0, joined1, max_dist)
    max_dist = min(max_dist, score_01)
    score_02 = dist.dist(joined0, joined2, max_dist)
    max_dist = min(max_dist, score_02)
    score_12 = dist.dist(joined1, joined2, max_dist)
    return min(score_01, score_02, score_12)
end
2019-08-18 01:45:31 +02:00
"""
2020-02-08 18:00:44 +01:00
TokenMax(dist)
2019-08-18 01:45:31 +02:00
2019-12-13 16:33:06 +01:00
Creates the `TokenMax{dist}` distance
2020-02-09 19:41:47 +01:00
`TokenMax{dist}` is the minimum of the base distance `dist`,
2019-12-13 16:33:06 +01:00
its [`Partial`](@ref) modifier, its [`TokenSort`](@ref) modifier, and its
[`TokenSet`](@ref) modifier, with penalty terms depending on string lengths.
### Examples
```julia-repl
julia> s1 = "New York Mets vs Atlanta"
julia> s2 = "Atlanta Braves vs New York Mets"
2020-02-09 19:41:47 +01:00
julia> evaluate(TokenMax(RatcliffObershelp()), s1, s2)
0.05
2019-12-13 16:33:06 +01:00
```
2019-08-18 01:45:31 +02:00
"""
struct TokenMax{S <: SemiMetric} <: SemiMetric
2019-12-13 16:33:06 +01:00
dist::S
TokenMax{S}(dist::S) where {S <: SemiMetric} = new(dist)
2018-05-17 17:38:55 +02:00
end
TokenMax(dist::SemiMetric) = TokenMax{typeof(normalize(dist))}(normalize(dist))
2020-02-13 15:44:27 +01:00
normalize(dist::TokenMax) = dist
2020-02-12 15:41:46 +01:00
# Minimum over the base distance and its Partial / TokenSort / TokenSet
# modifiers, each rescaled by a penalty factor, fuzzywuzzy's WRatio strategy.
function (dist::TokenMax)(s1::AbstractString, s2::AbstractString, max_dist = 1.0)
    s1, s2 = reorder(s1, s2)
    len1, len2 = length(s1), length(s2)
    score = dist.dist(s1, s2, max_dist)
    # Bug fix: the old `min_score = min(max_dist, score)` assignments (here and
    # after score_partial) were dead — min_score was never read. The intent,
    # matching the tightening already done after score_sort below, is to shrink
    # max_dist so later candidates can bail out early once they cannot improve
    # on the best score found so far.
    max_dist = min(max_dist, score)
    unbase_scale = 0.95
    # if one string is much shorter than the other, use partial
    # (len1/len2 are reused instead of recomputing length(s1)/length(s2)).
    if len2 >= 1.5 * len1
        partial_dist = Partial(dist.dist)
        partial_scale = len2 > 8 * len1 ? 0.6 : 0.9
        # Each candidate score is rescaled as 1 - scale * (1 - raw); the
        # cutoff handed to the sub-distance is the corresponding preimage of
        # the current max_dist under that rescaling.
        score_partial = 1 - partial_scale *
            (1 - partial_dist(s1, s2, 1 - (1 - max_dist) / partial_scale))
        max_dist = min(max_dist, score_partial)
        score_sort = 1 - unbase_scale * partial_scale *
            (1 - TokenSort(partial_dist)(s1, s2, 1 - (1 - max_dist) / (unbase_scale * partial_scale)))
        max_dist = min(max_dist, score_sort)
        score_set = 1 - unbase_scale * partial_scale *
            (1 - TokenSet(partial_dist)(s1, s2, 1 - (1 - max_dist) / (unbase_scale * partial_scale)))
        return min(score, score_partial, score_sort, score_set)
    else
        score_sort = 1 - unbase_scale *
            (1 - TokenSort(dist.dist)(s1, s2, 1 - (1 - max_dist) / unbase_scale))
        max_dist = min(max_dist, score_sort)
        score_set = 1 - unbase_scale *
            (1 - TokenSet(dist.dist)(s1, s2, 1 - (1 - max_dist) / unbase_scale))
        return min(score, score_sort, score_set)
    end
end